diff --git a/third-party/nwaku/.dockerignore b/third-party/nwaku/.dockerignore new file mode 100644 index 0000000..247ac61 --- /dev/null +++ b/third-party/nwaku/.dockerignore @@ -0,0 +1,9 @@ +/README.md +/Dockerfile +/.*ignore +/LICENSE* +/tests +/metrics +/nimcache +librln* +**/vendor/* diff --git a/third-party/nwaku/.editorconfig b/third-party/nwaku/.editorconfig new file mode 100644 index 0000000..e7f569e --- /dev/null +++ b/third-party/nwaku/.editorconfig @@ -0,0 +1,18 @@ +root = true + +[*] +charset = utf-8 +indent_style = space +indent_size = 2 +end_of_line = lf +trim_trailing_whitespace = true +insert_final_newline = true + + +[{Makefile, *.sh}] +indent_style = tab + +# Trailing spaces in markdown indicate word wrap +[{*.markdown,*.md}] +trim_trailing_spaces = false +max_line_length = 80 diff --git a/third-party/nwaku/.github/ISSUE_TEMPLATE/bug_report.md b/third-party/nwaku/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..8e54bbf --- /dev/null +++ b/third-party/nwaku/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,34 @@ +--- +name: Bug report +about: Report any bugs or unexpected behavior +title: 'bug: ' +labels: bug, track:maintenance +assignees: '' + +--- + +### Problem +A clear and concise description of what the bug is. + +### Impact +Indicate how significant you believe the impact of the bug is. Bugs that lead to data loss or corruption would be considered `critical`. In such cases, please also add the `critical` label. + +### To reproduce +If you can reproduce the behavior, steps to reproduce: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +### Expected behavior +A clear and concise description of what you expected to happen. + +### Screenshots/logs +If applicable, add screenshots or logs to help explain your problem. + +### nwaku version/commit hash +State the version of `nwaku` where you've encountered the bug or, if built off a specific commit, the relevant commit hash. 
You can check the version by running `./wakunode2 --version`. +- e.g. `v0.9` or `ed53bcd` + +### Additional context +Add any other context about the problem here. diff --git a/third-party/nwaku/.github/ISSUE_TEMPLATE/bump_dependencies.md b/third-party/nwaku/.github/ISSUE_TEMPLATE/bump_dependencies.md new file mode 100644 index 0000000..0413cbf --- /dev/null +++ b/third-party/nwaku/.github/ISSUE_TEMPLATE/bump_dependencies.md @@ -0,0 +1,48 @@ +--- +name: Bump dependencies +about: Bump vendor dependencies for release +title: 'Bump vendor dependencies for release 0.0.0' +labels: dependencies +assignees: '' + +--- + + + +Update `nwaku` "vendor" dependencies. + +### Items to bump +- [ ] dnsclient.nim ( update to the latest tag version ) +- [ ] nim-bearssl +- [ ] nimbus-build-system +- [ ] nim-chronicles +- [ ] nim-chronos +- [ ] nim-confutils +- [ ] nimcrypto +- [ ] nim-dnsdisc +- [ ] nim-eth +- [ ] nim-faststreams +- [ ] nim-http-utils +- [ ] nim-json-rpc +- [ ] nim-json-serialization +- [ ] nim-libbacktrace +- [ ] nim-libp2p ( update to the latest tag version ) +- [ ] nim-metrics +- [ ] nim-nat-traversal +- [ ] nim-presto +- [ ] nim-regex ( update to the latest tag version ) +- [ ] nim-results +- [ ] nim-secp256k1 +- [ ] nim-serialization +- [ ] nim-sqlite3-abi ( update to the latest tag version ) +- [ ] nim-stew +- [ ] nim-stint +- [ ] nim-taskpools ( update to the latest tag version ) +- [ ] nim-testutils ( update to the latest tag version ) +- [ ] nim-toml-serialization +- [ ] nim-unicodedb +- [ ] nim-unittest2 ( update to the latest tag version ) +- [ ] nim-web3 ( update to the latest tag version ) +- [ ] nim-websock ( update to the latest tag version ) +- [ ] nim-zlib +- [ ] zerokit ( this should be kept in version `v0.7.0` ) diff --git a/third-party/nwaku/.github/ISSUE_TEMPLATE/feature_request.md b/third-party/nwaku/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..52e2164 --- /dev/null +++ 
b/third-party/nwaku/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,26 @@ +--- +name: Feature request +about: Suggest an idea for the nwaku implementation +title: 'feat: ' +labels: track:production +assignees: '' + +--- + +### Problem +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +### Suggested solution +A clear and concise description of what you want to happen. + +### Alternatives considered +A clear and concise description of any alternative solutions or features you've considered. + +### Additional context +Add any other context or screenshots about the feature request here. + +### Acceptance criteria +A list of tasks that need to be done for the issue to be considered resolved. + +### Epic +Epic title and link the feature refers to. diff --git a/third-party/nwaku/.github/ISSUE_TEMPLATE/improvement.md b/third-party/nwaku/.github/ISSUE_TEMPLATE/improvement.md new file mode 100644 index 0000000..5dee34f --- /dev/null +++ b/third-party/nwaku/.github/ISSUE_TEMPLATE/improvement.md @@ -0,0 +1,17 @@ +--- +name: Improvement +about: Suggest improvements to the codebase or processes. This includes refactoring, + docs and any other chores. +title: 'chore:' +labels: track:maintenance +assignees: '' + +--- +### Background +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]; There is a spelling error in [...]; It's difficult to read the code in module [...] + +### Details +A clear and concise description of what you want to happen. + +### Acceptance criteria +A list of tasks that need to be done for the issue to be considered resolved. 
diff --git a/third-party/nwaku/.github/ISSUE_TEMPLATE/milestone.md b/third-party/nwaku/.github/ISSUE_TEMPLATE/milestone.md new file mode 100644 index 0000000..d167ce1 --- /dev/null +++ b/third-party/nwaku/.github/ISSUE_TEMPLATE/milestone.md @@ -0,0 +1,41 @@ +--- +name: Milestone Issue Template +about: Track Milestones +title: "[Milestone] " +labels: milestone +assignees: '' + +--- + + + + +**Planned start date**: +**Due date**: + +# Summary + +# Acceptance Criteria + + + +## Tasks + + + +# RAID (Risks, Assumptions, Issues and Dependencies) + + + + + + + + diff --git a/third-party/nwaku/.github/ISSUE_TEMPLATE/prepare_release.md b/third-party/nwaku/.github/ISSUE_TEMPLATE/prepare_release.md new file mode 100644 index 0000000..9553d56 --- /dev/null +++ b/third-party/nwaku/.github/ISSUE_TEMPLATE/prepare_release.md @@ -0,0 +1,72 @@ +--- +name: Prepare release +about: Execute tasks for the creation and publishing of a new release +title: 'Prepare release 0.0.0' +labels: release +assignees: '' + +--- + + + +### Items to complete + +All items below are to be completed by the owner of the given release. + +- [ ] Create release branch +- [ ] Assign release candidate tag to the release branch HEAD. e.g. v0.30.0-rc.0 +- [ ] Generate and edit releases notes in CHANGELOG.md +- [ ] Review possible update of [config-options](https://github.com/waku-org/docs.waku.org/blob/develop/docs/guides/nwaku/config-options.md) +- [ ] _End user impact_: Summarize impact of changes on Status end users (can be a comment in this issue). +- [ ] **Validate release candidate** + - [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/waku-org/waku-rust-bindings) and make sure all examples and tests work + +- [ ] Automated testing + - [ ] Ensures js-waku tests are green against release candidate + - [ ] Ask Vac-QA and Vac-DST to perform available tests against release candidate + - [ ] Vac-QA + - [ ] Vac-DST (we need additional report. 
see [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f)) + + - [ ] **On Waku fleets** + - [ ] Lock `waku.test` fleet to release candidate version + - [ ] Continuously stress `waku.test` fleet for a week (e.g. from `wakudev`) + - [ ] Search _Kibana_ logs from the previous month (since last release was deployed), for possible crashes or errors in `waku.test` and `waku.sandbox`. + - Most relevant logs are `(fleet: "waku.test" OR fleet: "waku.sandbox") AND message: "SIGSEGV"` + - [ ] Run release candidate with `waku-simulator`, ensure that nodes connected to each other + - [ ] Unlock `waku.test` to resume auto-deployment of latest `master` commit + + - [ ] **On Status fleet** + - [ ] Deploy release candidate to `status.staging` + - [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue. + - [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client. + - [ ] 1:1 Chats with each other + - [ ] Send and receive messages in a community + - [ ] Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store + - [ ] Perform checks based _end user impact_ + - [ ] Inform other (Waku and Status) CCs to point their instance to `status.staging` for a few days. Ping Status colleagues from their Discord server or [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (not blocking point.) 
+ - [ ] Ask Status-QA to perform sanity checks (as described above) + checks based on _end user impact_; do specify the version being tested + - [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging` + - [ ] Get other CCs sign-off: they comment on this PR "used app for a week, no problem", or problem reported, resolved and new RC + - [ ] **Get Status-QA sign-off**. Ensuring that `status.test` update will not disturb ongoing activities. + +- [ ] **Proceed with release** + + - [ ] Assign a release tag to the same commit that contains the validated release-candidate tag + - [ ] Create GitHub release + - [ ] Deploy the release to DockerHub + - [ ] Announce the release + +- [ ] **Promote release to fleets**. + - [ ] Update infra config with any deprecated arguments or changed options + - [ ] [Deploy final release to `waku.sandbox` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox) + - [ ] [Deploy final release to `status.staging` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-staging/) + - [ ] [Deploy final release to `status.prod` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-test/) + +- [ ] **Post release** + - [ ] Submit a PR from the release branch to master. Important to commit the PR with "create a merge commit" option. + - [ ] Update waku-org/nwaku-compose with the new release version. + - [ ] Update version in js-waku repo. [update only this](https://github.com/waku-org/js-waku/blob/7c0ce7b2eca31cab837da0251e1e4255151be2f7/.github/workflows/ci.yml#L135) by submitting a PR. 
diff --git a/third-party/nwaku/.github/ISSUE_TEMPLATE/research-related-issue.md b/third-party/nwaku/.github/ISSUE_TEMPLATE/research-related-issue.md new file mode 100644 index 0000000..b90b7d8 --- /dev/null +++ b/third-party/nwaku/.github/ISSUE_TEMPLATE/research-related-issue.md @@ -0,0 +1,19 @@ +--- +name: Research-related issue +about: Use this template if your issue is related to any Vac research tracks +title: 'research:' +labels: '' +assignees: '' + +--- + +### Problem + +### Acceptance criteria + +### Details + +### Possible Solutions + +### Research track +Indicate the Vac research track that this issue relates to. Please also add the relevant track as a label. diff --git a/third-party/nwaku/.github/pull_request_template.md b/third-party/nwaku/.github/pull_request_template.md new file mode 100644 index 0000000..b5aba5c --- /dev/null +++ b/third-party/nwaku/.github/pull_request_template.md @@ -0,0 +1,8 @@ + +## Description + +## Changes + +## Issue + +closes # diff --git a/third-party/nwaku/.github/workflows/auto_assign_pr.yml b/third-party/nwaku/.github/workflows/auto_assign_pr.yml new file mode 100644 index 0000000..39847b0 --- /dev/null +++ b/third-party/nwaku/.github/workflows/auto_assign_pr.yml @@ -0,0 +1,12 @@ +name: Auto Assign PR to Creator + +on: + pull_request: + types: + - opened + +jobs: + assign_creator: + runs-on: ubuntu-22.04 + steps: + - uses: toshimaru/auto-author-assign@v1.6.2 \ No newline at end of file diff --git a/third-party/nwaku/.github/workflows/ci.yml b/third-party/nwaku/.github/workflows/ci.yml new file mode 100644 index 0000000..5b32193 --- /dev/null +++ b/third-party/nwaku/.github/workflows/ci.yml @@ -0,0 +1,191 @@ +name: ci + +on: + pull_request: + push: + branches: + - master + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +env: + NPROC: 2 + MAKEFLAGS: "-j${NPROC}" + NIMFLAGS: "--parallelBuild:${NPROC} --colors:off -d:chronicles_colors:none" + +jobs: + changes: 
# changes detection + runs-on: ubuntu-22.04 + permissions: + pull-requests: read + steps: + - uses: actions/checkout@v4 + name: Checkout code + id: checkout + - uses: dorny/paths-filter@v2 + id: filter + with: + filters: | + common: + - '.github/workflows/**' + - 'vendor/**' + - 'Makefile' + - 'waku.nimble' + - 'library/**' + v2: + - 'waku/**' + - 'apps/**' + - 'tools/**' + - 'tests/all_tests_v2.nim' + - 'tests/**' + docker: + - 'docker/**' + + outputs: + common: ${{ steps.filter.outputs.common }} + v2: ${{ steps.filter.outputs.v2 }} + docker: ${{ steps.filter.outputs.docker }} + + build: + needs: changes + if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-22.04, macos-13] + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + + name: build-${{ matrix.os }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get submodules hash + id: submodules + run: | + echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT + + - name: Cache submodules + uses: actions/cache@v3 + with: + path: | + vendor/ + .git/modules + key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} + + - name: Build binaries + run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools + + build-windows: + needs: changes + if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }} + uses: ./.github/workflows/windows-build.yml + with: + branch: ${{ github.ref }} + + test: + needs: changes + if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-22.04, macos-13] + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + + name: test-${{ matrix.os }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get submodules hash + id: submodules + run: | + echo "hash=$(git submodule status | awk '{print $1}' 
| sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT + + - name: Cache submodules + uses: actions/cache@v3 + with: + path: | + vendor/ + .git/modules + key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} + + - name: Run tests + run: | + postgres_enabled=0 + if [ ${{ runner.os }} == "Linux" ]; then + sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18 + postgres_enabled=1 + fi + + export MAKEFLAGS="-j1" + export NIMFLAGS="--colors:off -d:chronicles_colors:none" + export USE_LIBBACKTRACE=0 + + make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test + make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled testwakunode2 + + build-docker-image: + needs: changes + if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }} + uses: waku-org/nwaku/.github/workflows/container-image.yml@master + secrets: inherit + + nwaku-nwaku-interop-tests: + needs: build-docker-image + uses: waku-org/waku-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1 + with: + node_nwaku: ${{ needs.build-docker-image.outputs.image }} + + secrets: inherit + + js-waku-node: + needs: build-docker-image + uses: waku-org/js-waku/.github/workflows/test-node.yml@master + with: + nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} + test_type: node + + js-waku-node-optional: + needs: build-docker-image + uses: waku-org/js-waku/.github/workflows/test-node.yml@master + with: + nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} + test_type: node-optional + + lint: + name: "Lint" + runs-on: ubuntu-22.04 + needs: build + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Get submodules hash + id: submodules + run: | + echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT + + - name: Cache submodules + 
uses: actions/cache@v3 + with: + path: | + vendor/ + .git/modules + key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} + + - name: Build nph + run: | + make build-nph + + - name: Check nph formatting + run: | + shopt -s extglob # Enable extended globbing + NPH=$(make print-nph-path) + echo "using nph at ${NPH}" + "${NPH}" examples waku tests tools apps *.@(nim|nims|nimble) + git diff --exit-code diff --git a/third-party/nwaku/.github/workflows/container-image.yml b/third-party/nwaku/.github/workflows/container-image.yml new file mode 100644 index 0000000..cfa66d2 --- /dev/null +++ b/third-party/nwaku/.github/workflows/container-image.yml @@ -0,0 +1,99 @@ +name: container-image-build + +on: + workflow_call: + inputs: + image_tag: + type: string + default: ${{ github.event.number }} + outputs: + image: + description: The resulting image link + value: ${{ jobs.build-docker-image.outputs.image }} + +env: + NPROC: 2 + MAKEFLAGS: "-j${NPROC}" + NIMFLAGS: "--parallelBuild:${NPROC}" + +# This workflow should not run for outside contributors +# If org secrets are not available, we'll avoid building and publishing the docker image and we'll pass the workflow +jobs: + build-docker-image: + strategy: + matrix: + os: [ubuntu-22.04] + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + + name: docker-build-${{ matrix.os }} + outputs: + image: ${{ steps.build.outputs.image }} + steps: + - name: Check secrets + id: secrets + continue-on-error: true + run: | + if [[ -z "$QUAY_PASSWORD" || -z "$QUAY_USER" ]]; then + echo "User does not have access to secrets, skipping workflow" + exit 1 + fi + env: + QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} + QUAY_USER: ${{ secrets.QUAY_USER }} + + - name: Checkout code + if: ${{ steps.secrets.outcome == 'success' }} + uses: actions/checkout@v4 + + - name: Get submodules hash + id: submodules + if: ${{ steps.secrets.outcome == 'success' }} + run: | + echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 
| sed 's/[ -]*//g')" >> $GITHUB_OUTPUT + + - name: Cache submodules + if: ${{ steps.secrets.outcome == 'success' }} + uses: actions/cache@v3 + with: + path: | + vendor/ + .git/modules + key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} + + - name: Build binaries + id: build + if: ${{ steps.secrets.outcome == 'success' }} + run: | + + make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" wakunode2 + + SHORT_REF=$(git rev-parse --short HEAD) + + TAG=$([ "${PR_NUMBER}" == "" ] && echo "${SHORT_REF}" || echo "${PR_NUMBER}") + IMAGE=quay.io/wakuorg/nwaku-pr:${TAG} + + echo "image=${IMAGE}" >> $GITHUB_OUTPUT + echo "commit_hash=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT + + docker login -u ${QUAY_USER} -p ${QUAY_PASSWORD} quay.io + docker build -t ${IMAGE} -f docker/binaries/Dockerfile.bn.amd64 --label quay.expires-after=30d . + docker push ${IMAGE} + env: + QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} + QUAY_USER: ${{ secrets.QUAY_USER }} + PR_NUMBER: ${{ inputs.image_tag}} + + - name: Comment PR + uses: thollander/actions-comment-pull-request@v2 + if: ${{ github.event_name == 'pull_request' && steps.secrets.outcome == 'success' }} + with: + message: | + You can find the image built from this PR at + + ``` + ${{steps.build.outputs.image}} + ``` + + Built from ${{ steps.build.outputs.commit_hash }} + comment_tag: execution-rln-v${{ matrix.rln_version }} diff --git a/third-party/nwaku/.github/workflows/pr-lint.yml b/third-party/nwaku/.github/workflows/pr-lint.yml new file mode 100644 index 0000000..d3ac05f --- /dev/null +++ b/third-party/nwaku/.github/workflows/pr-lint.yml @@ -0,0 +1,54 @@ +name: "Lint PR" + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + labels: + runs-on: ubuntu-22.04 + + steps: + - uses: actions/checkout@v4 + name: Checkout code + id: checkout + - uses: dorny/paths-filter@v2 + id: filter + with: + filters: | + config: + - 
'apps/wakunode2/external_config.nim' + - 'apps/networkmonitor/networkmonitor_config.nim' + - 'apps/chat2/config_chat2.nim' + - 'apps/chat2bridge/config_chat2bridge.nim' + + db_schema: + - 'waku/waku_archive/driver/postgres_driver/postgres_driver.nim' + - 'waku/waku_archive/driver/sqlite_driver/queries.nim' + - name: Comment config change + uses: thollander/actions-comment-pull-request@v2 + if: ${{steps.filter.outputs.config == 'true'}} + with: + message: | + This PR may contain changes to **configuration options** of one of the apps. + + If you are introducing a breaking change (i.e. the set of options in latest release would no longer be applicable) make sure the original option is preserved with a *deprecation* note for 2 following releases before it is actually removed. + + Please also make sure the label `release-notes` is added to make sure any changes to the user interface are properly announced in changelog and release notes. + comment_tag: configs + + - name: Comment DB schema change + uses: thollander/actions-comment-pull-request@v2 + if: ${{steps.filter.outputs.db_schema == 'true'}} + with: + header: pr-title-lint-error + message: | + This PR may contain changes to **database schema** of one of the drivers. + + If you are introducing any changes to the schema, make sure the upgrade from the latest release to this change passes without any errors/issues. + + Please make sure the label `release-notes` is added to make sure upgrade instructions properly highlight this change. 
+ comment_tag: db_schema diff --git a/third-party/nwaku/.github/workflows/pre-release.yml b/third-party/nwaku/.github/workflows/pre-release.yml new file mode 100644 index 0000000..b138a22 --- /dev/null +++ b/third-party/nwaku/.github/workflows/pre-release.yml @@ -0,0 +1,163 @@ +name: Pre-Release + +on: + push: + tags: + - 'v*-rc.*' + schedule: + - cron: 13 3 * * * + workflow_dispatch: + +env: + RELEASE_NAME: nightly + + NPROC: 2 + MAKEFLAGS: "-j${NPROC}" + NIMFLAGS: "--parallelBuild:${NPROC}" + +jobs: + tag-name: + runs-on: ubuntu-22.04 + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Vars + id: vars + run: | + TAG=$([[ "${{github.ref}}" == "refs/heads/master" ]] && echo "${{env.RELEASE_NAME}}" || echo ${{github.ref}} | sed 's#refs/tags/##') + echo "tag=${TAG}" >> $GITHUB_OUTPUT + outputs: + tag: ${{steps.vars.outputs.tag}} + + build-and-publish: + needs: tag-name + strategy: + matrix: + os: [ubuntu-22.04, macos-13] + arch: [amd64] + include: + - os: macos-13 + arch: arm64 + runs-on: ${{ matrix.os }} + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: prep variables + id: vars + run: | + ARCH=${{matrix.arch}} + + echo "arch=${ARCH}" >> $GITHUB_OUTPUT + + NWAKU_ARTIFACT_NAME=$(echo "nwaku-${ARCH}-${{runner.os}}-${{ needs.tag-name.outputs.tag }}.tar.gz" | tr "[:upper:]" "[:lower:]") + NWAKU_TOOLS_ARTIFACT_NAME=$(echo "nwaku-tools-${ARCH}-${{runner.os}}-${{ needs.tag-name.outputs.tag }}.tar.gz" | tr "[:upper:]" "[:lower:]") + + echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT + echo "nwakutools=${NWAKU_TOOLS_ARTIFACT_NAME}" >> $GITHUB_OUTPUT + + + - name: build artifacts + id: build + run: | + OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux") + + make QUICK_AND_DIRTY_COMPILER=1 V=1 CI=false NIMFLAGS="-d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" \ + update + + make QUICK_AND_DIRTY_COMPILER=1 V=1 CI=false\ + NIMFLAGS="-d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" 
\ + wakunode2\ + chat2\ + tools + + tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/wakunode2 ./build/chat2 + tar -cvzf ${{steps.vars.outputs.nwakutools}} ./build/wakucanary ./build/networkmonitor + + - name: upload artifacts + uses: actions/upload-artifact@v4 + with: + name: wakunode2 + path: ${{steps.vars.outputs.nwaku}} + retention-days: 2 + + - name: upload artifacts + uses: actions/upload-artifact@v4 + with: + name: wakutools + path: ${{steps.vars.outputs.nwakutools}} + retention-days: 2 + + build-docker-image: + needs: tag-name + uses: waku-org/nwaku/.github/workflows/container-image.yml@master + with: + image_tag: ${{ needs.tag-name.outputs.tag }} + secrets: inherit + + js-waku-node: + needs: build-docker-image + uses: waku-org/js-waku/.github/workflows/test-node.yml@master + with: + nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} + test_type: node + debug: waku* + + js-waku-node-optional: + needs: build-docker-image + uses: waku-org/js-waku/.github/workflows/test-node.yml@master + with: + nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} + test_type: node-optional + debug: waku* + + create-release-candidate: + runs-on: ubuntu-22.04 + needs: [ tag-name, build-and-publish ] + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + ref: master + + - name: download artifacts + uses: actions/download-artifact@v4 + + - name: prep variables + id: vars + run: | + REF=$(echo ${{github.ref}} | sed 's#.*/##') + + echo "ref=${REF}" >> $GITHUB_OUTPUT + + - name: generate release notes + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + set -x + gh release view ${{ needs.tag-name.outputs.tag }} &>/dev/null &&\ + gh release delete -y ${{ needs.tag-name.outputs.tag }} &&\ + [[ "${{ needs.tag-name.outputs.tag }}" == "nightly" ]] && git tag -d ${{ needs.tag-name.outputs.tag }} + + RELEASE_NOTES_TAG=$([[ "${{ needs.tag-name.outputs.tag }}" != "nightly" ]] && echo "-t ${{steps.vars.outputs.ref}}" || echo 
"") + + docker run \ + -t \ + --rm \ + -v ${PWD}:/opt/sv4git/repo:z \ + -u $(id -u) \ + docker.io/wakuorg/sv4git:latest \ + release-notes ${RELEASE_NOTES_TAG} --previous $(git tag -l --sort -creatordate | grep -e "^v[0-9]*\.[0-9]*\.[0-9]*$") |\ + sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' > release_notes.md + + sed -i "s/^## .*/Generated at $(date)/" release_notes.md + + cat release_notes.md + + TARGET=$([[ "${{ needs.tag-name.outputs.tag }}" == "nightly" ]] && echo "--target ${{steps.vars.outputs.ref}}" || echo "") + + gh release create ${{ needs.tag-name.outputs.tag }} --prerelease ${TARGET} \ + --title ${{ needs.tag-name.outputs.tag }} --notes-file release_notes.md \ + wakunode2/* wakutools/* diff --git a/third-party/nwaku/.github/workflows/release-assets.yml b/third-party/nwaku/.github/workflows/release-assets.yml new file mode 100644 index 0000000..2c7c260 --- /dev/null +++ b/third-party/nwaku/.github/workflows/release-assets.yml @@ -0,0 +1,65 @@ +name: Upload Release Asset + +on: + push: + tags: + - 'v*' # "e.g. 
v0.4" + + workflow_dispatch: + +env: + NPROC: 2 + +jobs: + build-and-upload: + strategy: + matrix: + os: [ubuntu-22.04, macos-13] + arch: [amd64] + include: + - os: macos-13 + arch: arm64 + runs-on: ${{ matrix.os }} + timeout-minutes: 60 + + name: ${{ matrix.os }} - ${{ matrix.arch }} + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Get submodules hash + id: submodules + run: | + echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT + + - name: Cache submodules + uses: actions/cache@v3 + with: + path: | + vendor/ + .git/modules + key: ${{ runner.os }}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }} + + - name: prep variables + id: vars + run: | + NWAKU_ARTIFACT_NAME=$(echo "nwaku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]") + + echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT + + - name: Install dependencies + run: | + OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux") + + make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update + make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2 + make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2 + tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/ + + - name: Upload asset + uses: actions/upload-artifact@v4.4.0 + with: + name: ${{steps.vars.outputs.nwaku}} + path: ${{steps.vars.outputs.nwaku}} + if-no-files-found: error diff --git a/third-party/nwaku/.github/workflows/sync-labels.yml b/third-party/nwaku/.github/workflows/sync-labels.yml new file mode 100644 index 0000000..e53797b --- /dev/null +++ b/third-party/nwaku/.github/workflows/sync-labels.yml @@ -0,0 +1,17 @@ +name: Sync labels +on: + push: + branches: + - master + paths: + - .github/labels.yml +jobs: 
+ build: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v2 + - uses: micnncim/action-label-syncer@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + manifest: .github/labels.yml diff --git a/third-party/nwaku/.github/workflows/windows-build.yml b/third-party/nwaku/.github/workflows/windows-build.yml new file mode 100644 index 0000000..ed6d2cb --- /dev/null +++ b/third-party/nwaku/.github/workflows/windows-build.yml @@ -0,0 +1,104 @@ +name: ci / build-windows + +on: + workflow_call: + inputs: + branch: + required: true + type: string + +jobs: + build: + runs-on: windows-latest + + defaults: + run: + shell: msys2 {0} + + env: + MSYSTEM: MINGW64 + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Setup MSYS2 + uses: msys2/setup-msys2@v2 + with: + update: true + install: >- + git + base-devel + mingw-w64-x86_64-toolchain + make + cmake + upx + mingw-w64-x86_64-rust + mingw-w64-x86_64-postgresql + mingw-w64-x86_64-gcc + mingw-w64-x86_64-gcc-libs + mingw-w64-x86_64-libwinpthread-git + mingw-w64-x86_64-zlib + mingw-w64-x86_64-openssl + mingw-w64-x86_64-python + mingw-w64-x86_64-cmake + mingw-w64-x86_64-llvm + mingw-w64-x86_64-clang + + - name: Add UPX to PATH + run: | + echo "/usr/bin:$PATH" >> $GITHUB_PATH + echo "/mingw64/bin:$PATH" >> $GITHUB_PATH + echo "/usr/lib:$PATH" >> $GITHUB_PATH + echo "/mingw64/lib:$PATH" >> $GITHUB_PATH + + - name: Verify dependencies + run: | + which upx gcc g++ make cmake cargo rustc python + + - name: Updating submodules + run: git submodule update --init --recursive + + - name: Creating tmp directory + run: mkdir -p tmp + + - name: Building Nim + run: | + cd vendor/nimbus-build-system/vendor/Nim + ./build_all.bat + cd ../../../.. + + - name: Building miniupnpc + run: | + cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc + make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1 + cd ../../../../.. 
+ + - name: Building libnatpmp + run: | + cd ./vendor/nim-nat-traversal/vendor/libnatpmp-upstream + make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 + cd ../../../../ + + - name: Building wakunode2.exe + run: | + make wakunode2 LOG_LEVEL=DEBUG V=3 -j8 + + - name: Building libwaku.dll + run: | + make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j + + - name: Check Executable + run: | + if [ -f "./build/wakunode2.exe" ]; then + echo "wakunode2.exe build successful" + else + echo "Build failed: wakunode2.exe not found" + exit 1 + fi + if [ -f "./build/libwaku.dll" ]; then + echo "libwaku.dll build successful" + else + echo "Build failed: libwaku.dll not found" + exit 1 + fi diff --git a/third-party/nwaku/.gitignore b/third-party/nwaku/.gitignore new file mode 100644 index 0000000..7430c3e --- /dev/null +++ b/third-party/nwaku/.gitignore @@ -0,0 +1,81 @@ +/nimcache + +# Executables shall be put in an ignored build/ directory +/build + +# Nimble packages +/vendor/.nimble + +# Generated Files +*.generated.nim + +# ntags/ctags output +/tags + +# a symlink that can't be added to the repo because of Windows +/waku.nims + +# Ignore dynamic, static libs and libtool archive files +*.so +*.dylib +*.a +*.la +*.exe +*.dll + +.DS_Store + +# Ignore simulation generated metrics files +/metrics/prometheus +/metrics/waku-sim-all-nodes-grafana-dashboard.json + +*.log +/package-lock.json +/package.json +node_modules/ +/.update.timestamp + +# Ignore Jetbrains IDE files +.idea/ + +# ignore vscode files +.vscode/ + +# RLN / keystore +rlnKeystore.json +*.tar.gz + +# Nimbus Build System +nimbus-build-system.paths + +# sqlite db +*.db +*.db-shm +*.db-wal +*.sqlite3 +*.sqlite3-shm +*.sqlite3-wal + +/examples/nodejs/build/ +/examples/rust/target/ + + +# Coverage +coverage_html_report/ +*.info + +# Wildcard +*.ignore.* + +# Ignore all possible node runner directories +**/keystore/ +**/rln_tree/ +**/certs/ + +# simple qt example +.qmake.stash +main-qt +waku_handler.moc.cpp + 
+# Nix build result +result diff --git a/third-party/nwaku/.gitmodules b/third-party/nwaku/.gitmodules new file mode 100644 index 0000000..408def1 --- /dev/null +++ b/third-party/nwaku/.gitmodules @@ -0,0 +1,190 @@ +[submodule "vendor/nim-eth"] + path = vendor/nim-eth + url = https://github.com/status-im/nim-eth.git + ignore = dirty + branch = master +[submodule "vendor/nim-secp256k1"] + path = vendor/nim-secp256k1 + url = https://github.com/status-im/nim-secp256k1.git + ignore = dirty + branch = master +[submodule "vendor/nim-libp2p"] + path = vendor/nim-libp2p + url = https://github.com/vacp2p/nim-libp2p.git + ignore = dirty + branch = master +[submodule "vendor/nim-stew"] + path = vendor/nim-stew + url = https://github.com/status-im/nim-stew.git + ignore = dirty + branch = master +[submodule "vendor/nimbus-build-system"] + path = vendor/nimbus-build-system + url = https://github.com/status-im/nimbus-build-system.git + ignore = dirty + branch = master +[submodule "vendor/nim-nat-traversal"] + path = vendor/nim-nat-traversal + url = https://github.com/status-im/nim-nat-traversal.git + ignore = dirty + branch = master +[submodule "vendor/nim-libbacktrace"] + path = vendor/nim-libbacktrace + url = https://github.com/status-im/nim-libbacktrace.git + ignore = dirty + branch = master +[submodule "vendor/nim-confutils"] + path = vendor/nim-confutils + url = https://github.com/status-im/nim-confutils.git + ignore = dirty + branch = master +[submodule "vendor/nim-chronicles"] + path = vendor/nim-chronicles + url = https://github.com/status-im/nim-chronicles.git + ignore = dirty + branch = master +[submodule "vendor/nim-faststreams"] + path = vendor/nim-faststreams + url = https://github.com/status-im/nim-faststreams.git + ignore = dirty + branch = master +[submodule "vendor/nim-chronos"] + path = vendor/nim-chronos + url = https://github.com/status-im/nim-chronos.git + ignore = dirty + branch = master +[submodule "vendor/nim-json-serialization"] + path = 
vendor/nim-json-serialization + url = https://github.com/status-im/nim-json-serialization.git + ignore = dirty + branch = master +[submodule "vendor/nim-serialization"] + path = vendor/nim-serialization + url = https://github.com/status-im/nim-serialization.git + ignore = dirty + branch = master +[submodule "vendor/nimcrypto"] + path = vendor/nimcrypto + url = https://github.com/cheatfate/nimcrypto.git + ignore = dirty + branch = master +[submodule "vendor/nim-metrics"] + path = vendor/nim-metrics + url = https://github.com/status-im/nim-metrics.git + ignore = dirty + branch = master +[submodule "vendor/nim-stint"] + path = vendor/nim-stint + url = https://github.com/status-im/nim-stint.git + ignore = dirty + branch = master +[submodule "vendor/nim-json-rpc"] + path = vendor/nim-json-rpc + url = https://github.com/status-im/nim-json-rpc.git + ignore = dirty + branch = master +[submodule "vendor/nim-http-utils"] + path = vendor/nim-http-utils + url = https://github.com/status-im/nim-http-utils.git + ignore = dirty + branch = master +[submodule "vendor/nim-bearssl"] + path = vendor/nim-bearssl + url = https://github.com/status-im/nim-bearssl.git + ignore = dirty + branch = master +[submodule "vendor/nim-sqlite3-abi"] + path = vendor/nim-sqlite3-abi + url = https://github.com/arnetheduck/nim-sqlite3-abi.git + ignore = dirty + branch = master +[submodule "vendor/nim-web3"] + path = vendor/nim-web3 + url = https://github.com/status-im/nim-web3.git +[submodule "vendor/nim-testutils"] + path = vendor/nim-testutils + url = https://github.com/status-im/nim-testutils.git + ignore = untracked + branch = master +[submodule "vendor/nim-unittest2"] + path = vendor/nim-unittest2 + url = https://github.com/status-im/nim-unittest2.git + ignore = untracked + branch = master +[submodule "vendor/nim-websock"] + path = vendor/nim-websock + url = https://github.com/status-im/nim-websock.git + ignore = untracked + branch = main +[submodule "vendor/nim-zlib"] + path = vendor/nim-zlib + 
url = https://github.com/status-im/nim-zlib.git + ignore = untracked + branch = master +[submodule "vendor/nim-dnsdisc"] + path = vendor/nim-dnsdisc + url = https://github.com/status-im/nim-dnsdisc.git + ignore = untracked + branch = main +[submodule "vendor/dnsclient.nim"] + path = vendor/dnsclient.nim + url = https://github.com/ba0f3/dnsclient.nim.git + ignore = untracked + branch = master +[submodule "vendor/nim-toml-serialization"] + path = vendor/nim-toml-serialization + url = https://github.com/status-im/nim-toml-serialization.git +[submodule "vendor/nim-presto"] + path = vendor/nim-presto + url = https://github.com/status-im/nim-presto.git + ignore = untracked + branch = master +[submodule "vendor/zerokit"] + path = vendor/zerokit + url = https://github.com/vacp2p/zerokit.git + ignore = dirty + branch = v0.5.1 +[submodule "vendor/nim-regex"] + path = vendor/nim-regex + url = https://github.com/nitely/nim-regex.git + ignore = untracked + branch = master +[submodule "vendor/nim-unicodedb"] + path = vendor/nim-unicodedb + url = https://github.com/nitely/nim-unicodedb.git + ignore = untracked + branch = master +[submodule "vendor/nim-taskpools"] + path = vendor/nim-taskpools + url = https://github.com/status-im/nim-taskpools.git + ignore = untracked + branch = stable +[submodule "vendor/nim-results"] + ignore = untracked + branch = master + path = vendor/nim-results + url = https://github.com/arnetheduck/nim-results.git +[submodule "vendor/db_connector"] + path = vendor/db_connector + url = https://github.com/nim-lang/db_connector.git + ignore = untracked + branch = devel +[submodule "vendor/nph"] + ignore = untracked + branch = master + path = vendor/nph + url = https://github.com/arnetheduck/nph.git +[submodule "vendor/nim-minilru"] + path = vendor/nim-minilru + url = https://github.com/status-im/nim-minilru.git + ignore = untracked + branch = master +[submodule "vendor/waku-rlnv2-contract"] + path = vendor/waku-rlnv2-contract + url = 
https://github.com/waku-org/waku-rlnv2-contract.git + ignore = untracked + branch = master +[submodule "vendor/mix"] + path = vendor/mix + url = https://github.com/vacp2p/mix/ + branch = main diff --git a/third-party/nwaku/.sv4git.yml b/third-party/nwaku/.sv4git.yml new file mode 100644 index 0000000..8975e69 --- /dev/null +++ b/third-party/nwaku/.sv4git.yml @@ -0,0 +1,22 @@ +version: "1.1" #config version + +tag: + pattern: "v%d.%d.%d" + filter: "v*" + +release-notes: + sections: # Array with each section of release note. Check template section for more information. + - name: Features # Name used on section. + section-type: commits # Type of the section, supported types: commits, breaking-changes. + commit-types: [feat] # Commit types for commit section-type, one commit type cannot be in more than one section. + - name: Bug Fixes + section-type: commits + commit-types: [fix, bug] + - name: Changes + section-type: commits + commit-types: [chore, docs, build, refactor, docker] + +commit-message: + + issue: + regex: '#[0-9]+' # Regex for issue id. 
\ No newline at end of file diff --git a/third-party/nwaku/.sv4git/templates/releasenotes-md.tpl b/third-party/nwaku/.sv4git/templates/releasenotes-md.tpl new file mode 100644 index 0000000..a513e69 --- /dev/null +++ b/third-party/nwaku/.sv4git/templates/releasenotes-md.tpl @@ -0,0 +1,8 @@ +## {{if .Release}}{{.Release}}{{end}}{{if and (not .Date.IsZero) .Release}} ({{end}}{{timefmt .Date "2006-01-02"}}{{if and (not .Date.IsZero) .Release}}){{end}} +{{- range $section := .Sections }} +{{- if (eq $section.SectionType "commits") }} +{{- template "rn-md-section-commits.tpl" $section }} +{{- else if (eq $section.SectionType "breaking-changes")}} +{{- template "rn-md-section-breaking-changes.tpl" $section }} +{{- end}} +{{- end}} diff --git a/third-party/nwaku/.sv4git/templates/rn-md-section-commits.tpl b/third-party/nwaku/.sv4git/templates/rn-md-section-commits.tpl new file mode 100644 index 0000000..2732fcd --- /dev/null +++ b/third-party/nwaku/.sv4git/templates/rn-md-section-commits.tpl @@ -0,0 +1,7 @@ +{{- if .}}{{- if ne .SectionName ""}} + +### {{.SectionName}} +{{range $k,$v := .Items}} +- {{if $v.Message.Scope}}**{{$v.Message.Scope}}:** {{end}}{{$v.Message.Description}} ([{{$v.Hash}}](https://github.com/waku-org/nwaku/commit/{{$v.Hash}})){{if $v.Message.Metadata.issue}} ([https://github.com/waku-org/nwaku/issues/{{$v.Message.Metadata.issue}}]({{$v.Message.Metadata.issue}})){{end}} +{{- end}} +{{- end}}{{- end}} \ No newline at end of file diff --git a/third-party/nwaku/CHANGELOG.md b/third-party/nwaku/CHANGELOG.md new file mode 100644 index 0000000..dc07379 --- /dev/null +++ b/third-party/nwaku/CHANGELOG.md @@ -0,0 +1,2495 @@ +## v0.36.0 (2025-06-20) +### Notes + +- Extended REST API for better debugging + - Extended `/health` report + - Very detailed access to peers and actual status through [`/admin/v1/peers/...` endpoints](https://waku-org.github.io/waku-rest-api/#get-/admin/v1/peers/stats) + - Dynamic log level change with[ 
`/admin/v1/log-level`](https://waku-org.github.io/waku-rest-api/#post-/admin/v1/log-level/-logLevel-) + +- The `rln-relay-eth-client-address` parameter, from now on, should be passed as an array of RPC addresses. +- new `preset` parameter. `preset=twn` is the RLN-protected Waku Network (cluster 1). Overrides other values. +- Removed `dns-addrs` parameter as it was duplicated and unused. +- Removed `rln-relay-id-key`, `rln-relay-id-commitment-key`, `rln-relay-bandwidth-threshold` parameters. +- Effectively removed `pubsub-topic`, which was deprecated in `v0.33.0`. +- Removed `store-sync-max-payload-size` parameter. +- Removed `dns-discovery-name-server` and `discv5-only` parameters. + +### Features + +- Update implementation for new contract abi ([#3390](https://github.com/waku-org/nwaku/issues/3390)) ([ee4058b2d](https://github.com/waku-org/nwaku/commit/ee4058b2d)) +- Lightpush v3 for lite-protocol-tester ([#3455](https://github.com/waku-org/nwaku/issues/3455)) ([3f3c59488](https://github.com/waku-org/nwaku/commit/3f3c59488)) +- Retrieve metrics from libwaku ([#3452](https://github.com/waku-org/nwaku/issues/3452)) ([f016ede60](https://github.com/waku-org/nwaku/commit/f016ede60)) +- Dynamic logging via REST API ([#3451](https://github.com/waku-org/nwaku/issues/3451)) ([9fe8ef8d2](https://github.com/waku-org/nwaku/commit/9fe8ef8d2)) +- Add waku_disconnect_all_peers to libwaku ([#3438](https://github.com/waku-org/nwaku/issues/3438)) ([7f51d103b](https://github.com/waku-org/nwaku/commit/7f51d103b)) +- Extend node /health REST endpoint with all protocol's state ([#3419](https://github.com/waku-org/nwaku/issues/3419)) ([1632496a2](https://github.com/waku-org/nwaku/commit/1632496a2)) +- Deprecate sync / local merkle tree ([#3312](https://github.com/waku-org/nwaku/issues/3312)) ([50fe7d727](https://github.com/waku-org/nwaku/commit/50fe7d727)) +- Refactor waku sync DOS protection ([#3391](https://github.com/waku-org/nwaku/issues/3391)) 
([a81f9498c](https://github.com/waku-org/nwaku/commit/a81f9498c)) +- Waku Sync dashboard new panel & update ([#3379](https://github.com/waku-org/nwaku/issues/3379)) ([5ed6aae10](https://github.com/waku-org/nwaku/commit/5ed6aae10)) +- Enhance Waku Sync logs and metrics ([#3370](https://github.com/waku-org/nwaku/issues/3370)) ([f6c680a46](https://github.com/waku-org/nwaku/commit/f6c680a46)) +- Add waku_get_connected_peers_info to libwaku ([#3356](https://github.com/waku-org/nwaku/issues/3356)) ([0eb9c6200](https://github.com/waku-org/nwaku/commit/0eb9c6200)) +- Add waku_relay_get_peers_in_mesh to libwaku ([#3352](https://github.com/waku-org/nwaku/issues/3352)) ([ef9074443](https://github.com/waku-org/nwaku/commit/ef9074443)) +- Add waku_relay_get_connected_peers to libwaku ([#3353](https://github.com/waku-org/nwaku/issues/3353)) ([7250d7392](https://github.com/waku-org/nwaku/commit/7250d7392)) +- Introduce `preset` option ([#3346](https://github.com/waku-org/nwaku/issues/3346)) ([0eaf90465](https://github.com/waku-org/nwaku/commit/0eaf90465)) +- Add store sync dashboard panel ([#3307](https://github.com/waku-org/nwaku/issues/3307)) ([ef8ee233f](https://github.com/waku-org/nwaku/commit/ef8ee233f)) + +### Bug Fixes + +- Fix typo from DIRVER to DRIVER ([#3442](https://github.com/waku-org/nwaku/issues/3442)) ([b9a4d7702](https://github.com/waku-org/nwaku/commit/b9a4d7702)) +- Fix discv5 protocol id in libwaku ([#3447](https://github.com/waku-org/nwaku/issues/3447)) ([f7be4c2f0](https://github.com/waku-org/nwaku/commit/f7be4c2f0)) +- Fix dnsresolver ([#3440](https://github.com/waku-org/nwaku/issues/3440)) ([e42e28cc6](https://github.com/waku-org/nwaku/commit/e42e28cc6)) +- Misc sync fixes, added debug logging ([#3411](https://github.com/waku-org/nwaku/issues/3411)) ([b9efa874d](https://github.com/waku-org/nwaku/commit/b9efa874d)) +- Relay unsubscribe ([#3422](https://github.com/waku-org/nwaku/issues/3422)) ([9fc631e10](https://github.com/waku-org/nwaku/commit/9fc631e10)) 
+- Fix build_rln.sh update version to download v0.7.0 ([#3425](https://github.com/waku-org/nwaku/issues/3425)) ([2678303bf](https://github.com/waku-org/nwaku/commit/2678303bf)) +- Timestamp based validation ([#3406](https://github.com/waku-org/nwaku/issues/3406)) ([1512bdaf0](https://github.com/waku-org/nwaku/commit/1512bdaf0)) +- Enable WebSocket connection also in case only websocket-secure-support enabled ([#3417](https://github.com/waku-org/nwaku/issues/3417)) ([698fe6525](https://github.com/waku-org/nwaku/commit/698fe6525)) +- Fix addPeer could unintentionally override metadata of previously stored peer with defaults and empty ([#3403](https://github.com/waku-org/nwaku/issues/3403)) ([5cccaaac6](https://github.com/waku-org/nwaku/commit/5cccaaac6)) +- Fix bad HttpCode conversion, add missing lightpush v3 rest api tests ([#3389](https://github.com/waku-org/nwaku/issues/3389)) ([7ff055e42](https://github.com/waku-org/nwaku/commit/7ff055e42)) +- Adjust mistaken comments and broken link ([#3381](https://github.com/waku-org/nwaku/issues/3381)) ([237f7abbb](https://github.com/waku-org/nwaku/commit/237f7abbb)) +- Avoid libwaku's redundant allocs ([#3380](https://github.com/waku-org/nwaku/issues/3380)) ([ac454a30b](https://github.com/waku-org/nwaku/commit/ac454a30b)) +- Avoid performing nil check for userData ([#3365](https://github.com/waku-org/nwaku/issues/3365)) ([b8707b6a5](https://github.com/waku-org/nwaku/commit/b8707b6a5)) +- Fix waku sync timing ([#3337](https://github.com/waku-org/nwaku/issues/3337)) ([b01b1837d](https://github.com/waku-org/nwaku/commit/b01b1837d)) +- Fix filter out ephemeral msg from waku sync ([#3332](https://github.com/waku-org/nwaku/issues/3332)) ([4b963d8f5](https://github.com/waku-org/nwaku/commit/4b963d8f5)) +- Apply latest nph formating ([#3334](https://github.com/waku-org/nwaku/issues/3334)) ([77105a6c2](https://github.com/waku-org/nwaku/commit/77105a6c2)) +- waku sync 2.0 codecs ENR support 
([#3326](https://github.com/waku-org/nwaku/issues/3326)) ([bf735e777](https://github.com/waku-org/nwaku/commit/bf735e777)) +- waku sync mounting ([#3321](https://github.com/waku-org/nwaku/issues/3321)) ([380d2e338](https://github.com/waku-org/nwaku/commit/380d2e338)) +- Fix rest-relay-cache-capacity ([#3454](https://github.com/waku-org/nwaku/issues/3454)) ([fed4dc280](https://github.com/waku-org/nwaku/commit/fed4dc280)) + +### Changes + +- Lower waku sync log lvl ([#3461](https://github.com/waku-org/nwaku/issues/3461)) ([4277a5349](https://github.com/waku-org/nwaku/commit/4277a5349)) +- Refactor to unify online and health monitors ([#3456](https://github.com/waku-org/nwaku/issues/3456)) ([2e40f2971](https://github.com/waku-org/nwaku/commit/2e40f2971)) +- Refactor rm discv5-only ([#3453](https://github.com/waku-org/nwaku/issues/3453)) ([b998430d5](https://github.com/waku-org/nwaku/commit/b998430d5)) +- Add extra debug REST helper via getting peer statistics ([#3443](https://github.com/waku-org/nwaku/issues/3443)) ([f4ad7a332](https://github.com/waku-org/nwaku/commit/f4ad7a332)) +- Expose online state in libwaku ([#3433](https://github.com/waku-org/nwaku/issues/3433)) ([e7f5c8cb2](https://github.com/waku-org/nwaku/commit/e7f5c8cb2)) +- Add heaptrack support build for Nim v2.0.12 builds ([#3424](https://github.com/waku-org/nwaku/issues/3424)) ([91885fb9e](https://github.com/waku-org/nwaku/commit/91885fb9e)) +- Remove debug for js-waku ([#3423](https://github.com/waku-org/nwaku/issues/3423)) ([5628dc6ad](https://github.com/waku-org/nwaku/commit/5628dc6ad)) +- Bump dependencies for v0.36 ([#3410](https://github.com/waku-org/nwaku/issues/3410)) ([005815746](https://github.com/waku-org/nwaku/commit/005815746)) +- Enhance feedback on error CLI ([#3405](https://github.com/waku-org/nwaku/issues/3405)) ([3464d81a6](https://github.com/waku-org/nwaku/commit/3464d81a6)) +- Allow multiple rln eth clients ([#3402](https://github.com/waku-org/nwaku/issues/3402)) 
([861710bc7](https://github.com/waku-org/nwaku/commit/861710bc7)) +- Separate internal and CLI configurations ([#3357](https://github.com/waku-org/nwaku/issues/3357)) ([dd8d66431](https://github.com/waku-org/nwaku/commit/dd8d66431)) +- Avoid double relay subscription ([#3396](https://github.com/waku-org/nwaku/issues/3396)) ([7d5eb9374](https://github.com/waku-org/nwaku/commit/7d5eb9374) [#3429](https://github.com/waku-org/nwaku/issues/3429)) ([ee5932ebc](https://github.com/waku-org/nwaku/commit/ee5932ebc)) +- Improve disconnection handling ([#3385](https://github.com/waku-org/nwaku/issues/3385)) ([1ec9b8d96](https://github.com/waku-org/nwaku/commit/1ec9b8d96)) +- Return all peers from REST admin ([#3395](https://github.com/waku-org/nwaku/issues/3395)) ([f6fdd960f](https://github.com/waku-org/nwaku/commit/f6fdd960f)) +- Simplify rln_relay code a little ([#3392](https://github.com/waku-org/nwaku/issues/3392)) ([7a6c00bd0](https://github.com/waku-org/nwaku/commit/7a6c00bd0)) +- Extended /admin/v1 RESP API with different option to look at current connected/relay/mesh state of the node ([#3382](https://github.com/waku-org/nwaku/issues/3382)) ([3db00f39e](https://github.com/waku-org/nwaku/commit/3db00f39e)) +- Timestamp set to now in publish if not provided ([#3373](https://github.com/waku-org/nwaku/issues/3373)) ([f7b424451](https://github.com/waku-org/nwaku/commit/f7b424451)) +- Update lite-protocol-tester for handling shard argument ([#3371](https://github.com/waku-org/nwaku/issues/3371)) ([5ab69edd7](https://github.com/waku-org/nwaku/commit/5ab69edd7)) +- Fix unused and deprecated imports ([#3368](https://github.com/waku-org/nwaku/issues/3368)) ([6ebb49a14](https://github.com/waku-org/nwaku/commit/6ebb49a14)) +- Expect camelCase JSON for libwaku store queries ([#3366](https://github.com/waku-org/nwaku/issues/3366)) ([ccb4ed51d](https://github.com/waku-org/nwaku/commit/ccb4ed51d)) +- Maintenance to c and c++ simple examples 
([#3367](https://github.com/waku-org/nwaku/issues/3367)) ([25d30d44d](https://github.com/waku-org/nwaku/commit/25d30d44d)) +- Skip two flaky tests ([#3364](https://github.com/waku-org/nwaku/issues/3364)) ([b672617b2](https://github.com/waku-org/nwaku/commit/b672617b2)) +- Retrieve protocols in new added peer from discv5 ([#3354](https://github.com/waku-org/nwaku/issues/3354)) ([df58643ea](https://github.com/waku-org/nwaku/commit/df58643ea)) +- Better keystore management ([#3358](https://github.com/waku-org/nwaku/issues/3358)) ([a914fdccc](https://github.com/waku-org/nwaku/commit/a914fdccc)) +- Remove pubsub topics arguments ([#3350](https://github.com/waku-org/nwaku/issues/3350)) ([9778b45c6](https://github.com/waku-org/nwaku/commit/9778b45c6)) +- New performance measurement metrics for non-relay protocols ([#3299](https://github.com/waku-org/nwaku/issues/3299)) ([68c50a09a](https://github.com/waku-org/nwaku/commit/68c50a09a)) +- Start triggering CI for windows build ([#3316](https://github.com/waku-org/nwaku/issues/3316)) ([55ac6ba9f](https://github.com/waku-org/nwaku/commit/55ac6ba9f)) +- Less logs for rendezvous ([#3319](https://github.com/waku-org/nwaku/issues/3319)) ([6df05bae2](https://github.com/waku-org/nwaku/commit/6df05bae2)) +- Add test reporting doc to benchmarks dir ([#3238](https://github.com/waku-org/nwaku/issues/3238)) ([94554a6e0](https://github.com/waku-org/nwaku/commit/94554a6e0)) +- Improve epoch monitoring ([#3197](https://github.com/waku-org/nwaku/issues/3197)) ([b0c025f81](https://github.com/waku-org/nwaku/commit/b0c025f81)) + +### This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | 
`draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`WAKU2-LIGHTPUSH v3`](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) | `draft` | `/vac/waku/lightpush/3.0.0` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + + +## v0.35.1 (2025-03-30) + +### Bug fixes + +* Update RLN references ([3287](https://github.com/waku-org/nwaku/pull/3287)) ([ea961fa](https://github.com/waku-org/nwaku/pull/3287/commits/ea961faf4ed4f8287a2043a6b5d84b660745072b)) + +**Info:** before upgrading to this version, make sure you delete the previous rln_tree folder, i.e., +the one that is passed through this CLI: `--rln-relay-tree-path`. + +### Features +* lightpush v3 ([#3279](https://github.com/waku-org/nwaku/pull/3279)) ([e0b563ff](https://github.com/waku-org/nwaku/commit/e0b563ffe5af20bd26d37cd9b4eb9ed9eb82ff80)) + Upgrade for Waku Lightpush protocol with enhanced error handling. Read specification [here](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`WAKU2-LIGHTPUSH v3`](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) | `draft` | `/vac/waku/lightpush/3.0.0` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + +## v0.35.0 (2025-03-03) + +### Notes + +- Deprecated parameter + - max-relay-peers + +- New parameters + - relay-service-ratio + + String value with peers distribution within max-connections parameter. + This percentage ratio represents the relay peers to service peers. + For example, 60:40, tells that 60% of the max-connections will be used for relay protocol + and the other 40% of max-connections will be reserved for other service protocols (e.g., + filter, lightpush, store, metadata, etc.) + + - rendezvous + + boolean attribute that optionally activates waku rendezvous discovery server. + True by default. + +### Release highlights + +- New filter approach to keep push stream opened within subscription period. +- Waku sync protocol. +- Libwaku async +- Lite-protocol-tester enhancements. +- New panels and metrics in RLN to control outstanding request quota. 
+ +### Features + +- waku sync shard matching check ([#3259](https://github.com/waku-org/nwaku/issues/3259)) ([42fd6b827](https://github.com/waku-org/nwaku/commit/42fd6b827)) +- waku store sync 2.0 config & setup ([#3217](https://github.com/waku-org/nwaku/issues/3217)) ([7f64dc03a](https://github.com/waku-org/nwaku/commit/7f64dc03a)) +- waku store sync 2.0 protocols & tests ([#3216](https://github.com/waku-org/nwaku/issues/3216)) ([6ee494d90](https://github.com/waku-org/nwaku/commit/6ee494d90)) +- waku store sync 2.0 storage & tests ([#3215](https://github.com/waku-org/nwaku/issues/3215)) ([54a7a6875](https://github.com/waku-org/nwaku/commit/54a7a6875)) +- waku store sync 2.0 common types & codec ([#3213](https://github.com/waku-org/nwaku/issues/3213)) ([29fda2dab](https://github.com/waku-org/nwaku/commit/29fda2dab)) +- add txhash-based eligibility checks for incentivization PoC ([#3166](https://github.com/waku-org/nwaku/issues/3166)) ([505ec84ce](https://github.com/waku-org/nwaku/commit/505ec84ce)) +- connection change event ([#3225](https://github.com/waku-org/nwaku/issues/3225)) ([e81a5517b](https://github.com/waku-org/nwaku/commit/e81a5517b)) +- libwaku add protected topic ([#3211](https://github.com/waku-org/nwaku/issues/3211)) ([d932dd10c](https://github.com/waku-org/nwaku/commit/d932dd10c)) +- topic health tracking ([#3212](https://github.com/waku-org/nwaku/issues/3212)) ([6020a673b](https://github.com/waku-org/nwaku/commit/6020a673b)) +- allowing configuration of application level callbacks ([#3206](https://github.com/waku-org/nwaku/issues/3206)) ([049fbeabb](https://github.com/waku-org/nwaku/commit/049fbeabb)) +- waku rendezvous wrapper ([#2962](https://github.com/waku-org/nwaku/issues/2962)) ([650a9487e](https://github.com/waku-org/nwaku/commit/650a9487e)) +- making dns discovery async ([#3175](https://github.com/waku-org/nwaku/issues/3175)) ([d7d00bfd7](https://github.com/waku-org/nwaku/commit/d7d00bfd7)) +- remove Waku Sync 1.0 & Negentropy 
([#3185](https://github.com/waku-org/nwaku/issues/3185)) ([2ab9c3d36](https://github.com/waku-org/nwaku/commit/2ab9c3d36)) +- add waku_dial_peer and get_connected_peers to libwaku ([#3149](https://github.com/waku-org/nwaku/issues/3149)) ([507b1fc4d](https://github.com/waku-org/nwaku/commit/507b1fc4d)) +- running periodically peer exchange if discv5 is disabled ([#3150](https://github.com/waku-org/nwaku/issues/3150)) ([400d7a54f](https://github.com/waku-org/nwaku/commit/400d7a54f)) + +### Bug Fixes + +- avoid double db migration for sqlite ([#3244](https://github.com/waku-org/nwaku/issues/3244)) ([2ce245354](https://github.com/waku-org/nwaku/commit/2ce245354)) +- libwaku waku_relay_unsubscribe ([#3207](https://github.com/waku-org/nwaku/issues/3207)) ([ab0c1d4aa](https://github.com/waku-org/nwaku/commit/ab0c1d4aa)) +- libwaku support string and int64 for timestamps ([#3205](https://github.com/waku-org/nwaku/issues/3205)) ([2022f54f5](https://github.com/waku-org/nwaku/commit/2022f54f5)) +- lite-protocol-tester receiver exit check ([#3187](https://github.com/waku-org/nwaku/issues/3187)) ([beb21c78f](https://github.com/waku-org/nwaku/commit/beb21c78f)) +- linting error ([#3156](https://github.com/waku-org/nwaku/issues/3156)) ([99ac68447](https://github.com/waku-org/nwaku/commit/99ac68447)) + +### Changes + +- more efficient metrics usage ([#3298](https://github.com/waku-org/nwaku/issues/3298)) ([6f004d5d4](https://github.com/waku-org/nwaku/commit/6f004d5d4))([c07e278d8](https://github.com/waku-org/nwaku/commit/c07e278d82c3aa771b9988e85bad7422890e4d74)) +- filter refactor subscription management and react when the remote peer closes the stream. 
See the following commits in chronological order: + - issue: [#3281](https://github.com/waku-org/nwaku/issues/3281) commit: [5392b8ea4](https://github.com/waku-org/nwaku/commit/5392b8ea4) + - issue: [#3198](https://github.com/waku-org/nwaku/issues/3198) commit: [287e9b12c](https://github.com/waku-org/nwaku/commit/287e9b12c) + - issue: [#3267](https://github.com/waku-org/nwaku/issues/3267) commit: [46747fd49](https://github.com/waku-org/nwaku/commit/46747fd49) +- send msg hash as string on libwaku message event ([#3234](https://github.com/waku-org/nwaku/issues/3234)) ([9c209b4c3](https://github.com/waku-org/nwaku/commit/9c209b4c3)) +- separate heaptrack from debug build ([#3249](https://github.com/waku-org/nwaku/issues/3249)) ([81f24cc25](https://github.com/waku-org/nwaku/commit/81f24cc25)) +- capping mechanism for relay and service connections ([#3184](https://github.com/waku-org/nwaku/issues/3184)) ([2942782f9](https://github.com/waku-org/nwaku/commit/2942782f9)) +- add extra migration to sqlite and improving error message ([#3240](https://github.com/waku-org/nwaku/issues/3240)) ([bfd60ceab](https://github.com/waku-org/nwaku/commit/bfd60ceab)) +- optimize libwaku size ([#3242](https://github.com/waku-org/nwaku/issues/3242)) ([9c0ad8517](https://github.com/waku-org/nwaku/commit/9c0ad8517)) +- golang example end using negentropy dependency plus simple readme.md ([#3235](https://github.com/waku-org/nwaku/issues/3235)) ([0e0fcfb1a](https://github.com/waku-org/nwaku/commit/0e0fcfb1a)) +- enhance libwaku store protocol and more ([#3223](https://github.com/waku-org/nwaku/issues/3223)) ([22ce9ee87](https://github.com/waku-org/nwaku/commit/22ce9ee87)) +- add two RLN metrics and panel ([#3181](https://github.com/waku-org/nwaku/issues/3181)) ([1b532e8ab](https://github.com/waku-org/nwaku/commit/1b532e8ab)) +- libwaku async ([#3180](https://github.com/waku-org/nwaku/issues/3180)) ([47a623541](https://github.com/waku-org/nwaku/commit/47a623541)) +- filter protocol in libwaku 
([#3177](https://github.com/waku-org/nwaku/issues/3177)) ([f856298ca](https://github.com/waku-org/nwaku/commit/f856298ca)) +- add supervisor for lite-protocol-tester infra ([#3176](https://github.com/waku-org/nwaku/issues/3176)) ([a7264d68c](https://github.com/waku-org/nwaku/commit/a7264d68c)) +- libwaku better error handling and better waku thread destroy handling ([#3167](https://github.com/waku-org/nwaku/issues/3167)) ([294dd03c4](https://github.com/waku-org/nwaku/commit/294dd03c4)) +- libwaku allow several multiaddresses for a single peer in store queries ([#3171](https://github.com/waku-org/nwaku/issues/3171)) ([3cb8ebdd8](https://github.com/waku-org/nwaku/commit/3cb8ebdd8)) +- naming connectPeer procedure ([#3157](https://github.com/waku-org/nwaku/issues/3157)) ([b3656d6ee](https://github.com/waku-org/nwaku/commit/b3656d6ee)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/master/standards/core/sync.md) | `draft` | `/vac/waku/reconciliation/1.0.0` & `/vac/waku/transfer/1.0.0` | + +## v0.34.0 (2024-10-29) + +### Notes: + +* The `--protected-topic` CLI configuration has been removed. Equivalent flag, `--protected-shard`, shall be used instead. + +### Features + +- change latency buckets ([#3153](https://github.com/waku-org/nwaku/issues/3153)) ([956fde6e](https://github.com/waku-org/nwaku/commit/956fde6e)) +- libwaku: ping peer ([#3144](https://github.com/waku-org/nwaku/issues/3144)) ([de11e576](https://github.com/waku-org/nwaku/commit/de11e576)) +- initial windows support ([#3107](https://github.com/waku-org/nwaku/issues/3107)) ([ff21c01e](https://github.com/waku-org/nwaku/commit/ff21c01e)) +- circuit relay support ([#3112](https://github.com/waku-org/nwaku/issues/3112)) ([cfde7eea](https://github.com/waku-org/nwaku/commit/cfde7eea)) + +### Bug Fixes + +- peer exchange libwaku response handling ([#3141](https://github.com/waku-org/nwaku/issues/3141)) ([76606421](https://github.com/waku-org/nwaku/commit/76606421)) +- add more logs, stagger intervals & set prune offset to 10% for waku sync ([#3142](https://github.com/waku-org/nwaku/issues/3142)) ([a386880b](https://github.com/waku-org/nwaku/commit/a386880b)) +- add log and archive message ingress for sync ([#3133](https://github.com/waku-org/nwaku/issues/3133)) ([80c7581a](https://github.com/waku-org/nwaku/commit/80c7581a)) +- add a limit of max 10 content topics 
per query ([#3117](https://github.com/waku-org/nwaku/issues/3117)) ([c35dc549](https://github.com/waku-org/nwaku/commit/c35dc549)) +- avoid segfault by setting a default num peers requested in Peer eXchange ([#3122](https://github.com/waku-org/nwaku/issues/3122)) ([82fd5dde](https://github.com/waku-org/nwaku/commit/82fd5dde)) +- returning peerIds in base 64 ([#3105](https://github.com/waku-org/nwaku/issues/3105)) ([37edaf62](https://github.com/waku-org/nwaku/commit/37edaf62)) +- changing libwaku's error handling format ([#3093](https://github.com/waku-org/nwaku/issues/3093)) ([2e6c299d](https://github.com/waku-org/nwaku/commit/2e6c299d)) +- remove spammy log ([#3091](https://github.com/waku-org/nwaku/issues/3091)) ([1d2b910f](https://github.com/waku-org/nwaku/commit/1d2b910f)) +- avoid out connections leak ([#3077](https://github.com/waku-org/nwaku/issues/3077)) ([eb2bbae6](https://github.com/waku-org/nwaku/commit/eb2bbae6)) +- rejecting excess relay connections ([#3065](https://github.com/waku-org/nwaku/issues/3065)) ([8b0884c7](https://github.com/waku-org/nwaku/commit/8b0884c7)) +- static linking negentropy in ARM based mac ([#3046](https://github.com/waku-org/nwaku/issues/3046)) ([256b7853](https://github.com/waku-org/nwaku/commit/256b7853)) + +### Changes + +- support ping with multiple multiaddresses and close stream ([#3154](https://github.com/waku-org/nwaku/issues/3154)) ([3665991a](https://github.com/waku-org/nwaku/commit/3665991a)) +- liteprotocoltester: easy setup fleets ([#3125](https://github.com/waku-org/nwaku/issues/3125)) ([268e7e66](https://github.com/waku-org/nwaku/commit/268e7e66)) +- saving peers enr capabilities ([#3127](https://github.com/waku-org/nwaku/issues/3127)) ([69d9524f](https://github.com/waku-org/nwaku/commit/69d9524f)) +- networkmonitor: add missing field on RlnRelay init, set default for num of shard ([#3136](https://github.com/waku-org/nwaku/issues/3136)) ([edcb0e15](https://github.com/waku-org/nwaku/commit/edcb0e15)) +- add to 
libwaku peer id retrieval proc ([#3124](https://github.com/waku-org/nwaku/issues/3124)) ([c5a825e2](https://github.com/waku-org/nwaku/commit/c5a825e2)) +- adding to libwaku dial and disconnect by peerIds ([#3111](https://github.com/waku-org/nwaku/issues/3111)) ([25da8102](https://github.com/waku-org/nwaku/commit/25da8102)) +- dbconn: add requestId info as a comment in the database logs ([#3110](https://github.com/waku-org/nwaku/issues/3110)) ([30c072a4](https://github.com/waku-org/nwaku/commit/30c072a4)) +- improving get_peer_ids_by_protocol by returning the available protocols of connected peers ([#3109](https://github.com/waku-org/nwaku/issues/3109)) ([ed0ee5be](https://github.com/waku-org/nwaku/commit/ed0ee5be)) +- remove warnings ([#3106](https://github.com/waku-org/nwaku/issues/3106)) ([c861fa9f](https://github.com/waku-org/nwaku/commit/c861fa9f)) +- better store logs ([#3103](https://github.com/waku-org/nwaku/issues/3103)) ([21b03551](https://github.com/waku-org/nwaku/commit/21b03551)) +- Improve binding for waku_sync ([#3102](https://github.com/waku-org/nwaku/issues/3102)) ([c3756e3a](https://github.com/waku-org/nwaku/commit/c3756e3a)) +- improving and temporarily skipping flaky rln test ([#3094](https://github.com/waku-org/nwaku/issues/3094)) ([a6ed80a5](https://github.com/waku-org/nwaku/commit/a6ed80a5)) +- update master after release v0.33.1 ([#3089](https://github.com/waku-org/nwaku/issues/3089)) ([54c3083d](https://github.com/waku-org/nwaku/commit/54c3083d)) +- re-arrange function based on responsibility of peer-manager ([#3086](https://github.com/waku-org/nwaku/issues/3086)) ([0f8e8740](https://github.com/waku-org/nwaku/commit/0f8e8740)) +- waku_keystore: give some more context in case of error ([#3064](https://github.com/waku-org/nwaku/issues/3064)) ([3ad613ca](https://github.com/waku-org/nwaku/commit/3ad613ca)) +- bump negentropy ([#3078](https://github.com/waku-org/nwaku/issues/3078)) ([643ab20f](https://github.com/waku-org/nwaku/commit/643ab20f)) 
+- Optimize store ([#3061](https://github.com/waku-org/nwaku/issues/3061)) ([5875ed63](https://github.com/waku-org/nwaku/commit/5875ed63)) +- wrap peer store ([#3051](https://github.com/waku-org/nwaku/issues/3051)) ([729e63f5](https://github.com/waku-org/nwaku/commit/729e63f5)) +- disabling metrics for libwaku ([#3058](https://github.com/waku-org/nwaku/issues/3058)) ([b358c90f](https://github.com/waku-org/nwaku/commit/b358c90f)) +- test peer connection management ([#3049](https://github.com/waku-org/nwaku/issues/3049)) ([711e7db1](https://github.com/waku-org/nwaku/commit/711e7db1)) +- updating upload and download artifact actions to v4 ([#3047](https://github.com/waku-org/nwaku/issues/3047)) ([7c4a9717](https://github.com/waku-org/nwaku/commit/7c4a9717)) +- Better database query logs and logarithmic scale in grafana store panels ([#3048](https://github.com/waku-org/nwaku/issues/3048)) ([d68b06f1](https://github.com/waku-org/nwaku/commit/d68b06f1)) +- extending store metrics ([#3042](https://github.com/waku-org/nwaku/issues/3042)) ([fd83b42f](https://github.com/waku-org/nwaku/commit/fd83b42f)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/master/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + +## v0.33.1 (2024-10-03) + +### Bug fixes + +* Fix out connections leak ([3077](https://github.com/waku-org/nwaku/pull/3077)) ([eb2bbae6](https://github.com/waku-org/nwaku/commit/eb2bbae6)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + +## v0.33.0 (2024-09-30) + +#### Notes: + +* The `--pubsub-topic` CLI configuration has been deprecated and support for it will be removed on release v0.35.0. In order to migrate, please use the `--shard` configuration instead. For example, instead of `--pubsub-topic=/waku/2/rs/<cluster-id>/<shard-id>`, use `--cluster-id=<cluster-id>` once and `--shard=<shard-id>` for each subscribed shard +* The `--rest-private` CLI configuration has been removed. Please delete any reference to it when running your nodes +* Introduced the `--reliability` CLI configuration, activating the new experimental StoreV3 message confirmation protocol +* DOS protection configurations of non-relay, req/resp protocols are changed + * `--request-rate-limit` and `--request-rate-period` options are no longer supported. + * `--rate-limit` CLI configuration is now available. + - The new flag can describe various rate-limit requirements for each protocol supported. The setting can be repeated, each instance can define exactly one rate-limit option. + - Format is `<protocol>:volume/period` + - If protocol is not given, settings will be taken as default for un-set protocols. Ex: 80/2s + - Supported protocols are: lightpush|filter|px|store|storev2|storev3 + - `volume` must be an integer value, representing number of requests over the period of time allowed.
+ - `period ` must be an integer with defined unit as one of h|m|s|ms + - If not set, no rate limit will be applied to request/response protocols, except for the filter protocol. + + +### Release highlights + +* a new experimental reliability protocol has been implemented, leveraging StoreV3 to confirm message delivery +* Peer Exchange protocol can now be protected by rate-limit boundary checks. +* Fine-grained configuration of DOS protection is available with this release. See, "Notes" above. + +### Bug Fixes + +- rejecting excess relay connections ([#3063](https://github.com/waku-org/nwaku/issues/3063)) ([8b0884c7](https://github.com/waku-org/nwaku/commit/8b0884c7)) +- make Peer Exchange's rpc status_code optional for backward compatibility ([#3059](https://github.com/waku-org/nwaku/pull/3059)) ([5afa9b13](https://github.com/waku-org/nwaku/commit/5afa9b13)) +- px protocol decode - do not treat missing response field as error ([#3054](https://github.com/waku-org/nwaku/issues/3054)) ([9b445ac4](https://github.com/waku-org/nwaku/commit/9b445ac4)) +- setting up node with modified config ([#3036](https://github.com/waku-org/nwaku/issues/3036)) ([8f289925](https://github.com/waku-org/nwaku/commit/8f289925)) +- get back health check for postgres legacy ([#3010](https://github.com/waku-org/nwaku/issues/3010)) ([5a0edff7](https://github.com/waku-org/nwaku/commit/5a0edff7)) +- libnegentropy integration ([#2996](https://github.com/waku-org/nwaku/issues/2996)) ([c3cb06ac](https://github.com/waku-org/nwaku/commit/c3cb06ac)) +- peer-exchange issue ([#2889](https://github.com/waku-org/nwaku/issues/2889)) ([43157102](https://github.com/waku-org/nwaku/commit/43157102)) + +### Changes + +- append current version in agentString which is used by the identify protocol ([#3057](https://github.com/waku-org/nwaku/pull/3057)) ([368bb3c1](https://github.com/waku-org/nwaku/commit/368bb3c1)) +- rate limit peer exchange protocol, enhanced response status in RPC 
([#3035](https://github.com/waku-org/nwaku/issues/3035)) ([0a7f16a3](https://github.com/waku-org/nwaku/commit/0a7f16a3)) +- Switch libnegentropy library build from shared to static linkage ([#3041](https://github.com/waku-org/nwaku/issues/3041)) ([83f25c3e](https://github.com/waku-org/nwaku/commit/83f25c3e)) +- libwaku reduce repetitive code by adding a template handling resp returns ([#3032](https://github.com/waku-org/nwaku/issues/3032)) ([1713f562](https://github.com/waku-org/nwaku/commit/1713f562)) +- libwaku - extending the library with peer_manager and peer_exchange features ([#3026](https://github.com/waku-org/nwaku/issues/3026)) ([5ea1cf0c](https://github.com/waku-org/nwaku/commit/5ea1cf0c)) +- use submodule nph in CI to check lint ([#3027](https://github.com/waku-org/nwaku/issues/3027)) ([ce9a8c46](https://github.com/waku-org/nwaku/commit/ce9a8c46)) +- deprecating pubsub topic ([#2997](https://github.com/waku-org/nwaku/issues/2997)) ([a3cd2a1a](https://github.com/waku-org/nwaku/commit/a3cd2a1a)) +- lightpush - error metric less variable by only setting a fixed string ([#3020](https://github.com/waku-org/nwaku/issues/3020)) ([d3e6717a](https://github.com/waku-org/nwaku/commit/d3e6717a)) +- enhance libpq management ([#3015](https://github.com/waku-org/nwaku/issues/3015)) ([45319f09](https://github.com/waku-org/nwaku/commit/45319f09)) +- per limit split of PostgreSQL queries ([#3008](https://github.com/waku-org/nwaku/issues/3008)) ([e1e05afb](https://github.com/waku-org/nwaku/commit/e1e05afb)) +- Added metrics to liteprotocoltester ([#3002](https://github.com/waku-org/nwaku/issues/3002)) ([8baf627f](https://github.com/waku-org/nwaku/commit/8baf627f)) +- extending store metrics ([#2995](https://github.com/waku-org/nwaku/issues/2995)) ([fd83b42f](https://github.com/waku-org/nwaku/commit/fd83b42f)) +- Better timing and requestId detail for slower store db queries ([#2994](https://github.com/waku-org/nwaku/issues/2994)) 
([e8a49b76](https://github.com/waku-org/nwaku/commit/e8a49b76)) +- remove unused setting from external_config.nim ([#3004](https://github.com/waku-org/nwaku/issues/3004)) ([fd84363e](https://github.com/waku-org/nwaku/commit/fd84363e)) +- delivery monitor for store v3 reliability protocol ([#2977](https://github.com/waku-org/nwaku/issues/2977)) ([0f68274c](https://github.com/waku-org/nwaku/commit/0f68274c)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + +## v0.32.0 (2024-08-30) + +#### Notes: + +* A new `discv5-only` CLI flag was introduced, which if set to true will perform optimizations for nodes that only run the DiscV5 service +* The `protected-topic` CLI config item has been deprecated in favor of the new `protected-shard` configuration. Protected topics are still supported and will be completely removed in two releases time for `v0.34.0` + +### Release highlights + +* Merged Nwaku Sync protocol for synchronizing store nodes +* Added Store Resume mechanism to retrieve messages sent when the node was offline + +### Features + +- Nwaku Sync ([#2403](https://github.com/waku-org/nwaku/issues/2403)) ([2cc86c51](https://github.com/waku-org/nwaku/commit/2cc86c51)) +- misc. 
updates for discovery network analysis ([#2930](https://github.com/waku-org/nwaku/issues/2930)) ([4340eb75](https://github.com/waku-org/nwaku/commit/4340eb75)) +- store resume ([#2919](https://github.com/waku-org/nwaku/issues/2919)) ([aed2a113](https://github.com/waku-org/nwaku/commit/aed2a113)) + +### Bug Fixes + +- return on insert error ([#2956](https://github.com/waku-org/nwaku/issues/2956)) ([5f0fbd78](https://github.com/waku-org/nwaku/commit/5f0fbd78)) +- network monitor improvements ([#2939](https://github.com/waku-org/nwaku/issues/2939)) ([80583237](https://github.com/waku-org/nwaku/commit/80583237)) +- add back waku discv5 metrics ([#2927](https://github.com/waku-org/nwaku/issues/2927)) ([e4e01fab](https://github.com/waku-org/nwaku/commit/e4e01fab)) +- update and shift unittest ([#2934](https://github.com/waku-org/nwaku/issues/2934)) ([08973add](https://github.com/waku-org/nwaku/commit/08973add)) +- handle rln-relay-message-limit ([#2867](https://github.com/waku-org/nwaku/issues/2867)) ([8d107b0d](https://github.com/waku-org/nwaku/commit/8d107b0d)) + +### Changes + +- libwaku retrieve my enr and adapt golang example ([#2987](https://github.com/waku-org/nwaku/issues/2987)) ([1ff9f1dd](https://github.com/waku-org/nwaku/commit/1ff9f1dd)) +- run `ANALYZE messages` regularly for better db performance ([#2986](https://github.com/waku-org/nwaku/issues/2986)) ([32f2d85d](https://github.com/waku-org/nwaku/commit/32f2d85d)) +- liteprotocoltester for simulation and for fleets ([#2813](https://github.com/waku-org/nwaku/issues/2813)) ([f4fa73e9](https://github.com/waku-org/nwaku/commit/f4fa73e9)) +- lock in nph version and add pre-commit hook ([#2938](https://github.com/waku-org/nwaku/issues/2938)) ([d63e3430](https://github.com/waku-org/nwaku/commit/d63e3430)) +- logging received message info via onValidated observer ([#2973](https://github.com/waku-org/nwaku/issues/2973)) ([e8bce67d](https://github.com/waku-org/nwaku/commit/e8bce67d)) +- deprecating protected topics 
in favor of protected shards ([#2983](https://github.com/waku-org/nwaku/issues/2983)) ([e51ffe07](https://github.com/waku-org/nwaku/commit/e51ffe07)) +- rename NsPubsubTopic ([#2974](https://github.com/waku-org/nwaku/issues/2974)) ([67439057](https://github.com/waku-org/nwaku/commit/67439057)) +- install dig ([#2975](https://github.com/waku-org/nwaku/issues/2975)) ([d24b56b9](https://github.com/waku-org/nwaku/commit/d24b56b9)) +- print WakuMessageHash as hex strings ([#2969](https://github.com/waku-org/nwaku/issues/2969)) ([2fd4eb62](https://github.com/waku-org/nwaku/commit/2fd4eb62)) +- updating dependencies for release 0.32.0 ([#2971](https://github.com/waku-org/nwaku/issues/2971)) ([dfd42a7c](https://github.com/waku-org/nwaku/commit/dfd42a7c)) +- bump negentropy to latest master ([#2968](https://github.com/waku-org/nwaku/issues/2968)) ([b36cb075](https://github.com/waku-org/nwaku/commit/b36cb075)) +- keystore: verbose error message when credential is not found ([#2943](https://github.com/waku-org/nwaku/issues/2943)) ([0f11ee14](https://github.com/waku-org/nwaku/commit/0f11ee14)) +- upgrade peer exchange mounting ([#2953](https://github.com/waku-org/nwaku/issues/2953)) ([42f1bed0](https://github.com/waku-org/nwaku/commit/42f1bed0)) +- replace statusim.net instances with status.im ([#2941](https://github.com/waku-org/nwaku/issues/2941)) ([f534549a](https://github.com/waku-org/nwaku/commit/f534549a)) +- updating doc reference to https rpc ([#2937](https://github.com/waku-org/nwaku/issues/2937)) ([bb7bba35](https://github.com/waku-org/nwaku/commit/bb7bba35)) +- Simplification of store legacy code ([#2931](https://github.com/waku-org/nwaku/issues/2931)) ([d4e8a0da](https://github.com/waku-org/nwaku/commit/d4e8a0da)) +- add peer filtering by cluster for waku peer exchange ([#2932](https://github.com/waku-org/nwaku/issues/2932)) ([b4618f98](https://github.com/waku-org/nwaku/commit/b4618f98)) +- return all connected peers from REST API 
([#2923](https://github.com/waku-org/nwaku/issues/2923)) ([a29eca77](https://github.com/waku-org/nwaku/commit/a29eca77)) +- adding lint job to the CI ([#2925](https://github.com/waku-org/nwaku/issues/2925)) ([086cc8ed](https://github.com/waku-org/nwaku/commit/086cc8ed)) +- improve sonda dashboard ([#2918](https://github.com/waku-org/nwaku/issues/2918)) ([6d385cef](https://github.com/waku-org/nwaku/commit/6d385cef)) +- Add new custom built and test target to make in order to enable easy build or test single nim modules ([#2913](https://github.com/waku-org/nwaku/issues/2913)) ([ad25f437](https://github.com/waku-org/nwaku/commit/ad25f437)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | +| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` | + +## v0.31.1 (2024-08-02) + +### Changes + +- Optimize hash queries with lookup table ([#2933](https://github.com/waku-org/nwaku/issues/2933)) ([6463885bf](https://github.com/waku-org/nwaku/commit/6463885bf)) + +### Bug fixes + +* Use of detach finalize when needed [2966](https://github.com/waku-org/nwaku/pull/2966) +* Prevent legacy store from creating new partitions as that approach blocked the database. +[2931](https://github.com/waku-org/nwaku/pull/2931) + +* lightpush better feedback in case the lightpush service node does not have peers [2951](https://github.com/waku-org/nwaku/pull/2951) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`WAKU2-STORE`](https://github.com/waku-org/specs/blob/master/standards/core/store.md) | `draft` | `/vac/waku/store-query/3.0.0` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +## v0.31.0 (2024-07-16) +### Notes + +* Named sharding has been deprecated in favor of static sharding. Topics in formats other than `/waku/2/rs//` are no longer supported + +### Features + +- DOS protection of non relay protocols - rate limit phase3 (#2897) ([ba418ab5b](https://github.com/waku-org/nwaku/commit/ba418ab5b)) +- sonda tool (#2893) ([e269dca9c](https://github.com/waku-org/nwaku/commit/e269dca9c)) +- add proper per shard bandwidth metric calculation (#2851) ([8f14c0473](https://github.com/waku-org/nwaku/commit/8f14c0473)) + +### Bug Fixes + +- bug(storev3): can't advance cursor [#2745](https://github.com/waku-org/nwaku/issues/2745) +- chore(storev3): only select the messageHash column when using a store query with include_data: false [#2637](https://github.com/waku-org/nwaku/issues/2637) +- rln_keystore_generator improve error handling for unrecoverable failure (#2881) ([1c9eb2741](https://github.com/waku-org/nwaku/commit/1c9eb2741)) +- duplicate message forwarding in filter service (#2842) ([99149ea9d](https://github.com/waku-org/nwaku/commit/99149ea9d)) +- only set disconnect time on left event (#2831) ([01050138c](https://github.com/waku-org/nwaku/commit/01050138c)) +- adding peer exchange peers to the peerStore (#2824) ([325e13169](https://github.com/waku-org/nwaku/commit/325e13169)) +- ci use --tags to match non-annotated tags (#2814) 
([317c83dc1](https://github.com/waku-org/nwaku/commit/317c83dc1)) +- update peers ENRs in peer store in case they are updated (#2818) ([cda18f96c](https://github.com/waku-org/nwaku/commit/cda18f96c)) +- mount metadata in wakucanary (#2793) ([3b27aee82](https://github.com/waku-org/nwaku/commit/3b27aee82)) + +### Changes + +- setting filter handling logs to trace (#2914) ([5c539fe13](https://github.com/waku-org/nwaku/commit/5c539fe13)) +- enhance postgres and retention policy logs (#2884) ([71ee42de5](https://github.com/waku-org/nwaku/commit/71ee42de5)) +- improving logging under debugDiscv5 flag (#2899) ([8578fb0c3](https://github.com/waku-org/nwaku/commit/8578fb0c3)) +- archive and drivers refactor (#2761) ([f54ba10bc](https://github.com/waku-org/nwaku/commit/f54ba10bc)) +- new release process to include Status fleets (#2825) ([4264666a3](https://github.com/waku-org/nwaku/commit/4264666a3)) +- sqlite make sure code is always run (#2891) ([4ac4ab2a4](https://github.com/waku-org/nwaku/commit/4ac4ab2a4)) +- deprecating named sharding (#2723) ([e1518cf9f](https://github.com/waku-org/nwaku/commit/e1518cf9f)) +- bump dependencies for v0.31.0 (#2885) ([fd6a71cdd](https://github.com/waku-org/nwaku/commit/fd6a71cdd)) +- refactor relative path to better absolute (#2861) ([8bfad3ab4](https://github.com/waku-org/nwaku/commit/8bfad3ab4)) +- saving agent and protoVersion in peerStore (#2860) ([cae0c7e37](https://github.com/waku-org/nwaku/commit/cae0c7e37)) +- unit test for duplicate message push (#2852) ([31c632e42](https://github.com/waku-org/nwaku/commit/31c632e42)) +- remove all pre-nim-1.6 deadcode from codebase (#2857) ([9bd8c33ae](https://github.com/waku-org/nwaku/commit/9bd8c33ae)) +- nim-chronos bump submodule (#2850) ([092add1ca](https://github.com/waku-org/nwaku/commit/092add1ca)) +- ignore arbitrary data stored in `multiaddrs` enr key (#2853) ([76d5b2642](https://github.com/waku-org/nwaku/commit/76d5b2642)) +- add origin to peers admin endpoint (#2848) 
([7205f95cf](https://github.com/waku-org/nwaku/commit/7205f95cf)) +- add discv5 logs (#2811) ([974b8a39a](https://github.com/waku-org/nwaku/commit/974b8a39a)) +- archive.nim - increase the max limit of content topics per query to 100 (#2846) ([a05fa0691](https://github.com/waku-org/nwaku/commit/a05fa0691)) +- update content-topic parsing for filter (#2835) ([733edae43](https://github.com/waku-org/nwaku/commit/733edae43)) +- better descriptive log (#2826) ([94947a850](https://github.com/waku-org/nwaku/commit/94947a850)) +- zerokit: bump submodule (#2830) ([c483acee3](https://github.com/waku-org/nwaku/commit/c483acee3)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`WAKU2-STORE`](https://github.com/waku-org/specs/blob/master/standards/core/store.md) | `draft` | `/vac/waku/store-query/3.0.0` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + + +## v0.30.2 (2024-07-12) + +### Release highlights + +* RLN message limit to 100 messages per epoch. +* Avoid exclusive access when creating new partitions in the PostgreSQL messages table. + +### Changes + +- chore(rln): rln message limit to 100 ([#2883](https://github.com/waku-org/nwaku/pull/2883)) +- fix: postgres_driver better partition creation without exclusive access [28bdb70b](https://github.com/waku-org/nwaku/commit/28bdb70be46d3fb3a6f992b3f9f2de1defd85a30) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +## v0.30.1 (2024-07-03) + +### Notes + +* Before upgrading to this version, if you are currently using RLN, make sure to remove your existing `keystore` folder and `rln_tree` +and start your installation from scratch, as +explained in [nwaku-compose](https://github.com/waku-org/nwaku-compose/blob/1b56575df9ddb904af0941a19ea1df3d36bfddfa/README.md). + +### Release highlights + +* RLN_v2 is used. The maximum rate can be set to `N` messages per epoch, instead of just one message per epoch. See [this](https://github.com/waku-org/nwaku/issues/2345) for more details. Notice that we established an epoch of 10 minutes. 
+ + +### Changes + +- rln-relay: add chain-id flag to wakunode and restrict usage if mismatches rpc provider ([#2858](https://github.com/waku-org/nwaku/pull/2858)) +- rln: fix nullifierlog vulnerability ([#2855](https://github.com/waku-org/nwaku/pull/2855)) +- chore: add TWN parameters for RLNv2 ([#2843](https://github.com/waku-org/nwaku/pull/2843)) +- fix(rln-relay): clear nullifier log only if length is over max epoch gap ([#2836](https://github.com/waku-org/nwaku/pull/2836)) +- rlnv2: clean fork of rlnv2 ([#2828](https://github.com/waku-org/nwaku/issues/2828)) ([a02832fe](https://github.com/waku-org/nwaku/commit/a02832fe)) +- zerokit: bump submodule ([#2830](https://github.com/waku-org/nwaku/issues/2830)) ([bd064882](https://github.com/waku-org/nwaku/commit/bd064882)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +## v0.29.0 (2024-06-19) + +## What's Changed + +Notes: + +* Named sharding will be deprecated in favor of static sharding. Topics in formats other than `/waku/2/rs//` will stop being supported starting from `v0.31.0` + +Release highlights: + +* Android support in libwaku +* Discovery is available in libwaku +* New LiteProcotolTester tool +* RLN proofs as a lightpush service + +### Features + +- RLN proofs as a lightpush service ([#2768](https://github.com/waku-org/nwaku/issues/2768)) ([0561e5bd](https://github.com/waku-org/nwaku/commit/0561e5bd)) +- Push newly released nwaku image with latest-release tag ([#2732](https://github.com/waku-org/nwaku/issues/2732)) ([736ce1cb](https://github.com/waku-org/nwaku/commit/736ce1cb)) +- Rln-relay: use arkzkey variant of zerokit ([#2681](https://github.com/waku-org/nwaku/issues/2681)) ([e7b0777d](https://github.com/waku-org/nwaku/commit/e7b0777d)) + +### Bug Fixes + +- Better sync lock in partition creation ([#2783](https://github.com/waku-org/nwaku/issues/2783)) ([8d3bbb1b](https://github.com/waku-org/nwaku/pull/2809/commits/8d3bbb1b4e79b15c8cf18bb91d366e9ec1153301)) +- Multi nat initialization causing dead lock in waku tests + serialize test runs to avoid timing and port occupied issues ([#2799](https://github.com/waku-org/nwaku/issues/2799)) ([5989de88](https://github.com/waku-org/nwaku/commit/5989de88)) +- Increase on chain group manager starting balance ([#2795](https://github.com/waku-org/nwaku/issues/2795)) 
([e72bb7e7](https://github.com/waku-org/nwaku/commit/e72bb7e7)) +- More detailed logs to differentiate shards with peers ([#2794](https://github.com/waku-org/nwaku/issues/2794)) ([55a87d21](https://github.com/waku-org/nwaku/commit/55a87d21)) +- waku_archive: only allow a single instance to execute migrations ([#2736](https://github.com/waku-org/nwaku/issues/2736)) ([88b8e186](https://github.com/waku-org/nwaku/commit/88b8e186)) +- Move postgres related tests under linux conditional ([57ecb3e0](https://github.com/waku-org/nwaku/commit/57ecb3e0)) +- Invalid cursor returning messages ([#2724](https://github.com/waku-org/nwaku/issues/2724)) ([a65b13fc](https://github.com/waku-org/nwaku/commit/a65b13fc)) +- Do not print the db url on error ([#2725](https://github.com/waku-org/nwaku/issues/2725)) ([40296f9d](https://github.com/waku-org/nwaku/commit/40296f9d)) +- Use `when` instead of `if` for adding soname on linux ([#2721](https://github.com/waku-org/nwaku/issues/2721)) ([cbaefeb3](https://github.com/waku-org/nwaku/commit/cbaefeb3)) +- Store v3 bug fixes ([#2718](https://github.com/waku-org/nwaku/issues/2718)) ([4a6ec468](https://github.com/waku-org/nwaku/commit/4a6ec468)) + + +### Changes + +- Set msg_hash logs to notice level ([#2737](https://github.com/waku-org/nwaku/issues/2737)) ([f5d87c5b](https://github.com/waku-org/nwaku/commit/f5d87c5b)) +- Minor enhancements ([#2789](https://github.com/waku-org/nwaku/issues/2789)) ([31bd6d71](https://github.com/waku-org/nwaku/commit/31bd6d71)) +- postgres_driver - acquire/release advisory lock when creating partitions ([#2784](https://github.com/waku-org/nwaku/issues/2784)) ([c5d19c44](https://github.com/waku-org/nwaku/commit/c5d19c44)) +- Setting fail-fast to false in matrixed github actions ([#2787](https://github.com/waku-org/nwaku/issues/2787)) ([005349cc](https://github.com/waku-org/nwaku/commit/005349cc)) +- Simple link refactor ([#2781](https://github.com/waku-org/nwaku/issues/2781)) 
([77adfccd](https://github.com/waku-org/nwaku/commit/77adfccd)) +- Improving liteprotocolteseter stats ([#2750](https://github.com/waku-org/nwaku/issues/2750)) ([4c7c8a15](https://github.com/waku-org/nwaku/commit/4c7c8a15)) +- Extract common prefixes into a constant for multiple query ([#2747](https://github.com/waku-org/nwaku/issues/2747)) ([dfc979a8](https://github.com/waku-org/nwaku/commit/dfc979a8)) +- wakucanary: fix fitler protocol, add storev3 ([#2735](https://github.com/waku-org/nwaku/issues/2735)) ([e0079cd0](https://github.com/waku-org/nwaku/commit/e0079cd0)) +- Bump nim-libp2p version ([#2661](https://github.com/waku-org/nwaku/issues/2661)) ([6fbab633](https://github.com/waku-org/nwaku/commit/6fbab633)) +- Link validation process docs to the release process file ([#2714](https://github.com/waku-org/nwaku/issues/2714)) ([ebe69be8](https://github.com/waku-org/nwaku/commit/ebe69be8)) +- Android support ([#2554](https://github.com/waku-org/nwaku/issues/2554)) ([1e2aa57a](https://github.com/waku-org/nwaku/commit/1e2aa57a)) +- Discovery in libwaku ([#2711](https://github.com/waku-org/nwaku/issues/2711)) ([74646848](https://github.com/waku-org/nwaku/commit/74646848)) +- libwaku - allow to properly set the log level in libwaku and unify a little ([#2708](https://github.com/waku-org/nwaku/issues/2708)) ([3faffdbc](https://github.com/waku-org/nwaku/commit/3faffdbc)) +- waku_discv5, peer_manager - add more logs help debug discovery issues ([#2705](https://github.com/waku-org/nwaku/issues/2705)) ([401630ee](https://github.com/waku-org/nwaku/commit/401630ee)) +- Generic change to reduce the number of compilation warnings ([#2696](https://github.com/waku-org/nwaku/issues/2696)) ([78132dc1](https://github.com/waku-org/nwaku/commit/78132dc1)) + + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| 
[`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +## v0.28.1 (2024-05-29) + +This patch release fixes the following bug: +- Store node does not retrieve messages because the meta field is missing in queries. + +### Bug Fix + +- Commit that fixes the bug [8b42f199](https://github.com/waku-org/nwaku/commit/8b42f199baf4e00794c4cec4d8601c3f6c330a20) + +This is a patch release that is fully backwards-compatible with release `v0.28.0`. + +It supports the same [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + + +## v0.28.0 (2024-05-22) + +## What's Changed + +Release highlights: + +* Store V3 has been merged +* Implemented an enhanced and more robust node health check mechanism +* Introduced the Waku object to libwaku in order to setup a node and its protocols + +### Features + +- Added message size check before relay for lightpush ([#2695](https://github.com/waku-org/nwaku/issues/2695)) ([9dfdfa27](https://github.com/waku-org/nwaku/commit/9dfdfa27)) +- adding json string support to bindings config ([#2685](https://github.com/waku-org/nwaku/issues/2685)) ([be5471c6](https://github.com/waku-org/nwaku/commit/be5471c6)) +- Added flexible rate limit checks for store, legacy store and lightpush ([#2668](https://github.com/waku-org/nwaku/issues/2668)) ([026d804a](https://github.com/waku-org/nwaku/commit/026d804a)) +- store v3 return pubsub topics ([#2676](https://github.com/waku-org/nwaku/issues/2676)) ([d700006a](https://github.com/waku-org/nwaku/commit/d700006a)) +- supporting meta field in store ([#2609](https://github.com/waku-org/nwaku/issues/2609)) ([a46d4451](https://github.com/waku-org/nwaku/commit/a46d4451)) +- store v3 ([#2431](https://github.com/waku-org/nwaku/issues/2431)) ([0b0fbfad](https://github.com/waku-org/nwaku/commit/0b0fbfad)) + +### Bug Fixes + +- use await instead of waitFor in async tests ([#2690](https://github.com/waku-org/nwaku/issues/2690)) ([a37c9ba9](https://github.com/waku-org/nwaku/commit/a37c9ba9)) +- message cache removal crash 
([#2682](https://github.com/waku-org/nwaku/issues/2682)) ([fa26d05f](https://github.com/waku-org/nwaku/commit/fa26d05f)) +- add `meta` to sqlite migration scripts ([#2675](https://github.com/waku-org/nwaku/issues/2675)) ([82f95999](https://github.com/waku-org/nwaku/commit/82f95999)) +- content_script_version_4.nim: migration failed when dropping unexisting constraing ([#2672](https://github.com/waku-org/nwaku/issues/2672)) ([38f8b08c](https://github.com/waku-org/nwaku/commit/38f8b08c)) +- **filter:** log is too large ([#2665](https://github.com/waku-org/nwaku/issues/2665)) ([cee020f2](https://github.com/waku-org/nwaku/commit/cee020f2)) +- issue [#2644](https://github.com/waku-org/nwaku/issues/2644) properly ([#2663](https://github.com/waku-org/nwaku/issues/2663)) ([853ec186](https://github.com/waku-org/nwaku/commit/853ec186)) +- store v3 validate cursor & remove messages ([#2636](https://github.com/waku-org/nwaku/issues/2636)) ([e03d1165](https://github.com/waku-org/nwaku/commit/e03d1165)) +- **waku_keystore:** sigsegv on different appInfo ([#2654](https://github.com/waku-org/nwaku/issues/2654)) ([5dd645cf](https://github.com/waku-org/nwaku/commit/5dd645cf)) +- **rln-relay:** persist metadata every batch during initial sync ([#2649](https://github.com/waku-org/nwaku/issues/2649)) ([a9e19efd](https://github.com/waku-org/nwaku/commit/a9e19efd)) +- handle named sharding in enr ([#2647](https://github.com/waku-org/nwaku/issues/2647)) ([8d1b0834](https://github.com/waku-org/nwaku/commit/8d1b0834)) +- parse shards properly in enr config for non twn ([#2633](https://github.com/waku-org/nwaku/issues/2633)) ([6e6cb298](https://github.com/waku-org/nwaku/commit/6e6cb298)) +- proto field numbers & status desc ([#2632](https://github.com/waku-org/nwaku/issues/2632)) ([843fe217](https://github.com/waku-org/nwaku/commit/843fe217)) +- missing rate limit setting for legacy store protocol ([#2631](https://github.com/waku-org/nwaku/issues/2631)) 
([5f65565c](https://github.com/waku-org/nwaku/commit/5f65565c)) +- **rln-relay:** enforce error callback to remove exception raised from retryWrapper ([#2622](https://github.com/waku-org/nwaku/issues/2622)) ([9c9883a6](https://github.com/waku-org/nwaku/commit/9c9883a6)) +- **rln-relay:** increase retries for 1 minute recovery time ([#2614](https://github.com/waku-org/nwaku/issues/2614)) ([1a23700d](https://github.com/waku-org/nwaku/commit/1a23700d)) +- **ci:** unique comment_tag to reference rln version ([#2613](https://github.com/waku-org/nwaku/issues/2613)) ([2c01fa0f](https://github.com/waku-org/nwaku/commit/2c01fa0f)) +- don't use WakuMessageSize in req/resp protocols ([#2601](https://github.com/waku-org/nwaku/issues/2601)) ([e61e4ff9](https://github.com/waku-org/nwaku/commit/e61e4ff9)) +- create options api for cors preflight request ([#2598](https://github.com/waku-org/nwaku/issues/2598)) ([768c61b1](https://github.com/waku-org/nwaku/commit/768c61b1)) +- node restart test issue ([#2576](https://github.com/waku-org/nwaku/issues/2576)) ([4a8e62ac](https://github.com/waku-org/nwaku/commit/4a8e62ac)) +- **doc:** update REST API docs ([#2581](https://github.com/waku-org/nwaku/issues/2581)) ([006d43ae](https://github.com/waku-org/nwaku/commit/006d43ae)) + +### Changes + +- move code from wakunode2 to a more generic place, waku ([#2670](https://github.com/waku-org/nwaku/issues/2670)) ([840e0122](https://github.com/waku-org/nwaku/commit/840e0122)) +- closing ping streams ([#2692](https://github.com/waku-org/nwaku/issues/2692)) ([7d4857ea](https://github.com/waku-org/nwaku/commit/7d4857ea)) +- Postgres enhance get oldest timestamp ([#2687](https://github.com/waku-org/nwaku/issues/2687)) ([8451cf8e](https://github.com/waku-org/nwaku/commit/8451cf8e)) +- **rln-relay:** health check should account for window of roots ([#2664](https://github.com/waku-org/nwaku/issues/2664)) ([6a1af922](https://github.com/waku-org/nwaku/commit/6a1af922)) +- updating TWN bootstrap fleet to 
waku.sandbox ([#2638](https://github.com/waku-org/nwaku/issues/2638)) ([22f64bbd](https://github.com/waku-org/nwaku/commit/22f64bbd)) +- simplify migration script postgres version_4 ([#2674](https://github.com/waku-org/nwaku/issues/2674)) ([91c85738](https://github.com/waku-org/nwaku/commit/91c85738)) +- big refactor to add waku component in libwaku instead of only waku node ([#2658](https://github.com/waku-org/nwaku/issues/2658)) ([2463527b](https://github.com/waku-org/nwaku/commit/2463527b)) +- simplify app.nim and move discovery items to appropriate modules ([#2657](https://github.com/waku-org/nwaku/issues/2657)) ([404810aa](https://github.com/waku-org/nwaku/commit/404810aa)) +- log enhancement for message reliability analysis ([#2640](https://github.com/waku-org/nwaku/issues/2640)) ([d5e0e4a9](https://github.com/waku-org/nwaku/commit/d5e0e4a9)) +- metrics server. Simplify app.nim module ([#2650](https://github.com/waku-org/nwaku/issues/2650)) ([4a110f65](https://github.com/waku-org/nwaku/commit/4a110f65)) +- change nim-libp2p branch from unstable to master ([#2648](https://github.com/waku-org/nwaku/issues/2648)) ([d09c9c91](https://github.com/waku-org/nwaku/commit/d09c9c91)) +- Enabling to use a full node for lightpush via rest api without lightpush client configured ([#2626](https://github.com/waku-org/nwaku/issues/2626)) ([2a4c0f15](https://github.com/waku-org/nwaku/commit/2a4c0f15)) +- **rln-relay:** resultify rln-relay 1/n ([#2607](https://github.com/waku-org/nwaku/issues/2607)) ([1d7ff288](https://github.com/waku-org/nwaku/commit/1d7ff288)) +- ci.yml - avoid calling brew link libpq --force on macos ([#2627](https://github.com/waku-org/nwaku/issues/2627)) ([05f332ed](https://github.com/waku-org/nwaku/commit/05f332ed)) +- an enhanced version of convenient node health check script ([#2624](https://github.com/waku-org/nwaku/issues/2624)) ([7f8d8e80](https://github.com/waku-org/nwaku/commit/7f8d8e80)) +- **rln-db-inspector:** add more logging to find zero leaf 
indices ([#2617](https://github.com/waku-org/nwaku/issues/2617)) ([40752b1e](https://github.com/waku-org/nwaku/commit/40752b1e)) +- addition of waku_api/rest/builder.nim and reduce app.nim ([#2623](https://github.com/waku-org/nwaku/issues/2623)) ([b28207ab](https://github.com/waku-org/nwaku/commit/b28207ab)) +- Separation of node health and initialization state from rln_relay ([#2612](https://github.com/waku-org/nwaku/issues/2612)) ([6d135b0d](https://github.com/waku-org/nwaku/commit/6d135b0d)) +- enabling rest api as default ([#2600](https://github.com/waku-org/nwaku/issues/2600)) ([6bc79bc7](https://github.com/waku-org/nwaku/commit/6bc79bc7)) +- move app.nim and networks_config.nim to waku/factory ([#2608](https://github.com/waku-org/nwaku/issues/2608)) ([1ba9df4b](https://github.com/waku-org/nwaku/commit/1ba9df4b)) +- workflow to autoassign PR ([#2604](https://github.com/waku-org/nwaku/issues/2604)) ([10d36c39](https://github.com/waku-org/nwaku/commit/10d36c39)) +- start moving discovery modules to waku/discovery ([#2587](https://github.com/waku-org/nwaku/issues/2587)) ([828583ad](https://github.com/waku-org/nwaku/commit/828583ad)) +- don't create docker images for users without org's secrets ([#2585](https://github.com/waku-org/nwaku/issues/2585)) ([51ec12be](https://github.com/waku-org/nwaku/commit/51ec12be)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.27.0 (2024-04-19) + +> **Note:** + +> - Filter v1 protocol and its REST-API access have been deprecated. +> - A new field of the `WakuMetadataRequest` protobuf for shards was introduced. The old shards field (2) will be deprecated in 2 releases time +> - CLI flags `--requestRateLimit` and `--requestRatePeriod` have been added for rate limiting configuration. Period is measured in seconds. Limits are measured per protocol per period of time. Exceeding the limit will result in a TOO_MANY_REQUESTS (429) response. 
+ +## What's Changed + +Release highlights: + +* Introduced configurable rate limiting for lightpush and store requests +* Sync time has been considerably reduced for node initialization +* Significant refactors were made to node initialization and `WakuArchive` logic as work towards C-bindings and Store V3 features + +### Features + +- Added simple, configurable rate limit for lightpush and store-query ([#2390](https://github.com/waku-org/nwaku/issues/2390)) ([a00f350c](https://github.com/waku-org/nwaku/commit/a00f350c)) +- examples/golang/waku.go add new example ([#2559](https://github.com/waku-org/nwaku/issues/2559)) ([8d66a548](https://github.com/waku-org/nwaku/commit/8d66a548)) +- **c-bindings:** rln relay ([#2544](https://github.com/waku-org/nwaku/issues/2544)) ([2aa835e3](https://github.com/waku-org/nwaku/commit/2aa835e3)) +- **incentivization:** add codec for eligibility proof and status ([#2419](https://github.com/waku-org/nwaku/issues/2419)) ([65530264](https://github.com/waku-org/nwaku/commit/65530264)) +- **rest:** add support to ephemeral field ([#2525](https://github.com/waku-org/nwaku/issues/2525)) ([c734f60d](https://github.com/waku-org/nwaku/commit/c734f60d)) +- archive update for store v3 ([#2451](https://github.com/waku-org/nwaku/issues/2451)) ([505479b8](https://github.com/waku-org/nwaku/commit/505479b8)) +- **c-bindings:** add function to dealloc nodes ([#2499](https://github.com/waku-org/nwaku/issues/2499)) ([8341864d](https://github.com/waku-org/nwaku/commit/8341864d)) + +### Bug Fixes + +- **rln-relay:** reduce sync time ([#2577](https://github.com/waku-org/nwaku/issues/2577)) ([480a62fa](https://github.com/waku-org/nwaku/commit/480a62fa)) +- rest store: content_topic -> contentTopic in the response ([#2584](https://github.com/waku-org/nwaku/issues/2584)) ([d2578553](https://github.com/waku-org/nwaku/commit/d2578553)) +- **c-bindings:** rln credential path key ([#2564](https://github.com/waku-org/nwaku/issues/2564)) 
([3d752b11](https://github.com/waku-org/nwaku/commit/3d752b11)) +- cluster-id 0 disc5 issue ([#2562](https://github.com/waku-org/nwaku/issues/2562)) ([a76c9587](https://github.com/waku-org/nwaku/commit/a76c9587)) +- regex for rpc endpoint ([#2563](https://github.com/waku-org/nwaku/issues/2563)) ([c87545d5](https://github.com/waku-org/nwaku/commit/c87545d5)) +- **rln:** set a minimum epoch gap ([#2555](https://github.com/waku-org/nwaku/issues/2555)) ([b5e4795f](https://github.com/waku-org/nwaku/commit/b5e4795f)) +- fix regresion + remove deprecated flag ([#2556](https://github.com/waku-org/nwaku/issues/2556)) ([47ad0fb0](https://github.com/waku-org/nwaku/commit/47ad0fb0)) +- **networkmanager:** regularly disconnect from random peers ([#2553](https://github.com/waku-org/nwaku/issues/2553)) ([70c53fc0](https://github.com/waku-org/nwaku/commit/70c53fc0)) +- remove subscription queue limit ([#2551](https://github.com/waku-org/nwaku/issues/2551)) ([94ff5eab](https://github.com/waku-org/nwaku/commit/94ff5eab)) +- peer_manager - extend the number of connection requests to known peers ([#2534](https://github.com/waku-org/nwaku/issues/2534)) ([2173fe22](https://github.com/waku-org/nwaku/commit/2173fe22)) +- **2491:** Fix metadata protocol disconnecting light nodes ([#2533](https://github.com/waku-org/nwaku/issues/2533)) ([33774fad](https://github.com/waku-org/nwaku/commit/33774fad)) +- **rest:** filter/v2/subscriptions response ([#2529](https://github.com/waku-org/nwaku/issues/2529)) ([7aea2d4f](https://github.com/waku-org/nwaku/commit/7aea2d4f)) +- **store:** retention policy regex ([#2532](https://github.com/waku-org/nwaku/issues/2532)) ([23a291b3](https://github.com/waku-org/nwaku/commit/23a291b3)) +- enable autosharding in any cluster ([#2505](https://github.com/waku-org/nwaku/issues/2505)) ([5a225809](https://github.com/waku-org/nwaku/commit/5a225809)) +- introduce new field for shards in metadata protocol ([#2511](https://github.com/waku-org/nwaku/issues/2511)) 
([f9f92b7d](https://github.com/waku-org/nwaku/commit/f9f92b7d)) +- **rln-relay:** handle empty metadata returned by getMetadata proc ([#2516](https://github.com/waku-org/nwaku/issues/2516)) ([1274b15d](https://github.com/waku-org/nwaku/commit/1274b15d)) + +### Changes + +- adding migration script adding i_query index ([#2578](https://github.com/waku-org/nwaku/issues/2578)) ([4117fe65](https://github.com/waku-org/nwaku/commit/4117fe65)) +- bumping chronicles version ([#2583](https://github.com/waku-org/nwaku/issues/2583)) ([a04e0d99](https://github.com/waku-org/nwaku/commit/a04e0d99)) +- add ARM64 support for Linux/MacOS ([#2580](https://github.com/waku-org/nwaku/issues/2580)) ([269139cf](https://github.com/waku-org/nwaku/commit/269139cf)) +- **rln:** update submodule + rln patch version ([#2574](https://github.com/waku-org/nwaku/issues/2574)) ([24f6fed8](https://github.com/waku-org/nwaku/commit/24f6fed8)) +- bumping dependencies for 0.27.0 ([#2572](https://github.com/waku-org/nwaku/issues/2572)) ([f68ac792](https://github.com/waku-org/nwaku/commit/f68ac792)) +- **c-bindings:** node initialization ([#2547](https://github.com/waku-org/nwaku/issues/2547)) ([6d0f6d82](https://github.com/waku-org/nwaku/commit/6d0f6d82)) +- remove deprecated legacy filter protocol ([#2507](https://github.com/waku-org/nwaku/issues/2507)) ([e8613172](https://github.com/waku-org/nwaku/commit/e8613172)) +- switch wakuv2 to waku fleet ([#2519](https://github.com/waku-org/nwaku/issues/2519)) ([18a05359](https://github.com/waku-org/nwaku/commit/18a05359)) +- create nph.md ([#2536](https://github.com/waku-org/nwaku/issues/2536)) ([a576e624](https://github.com/waku-org/nwaku/commit/a576e624)) +- Better postgres duplicate insert ([#2535](https://github.com/waku-org/nwaku/issues/2535)) ([693a1778](https://github.com/waku-org/nwaku/commit/693a1778)) +- add 150 kB to msg size histogram metric ([#2430](https://github.com/waku-org/nwaku/issues/2430)) 
([2c1391d3](https://github.com/waku-org/nwaku/commit/2c1391d3)) +- content_script_version_2: add simple protection and rename messages_backup if exists ([#2531](https://github.com/waku-org/nwaku/issues/2531)) ([c6c376b5](https://github.com/waku-org/nwaku/commit/c6c376b5)) +- **vendor:** update nim-libp2p path ([#2527](https://github.com/waku-org/nwaku/issues/2527)) ([3c823756](https://github.com/waku-org/nwaku/commit/3c823756)) +- adding node factory tests ([#2524](https://github.com/waku-org/nwaku/issues/2524)) ([a1b3e090](https://github.com/waku-org/nwaku/commit/a1b3e090)) +- factory cleanup ([#2523](https://github.com/waku-org/nwaku/issues/2523)) ([8d7eb3a6](https://github.com/waku-org/nwaku/commit/8d7eb3a6)) +- **rln-relay-v2:** wakunode testing + improvements ([#2501](https://github.com/waku-org/nwaku/issues/2501)) ([059cb975](https://github.com/waku-org/nwaku/commit/059cb975)) +- update CHANGELOG for v0.26.0 release ([#2518](https://github.com/waku-org/nwaku/issues/2518)) ([097cb362](https://github.com/waku-org/nwaku/commit/097cb362)) +- migrating logic from wakunode2.nim to node_factory.nim ([#2504](https://github.com/waku-org/nwaku/issues/2504)) ([dcc88ee0](https://github.com/waku-org/nwaku/commit/dcc88ee0)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.26.0 (2024-03-07) + +> **Note:** +> - JSON-RPC API has been removed completely. Instead we recommend you to utilize REST API endpoints that have same and extended functionality. +> Please have a look at Waku's REST-API reference: https://waku-org.github.io/waku-rest-api +> - Support for Cross-Origin-Resource-Sharing (CORS headers) is added for our REST-API services. This allows you to access our REST-API from a browser. +> New repeatable CLI flag is added by this feature: +> `--rest-allow-origin="example.com"` or `--rest-allow-origin="127.0.0.0:*"` +> Flag allows using wildcards (`*` and `?`) in the origin string. +> - Store protocol now has a better support for controlling DB size of Postgres store. This feature needs no user action. + +> **Announcement:** +> +> Please notice that from the next release (0.27.0) we will deprecate features. +> +> - We will decomission the Filter v1 protocol and its REST-API access. 
+ +### Features + +- Postgres partition implementation ([#2506](https://github.com/waku-org/nwaku/issues/2506)) ([161a10ec](https://github.com/waku-org/nwaku/commit/161a10ec)) +- **waku-stealth-commitments:** waku stealth commitment protocol ([#2490](https://github.com/waku-org/nwaku/issues/2490)) ([0def4904](https://github.com/waku-org/nwaku/commit/0def4904)) +- **bindings:** generate a random private key ([#2446](https://github.com/waku-org/nwaku/issues/2446)) ([56ff30ca](https://github.com/waku-org/nwaku/commit/56ff30ca)) +- prioritise yamux above mplex ([#2417](https://github.com/waku-org/nwaku/issues/2417)) ([ce151efc](https://github.com/waku-org/nwaku/commit/ce151efc)) +- supporting meta field in WakuMessage ([#2384](https://github.com/waku-org/nwaku/issues/2384)) ([3903f130](https://github.com/waku-org/nwaku/commit/3903f130)) +- `eventCallback` per wakunode and `userData` ([#2418](https://github.com/waku-org/nwaku/issues/2418)) ([707f3e8b](https://github.com/waku-org/nwaku/commit/707f3e8b)) +- **rln-relay-v2:** nonce/messageId manager ([#2413](https://github.com/waku-org/nwaku/issues/2413)) ([50308eda](https://github.com/waku-org/nwaku/commit/50308eda)) +- **networkmonitor:** add support for rln ([#2401](https://github.com/waku-org/nwaku/issues/2401)) ([9c0e9431](https://github.com/waku-org/nwaku/commit/9c0e9431)) +- **rln-relay-v2:** rln-keystore-generator updates ([#2392](https://github.com/waku-org/nwaku/issues/2392)) ([2d46c351](https://github.com/waku-org/nwaku/commit/2d46c351)) +- add yamux support ([#2397](https://github.com/waku-org/nwaku/issues/2397)) ([1b402667](https://github.com/waku-org/nwaku/commit/1b402667)) + +### Bug Fixes + +- **rln-relay:** make nullifier log abide by epoch ordering ([#2508](https://github.com/waku-org/nwaku/issues/2508)) ([beba14dc](https://github.com/waku-org/nwaku/commit/beba14dc)) +- **postgres:** import under feature flag ([#2500](https://github.com/waku-org/nwaku/issues/2500)) 
([e692edf6](https://github.com/waku-org/nwaku/commit/e692edf6)) +- notify Waku Metadata when Waku Filter subscribe to a topic ([#2493](https://github.com/waku-org/nwaku/issues/2493)) ([91e3f8cd](https://github.com/waku-org/nwaku/commit/91e3f8cd)) +- time on 32 bits architecture ([#2492](https://github.com/waku-org/nwaku/issues/2492)) ([0a751228](https://github.com/waku-org/nwaku/commit/0a751228)) +- return message id on `waku_relay_publish` ([#2485](https://github.com/waku-org/nwaku/issues/2485)) ([045091a9](https://github.com/waku-org/nwaku/commit/045091a9)) +- **bindings:** base64 payload and key for content topic ([#2435](https://github.com/waku-org/nwaku/issues/2435)) ([d01585e9](https://github.com/waku-org/nwaku/commit/d01585e9)) +- **rln-relay:** regex pattern match for extended domains ([#2444](https://github.com/waku-org/nwaku/issues/2444)) ([29b0c0b8](https://github.com/waku-org/nwaku/commit/29b0c0b8)) +- checking for keystore file existence ([#2427](https://github.com/waku-org/nwaku/issues/2427)) ([8f487a21](https://github.com/waku-org/nwaku/commit/8f487a21)) +- **rln-relay:** graceful shutdown with non-zero exit code ([#2429](https://github.com/waku-org/nwaku/issues/2429)) ([22026b7e](https://github.com/waku-org/nwaku/commit/22026b7e)) +- check max message size in validator according to configured value ([#2424](https://github.com/waku-org/nwaku/issues/2424)) ([731dfcbd](https://github.com/waku-org/nwaku/commit/731dfcbd)) +- **wakunode2:** move node config inside app init branch ([#2423](https://github.com/waku-org/nwaku/issues/2423)) ([0dac9f9d](https://github.com/waku-org/nwaku/commit/0dac9f9d)) + +### Changes + +- **rln_db_inspector:** include in wakunode2 binary ([#2292](https://github.com/waku-org/nwaku/issues/2292)) ([a9d0e481](https://github.com/waku-org/nwaku/commit/a9d0e481)) +- Update link to DNS discovery tutorial ([#2496](https://github.com/waku-org/nwaku/issues/2496)) ([9ef2eccb](https://github.com/waku-org/nwaku/commit/9ef2eccb)) +- 
**rln-relay-v2:** added tests for static rln-relay-v2 ([#2484](https://github.com/waku-org/nwaku/issues/2484)) ([5b174fb3](https://github.com/waku-org/nwaku/commit/5b174fb3)) +- moving node initialization code to node_factory.nim ([#2479](https://github.com/waku-org/nwaku/issues/2479)) ([361fe2cd](https://github.com/waku-org/nwaku/commit/361fe2cd)) +- Postgres migrations ([#2477](https://github.com/waku-org/nwaku/issues/2477)) ([560f949a](https://github.com/waku-org/nwaku/commit/560f949a)) +- **rln-relay-v2:** added tests for onchain rln-relay-v2 ([#2482](https://github.com/waku-org/nwaku/issues/2482)) ([88ff9282](https://github.com/waku-org/nwaku/commit/88ff9282)) +- remove json rpc ([#2416](https://github.com/waku-org/nwaku/issues/2416)) ([c994ee04](https://github.com/waku-org/nwaku/commit/c994ee04)) +- **ci:** use git describe for image version ([55ff6674](https://github.com/waku-org/nwaku/commit/55ff6674)) +- Implemented CORS handling for nwaku REST server ([#2470](https://github.com/waku-org/nwaku/issues/2470)) ([d832f92a](https://github.com/waku-org/nwaku/commit/d832f92a)) +- remove rln epoch hardcoding ([#2483](https://github.com/waku-org/nwaku/issues/2483)) ([3f4f6d7e](https://github.com/waku-org/nwaku/commit/3f4f6d7e)) +- **cbindings:** cbindings rust simple libwaku integration example ([#2089](https://github.com/waku-org/nwaku/issues/2089)) ([a4993005](https://github.com/waku-org/nwaku/commit/a4993005)) +- adding NIMFLAGS usage to readme ([#2469](https://github.com/waku-org/nwaku/issues/2469)) ([a1d5cbd9](https://github.com/waku-org/nwaku/commit/a1d5cbd9)) +- bumping nim-libp2p after yamux timeout fix ([#2468](https://github.com/waku-org/nwaku/issues/2468)) ([216531b0](https://github.com/waku-org/nwaku/commit/216531b0)) +- new proc to foster different size retention policy implementations ([#2463](https://github.com/waku-org/nwaku/issues/2463)) ([d5305282](https://github.com/waku-org/nwaku/commit/d5305282)) +- **rln-relay:** use anvil instead of ganache 
in onchain tests ([#2449](https://github.com/waku-org/nwaku/issues/2449)) ([f6332ac6](https://github.com/waku-org/nwaku/commit/f6332ac6)) +- bindings return multiaddress array ([#2461](https://github.com/waku-org/nwaku/issues/2461)) ([7aea145e](https://github.com/waku-org/nwaku/commit/7aea145e)) +- **ci:** fix IMAGE_NAME to use harbor.status.im ([b700d046](https://github.com/waku-org/nwaku/commit/b700d046)) +- **rln-relay:** remove wss support from node config ([#2442](https://github.com/waku-org/nwaku/issues/2442)) ([2060cfab](https://github.com/waku-org/nwaku/commit/2060cfab)) +- **ci:** reuse discord send function from library ([1151d50f](https://github.com/waku-org/nwaku/commit/1151d50f)) +- **rln-relay-v2:** add tests for serde ([#2421](https://github.com/waku-org/nwaku/issues/2421)) ([d0377056](https://github.com/waku-org/nwaku/commit/d0377056)) +- add stdef.h to libwaku.h ([#2409](https://github.com/waku-org/nwaku/issues/2409)) ([d58aca01](https://github.com/waku-org/nwaku/commit/d58aca01)) +- automatically generating certs if not provided (Waku Canary) ([#2408](https://github.com/waku-org/nwaku/issues/2408)) ([849d76d6](https://github.com/waku-org/nwaku/commit/849d76d6)) +- Simplify configuration for the waku network ([#2404](https://github.com/waku-org/nwaku/issues/2404)) ([985d092f](https://github.com/waku-org/nwaku/commit/985d092f)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.25.0 (2024-02-06) + +> **Note:** +> Waku Filter v2 now has three additional configuration options: +> `--filter-max-peers-to-serve=1000` drives how many peers can subscribe at once and +> `--filter-max-criteria=1000` defines the maximum number of subscription criteria stored per peer +> +> This release introduces a major change in Filter v2 protocol subscription management. +> From now on, each subscribed peer needs to refresh its living subscriptions by sending a SUBSCRIBER_PING message every 5 minutes by default, otherwise the peer's subscription will be removed. +> `--filter-subscription-timeout=300` defines a configurable timeout for the subscriptions (*in seconds*). +> +> New experimental feature, shard aware peer manager for relay protocol can be activated by the flag: +> `--relay-shard-manager=true|false` +> It is disabled by default. + +> **Announcement:** +> +> Please note that in the next release (0.26.0) we will deprecate the following features. +> +> - JSON-RPC API will be removed completely. Instead we recommend you to utilize REST API endpoints that provide the same and extended functionality. +> - We will retire websockets support for RLN on-chain group management. 
You are expected to use HTTP version of ETH_CLIENT_ADDRESS + +### Features + +- running validators in /relay/v1/auto/messages/{topic} ([#2394](https://github.com/waku-org/nwaku/issues/2394)) ([e4e147bc](https://github.com/waku-org/nwaku/commit/e4e147bc)) +- **rln-relay-v2:** update C FFI api's and serde ([#2385](https://github.com/waku-org/nwaku/issues/2385)) ([b88facd0](https://github.com/waku-org/nwaku/commit/b88facd0)) +- running validators in /relay/v1/messages/{pubsubTopic} ([#2373](https://github.com/waku-org/nwaku/issues/2373)) ([59d8b620](https://github.com/waku-org/nwaku/commit/59d8b620)) +- shard aware relay peer management ([#2332](https://github.com/waku-org/nwaku/issues/2332)) ([edca1df1](https://github.com/waku-org/nwaku/commit/edca1df1)) + +### Bug Fixes + +- adding rln validator as default ([#2367](https://github.com/waku-org/nwaku/issues/2367)) ([bb58a63a](https://github.com/waku-org/nwaku/commit/bb58a63a)) +- Fix test for filter client receiving messages after restart ([#2360](https://github.com/waku-org/nwaku/issues/2360)) ([7de91d92](https://github.com/waku-org/nwaku/commit/7de91d92)) +- making filter admin data test order independent ([#2355](https://github.com/waku-org/nwaku/issues/2355)) ([8a9fad29](https://github.com/waku-org/nwaku/commit/8a9fad29)) + +### Changes + +- **rln-relay-v2:** use rln-v2 contract code ([#2381](https://github.com/waku-org/nwaku/issues/2381)) ([c55ca067](https://github.com/waku-org/nwaku/commit/c55ca067)) +- v0.25 vendor bump and associated fixes ([#2352](https://github.com/waku-org/nwaku/issues/2352)) ([761ce7b1](https://github.com/waku-org/nwaku/commit/761ce7b1)) +- handle errors w.r.t. 
configured cluster-id and pubsub topics ([#2368](https://github.com/waku-org/nwaku/issues/2368)) ([e04e35e2](https://github.com/waku-org/nwaku/commit/e04e35e2)) +- add coverage target to Makefile ([#2382](https://github.com/waku-org/nwaku/issues/2382)) ([57378873](https://github.com/waku-org/nwaku/commit/57378873)) +- Add check spell allowed words ([#2383](https://github.com/waku-org/nwaku/issues/2383)) ([c1121dd1](https://github.com/waku-org/nwaku/commit/c1121dd1)) +- adding nwaku compose image update to release process ([#2370](https://github.com/waku-org/nwaku/issues/2370)) ([4f06dcff](https://github.com/waku-org/nwaku/commit/4f06dcff)) +- changing digest and hash log format from bytes to hex ([#2363](https://github.com/waku-org/nwaku/issues/2363)) ([025c6ec9](https://github.com/waku-org/nwaku/commit/025c6ec9)) +- log messageHash for lightpush request that helps in debugging ([#2366](https://github.com/waku-org/nwaku/issues/2366)) ([42204115](https://github.com/waku-org/nwaku/commit/42204115)) +- **rln-relay:** enabled http based polling in OnchainGroupManager ([#2364](https://github.com/waku-org/nwaku/issues/2364)) ([efdc5244](https://github.com/waku-org/nwaku/commit/efdc5244)) +- improve POST /relay/v1/auto/messages/{topic} error handling ([#2339](https://github.com/waku-org/nwaku/issues/2339)) ([f841454e](https://github.com/waku-org/nwaku/commit/f841454e)) +- Refactor of FilterV2 subscription management with Time-to-live maintenance ([#2341](https://github.com/waku-org/nwaku/issues/2341)) ([c3358409](https://github.com/waku-org/nwaku/commit/c3358409)) +- Bump `nim-dnsdisc` ([#2354](https://github.com/waku-org/nwaku/issues/2354)) ([3d816c08](https://github.com/waku-org/nwaku/commit/3d816c08)) +- postgres-adoption.md add metadata title, description, and better first-readable-title ([#2346](https://github.com/waku-org/nwaku/issues/2346)) ([2f8e8bcb](https://github.com/waku-org/nwaku/commit/2f8e8bcb)) +- fix typo 
([#2348](https://github.com/waku-org/nwaku/issues/2348)) ([a4a8dee3](https://github.com/waku-org/nwaku/commit/a4a8dee3)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.24.0 (2024-01-10) + +> Note: The Waku message size limit (150 KiB) is now enforced according to the specifications. To change this limit please use `--max-msg-size="1MiB"` + +> Note: `--ip-colocation-limit=2` is the new parameter for limiting connections from the same IP + +## What's Changed + +Release highlights: +* IP colocation filter can now be changed via a configuration parameter. +* New filter admin endpoint can now be used to access subscription data. +* Waku message size limit can now be changed via a configuration parameter. + +### Features + +- feat: adding filter data admin endpoint (REST) [#2314](https://github.com/waku-org/nwaku/pull/2314) +- ip colocation is parameterizable. 
if set to 0, it is disabled [#2323](https://github.com/waku-org/nwaku/pull/2323) + +### Bug Fixes +- fix: revert "feat: shard aware peer management [#2151](https://github.com/waku-org/nwaku/pull/2151)" [#2312](https://github.com/waku-org/nwaku/pull/2312) +- fix: setting connectivity loop interval to 15 seconds [#2307](https://github.com/waku-org/nwaku/pull/2307) +- fix: set record to the Waku node builder in the examples as it is required [#2328](https://github.com/waku-org/nwaku/pull/2328) +- fix(discv5): add bootnode filter exception [#2267](https://github.com/waku-org/nwaku/pull/2267) + + +### Changes +- update CHANGELOG.md for 0.23.0 [#2309](https://github.com/waku-org/nwaku/pull/2309) +- test(store): Implement store tests [#2235](https://github.com/waku-org/nwaku/pull/2235), [#2240](https://github.com/waku-org/nwaku/commit/86353e22a871820c132deee077f65e7af4356671) +- refactor(store): HistoryQuery.direction [#2263](https://github.com/waku-org/nwaku/pull/2263) +- test_driver_postgres: enhance test coverage, multiple and single topic [#2301](https://github.com/waku-org/nwaku/pull/2301) +- chore: examples/nodejs - adapt code to latest callback and ctx/userData definitions [#2281](https://github.com/waku-org/nwaku/pull/2281) +- chore: update `CHANGELOG.md` to reflect bug fix for issue [#2317](https://github.com/waku-org/nwaku/issues/2317) [#2340](https://github.com/waku-org/nwaku/pull/2340) in v0.23.1 +- test(peer-connection-managenent): functional tests [#2321](https://github.com/waku-org/nwaku/pull/2321) +- docs: update post-release steps [#2336](https://github.com/waku-org/nwaku/pull/2336) +- docs: fix typos across various documentation files [#2310](https://github.com/waku-org/nwaku/pull/2310) +- test(peer-connection-managenent): functional tests [#2321](https://github.com/waku-org/nwaku/pull/2321) +- bump vendors for 0.24.0 [#2333](https://github.com/waku-org/nwaku/pull/2333) +- test(autosharding): functional tests 
[#2318](https://github.com/waku-org/nwaku/pull/2318) +- docs: add benchmark around postgres adoption [#2316](https://github.com/waku-org/nwaku/pull/2316) +- chore: set max Waku message size to 150KiB according to spec [#2298](https://github.com/waku-org/nwaku/pull/2298) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.23.1 (2024-01-09) + +This patch release fixes the following bug: +- Sort order ignored in store nodes. + +### Bug Fix + +- Bug definition: [#2317](https://github.com/waku-org/nwaku/issues/2317) +- Commit that fixes the bug [fae20bff](https://github.com/waku-org/nwaku/commit/fae20bff) + +This is a patch release that is fully backwards-compatible with release `v0.23.0`. + +It supports the same [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + + +## v0.23.0 (2023-12-18) + +## What's Changed + +Release highlights: +* Bug fix in Postgres when querying more than one content topic. +* :warning: Add new DB column `messageHash`. This requires a manual database update in _Postgres_. +* Updated deterministic message hash algorithm. +* REST admin can inform whether a node supports lightpush and/or filter protocols. +* Improvements to cluster id and shards setup. +* Properly apply RLN when publishing from REST or jsonrpc API. +* Remove trailing commas from the RLN keystore json generated during credentials registration. +* General test cleanup, better relay tests and new filter unsubscribe tests. +* Rewrite docs for clarity and update screenshots. 
+ +### Features + +- setting image deployment to harbor registry ([93dd5ae5](https://github.com/waku-org/nwaku/commit/93dd5ae5)) +- Add new DB column `messageHash` ([#2202](https://github.com/waku-org/nwaku/issues/2202)) ([aeb77a3e](https://github.com/waku-org/nwaku/commit/aeb77a3e)) + +### Bug Fixes + +- make rln rate limit spec compliant ([#2294](https://github.com/waku-org/nwaku/issues/2294)) ([5847f49d](https://github.com/waku-org/nwaku/commit/5847f49d)) +- update num-msgs archive metrics every minute and not only at the beginning ([#2287](https://github.com/waku-org/nwaku/issues/2287)) ([0fc617ff](https://github.com/waku-org/nwaku/commit/0fc617ff)) +- **rln-relay:** graceful retries on rpc calls ([#2250](https://github.com/waku-org/nwaku/issues/2250)) ([15c1f974](https://github.com/waku-org/nwaku/commit/15c1f974)) +- add protection in rest service to always publish with timestamp if user doesn't provide it ([#2261](https://github.com/waku-org/nwaku/issues/2261)) ([42f19579](https://github.com/waku-org/nwaku/commit/42f19579)) +- remove trailing commas from keystore json ([#2200](https://github.com/waku-org/nwaku/issues/2200)) ([103d3981](https://github.com/waku-org/nwaku/commit/103d3981)) +- **dockerfile:** update dockerignore and base image ([#2262](https://github.com/waku-org/nwaku/issues/2262)) ([c86dc442](https://github.com/waku-org/nwaku/commit/c86dc442)) +- waku_filter_v2/common: PEER_DIAL_FAILURE ret code change: 200 -> 504 ([#2236](https://github.com/waku-org/nwaku/issues/2236)) ([6301bec0](https://github.com/waku-org/nwaku/commit/6301bec0)) +- extended Postgres code to support retention policy + refactoring ([#2244](https://github.com/waku-org/nwaku/issues/2244)) ([a1ed517f](https://github.com/waku-org/nwaku/commit/a1ed517f)) +- admin REST API to be enabled only if config is set ([#2218](https://github.com/waku-org/nwaku/issues/2218)) ([110de90f](https://github.com/waku-org/nwaku/commit/110de90f)) +- **rln:** error in api when rate limit 
([#2212](https://github.com/waku-org/nwaku/issues/2212)) ([51f36099](https://github.com/waku-org/nwaku/commit/51f36099)) +- **relay:** Failing protocol tests ([#2224](https://github.com/waku-org/nwaku/issues/2224)) ([c9e869fb](https://github.com/waku-org/nwaku/commit/c9e869fb)) +- **tests:** Compilation failure fix ([#2222](https://github.com/waku-org/nwaku/issues/2222)) ([a5da1fc4](https://github.com/waku-org/nwaku/commit/a5da1fc4)) +- **rest:** properly check if rln is used ([#2205](https://github.com/waku-org/nwaku/issues/2205)) ([2cb0989a](https://github.com/waku-org/nwaku/commit/2cb0989a)) + +### Changes + +- archive - move error to trace level when insert row fails ([#2283](https://github.com/waku-org/nwaku/issues/2283)) ([574cdf55](https://github.com/waku-org/nwaku/commit/574cdf55)) +- including content topics on FilterSubscribeRequest logs ([#2295](https://github.com/waku-org/nwaku/issues/2295)) ([306c8a62](https://github.com/waku-org/nwaku/commit/306c8a62)) +- vendor bump for 0.23.0 ([#2274](https://github.com/waku-org/nwaku/issues/2274)) ([385daf16](https://github.com/waku-org/nwaku/commit/385daf16)) +- peer_manager.nim - reduce logs from debug to trace ([#2279](https://github.com/waku-org/nwaku/issues/2279)) ([0cc0c805](https://github.com/waku-org/nwaku/commit/0cc0c805)) +- Cbindings allow mounting the Store protocol from libwaku ([#2276](https://github.com/waku-org/nwaku/issues/2276)) ([28142f40](https://github.com/waku-org/nwaku/commit/28142f40)) +- Better feedback invalid content topic ([#2254](https://github.com/waku-org/nwaku/issues/2254)) ([72a1f8c7](https://github.com/waku-org/nwaku/commit/72a1f8c7)) +- fix typos ([#2239](https://github.com/waku-org/nwaku/issues/2239)) ([958b9bd7](https://github.com/waku-org/nwaku/commit/958b9bd7)) +- creating prepare_release template ([#2225](https://github.com/waku-org/nwaku/issues/2225)) ([5883dbeb](https://github.com/waku-org/nwaku/commit/5883dbeb)) +- **rest:** refactor message cache 
([#2221](https://github.com/waku-org/nwaku/issues/2221)) ([bebaa59c](https://github.com/waku-org/nwaku/commit/bebaa59c)) +- updating nim-json-serialization dependency ([#2248](https://github.com/waku-org/nwaku/issues/2248)) ([9f4e6f45](https://github.com/waku-org/nwaku/commit/9f4e6f45)) +- **store-archive:** Remove duplicated code ([#2234](https://github.com/waku-org/nwaku/issues/2234)) ([38e100e9](https://github.com/waku-org/nwaku/commit/38e100e9)) +- refactoring peer storage ([#2243](https://github.com/waku-org/nwaku/issues/2243)) ([c301e880](https://github.com/waku-org/nwaku/commit/c301e880)) +- postres driver allow setting the max number of connection from a parameter ([#2246](https://github.com/waku-org/nwaku/issues/2246)) ([b31c1823](https://github.com/waku-org/nwaku/commit/b31c1823)) +- deterministic message hash algorithm updated ([#2233](https://github.com/waku-org/nwaku/issues/2233)) ([a22ee604](https://github.com/waku-org/nwaku/commit/a22ee604)) +- **REST:** returning lightpush support and updated filter protocol ([#2219](https://github.com/waku-org/nwaku/issues/2219)) ([59ee3c69](https://github.com/waku-org/nwaku/commit/59ee3c69)) +- mics. 
improvements to cluster id and shards setup ([#2187](https://github.com/waku-org/nwaku/issues/2187)) ([897f4879](https://github.com/waku-org/nwaku/commit/897f4879)) +- update docs for rln-keystore-generator ([#2210](https://github.com/waku-org/nwaku/issues/2210)) ([8c5666d2](https://github.com/waku-org/nwaku/commit/8c5666d2)) +- removing automatic vacuuming from retention policy code ([#2228](https://github.com/waku-org/nwaku/issues/2228)) ([9ff441ab](https://github.com/waku-org/nwaku/commit/9ff441ab)) +- decoupling announced and listen addresses ([#2203](https://github.com/waku-org/nwaku/issues/2203)) ([ef8ffbdb](https://github.com/waku-org/nwaku/commit/ef8ffbdb)) +- **release:** update changelog for v0.22.0 release ([#2216](https://github.com/waku-org/nwaku/issues/2216)) ([9c4fdac6](https://github.com/waku-org/nwaku/commit/9c4fdac6)) +- Allow text/plain content type descriptor for json formatted content body ([#2209](https://github.com/waku-org/nwaku/issues/2209)) ([6d81e384](https://github.com/waku-org/nwaku/commit/6d81e384)) +- rewrite for clarity, update screenshots ([#2206](https://github.com/waku-org/nwaku/issues/2206)) ([a0ef3c2f](https://github.com/waku-org/nwaku/commit/a0ef3c2f)) +- **release:** update changelog for v0.21.3 release ([#2208](https://github.com/waku-org/nwaku/issues/2208)) ([f74474b4](https://github.com/waku-org/nwaku/commit/f74474b4)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.22.0 (2023-11-15) + +> Note: The `--topic` option is now deprecated in favor of a more specific options `--pubsub-topic` & `--content-topic` + +> Note: The `--ext-multiaddr-only` CLI flag was introduced for cases in which the user wants to manually set their announced addresses + +## What's Changed + +Release highlights: +* simplified the process of generating RLN credentials through the new `generateRlnKeystore` subcommand +* added support for configuration of port 0 in order to bind to kernel selected ports +* shards are now automatically updated in metadata protocol when supported shards change on runtime +* introduced `messageHash` attribute to SQLite which will later replace the `id` attribute + +### Features + +- rln-keystore-generator is now a subcommand ([#2189](https://github.com/waku-org/nwaku/issues/2189)) ([3498a846](https://github.com/waku-org/nwaku/commit/3498a846)) +- amending computeDigest func. 
+ related test cases ([#2132](https://github.com/waku-org/nwaku/issues/2132))" ([#2180](https://github.com/waku-org/nwaku/issues/2180)) ([d7ef3ca1](https://github.com/waku-org/nwaku/commit/d7ef3ca1)) +- **discv5:** filter out peers without any listed capability ([#2186](https://github.com/waku-org/nwaku/issues/2186)) ([200a11da](https://github.com/waku-org/nwaku/commit/200a11da)) +- metadata protocol shard subscription ([#2149](https://github.com/waku-org/nwaku/issues/2149)) ([bcf8e963](https://github.com/waku-org/nwaku/commit/bcf8e963)) +- REST APIs discovery handlers ([#2109](https://github.com/waku-org/nwaku/issues/2109)) ([7ca516a5](https://github.com/waku-org/nwaku/commit/7ca516a5)) +- implementing port 0 support ([#2125](https://github.com/waku-org/nwaku/issues/2125)) ([f7b9afc2](https://github.com/waku-org/nwaku/commit/f7b9afc2)) +- messageHash attribute added in SQLite + testcase ([#2142](https://github.com/waku-org/nwaku/issues/2142))" ([#2154](https://github.com/waku-org/nwaku/issues/2154)) ([13aeebe4](https://github.com/waku-org/nwaku/commit/13aeebe4)) +- messageHash attribute added in SQLite + testcase ([#2142](https://github.com/waku-org/nwaku/issues/2142)) ([9cd8c73d](https://github.com/waku-org/nwaku/commit/9cd8c73d)) +- amending computeDigest func. 
+ related test cases ([#2132](https://github.com/waku-org/nwaku/issues/2132)) ([1669f710](https://github.com/waku-org/nwaku/commit/1669f710)) + +### Bug Fixes + +- typo ([6dd28063](https://github.com/waku-org/nwaku/commit/6dd28063)) +- lightpush rest ([#2176](https://github.com/waku-org/nwaku/issues/2176)) ([fa467e24](https://github.com/waku-org/nwaku/commit/fa467e24)) +- **ci:** fix Docker tag for latest and release jobs ([52759faa](https://github.com/waku-org/nwaku/commit/52759faa)) +- **rest:** fix bug in rest api when sending rln message ([#2169](https://github.com/waku-org/nwaku/issues/2169)) ([250e8b98](https://github.com/waku-org/nwaku/commit/250e8b98)) +- updating v0.21.1 release date in changelog ([#2160](https://github.com/waku-org/nwaku/issues/2160)) ([3be61636](https://github.com/waku-org/nwaku/commit/3be61636)) + +### Changes + +- Optimize postgres - prepared statements in select ([#2182](https://github.com/waku-org/nwaku/issues/2182)) ([6da1aeec](https://github.com/waku-org/nwaku/commit/6da1aeec)) +- **release:** update changelog for v0.21.2 release ([#2188](https://github.com/waku-org/nwaku/issues/2188)) ([d0a93e7c](https://github.com/waku-org/nwaku/commit/d0a93e7c)) +- upgrade dependencies v0.22 ([#2185](https://github.com/waku-org/nwaku/issues/2185)) ([b9563ae0](https://github.com/waku-org/nwaku/commit/b9563ae0)) +- Optimize postgres - use of rowCallback approach ([#2171](https://github.com/waku-org/nwaku/issues/2171)) ([2b4ca4d0](https://github.com/waku-org/nwaku/commit/2b4ca4d0)) +- **networking:** lower dhigh to limit amplification factor ([#2168](https://github.com/waku-org/nwaku/issues/2168)) ([f0f69b32](https://github.com/waku-org/nwaku/commit/f0f69b32)) +- Minor Postgres optimizations ([#2166](https://github.com/waku-org/nwaku/issues/2166)) ([282c2e81](https://github.com/waku-org/nwaku/commit/282c2e81)) +- adding patch release instructions to release doc ([#2157](https://github.com/waku-org/nwaku/issues/2157)) 
([cc01bb07](https://github.com/waku-org/nwaku/commit/cc01bb07)) +- **release:** update changelog for v0.21.1 release ([#2155](https://github.com/waku-org/nwaku/issues/2155)) ([b109a583](https://github.com/waku-org/nwaku/commit/b109a583)) +- adding ext-multiaddr-only CLI flag ([#2141](https://github.com/waku-org/nwaku/issues/2141)) ([944dfdaa](https://github.com/waku-org/nwaku/commit/944dfdaa)) +- bumping nim-libp2p to include WSS fix ([#2150](https://github.com/waku-org/nwaku/issues/2150)) ([817a7b2e](https://github.com/waku-org/nwaku/commit/817a7b2e)) +- **cbindings:** avoid using global var in libwaku.nim ([#2118](https://github.com/waku-org/nwaku/issues/2118)) ([1e8f5771](https://github.com/waku-org/nwaku/commit/1e8f5771)) +- adding postgres flag to manual docker job instructions ([#2139](https://github.com/waku-org/nwaku/issues/2139)) ([459331e3](https://github.com/waku-org/nwaku/commit/459331e3)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | +| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## Upgrade instructions + +* Note that the `--topic` CLI option is now deprecated in favor of a more specific options `--pubsub-topic` & `--content-topic`. + +## v0.21.3 (2023-11-09) + +This patch release adds the following feature: +- Adding generateRlnKeystore subcommand for RLN membership generation + +### Features + +- rln-keystore-generator is now a subcommand ([#2189](https://github.com/waku-org/nwaku/issues/2189)) ([1e919177](https://github.com/waku-org/nwaku/commit/1e919177)) + +This is a patch release that is fully backwards-compatible with release `v0.21.0`, `v0.21.1` and `v0.21.2`. + +It supports the same [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.21.2 (2023-11-07) + +This patch release addresses the following issue: +- Inability to send RLN messages through the REST API + +### Bug Fixes + +- **rest:** fix bug in rest api when sending rln message ([#2169](https://github.com/waku-org/nwaku/issues/2169)) ([33decd7a](https://github.com/waku-org/nwaku/commit/33decd7a)) + +This is a patch release that is fully backwards-compatible with release `v0.21.0` and `v0.21.1`. + +It supports the same [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.21.1 (2023-10-26) + +This patch release addresses the following issues: +- WSS connections being suddenly terminated under rare conditions +- Ability for the user to control announced multiaddresses + +### Changes + +- adding ext-multiaddr-only CLI flag ([#2141](https://github.com/waku-org/nwaku/issues/2141)) ([e2dfc2ed](https://github.com/waku-org/nwaku/commit/e2dfc2ed)) +- bumping nim-libp2p to include WSS fix ([#2150](https://github.com/waku-org/nwaku/issues/2150)) ([18b5149a](https://github.com/waku-org/nwaku/commit/18b5149a)) + +This is a patch release that is fully backwards-compatible with release `v0.21.0`. + +It supports the same [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## v0.21.0 (2023-10-18) + +> Note: This is the last release supporting the `--topic` option. It is being deprecated in favor of the more specific options `--pubsub-topic` & `--content-topic` + +## What's Changed + +Release highlights: +* Implemented a req/resp [protocol](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) that provides information about the node's metadata +* Added REST APIs for Filter v2 and Lightpush protocols' services +* Ported /admin endpoint to REST +* Added a size-based retention policy for the user to set a limit for SQLite storage used + +### Features + +- add new metadata protocol ([#2062](https://github.com/waku-org/nwaku/issues/2062)) ([d5c3ade5](https://github.com/waku-org/nwaku/commit/d5c3ade5)) +- /admin rest api endpoint ([#2094](https://github.com/waku-org/nwaku/issues/2094)) ([7b5c36b1](https://github.com/waku-org/nwaku/commit/7b5c36b1)) +- **coverage:** Add simple coverage ([#2067](https://github.com/waku-org/nwaku/issues/2067)) ([d864db3f](https://github.com/waku-org/nwaku/commit/d864db3f)) +- added RELAY openapi definitions ([#2081](https://github.com/waku-org/nwaku/issues/2081)) ([56dbe2a7](https://github.com/waku-org/nwaku/commit/56dbe2a7)) +- **wakucanary:** add latency measurement using ping protocol ([#2074](https://github.com/waku-org/nwaku/issues/2074)) ([6cb9a8da](https://github.com/waku-org/nwaku/commit/6cb9a8da)) +- Autosharding API for RELAY subscriptions 
([#1983](https://github.com/waku-org/nwaku/issues/1983)) ([1763b1ef](https://github.com/waku-org/nwaku/commit/1763b1ef)) +- **networkmonitor:** add ping latencies, optimize reconnections ([#2068](https://github.com/waku-org/nwaku/issues/2068)) ([ed473545](https://github.com/waku-org/nwaku/commit/ed473545)) +- peer manager can filter peers by shard ([#2063](https://github.com/waku-org/nwaku/issues/2063)) ([0d9e9fbd](https://github.com/waku-org/nwaku/commit/0d9e9fbd)) +- lightpush rest api ([#2052](https://github.com/waku-org/nwaku/issues/2052)) ([02a814bd](https://github.com/waku-org/nwaku/commit/02a814bd)) +- HTTP REST API: Filter support v2 ([#1890](https://github.com/waku-org/nwaku/issues/1890)) ([dac072f8](https://github.com/waku-org/nwaku/commit/dac072f8)) + +### Bug Fixes + +- fix wrong install of filter rest api ([#2133](https://github.com/waku-org/nwaku/issues/2133)) ([5277d122](https://github.com/waku-org/nwaku/commit/5277d122)) +- consider WS extMultiAddrs before publishing host address ([#2122](https://github.com/waku-org/nwaku/issues/2122)) ([a5b1cfd0](https://github.com/waku-org/nwaku/commit/a5b1cfd0)) +- return erring response if lightpush request is invalid ([#2083](https://github.com/waku-org/nwaku/issues/2083)) ([2c5eb427](https://github.com/waku-org/nwaku/commit/2c5eb427)) +- sqlite limited delete query bug ([#2111](https://github.com/waku-org/nwaku/issues/2111)) ([06bc433a](https://github.com/waku-org/nwaku/commit/06bc433a)) +- cluster id & sharding terminology ([#2104](https://github.com/waku-org/nwaku/issues/2104)) ([a47dc9e6](https://github.com/waku-org/nwaku/commit/a47dc9e6)) +- **ci:** update the dependency list in pre-release WF ([#2088](https://github.com/waku-org/nwaku/issues/2088)) ([e85f05b0](https://github.com/waku-org/nwaku/commit/e85f05b0)) +- **ci:** fix name of discord notify method ([aaf10e08](https://github.com/waku-org/nwaku/commit/aaf10e08)) +- update wakuv2 fleet DNS discovery enrtree 
([89854a96](https://github.com/waku-org/nwaku/commit/89854a96)) +- libwaku.nim: unsubscribe -> unsubscribeAll to make it build properly ([#2082](https://github.com/waku-org/nwaku/issues/2082)) ([3264a4f5](https://github.com/waku-org/nwaku/commit/3264a4f5)) +- **archive:** dburl check ([#2071](https://github.com/waku-org/nwaku/issues/2071)) ([a27d005f](https://github.com/waku-org/nwaku/commit/a27d005f)) +- filter discv5 bootstrap nodes by shards ([#2073](https://github.com/waku-org/nwaku/issues/2073)) ([d178105d](https://github.com/waku-org/nwaku/commit/d178105d)) +- **rln-relay:** segfault when no params except rln-relay are passed in ([#2047](https://github.com/waku-org/nwaku/issues/2047)) ([45fe2d3b](https://github.com/waku-org/nwaku/commit/45fe2d3b)) +- **sqlite:** Properly set user_version to 7 so that the migration procedure is not started ([#2031](https://github.com/waku-org/nwaku/issues/2031)) ([aa3e1a66](https://github.com/waku-org/nwaku/commit/aa3e1a66)) + +### Changes + +- remove js-node tests as release candidate dependencies ([#2123](https://github.com/waku-org/nwaku/issues/2123)) ([ce5fb340](https://github.com/waku-org/nwaku/commit/ce5fb340)) +- added size based retention policy ([#2098](https://github.com/waku-org/nwaku/issues/2098)) ([25d6e52e](https://github.com/waku-org/nwaku/commit/25d6e52e)) +- Clarify running instructions ([#2038](https://github.com/waku-org/nwaku/issues/2038)) ([12e8b122](https://github.com/waku-org/nwaku/commit/12e8b122)) +- **rln:** add more hardcoded memberhips to static group ([#2108](https://github.com/waku-org/nwaku/issues/2108)) ([1042cacd](https://github.com/waku-org/nwaku/commit/1042cacd)) +- Revert lightpush error handling to allow zero peer publish again succeed ([#2099](https://github.com/waku-org/nwaku/issues/2099)) ([f05528d4](https://github.com/waku-org/nwaku/commit/f05528d4)) +- adding NetConfig test suite ([#2091](https://github.com/waku-org/nwaku/issues/2091)) 
([23b49ca5](https://github.com/waku-org/nwaku/commit/23b49ca5)) +- **cbindings:** Adding cpp example that integrates the 'libwaku' ([#2079](https://github.com/waku-org/nwaku/issues/2079)) ([8455b8dd](https://github.com/waku-org/nwaku/commit/8455b8dd)) +- **networkmonitor:** refactor setConnectedPeersMetrics, make it partially concurrent, add version ([#2080](https://github.com/waku-org/nwaku/issues/2080)) ([c5aa9704](https://github.com/waku-org/nwaku/commit/c5aa9704)) +- resolving DNS IP and publishing it when no extIp is provided ([#2030](https://github.com/waku-org/nwaku/issues/2030)) ([7797b2cd](https://github.com/waku-org/nwaku/commit/7797b2cd)) +- Adding -d:postgres flag when creating a Docker image for release and PRs ([#2076](https://github.com/waku-org/nwaku/issues/2076)) ([7a376f59](https://github.com/waku-org/nwaku/commit/7a376f59)) +- Moved external APIs out of node ([#2069](https://github.com/waku-org/nwaku/issues/2069)) ([3e72e830](https://github.com/waku-org/nwaku/commit/3e72e830)) +- bump nim-libp2p, nim-toml-serialization, nim-unicodedb, nim-unittest2, nim-websock, nim-zlib, & nimbus-build-system ([#2065](https://github.com/waku-org/nwaku/issues/2065)) ([dc25057a](https://github.com/waku-org/nwaku/commit/dc25057a)) +- **ci:** add js-waku as a dependency for pre-release createion ([#2022](https://github.com/waku-org/nwaku/issues/2022)) ([28b04000](https://github.com/waku-org/nwaku/commit/28b04000)) +- Updating nim-chronicles, nim-chronos, nim-presto, nimcrypto, nim-libp2p, and nim-nat-transversal ([#2043](https://github.com/waku-org/nwaku/issues/2043)) ([f617cd97](https://github.com/waku-org/nwaku/commit/f617cd97)) +- **cbindings:** Thread-safe communication between the main thread and the Waku Thread ([#1978](https://github.com/waku-org/nwaku/issues/1978)) ([72f90663](https://github.com/waku-org/nwaku/commit/72f90663)) +- **rln-relay:** logs, updated submodule, leaves_set metric ([#2024](https://github.com/waku-org/nwaku/issues/2024)) 
([2e515a06](https://github.com/waku-org/nwaku/commit/2e515a06)) +- **release:** update changelog for v0.20.0 release ([#2026](https://github.com/waku-org/nwaku/issues/2026)) ([9085b1b3](https://github.com/waku-org/nwaku/commit/9085b1b3)) +- **postgres:** not loading the libpq library by default & better user feedback ([#2028](https://github.com/waku-org/nwaku/issues/2028)) ([e8602021](https://github.com/waku-org/nwaku/commit/e8602021)) +- move SubscriptionManager under waku_core ([#2025](https://github.com/waku-org/nwaku/issues/2025)) ([563b2b20](https://github.com/waku-org/nwaku/commit/563b2b20)) +- **README:** List possible WSL Issue ([#1995](https://github.com/waku-org/nwaku/issues/1995)) ([ebe715e9](https://github.com/waku-org/nwaku/commit/ebe715e9)) +- **ci:** add js-waku test to pre-release workflow ([#2017](https://github.com/waku-org/nwaku/issues/2017)) ([e8776fd6](https://github.com/waku-org/nwaku/commit/e8776fd6)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## Upgrade instructions + +* Note that the `--topic` CLI option is being deprecated in favor of the more specific options `--pubsub-topic` & `--content-topic`. This is the last release supporting the `--topic` option. +* The size-based retention policy has been tested with SQLite storage and is still in the validation phase for Postgres + +## 2023-09-14 v0.20.0 + +> Note: IP address 0.0.0.0 is no longer advertised by a node + +> Note: Multiple CLI options have been removed in this release, please see _Upgrade instructions_ section for details. + +## What's Changed + +Release highlights: +* RLN is now part of standard release (no longer an EXPERIMENTAL feature) +* Interop tests between nwaku and js-waku are now gating PRs and releases +* Libwaku has been made more threadsafe (1 out of 3 improvements applied).
+* Added autosharding option on various protocol APIs + + + +### Features + +- **rln-relay:** removed rln from experimental 🚀 ([#2001](https://github.com/waku-org/nwaku/issues/2001)) ([645b0343](https://github.com/waku-org/nwaku/commit/645b0343)) +- Rest endoint /health for rln ([#2011](https://github.com/waku-org/nwaku/issues/2011)) ([fc6194bb](https://github.com/waku-org/nwaku/commit/fc6194bb)) +- **rln_db_inspector:** create rln_db_inspector tool ([#1999](https://github.com/waku-org/nwaku/issues/1999)) ([ec42e2c7](https://github.com/waku-org/nwaku/commit/ec42e2c7)) +- **relay:** ordered validator execution ([#1966](https://github.com/waku-org/nwaku/issues/1966)) ([debc5f19](https://github.com/waku-org/nwaku/commit/debc5f19)) +- **discv5:** topic subscriptions update discv5 filter predicate ([#1918](https://github.com/waku-org/nwaku/issues/1918)) ([4539dfc7](https://github.com/waku-org/nwaku/commit/4539dfc7)) +- topic subscriptions updates discv5 ENR ([#1875](https://github.com/waku-org/nwaku/issues/1875)) ([c369b329](https://github.com/waku-org/nwaku/commit/c369b329)) +- **rln_keystore_generator:** wired to onchain group manager ([#1931](https://github.com/waku-org/nwaku/issues/1931)) ([c9b48ea1](https://github.com/waku-org/nwaku/commit/c9b48ea1)) +- **rln:** init rln_keystore_generator ([#1925](https://github.com/waku-org/nwaku/issues/1925)) ([3d849541](https://github.com/waku-org/nwaku/commit/3d849541)) +- update various protocols to autoshard ([#1857](https://github.com/waku-org/nwaku/issues/1857)) ([cf301396](https://github.com/waku-org/nwaku/commit/cf301396)) + +### Bug Fixes + +- **rln-relay:** waku_rln_number_registered_memberships metrics appropriately handled ([#2018](https://github.com/waku-org/nwaku/issues/2018)) ([a4e78330](https://github.com/waku-org/nwaku/commit/a4e78330)) +- prevent IP 0.0.0.0 from being published and update peers with empty ENR data ([#1982](https://github.com/waku-org/nwaku/issues/1982)) 
([47ae19c1](https://github.com/waku-org/nwaku/commit/47ae19c1)) +- **rln-relay:** missed roots during sync ([#2015](https://github.com/waku-org/nwaku/issues/2015)) ([21604e6b](https://github.com/waku-org/nwaku/commit/21604e6b)) +- **p2p:** fix possible connectivity issue ([#1996](https://github.com/waku-org/nwaku/issues/1996)) ([7d9d8a3f](https://github.com/waku-org/nwaku/commit/7d9d8a3f)) +- **rln-db-inspector:** use valueOr pattern ([#2012](https://github.com/waku-org/nwaku/issues/2012)) ([a8095d87](https://github.com/waku-org/nwaku/commit/a8095d87)) +- **tests:** relay tests use random port to avoid conflict ([#1998](https://github.com/waku-org/nwaku/issues/1998)) ([b991682b](https://github.com/waku-org/nwaku/commit/b991682b)) +- **ci:** incorrect use of braces ([#1987](https://github.com/waku-org/nwaku/issues/1987)) ([4ed41457](https://github.com/waku-org/nwaku/commit/4ed41457)) +- **Makefile:** invalid path to crate build ([#1981](https://github.com/waku-org/nwaku/issues/1981)) ([1a318c29](https://github.com/waku-org/nwaku/commit/1a318c29)) +- --topic should be ignore when using --pubsub-topic or --content-topic ([#1977](https://github.com/waku-org/nwaku/issues/1977)) ([037b1662](https://github.com/waku-org/nwaku/commit/037b1662)) +- **tests:** fix flaky test ([#1972](https://github.com/waku-org/nwaku/issues/1972)) ([f262397d](https://github.com/waku-org/nwaku/commit/f262397d)) +- **rln-relay:** deserialization of valid merkle roots ([#1973](https://github.com/waku-org/nwaku/issues/1973)) ([d262837e](https://github.com/waku-org/nwaku/commit/d262837e)) +- **ci:** rename tools artifact to prevent conflict ([#1971](https://github.com/waku-org/nwaku/issues/1971)) ([26c06b27](https://github.com/waku-org/nwaku/commit/26c06b27)) +- **Makefile:** rln was enabled by default ([#1964](https://github.com/waku-org/nwaku/issues/1964)) ([9b1d2904](https://github.com/waku-org/nwaku/commit/9b1d2904)) +- **rln-relay:** modify keystore credentials logic 
([#1956](https://github.com/waku-org/nwaku/issues/1956)) ([e7b2b88f](https://github.com/waku-org/nwaku/commit/e7b2b88f)) +- **Makefile:** error out if rln-keystore-generator not compiled with rln flag ([#1960](https://github.com/waku-org/nwaku/issues/1960)) ([ac258550](https://github.com/waku-org/nwaku/commit/ac258550)) +- **rln-relay:** sync from deployed block number ([#1955](https://github.com/waku-org/nwaku/issues/1955)) ([bd3be219](https://github.com/waku-org/nwaku/commit/bd3be219)) +- **rln-relay:** window of acceptable roots synced to rln metadata ([#1953](https://github.com/waku-org/nwaku/issues/1953)) ([01634f57](https://github.com/waku-org/nwaku/commit/01634f57)) +- **rln-relay:** bump zerokit to v0.3.2 ([#1951](https://github.com/waku-org/nwaku/issues/1951)) ([32aa1c5b](https://github.com/waku-org/nwaku/commit/32aa1c5b)) +- **rln-relay:** flush_interval incorrectly set ([#1933](https://github.com/waku-org/nwaku/issues/1933)) ([c07d63db](https://github.com/waku-org/nwaku/commit/c07d63db)) +- **rln-relay:** RLN DB should be aware of chain and contract address ([#1932](https://github.com/waku-org/nwaku/issues/1932)) ([1ae5b5a9](https://github.com/waku-org/nwaku/commit/1ae5b5a9)) +- **rln-relay:** waitFor startup, otherwise valid proofs will be marked invalid ([#1920](https://github.com/waku-org/nwaku/issues/1920)) ([6c6302f9](https://github.com/waku-org/nwaku/commit/6c6302f9)) +- **test:** fix flaky rln test ([#1923](https://github.com/waku-org/nwaku/issues/1923)) ([0ac8a7f0](https://github.com/waku-org/nwaku/commit/0ac8a7f0)) +- **rln-relay:** remove registration capability ([#1916](https://github.com/waku-org/nwaku/issues/1916)) ([f08315cd](https://github.com/waku-org/nwaku/commit/f08315cd)) +- **rln-relay:** invalid start index being set results in invalid proofs ([#1915](https://github.com/waku-org/nwaku/issues/1915)) ([b3bb7a11](https://github.com/waku-org/nwaku/commit/b3bb7a11)) +- **rln-relay:** should error out on rln-relay mount failure 
([#1904](https://github.com/waku-org/nwaku/issues/1904)) ([8c568cab](https://github.com/waku-org/nwaku/commit/8c568cab)) +- **rln-relay:** timeout on macos runners, use fixed version of ganache ([#1913](https://github.com/waku-org/nwaku/issues/1913)) ([c9772af0](https://github.com/waku-org/nwaku/commit/c9772af0)) +- no enr record in chat2 ([#1907](https://github.com/waku-org/nwaku/issues/1907)) ([fc604ca5](https://github.com/waku-org/nwaku/commit/fc604ca5)) + +### Changes + +- **ci:** add js-waku test to pre-release workflow ([#2017](https://github.com/waku-org/nwaku/issues/2017)) ([e8776fd6](https://github.com/waku-org/nwaku/commit/e8776fd6)) +- **rln-relay:** updated docs ([#1993](https://github.com/waku-org/nwaku/issues/1993)) ([76e34077](https://github.com/waku-org/nwaku/commit/76e34077)) +- **ci:** execute js-waku integration tests on image build ([#2006](https://github.com/waku-org/nwaku/issues/2006)) ([5d976df9](https://github.com/waku-org/nwaku/commit/5d976df9)) +- **rln-relay:** add isReady check ([#1989](https://github.com/waku-org/nwaku/issues/1989)) ([5638bd06](https://github.com/waku-org/nwaku/commit/5638bd06)) +- **rln-relay:** clean up nullifier table every MaxEpochGap ([#1994](https://github.com/waku-org/nwaku/issues/1994)) ([483f40c8](https://github.com/waku-org/nwaku/commit/483f40c8)) +- **ci:** use commit instead of master for docker image ([#1990](https://github.com/waku-org/nwaku/issues/1990)) ([98850192](https://github.com/waku-org/nwaku/commit/98850192)) +- **rln-relay:** log levels for certain logs ([#1986](https://github.com/waku-org/nwaku/issues/1986)) ([97a7c9d0](https://github.com/waku-org/nwaku/commit/97a7c9d0)) +- **rln-relay:** use the only key from keystore if only 1 exists ([#1984](https://github.com/waku-org/nwaku/issues/1984)) ([a14c3261](https://github.com/waku-org/nwaku/commit/a14c3261)) +- **ci:** enable experimental for the PR image builds ([#1976](https://github.com/waku-org/nwaku/issues/1976)) 
([1b835b4e](https://github.com/waku-org/nwaku/commit/1b835b4e)) +- **rln-relay:** confirm that the provided credential is correct using onchain query ([#1980](https://github.com/waku-org/nwaku/issues/1980)) ([be48891f](https://github.com/waku-org/nwaku/commit/be48891f)) +- **api:** validate rln message before sending (rest + rpc) ([#1968](https://github.com/waku-org/nwaku/issues/1968)) ([05c98864](https://github.com/waku-org/nwaku/commit/05c98864)) +- **cbindings:** Thread-safe libwaku. WakuNode instance created directly from the Waku Thread ([#1957](https://github.com/waku-org/nwaku/issues/1957)) ([68e8d9a7](https://github.com/waku-org/nwaku/commit/68e8d9a7)) +- add debug log indicating succesful message pushes and also log the message hash ([#1965](https://github.com/waku-org/nwaku/issues/1965)) ([e272bec9](https://github.com/waku-org/nwaku/commit/e272bec9)) +- **rln-keystore-generator:** log out the membership index upon registration ([#1963](https://github.com/waku-org/nwaku/issues/1963)) ([7d53aec1](https://github.com/waku-org/nwaku/commit/7d53aec1)) +- **rln-relay:** integrate waku rln registry ([#1943](https://github.com/waku-org/nwaku/issues/1943)) ([cc9f8d42](https://github.com/waku-org/nwaku/commit/cc9f8d42)) +- **ci:** add a job checking config options and db schema ([#1927](https://github.com/waku-org/nwaku/issues/1927)) ([505d1967](https://github.com/waku-org/nwaku/commit/505d1967)) +- **rln_keystore_generator:** generate and persist credentials ([#1928](https://github.com/waku-org/nwaku/issues/1928)) ([07945a37](https://github.com/waku-org/nwaku/commit/07945a37)) +- **rln-relay:** rename keystore application to waku-rln-relay ([#1924](https://github.com/waku-org/nwaku/issues/1924)) ([8239b455](https://github.com/waku-org/nwaku/commit/8239b455)) +- **rln:** remove old and add new rln metric ([#1926](https://github.com/waku-org/nwaku/issues/1926)) ([56c228f8](https://github.com/waku-org/nwaku/commit/56c228f8)) +- **rln:** run rln in all relay 
pubsubtopics + remove cli flags ([#1917](https://github.com/waku-org/nwaku/issues/1917)) ([af95b571](https://github.com/waku-org/nwaku/commit/af95b571)) +- **release:** update changelog for delayed v0.19.0 release ([#1911](https://github.com/waku-org/nwaku/issues/1911)) ([78690787](https://github.com/waku-org/nwaku/commit/78690787)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## Upgrade instructions + +* Note that the `--topic` CLI option is being deprecated in favor of a more specific options `--pubsub-topic` & `--content-topic`. The `--topic` option will be available for next release with a deprecation note. +* CLI option `--store-resume-peer` has been removed. +* Following options related to RLN have been removed: + * `--rln-relay-membership-group-index` + * `--rln-relay-pubsub-topic` + * `--rln-relay-content-topic` + + +## 2023-08-16 v0.19.0 + +> Note that the `--topic` CLI option is being deprecated in favor a more specific option `--pubsub-topic`. + +> The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + + +## What's Changed + +Release highlights: +* Improved connection management, including management for non-relay peers and limiting the number of connections from a single IP +* Postgres support has been added as a backend for archive module +* RLN initialization optimizations +* Update to the latest nim-libp2p +* Removed Waku v1 and also references to `v2` from the current version +* Basic implementation of Autosharding for the Waku Network +* REST API implementation for Filter protocol + +### Features + +- **ci:** add docker image builds per PR ([#1881](https://github.com/waku-org/nwaku/issues/1881)) ([84f94d5d](https://github.com/waku-org/nwaku/commit/84f94d5d)) +- Rest API interface for legacy (v1) filter service. 
([#1851](https://github.com/waku-org/nwaku/issues/1851)) ([08ff6672](https://github.com/waku-org/nwaku/commit/08ff6672)) +- autosharding content topics in config ([#1856](https://github.com/waku-org/nwaku/issues/1856)) ([afb93e29](https://github.com/waku-org/nwaku/commit/afb93e29)) +- autosharding core algorithm ([#1854](https://github.com/waku-org/nwaku/issues/1854)) ([bbff1ac1](https://github.com/waku-org/nwaku/commit/bbff1ac1)) +- **cbindings:** tiny waku relay example in Python ([#1793](https://github.com/waku-org/nwaku/issues/1793)) ([0b2cfae5](https://github.com/waku-org/nwaku/commit/0b2cfae5)) +- **rln-relay:** close db connection appropriately ([#1858](https://github.com/waku-org/nwaku/issues/1858)) ([76c73b62](https://github.com/waku-org/nwaku/commit/76c73b62)) +- enable TcpNoDelay ([#1470](https://github.com/waku-org/nwaku/issues/1470)) ([08f3bba3](https://github.com/waku-org/nwaku/commit/08f3bba3)) +- limit relay connections below max conns ([#1813](https://github.com/waku-org/nwaku/issues/1813)) ([17b24cde](https://github.com/waku-org/nwaku/commit/17b24cde)) +- **postgres:** integration of postgres in wakunode2 ([#1808](https://github.com/waku-org/nwaku/issues/1808)) ([88b7481f](https://github.com/waku-org/nwaku/commit/88b7481f)) +- discovery peer filtering for relay shard ([#1804](https://github.com/waku-org/nwaku/issues/1804)) ([a4da87bb](https://github.com/waku-org/nwaku/commit/a4da87bb)) +- **rln-relay:** resume onchain sync from persisted tree db ([#1805](https://github.com/waku-org/nwaku/issues/1805)) ([bbded9ee](https://github.com/waku-org/nwaku/commit/bbded9ee)) +- **rln-relay:** metadata ffi api ([#1803](https://github.com/waku-org/nwaku/issues/1803)) ([045f07c6](https://github.com/waku-org/nwaku/commit/045f07c6)) + +### Bug Fixes + +- bring back default topic in config ([#1902](https://github.com/waku-org/nwaku/issues/1902)) ([d5d2243c](https://github.com/waku-org/nwaku/commit/d5d2243c)) +- **ci:** only add comment on PR and do not duplicate 
it ([#1908](https://github.com/waku-org/nwaku/issues/1908)) ([b785b6ba](https://github.com/waku-org/nwaku/commit/b785b6ba)) +- **ci:** add mising OS arch option to image build ([#1905](https://github.com/waku-org/nwaku/issues/1905)) ([2575f3c4](https://github.com/waku-org/nwaku/commit/2575f3c4)) +- **wakucanary:** add missing return on timeout ([#1901](https://github.com/waku-org/nwaku/issues/1901)) ([7dce0b9e](https://github.com/waku-org/nwaku/commit/7dce0b9e)) +- fixes out of bounds crash when waku2 is not set ([#1895](https://github.com/waku-org/nwaku/issues/1895)) ([03363f1b](https://github.com/waku-org/nwaku/commit/03363f1b)) +- **wakucanary:** add enr record to builder ([#1882](https://github.com/waku-org/nwaku/issues/1882)) ([831a093f](https://github.com/waku-org/nwaku/commit/831a093f)) +- check nil before calling clearTimer ([#1869](https://github.com/waku-org/nwaku/issues/1869)) ([2fc48842](https://github.com/waku-org/nwaku/commit/2fc48842)) +- **rln-relay:** mark duplicated messages as spam ([#1867](https://github.com/waku-org/nwaku/issues/1867)) ([4756ccc1](https://github.com/waku-org/nwaku/commit/4756ccc1)) +- **ci:** do not depend on number of procesors with job name ([#1863](https://github.com/waku-org/nwaku/issues/1863)) ([c560af11](https://github.com/waku-org/nwaku/commit/c560af11)) +- **libp2p:** Updating nim-libp2p to fix the `wss` connectivity issue ([#1848](https://github.com/waku-org/nwaku/issues/1848)) ([1d3410c7](https://github.com/waku-org/nwaku/commit/1d3410c7)) +- **rln-relay:** chunk event fetching ([#1830](https://github.com/waku-org/nwaku/issues/1830)) ([e4d9ee1f](https://github.com/waku-org/nwaku/commit/e4d9ee1f)) +- **discv5:** Fixing issue that prevented the wakunode2 from starting ([#1829](https://github.com/waku-org/nwaku/issues/1829)) ([3aefade6](https://github.com/waku-org/nwaku/commit/3aefade6)) +- sanity-check the docker image start ([ae05f0a8](https://github.com/waku-org/nwaku/commit/ae05f0a8)) +- **ci:** fix broken test with 
wrong import ([#1820](https://github.com/waku-org/nwaku/issues/1820)) ([4573e8c5](https://github.com/waku-org/nwaku/commit/4573e8c5)) +- temporary fix to disable default experimental builds on fleets ([#1810](https://github.com/waku-org/nwaku/issues/1810)) ([e9028618](https://github.com/waku-org/nwaku/commit/e9028618)) +- **rln-relay:** tree race condition upon initialization ([#1807](https://github.com/waku-org/nwaku/issues/1807)) ([f8e270fb](https://github.com/waku-org/nwaku/commit/f8e270fb)) +- fix mac docker build alpine version ([#1801](https://github.com/waku-org/nwaku/issues/1801)) ([fce845bb](https://github.com/waku-org/nwaku/commit/fce845bb)) +- **rln-relay:** flaky static group manager test ([#1798](https://github.com/waku-org/nwaku/issues/1798)) ([0e9ecbd6](https://github.com/waku-org/nwaku/commit/0e9ecbd6)) + +### Changes + +- remove references to v2 ([#1898](https://github.com/waku-org/nwaku/issues/1898)) ([b9d5d28a](https://github.com/waku-org/nwaku/commit/b9d5d28a)) +- **submodules:** use zerokit v0.3.1 only ([#1886](https://github.com/waku-org/nwaku/issues/1886)) ([311f5ea0](https://github.com/waku-org/nwaku/commit/311f5ea0)) +- remove Waku v1 and wakubridge code ([#1874](https://github.com/waku-org/nwaku/issues/1874)) ([ab344a9d](https://github.com/waku-org/nwaku/commit/ab344a9d)) +- **cbindings:** libwaku - run waku node in a secondary working thread ([#1865](https://github.com/waku-org/nwaku/issues/1865)) ([069c1ad2](https://github.com/waku-org/nwaku/commit/069c1ad2)) +- update docs link ([#1850](https://github.com/waku-org/nwaku/issues/1850)) ([d2b6075b](https://github.com/waku-org/nwaku/commit/d2b6075b)) +- **changelog:** release notes for v0.19.0 ([#1861](https://github.com/waku-org/nwaku/issues/1861)) ([32c1276f](https://github.com/waku-org/nwaku/commit/32c1276f)) +- **rln-relay:** verify proofs based on bandwidth usage ([#1844](https://github.com/waku-org/nwaku/issues/1844)) ([3fe4522a](https://github.com/waku-org/nwaku/commit/3fe4522a)) +- 
**rln-relay:** bump zerokit ([#1838](https://github.com/waku-org/nwaku/issues/1838)) ([4f0bdf9a](https://github.com/waku-org/nwaku/commit/4f0bdf9a)) +- bump nim-libp2p to 224f92e ([661638da](https://github.com/waku-org/nwaku/commit/661638da)) +- **refactor:** Move record creation & fix libwaku compilation ([#1833](https://github.com/waku-org/nwaku/issues/1833)) ([97d3b9f7](https://github.com/waku-org/nwaku/commit/97d3b9f7)) +- discv5 re-org clean-up ([#1823](https://github.com/waku-org/nwaku/issues/1823)) ([cf46fb7c](https://github.com/waku-org/nwaku/commit/cf46fb7c)) +- **networking:** disconnect due to colocation ip in conn handler ([#1821](https://github.com/waku-org/nwaku/issues/1821)) ([e12c979c](https://github.com/waku-org/nwaku/commit/e12c979c)) +- **rln-relay:** bump zerokit for version fix ([#1822](https://github.com/waku-org/nwaku/issues/1822)) ([add294a9](https://github.com/waku-org/nwaku/commit/add294a9)) +- move discv5 out of node. ([#1818](https://github.com/waku-org/nwaku/issues/1818)) ([62d36530](https://github.com/waku-org/nwaku/commit/62d36530)) +- **archive:** Moving waku archive logic from app.nim to the archive module ([#1817](https://github.com/waku-org/nwaku/issues/1817)) ([52894a82](https://github.com/waku-org/nwaku/commit/52894a82)) +- add peer manager config to builder ([#1816](https://github.com/waku-org/nwaku/issues/1816)) ([71c4ac16](https://github.com/waku-org/nwaku/commit/71c4ac16)) +- discv5 re-org setup ([#1815](https://github.com/waku-org/nwaku/issues/1815)) ([44f9d8dc](https://github.com/waku-org/nwaku/commit/44f9d8dc)) +- **databases:** Creation of the databases folder to keep the logic for sqlite and postgres ([#1811](https://github.com/waku-org/nwaku/issues/1811)) ([a44d4bfb](https://github.com/waku-org/nwaku/commit/a44d4bfb)) +- **deps:** bump libp2p & websock ([#1800](https://github.com/waku-org/nwaku/issues/1800)) ([f6e89c31](https://github.com/waku-org/nwaku/commit/f6e89c31)) + +This release supports the following [libp2p 
protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation has been removed from this repository and can be found in a separate [Waku Legacy](https://github.com/waku-org/waku-legacy) repository. + +## Upgrade instructions + +* Note that the `--topic` CLI option is being deprecated in favor of a more specific option `--pubsub-topic`. The `--topic` option will be available for the next 2 releases with a deprecation note. + +## 2023-06-14 v0.18.0 + +> Note that there is a new naming scheme for release artifacts. + +## What's Changed + +Release highlights: +* Support for Gossipsub scoring +* [Rendezvous discovery protocol](https://docs.libp2p.io/concepts/discovery-routing/rendezvous/) enabled by default with relay +* Initial support for postgresql as Store backend +* Atomic operations for insertions and deletions included in rln-relay + +### Features + +- **postgres:** complete implementation of driver and apply more tests ([#1785](https://github.com/waku-org/nwaku/issues/1785)) ([5fc5770d](https://github.com/waku-org/nwaku/commit/5fc5770d)) +- **postgres:** adding a postgres async pool to make the db interactions asynchronous ([#1779](https://github.com/waku-org/nwaku/issues/1779)) ([cb2e3d86](https://github.com/waku-org/nwaku/commit/cb2e3d86)) +- **rln-relay:** pass in index to keystore credentials ([#1777](https://github.com/waku-org/nwaku/issues/1777)) ([a00aa8cc](https://github.com/waku-org/nwaku/commit/a00aa8cc)) +- **networking:** integrate gossipsub scoring ([#1769](https://github.com/waku-org/nwaku/issues/1769)) ([34a92631](https://github.com/waku-org/nwaku/commit/34a92631)) +- **discv5:** added find random nodes with predicate
([#1762](https://github.com/waku-org/nwaku/issues/1762)) ([#1763](https://github.com/waku-org/nwaku/issues/1763)) ([21737c7c](https://github.com/waku-org/nwaku/commit/21737c7c)) +- **wakunode2:** enable libp2p rendezvous protocol by default ([#1770](https://github.com/waku-org/nwaku/issues/1770)) ([835a409d](https://github.com/waku-org/nwaku/commit/835a409d)) +- **postgresql:** align previous work's PR[#1590](https://github.com/waku-org/nwaku/issues/1590) changes into master ([#1764](https://github.com/waku-org/nwaku/issues/1764)) ([7df6f4c8](https://github.com/waku-org/nwaku/commit/7df6f4c8)) +- **networking:** prune peers from same ip beyond collocation limit ([#1765](https://github.com/waku-org/nwaku/issues/1765)) ([047d1cf0](https://github.com/waku-org/nwaku/commit/047d1cf0)) +- **ci:** add nightly builds ([#1758](https://github.com/waku-org/nwaku/issues/1758)) ([473af70a](https://github.com/waku-org/nwaku/commit/473af70a)) +- **postgresql:** 1st commit to async sql (waku_archive/driver...) 
([#1755](https://github.com/waku-org/nwaku/issues/1755)) ([59ca03a8](https://github.com/waku-org/nwaku/commit/59ca03a8)) +- **ci:** add release-notes target ([#1734](https://github.com/waku-org/nwaku/issues/1734)) ([ceb54b18](https://github.com/waku-org/nwaku/commit/ceb54b18)) +- **rln-relay:** use new atomic_operation ffi api ([#1733](https://github.com/waku-org/nwaku/issues/1733)) ([611e9539](https://github.com/waku-org/nwaku/commit/611e9539)) + +### Bug Fixes + +- **ci:** enforce basic CPU instruction set to prevent CI issues ([#1759](https://github.com/waku-org/nwaku/issues/1759)) ([35520bd0](https://github.com/waku-org/nwaku/commit/35520bd0)) +- **test:** wait more for gossip ([#1753](https://github.com/waku-org/nwaku/issues/1753)) ([0fce3d83](https://github.com/waku-org/nwaku/commit/0fce3d83)) +- **rln-relay:** keystore usage ([#1750](https://github.com/waku-org/nwaku/issues/1750)) ([36266b43](https://github.com/waku-org/nwaku/commit/36266b43)) +- **ci:** fix flaky test for dos topic ([#1747](https://github.com/waku-org/nwaku/issues/1747)) ([46e231d0](https://github.com/waku-org/nwaku/commit/46e231d0)) +- **rln-relay:** trace log ([#1743](https://github.com/waku-org/nwaku/issues/1743)) ([5eae60e8](https://github.com/waku-org/nwaku/commit/5eae60e8)) +- **ci:** make experimental default to true in fleet deployment ([#1742](https://github.com/waku-org/nwaku/issues/1742)) ([b148c305](https://github.com/waku-org/nwaku/commit/b148c305)) + +### Changes + +- **rln:** bump zerokit ([#1787](https://github.com/waku-org/nwaku/issues/1787)) ([9c04b59b](https://github.com/waku-org/nwaku/commit/9c04b59b)) +- **ci:** extend and rename nightly workflow to support RC builds ([#1784](https://github.com/waku-org/nwaku/issues/1784)) ([96074071](https://github.com/waku-org/nwaku/commit/96074071)) +- **rln-relay:** pass in the path to the tree db ([#1782](https://github.com/waku-org/nwaku/issues/1782)) ([dba84248](https://github.com/waku-org/nwaku/commit/dba84248)) +- 
**rln-relay:** update tree_config ([#1781](https://github.com/waku-org/nwaku/issues/1781)) ([ba8ec704](https://github.com/waku-org/nwaku/commit/ba8ec704)) +- **ci:** properly set os and architecture for nightly and release ([#1780](https://github.com/waku-org/nwaku/issues/1780)) ([44bcf0f2](https://github.com/waku-org/nwaku/commit/44bcf0f2)) +- **ci:** remove add-to-project workflow ([#1778](https://github.com/waku-org/nwaku/issues/1778)) ([a9505892](https://github.com/waku-org/nwaku/commit/a9505892)) +- **ci:** add experimental builds to nightly ([#1761](https://github.com/waku-org/nwaku/issues/1761)) ([ffac7761](https://github.com/waku-org/nwaku/commit/ffac7761)) +- **px:** close px streams after resp is sent ([#1746](https://github.com/waku-org/nwaku/issues/1746)) ([3c2d2891](https://github.com/waku-org/nwaku/commit/3c2d2891)) +- **docs:** fix docs and mark some as deprecated ([#1754](https://github.com/waku-org/nwaku/issues/1754)) ([b51fb616](https://github.com/waku-org/nwaku/commit/b51fb616)) +- **makefile:** unify where chronicles_log_level is set ([#1748](https://github.com/waku-org/nwaku/issues/1748)) ([39902dc2](https://github.com/waku-org/nwaku/commit/39902dc2)) +- **rln-relay:** docs and config update for testnet 3 ([#1738](https://github.com/waku-org/nwaku/issues/1738)) ([bb9d231b](https://github.com/waku-org/nwaku/commit/bb9d231b)) +- **rln-relay:** update metrics dashboard ([#1745](https://github.com/waku-org/nwaku/issues/1745)) ([0ced2195](https://github.com/waku-org/nwaku/commit/0ced2195)) +- **rln-relay:** updated metrics for testnet 3 ([#1744](https://github.com/waku-org/nwaku/issues/1744)) ([62578746](https://github.com/waku-org/nwaku/commit/62578746)) +- **networking:** set and use target outbound connections + prune ([#1739](https://github.com/waku-org/nwaku/issues/1739)) ([87f694a8](https://github.com/waku-org/nwaku/commit/87f694a8)) +- proper use of setupNat ([#1740](https://github.com/waku-org/nwaku/issues/1740)) 
([665484c1](https://github.com/waku-org/nwaku/commit/665484c1)) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## Upgrade instructions + +There is a new naming scheme for release artifacts - `nwaku-${ARCHITECTURE}-${OS}-${VERSION}.tar.gz`. If you use any automation to download latest release, you may need to update it. + +The `--topics` config option has been deprecated to unify the configuration style. It is still available in this release but will be removed in the next one. The new option `--topic` is introduced, which can be used repeatedly to achieve the same behavior. + +## 2023-05-17 v0.17.0 + +> Note that the --topics config item has been deprecated and support will be dropped in future releases. To configure support for multiple pubsub topics, use the new --topic parameter repeatedly. + +## What's Changed + +Release highlights: +* New REST API for Waku Store protocol. +* New Filter protocol implementation. See [12/WAKU2-FILTER](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md). +* Initial C bindings support. +* Support for Heaptrack to investigate memory utilization ([tutorial](https://github.com/waku-org/nwaku/blob/master/docs/tutorial/heaptrack.md)). 
+ +### Features + +- **cbindings:** first commit - waku relay ([#1632](https://github.com/waku-org/nwaku/issues/1632)) ([#1714](https://github.com/waku-org/nwaku/issues/1714)) ([2defbd23](https://github.com/waku-org/nwaku/commit/2defbd23)) +- example using filter and lightpush ([#1720](https://github.com/waku-org/nwaku/issues/1720)) ([8987d4a3](https://github.com/waku-org/nwaku/commit/8987d4a3)) +- configure protected topics via cli ([#1696](https://github.com/waku-org/nwaku/issues/1696)) ([16b44523](https://github.com/waku-org/nwaku/commit/16b44523)) +- **mem-analysis:** Adding Dockerfile_with_heaptrack ([#1681](https://github.com/waku-org/nwaku/issues/1681)) ([9b9172ab](https://github.com/waku-org/nwaku/commit/9b9172ab)) +- add metrics with msg size histogram ([#1697](https://github.com/waku-org/nwaku/issues/1697)) ([67e96ba8](https://github.com/waku-org/nwaku/commit/67e96ba8)) +- curate peers shared over px protocol ([#1671](https://github.com/waku-org/nwaku/issues/1671)) ([14305c61](https://github.com/waku-org/nwaku/commit/14305c61)) +- **enr:** added support for relay shards field ([96162536](https://github.com/waku-org/nwaku/commit/96162536)) +- add tools make target and build tools in CI ([#1668](https://github.com/waku-org/nwaku/issues/1668)) ([d5979e94](https://github.com/waku-org/nwaku/commit/d5979e94)) +- integrate new filter protocol, other improvements ([#1637](https://github.com/waku-org/nwaku/issues/1637)) ([418efca2](https://github.com/waku-org/nwaku/commit/418efca2)) +- **rest-api-store:** new rest api to retrieve store waku messages ([#1611](https://github.com/waku-org/nwaku/issues/1611)) ([#1630](https://github.com/waku-org/nwaku/issues/1630)) ([b2acb54d](https://github.com/waku-org/nwaku/commit/b2acb54d)) +- **node:** added waku node builder type ([e931fa5d](https://github.com/waku-org/nwaku/commit/e931fa5d)) +- dos protected topic relay msgs based on meta field ([#1614](https://github.com/waku-org/nwaku/issues/1614)) 
([c26dcb2b](https://github.com/waku-org/nwaku/commit/c26dcb2b)) +- further filter improvements ([#1617](https://github.com/waku-org/nwaku/issues/1617)) ([d920b973](https://github.com/waku-org/nwaku/commit/d920b973)) +- **common:** added extensible implementation of the enr typed record ([ac56e1dc](https://github.com/waku-org/nwaku/commit/ac56e1dc)) +- **rln-relay:** fetch release from zerokit ci, or build ([#1603](https://github.com/waku-org/nwaku/issues/1603)) ([179be681](https://github.com/waku-org/nwaku/commit/179be681)) +- **filter-v2:** new filter protocol increment - message handling and clients ([#1600](https://github.com/waku-org/nwaku/issues/1600)) ([be446b98](https://github.com/waku-org/nwaku/commit/be446b98)) + +### Fixes + +- **ci:** remove target flag from docker command ([#1725](https://github.com/waku-org/nwaku/issues/1725)) ([d822cdc5](https://github.com/waku-org/nwaku/commit/d822cdc5)) +- wakunode2 config. adding new 'topic' config parameter. ([#1727](https://github.com/waku-org/nwaku/issues/1727)) ([2ec9809c](https://github.com/waku-org/nwaku/commit/2ec9809c)) +- streams was used instead of connections ([#1722](https://github.com/waku-org/nwaku/issues/1722)) ([b9e0763e](https://github.com/waku-org/nwaku/commit/b9e0763e)) +- change filter request default behaviour to ping ([#1721](https://github.com/waku-org/nwaku/issues/1721)) ([7c39be9a](https://github.com/waku-org/nwaku/commit/7c39be9a)) +- **rln-relay:** handle invalid deletes ([#1717](https://github.com/waku-org/nwaku/issues/1717)) ([81dffee8](https://github.com/waku-org/nwaku/commit/81dffee8)) +- fix filter v2 proto fields ([#1716](https://github.com/waku-org/nwaku/issues/1716)) ([68a39c65](https://github.com/waku-org/nwaku/commit/68a39c65)) +- unstable peers in mesh ([#1710](https://github.com/waku-org/nwaku/issues/1710)) ([703c3ab5](https://github.com/waku-org/nwaku/commit/703c3ab5)) +- **networkmonitor:** break import dependency with wakunode2 app 
([043feacd](https://github.com/waku-org/nwaku/commit/043feacd)) +- import nimchronos instead heartbeat ([#1695](https://github.com/waku-org/nwaku/issues/1695)) ([7d12adf6](https://github.com/waku-org/nwaku/commit/7d12adf6)) +- **rest:** change rest server result error type to string ([d5ef9331](https://github.com/waku-org/nwaku/commit/d5ef9331)) +- **rln-relay:** scope of getEvents ([#1672](https://github.com/waku-org/nwaku/issues/1672)) ([b62193e5](https://github.com/waku-org/nwaku/commit/b62193e5)) +- **logs:** fix log reporting wrong ok connected peers ([#1675](https://github.com/waku-org/nwaku/issues/1675)) ([1a885b96](https://github.com/waku-org/nwaku/commit/1a885b96)) +- move canBeConnected to PeerManager and check for potential overflow ([#1670](https://github.com/waku-org/nwaku/issues/1670)) ([d5c2770c](https://github.com/waku-org/nwaku/commit/d5c2770c)) +- wrap untracked protocol handler exceptions ([9e1432c9](https://github.com/waku-org/nwaku/commit/9e1432c9)) +- **wakunode2:** made setup nat return errors ([1cfb251b](https://github.com/waku-org/nwaku/commit/1cfb251b)) +- fixed multiple bare except warnings ([caf78249](https://github.com/waku-org/nwaku/commit/caf78249)) +- bump libp2p with traffic metrics fix ([#1642](https://github.com/waku-org/nwaku/issues/1642)) ([0ef46673](https://github.com/waku-org/nwaku/commit/0ef46673)) +- **rln-relay:** buildscript bad cp ([#1636](https://github.com/waku-org/nwaku/issues/1636)) ([bd9857c1](https://github.com/waku-org/nwaku/commit/bd9857c1)) +- **wakunode2:** fix main warnings and drop swap support ([f95147f5](https://github.com/waku-org/nwaku/commit/f95147f5)) +- **rln-relay:** on chain registration ([#1627](https://github.com/waku-org/nwaku/issues/1627)) ([b1bafda2](https://github.com/waku-org/nwaku/commit/b1bafda2)) +- connect instead of dialing relay peers ([#1622](https://github.com/waku-org/nwaku/issues/1622)) ([85f33a8e](https://github.com/waku-org/nwaku/commit/85f33a8e)) +- fix hash size greater than 32 
([#1621](https://github.com/waku-org/nwaku/issues/1621)) ([c42ac16f](https://github.com/waku-org/nwaku/commit/c42ac16f)) + +### Changes + +- **ci:** cache all of submodules/deps to speed up build time ([#1731](https://github.com/waku-org/nwaku/issues/1731)) ([4394c69d](https://github.com/waku-org/nwaku/commit/4394c69d)) +- **rln-relay:** update args to contract ([#1724](https://github.com/waku-org/nwaku/issues/1724)) ([b277ce10](https://github.com/waku-org/nwaku/commit/b277ce10)) +- **rln-relay:** use new config for ffi ([#1718](https://github.com/waku-org/nwaku/issues/1718)) ([44c54312](https://github.com/waku-org/nwaku/commit/44c54312)) +- adding new tutorial on how to handle heaptrack with nim waku ([#1719](https://github.com/waku-org/nwaku/issues/1719)) ([4b59e472](https://github.com/waku-org/nwaku/commit/4b59e472)) +- add timestamp and ephemeral for opt-in dos validator ([#1713](https://github.com/waku-org/nwaku/issues/1713)) ([3e0a693d](https://github.com/waku-org/nwaku/commit/3e0a693d)) +- add test vectors dos protection validator ([#1711](https://github.com/waku-org/nwaku/issues/1711)) ([eaa162ee](https://github.com/waku-org/nwaku/commit/eaa162ee)) +- add validator for dos protec metrics and move to app ([#1704](https://github.com/waku-org/nwaku/issues/1704)) ([3e146869](https://github.com/waku-org/nwaku/commit/3e146869)) +- use QUICK_AND_DIRTY_COMPILER flag for CI ([#1708](https://github.com/waku-org/nwaku/issues/1708)) ([21510425](https://github.com/waku-org/nwaku/commit/21510425)) +- move networkmonitor and wakucanary to apps directory ([209579b0](https://github.com/waku-org/nwaku/commit/209579b0)) +- **wakunode2:** flatten and simplify app setup ([#1705](https://github.com/waku-org/nwaku/issues/1705)) ([ce92fc1a](https://github.com/waku-org/nwaku/commit/ce92fc1a)) +- **wakunode2:** split setup logic into app module ([c8081c88](https://github.com/waku-org/nwaku/commit/c8081c88)) +- add payload bytes to trace log 
([#1703](https://github.com/waku-org/nwaku/issues/1703)) ([c6d291d3](https://github.com/waku-org/nwaku/commit/c6d291d3)) +- refactor flaky test with while ([#1698](https://github.com/waku-org/nwaku/issues/1698)) ([dca0e9b2](https://github.com/waku-org/nwaku/commit/dca0e9b2)) +- **core:** move peers utils module to waku_core ([e041e043](https://github.com/waku-org/nwaku/commit/e041e043)) +- decouple test2 target from testcommon ([91baa232](https://github.com/waku-org/nwaku/commit/91baa232)) +- **core:** move utils time module to waku_core ([93b0c071](https://github.com/waku-org/nwaku/commit/93b0c071)) +- add deprecation notice to utils module. move heartbeat to common ([e8dceb2a](https://github.com/waku-org/nwaku/commit/e8dceb2a)) +- **core:** rename waku_message module to waku_core ([c9b6b230](https://github.com/waku-org/nwaku/commit/c9b6b230)) +- flatten waku v2 protocols folder ([d7b72ac7](https://github.com/waku-org/nwaku/commit/d7b72ac7)) +- fix test failing intermittently ([#1679](https://github.com/waku-org/nwaku/issues/1679)) ([8d213e85](https://github.com/waku-org/nwaku/commit/8d213e85)) +- **networking:** get relay number of connections from protocol conns/streams ([#1609](https://github.com/waku-org/nwaku/issues/1609)) ([73cbafa6](https://github.com/waku-org/nwaku/commit/73cbafa6)) +- allow to call store api endpoints without a storenode ([#1575](https://github.com/waku-org/nwaku/issues/1575)) ([#1647](https://github.com/waku-org/nwaku/issues/1647)) ([0b4a2e68](https://github.com/waku-org/nwaku/commit/0b4a2e68)) +- bump container image versions to v0.16.0 in quickstart ([#1640](https://github.com/waku-org/nwaku/issues/1640)) ([5c33d9d1](https://github.com/waku-org/nwaku/commit/5c33d9d1)) +- **node:** remove deprecated constructor and extend testlib with builder ([9dadc1f5](https://github.com/waku-org/nwaku/commit/9dadc1f5)) +- do not mount relay more than once ([#1650](https://github.com/waku-org/nwaku/issues/1650)) 
([5d853b86](https://github.com/waku-org/nwaku/commit/5d853b86)) +- pointed all waku node imports to the barrel import ([e8448dfd](https://github.com/waku-org/nwaku/commit/e8448dfd)) +- **node:** added waku_node barrel import and split config module ([13942888](https://github.com/waku-org/nwaku/commit/13942888)) +- remove deprecated enr record init method ([0627b4f8](https://github.com/waku-org/nwaku/commit/0627b4f8)) +- **deps:** upgrade nim-chronos and nim-presto to latest version ([7c229ece](https://github.com/waku-org/nwaku/commit/7c229ece)) +- remove waku swap protocol ([2b5fd2a2](https://github.com/waku-org/nwaku/commit/2b5fd2a2)) +- **deps:** upgrade nim-confutils to latest version ([67fa736d](https://github.com/waku-org/nwaku/commit/67fa736d)) +- **rln-relay:** gracefully handle chain forks ([#1623](https://github.com/waku-org/nwaku/issues/1623)) ([00a3812b](https://github.com/waku-org/nwaku/commit/00a3812b)) +- bump nim-libp2p 53b060f ([#1633](https://github.com/waku-org/nwaku/issues/1633)) ([11ff93c2](https://github.com/waku-org/nwaku/commit/11ff93c2)) +- added testcommon target to makefile ([048ca45d](https://github.com/waku-org/nwaku/commit/048ca45d)) +- increase meta size to 64 bytes + tests ([#1629](https://github.com/waku-org/nwaku/issues/1629)) ([1f793756](https://github.com/waku-org/nwaku/commit/1f793756)) +- **enr:** move waku enr multiaddr to typedrecord and builder extensions ([2ffd2f80](https://github.com/waku-org/nwaku/commit/2ffd2f80)) +- **enr:** added waku2 capabilities accessor ([157724d9](https://github.com/waku-org/nwaku/commit/157724d9)) +- **rln-relay:** reduce exports ([#1615](https://github.com/waku-org/nwaku/issues/1615)) ([2f3ba3d6](https://github.com/waku-org/nwaku/commit/2f3ba3d6)) +- add dash between target and version ([#1613](https://github.com/waku-org/nwaku/issues/1613)) ([24d62791](https://github.com/waku-org/nwaku/commit/24d62791)) +- **release:** added regression checking and clarifications 
([#1610](https://github.com/waku-org/nwaku/issues/1610)) ([b495dd7b](https://github.com/waku-org/nwaku/commit/b495dd7b)) + + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1`
`/vac/waku/filter-subscribe/2.0.0-beta1`
`/vac/waku/filter-push/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## Upgrade instructions + +* The `--topics` config option has been deprecated to unify the configuration style. It is still available in this and will be in the next release, but will be removed after that. The new option `--topic` is introduced, which can be used repeatedly to achieve the same behaviour. + +## 2023-03-15 v0.16.0 + +## What's Changed + +Release highlights: +- a fix for an issue that prevented the node from generating high-resolution (up to nanosecond) timestamps +- introduction of an application-defined `meta` attribute to the Waku Message. This can be quite valuable for network-wide deduplication, deterministic hashing, validity checking and other planned improvements to the protocol +- many optimizations in RLN implementation and its underlying dependencies + +### Features + +- Integrated a new group manager for RLN-protected relay [1496](https://github.com/waku-org/nwaku/pull/1496) +- Added application-defined meta attribute to Waku Message according to RFC [14/WAKU2-MESSAGE](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/14/message.md#message-attributes) [1581](https://github.com/waku-org/nwaku/pull/1581) +- Implemented deterministic hashing scheme for Waku Messages according to RFC [14/WAKU2-MESSAGE](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/14/message.md#deterministic-message-hashing) [1586](https://github.com/waku-org/nwaku/pull/1586) + +### Changes + +- Upgraded nim-sqlite3-abi to the latest version [1565](https://github.com/waku-org/nwaku/pull/1565) +- Better validation of protocol buffers 
[1563](https://github.com/waku-org/nwaku/pull/1563) +- Improved underlying Zerokit performance and FFI [1571](https://github.com/waku-org/nwaku/pull/1571) +- Node peer ID now logged with relay trace logging [1574](https://github.com/waku-org/nwaku/pull/1574) +- Continued refactoring of several protocol implementations to improve maintainability and readability +- Refactored and cleaned up peer manager [1539](https://github.com/waku-org/nwaku/pull/1539) +- Removed unused and legacy websocket submodule [1580](https://github.com/waku-org/nwaku/pull/1580) [1582](https://github.com/waku-org/nwaku/pull/1582) +- Use base64 URL-safe encoding for noise [1569](https://github.com/waku-org/nwaku/pull/1569) +- Various general improvements to RLN implementation [1585](https://github.com/waku-org/nwaku/pull/1585) [1587](https://github.com/waku-org/nwaku/pull/1587) +- Started on implementation for new and improved filter protocol [1584](https://github.com/waku-org/nwaku/pull/1584) +- Updated pubsub and content topic namespacing to reflect latest changes in RFC [23/WAKU2-TOPICS](https://github.com/vacp2p/rfc-index/blob/main/waku/informational/23/topics.md) [1589](https://github.com/waku-org/nwaku/pull/1589) +- Unified internal peer data models [1597](https://github.com/waku-org/nwaku/pull/1597) +- Improved internal implementation of Waku ENR encoding and decoding [1598](https://github.com/waku-org/nwaku/pull/1598) [1599](https://github.com/waku-org/nwaku/pull/1599) +- Underlying dependency for RLN implementation now loaded as a static library [1578](https://github.com/waku-org/nwaku/pull/1578) + +### Fixes + +- Fixed internally generated timestamps to allow higher resolution than seconds [1570](https://github.com/waku-org/nwaku/pull/1570) +- Fixed padded base64 usage for encoding and decoding payloads on the JSON RPC API [1572](https://github.com/waku-org/nwaku/pull/1572) +- Fixed incorrect relative module imports [1591](https://github.com/waku-org/nwaku/pull/1591) +- Fixed RLN 
relay erroneously storing messages from multiple apps [1594](https://github.com/waku-org/nwaku/pull/1594) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. 
+ +## 2023-02-15 v0.15.0 + +Release highlights: +- Relay connectivity is now maintained by a management loop that selects from the peerstore +- Ability to manually specify `multiaddrs` for the nwaku node to advertise +- Two important fixes related to historical message queries: + - fixed archive bug that resulted in duplicate messages in store query response + - fixed query page size limit not being respected + +### Features + +- New connectivity loop to maintain relay connectivity from peerstore [1482](https://github.com/waku-org/nwaku/pull/1482) [1462](https://github.com/waku-org/nwaku/pull/1462) +- Support for manually specifying `multiaddrs` to advertise [1509](https://github.com/waku-org/nwaku/pull/1509) [1512](https://github.com/waku-org/nwaku/pull/1512) +- Added dynamic keystore for membership credential storage and management [1466](https://github.com/waku-org/nwaku/pull/1466) + +### Changes + +- Abstracted RLN relay group management into its own API [1465](https://github.com/waku-org/nwaku/pull/1465) +- Prune peers from peerstore when exceeding capacity [1513](https://github.com/waku-org/nwaku/pull/1513) +- Removed Kilic submodule [1517](https://github.com/waku-org/nwaku/pull/1517) +- Continued refactoring of several protocol implementations to improve maintainability and readability +- Refactored and improved JSON RPC API +- Added safe default values for peer-store-capacity [1525](https://github.com/waku-org/nwaku/pull/1525) +- Improvements in regular CI test reliability and repeatability +- Improved archive query performance [1510](https://github.com/waku-org/nwaku/pull/1510) +- Added better e2e trace logging for relay messages [1526](https://github.com/waku-org/nwaku/pull/1526) +- Relay RPC API now encodes message payloads in base64 [572](https://github.com/vacp2p/rfc/pull/572) [1555](https://github.com/waku-org/nwaku/pull/1555) + +### Fixes + +- Fixed Waku archive queries returning duplicate messages due to incorrect reordering 
[1511](https://github.com/waku-org/nwaku/pull/1511) +- Fixed Admin RPC API crashing on returning peer with no multiaddresses [1507](https://github.com/waku-org/nwaku/pull/1507) +- Fixed page size limit not being respected in store query responses [1520](https://github.com/waku-org/nwaku/pull/1520) +- Fixed nwaku subscribing to default pubsub topic even if not configured [1548](https://github.com/waku-org/nwaku/pull/1548) +- Fixed underlying issue causing node to incorrectly report it's unreachable [1518](https://github.com/waku-org/nwaku/pull/1518) [1546](https://github.com/waku-org/nwaku/pull/1546) +- Fixed Relay RPC API not adhering to RFC [1139](https://github.com/waku-org/nwaku/issues/1139) +- Fixed message IDs in nwaku diverging from those in go-waku [1556](https://github.com/waku-org/nwaku/pull/1556) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2023-01-16 v0.14.0 + +Release highlights: +- An important fix for the Waku message archive returning inconsistent responses to history queries. 
+- Support for [AutoNAT](https://docs.libp2p.io/concepts/nat/autonat/) and [libp2p Circuit Relay](https://docs.libp2p.io/concepts/nat/circuit-relay/) that allows, among other things, for [NAT hole punching](https://docs.libp2p.io/concepts/nat/hole-punching/). +- Support for structured logging in JSON format. +- A fix for an underlying file descriptor leak that affected websocket connections. + +### Features + +- Support for [AutoNAT](https://docs.libp2p.io/concepts/nat/autonat/) +- Support for [libp2p Circuit Relay](https://docs.libp2p.io/concepts/nat/circuit-relay/) (server only) +- New Waku Archive implementation. This allows easy addition of drivers for different technologies to store historical messages. +- Support for structured logging and specifying log format. +- Node now keeps track of its external reachability. + +### Changes + +- Zerokit RLN library now statically linked. +- Use extended key generation in Zerokit API to comply with [32/RLN](https://github.com/vacp2p/rfc-index/blob/main/vac/32/rln-v1.md). +- Re-enable root validation in [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation. +- [Network monitoring tool](https://github.com/status-im/nwaku/tree/2336522d7f478337237a5a4ec8c5702fb4babc7d/tools#networkmonitor) now supports DNS discovery. +- Added [dashboard](https://github.com/waku-org/nwaku/blob/3e0e1cb2398297fca761aa74f52d32fa837d556c/metrics/waku-network-monitor-dashboard.json) for network monitoring. +- Continued refactoring of several protocol implementations to improve maintainability and readability. +- Removed swap integration from store protocol. +- Peerstore now consolidated with libp2p peerstore. +- Peerstore now also tracks peer direction. +- SIGSEGV signals are now handled and logged properly. +- Waku v2 no longer imports libraries from Waku v1. +- Improved build and CI processes: + - Added support for an `EXPERIMENTAL` compiler flag. + - Simplified project Makefile. 
+ - Split Dockerfile into production and experimental stages. + - Removed obsolete simulation libraries from build. +- Improved parallellisation (and therefore processing time) when dialing several peers simultaneously. +- Waku Archive now responds with error to historical queries containing more than 10 content topics. + +### Fixes + +- Fixed support for optional fields in several protocol rpc codecs. [#1393](https://github.com/waku-org/nwaku/pull/1393) [#1395](https://github.com/waku-org/nwaku/pull/1395) [#1396](https://github.com/waku-org/nwaku/pull/1396) +- Fixed clients with `--store=false` not installing Store Client JSON-RPC API handlers. [#1382](https://github.com/waku-org/nwaku/pull/1382) +- Fixed SQLite driver returning inconsistent responses to store queries. [#1415](https://github.com/waku-org/nwaku/pull/1415) +- Fixed peer exchange discv5 loop starting before discv5 has started. [#1407](https://github.com/waku-org/nwaku/pull/1407) +- Fixed wakubridge test timing. [#1429](https://github.com/waku-org/nwaku/pull/1429) +- Fixed bug in Noise module types equating `T_ss` incorrectly to `"se"` and not `"ss"`. [#1432](https://github.com/waku-org/nwaku/pull/1432) +- Fixed Ctrl-C quitting resulting in unreleased resources and exit failures. [#1416](https://github.com/waku-org/nwaku/pull/1416) +- Fixed CI workflows not cloning repo on startup. [#1454](https://github.com/waku-org/nwaku/pull/1454) [#1455](https://github.com/waku-org/nwaku/pull/1455) +- Fixed Admin API peer connection not returning error response if peer can't be connected. [#1476](https://github.com/waku-org/nwaku/pull/1476) +- Fixed underlying file descriptor leak. [#1483](https://github.com/waku-org/nwaku/pull/1483) + +### Docs + +- Added [instructions](https://github.com/waku-org/nwaku/blob/3e0e1cb2398297fca761aa74f52d32fa837d556c/docs/operators/quickstart.md) for running nwaku with docker compose. 
+ +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2022-11-15 v0.13.0 + +Release highlights: +- A [Waku canary tool](https://github.com/status-im/nwaku/tree/2336522d7f478337237a5a4ec8c5702fb4babc7d/tools#waku-canary-tool) to check if nodes are reachable and what protocols they support. +- Simplified configuration for store protocol. This [new guide](https://github.com/status-im/nwaku/blob/4e5318bfbb204bd1239c95472d7b84b6a326dd9d/docs/operators/how-to/configure-store.md) explains how to configure store from this release forward. +- Support for environment variables to configure a nwaku node. See our [configuration guide](https://github.com/status-im/nwaku/blob/384abed614050bf3aa90c901d7f5e8bc383e8b22/docs/operators/how-to/configure.md) for more. +- A Waku [network monitoring tool](https://github.com/status-im/nwaku/tree/2336522d7f478337237a5a4ec8c5702fb4babc7d/tools#networkmonitor) to report network metrics, including network size, discoverable peer capabilities and more. 
+ +### Features + +- Added Waku canary tool to check if i) a given node is reachable and ii) it supports a set of protocols. +- Simplified [Waku store configuration](https://github.com/status-im/nwaku/blob/4e5318bfbb204bd1239c95472d7b84b6a326dd9d/docs/operators/how-to/configure-store.md). +- Decoupled Waku peer persistence configuration from message store configuration. +- Added keyfile support for secure storage of RLN credentials. +- Added configurable libp2p agent string to nwaku switch. +- Support for [configuration with environment variables](https://github.com/status-im/nwaku/blob/384abed614050bf3aa90c901d7f5e8bc383e8b22/docs/operators/how-to/configure.md). +- Added [example module](https://github.com/status-im/nwaku/tree/2336522d7f478337237a5a4ec8c5702fb4babc7d/examples/v2) to showcase basic nwaku relay usage. +- Added a nwaku [network monitoring tool](https://github.com/status-im/nwaku/tree/2336522d7f478337237a5a4ec8c5702fb4babc7d/tools#networkmonitor) to provide metrics on peers, network size and more. + +### Changes + +- Removed support for Kilic's RLN library (obsolete). +- Improved logging for [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation. +- Connection to eth node for RLN now more stable, maintains state and logs failures. +- Waku apps and tools now moved to their own subdirectory. +- Continued refactoring of several protocol implementations to improve maintainability and readability. +- Periodically log metrics when running RLN spam protection. +- Added metrics dashboard for RLN spam protection. +- Github CI test workflows are now run selectively, based on the content of a PR. +- Improved reliability of CI runs and added email notifications. +- Discv5 discovery loop now triggered to fill a [34/WAKU2-PEER-EXCHANGE](https://github.com/waku-org/specs/blob/master/standards/core/peer-exchange.md) peer list cache asynchronously. +- Upgraded to Nim v1.6.6. 
+- Cleaned up compiler warnings on unused imports. +- Improved exception handling and annotation. +- [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) no longer enabled by default on nwaku nodes. +- Merkle tree roots for RLN membership changes now on a per-block basis to allow poorly connected peers to operate within a window of acceptable roots. + +### Fixes + +- Fixed encoding of ID commitments for RLN from Big-Endian to Little-Endian. [#1256](https://github.com/status-im/nwaku/pull/1256) +- Fixed maxEpochGap to be the maximum allowed epoch gap (RLN). [#1257](https://github.com/status-im/nwaku/pull/1257) +- Fixed store cursors being retrieved incorrectly (truncated) from DB. [#1263](https://github.com/status-im/nwaku/pull/1263) +- Fixed message indexed by store cursor being excluded from history query results. [#1263](https://github.com/status-im/nwaku/pull/1263) +- Fixed log-level configuration being ignored by the nwaku node. [#1272](https://github.com/status-im/nwaku/pull/1272) +- Fixed incorrect error message when failing to set [34/WAKU2-PEER-EXCHANGE](https://github.com/waku-org/specs/blob/master/standards/core/peer-exchange.md) peer. [#1298](https://github.com/status-im/nwaku/pull/1298) +- Fixed and replaced deprecated `TaintedString` type. [#1326](https://github.com/status-im/nwaku/pull/1326) +- Fixed and replaced unreliable regex library and usage. [#1327](https://github.com/status-im/nwaku/pull/1327) [#1328](https://github.com/status-im/nwaku/pull/1328) +- Fixed and replaced deprecated `ganache-cli` node package with `ganache` for RLN onchain tests. Added graceful daemon termination. [#1347](https://github.com/status-im/nwaku/pull/1347) + +### Docs + +- Added cross client RLN testnet [tutorial](https://github.com/status-im/nwaku/blob/44d8a2026dc31a37e181043ceb67e2822376dc03/docs/tutorial/rln-chat-cross-client.md). 
+- Fixed broken link to Kibana in [cluster documentation](https://github.com/status-im/nwaku/blob/5e90085242e9e4d6f3cf307e189efbf7e59da9f9/docs/contributors/cluster-logs.md). +- Added an improved [quickstart guide](https://github.com/status-im/nwaku/blob/8f5363ea8f5e95fc1104307aa0d2fc59fda13698/docs/operators/quickstart.md) for operators. +- Added a [Docker usage guide](https://github.com/status-im/nwaku/blob/8f5363ea8f5e95fc1104307aa0d2fc59fda13698/docs/operators/docker-quickstart.md#prerequisites) for operators. +- Added operator [guide on running RLN spam prevention](https://github.com/status-im/nwaku/blob/bd516788cb39132ccbf0a4dcf0880e9694beb233/docs/operators/how-to/run-with-rln.md) on nwaku nodes. +- Extended guidelines on nwaku [configuration methods](https://github.com/status-im/nwaku/blob/384abed614050bf3aa90c901d7f5e8bc383e8b22/docs/operators/how-to/configure.md) for operators. +- Added new [store configuration guide](https://github.com/status-im/nwaku/blob/4e5318bfbb204bd1239c95472d7b84b6a326dd9d/docs/operators/how-to/configure-store.md) to reflect simplified options. 
+ +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2022-10-06 v0.12.0 + +Release highlights: +- The performance and stability of the message `store` has improved dramatically. Query durations, even for long-term stores, have improved by more than a factor of 10. +- Support for Waku Peer Exchange - a discovery method for resource-restricted nodes. +- Messages can now be marked as "ephemeral" to prevent them from being stored. +- [Zerokit](https://github.com/vacp2p/zerokit) is now the default implementation for spam-protected `relay` with RLN. + +The full list of changes is below. + +### Features + +- Default support for [Zerokit](https://github.com/vacp2p/zerokit) version of [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation. +- Added Filter REST API OpenAPI specification. +- Added POC implementation for [43/WAKU2-DEVICE-PAIRING](https://github.com/waku-org/specs/blob/master/standards/application/device-pairing.md). 
+- [14/WAKU2-MESSAGE](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/14/message.md) can now be marked as `ephemeral` to prevent them from being stored. +- Support for [34/WAKU2-PEER-EXCHANGE](https://github.com/waku-org/specs/blob/master/standards/core/peer-exchange.md). + +### Changes + +- [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation now handles on-chain transaction errors. +- [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation now validates the Merkle tree root against a window of acceptable roots. +- Added metrics for [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation. +- Continued refactoring of several protocol implementations to improve maintainability and readability. +- Cleaned up nwaku imports and dependencies. +- Refactored and organised nwaku unit tests. +- Nwaku now periodically logs node metrics by default. +- Further improvements to the `store` implementation: + - Better logging and query traceability. + - More useful metrics to measure query and insertion time. + - Reworked indexing for faster inserts and queries. + - Reworked data model to use a simple, single timestamp for indexing, ordering and querying. + - Improved retention policy management with periodic execution. + - Run sqlite database vacuum at node start. + - Improved logging when migrating the database to a newer version. +- `relay` no longer auto-mounted on all nwaku nodes. +- The most complete node ENR now included in response to API requests for node `info()`. +- Updated Grafana dashboards included with nwaku. +- Github CI test execution now skipped for doc-only changes. + +### Fixes + +- Fixed nwaku unnecessary sleep when no dynamic bootstrap nodes retrieved. 
+- Fixed [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) not working from browser-based clients due to nwaku peer manager failing to reuse existing connection. +- Waku Message payload now correctly encoded as base64 in the Relay REST API. +- Fixed handling of bindParam(uint32) in sqlite. +- `chat2` application now correctly selects a random store node on startup. +- Fixed macos builds failing due to an unsupported dependency. +- Fixed nwaku not reconnecting to previously discovered nodes after losing connection. +- Fixed nwaku failing to start switch transports with external IP configuration. +- Fixed SIGSEGV crash when attempting to start nwaku store without `db-path` configuration. + +### Docs + +- Improved [RLN testnet tutorial](https://github.com/status-im/nwaku/blob/14abdef79677ddc828ff396ece321e05cedfca17/docs/tutorial/onchain-rln-relay-chat2.md) +- Added [tutorial](https://github.com/status-im/nwaku/blob/14abdef79677ddc828ff396ece321e05cedfca17/docs/operators/droplet-quickstart.md) on running nwaku from a DigitalOcean droplet. +- Added [guide](https://github.com/status-im/nwaku/blob/14abdef79677ddc828ff396ece321e05cedfca17/docs/operators/how-to/monitor.md) on how to monitor nwaku using Prometheus and Grafana. 
+ +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2022-08-15 v0.11 + +Release highlights: +- Major improvements in the performance of historical message queries to longer-term, sqlite-only message stores. +- Introduction of an HTTP REST API with basic functionality +- On-chain RLN group management. This was also integrated into an [example spam-protected chat application](https://github.com/status-im/nwaku/blob/4f93510fc9a938954dd85593f8dc4135a1c367de/docs/tutorial/onchain-rln-relay-chat2.md). + +The full list of changes is below. + +### Features + +- Support for on-chain group membership management in the [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation. +- Integrated HTTP REST API for external access to some `wakunode2` functionality: + - Debug REST API exposes debug information about a `wakunode2`. + - Relay REST API allows basic pub/sub functionality according to [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md). 
+- [`35/WAKU2-NOISE`](https://github.com/waku-org/specs/blob/master/standards/application/noise.md) implementation now adds padding to ChaChaPoly encryptions to increase security and reduce metadata leakage. + +### Changes + +- Significantly improved the SQLite-only historical message `store` query performance. +- Refactored several protocol implementations to improve maintainability and readability. +- Major code reorganization for the [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) implementation to improve maintainability. This will also make the `store` extensible to support multiple implementations. +- Disabled compiler log colors when running in a CI environment. +- Refactored [`35/WAKU2-NOISE`](https://github.com/waku-org/specs/blob/master/standards/application/noise.md) implementation into smaller submodules. +- [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) implementation can now optionally be compiled with [Zerokit RLN](https://github.com/vacp2p/zerokit/tree/64f508363946b15ac6c52f8b59d8a739a33313ec/rln). Previously only [Kilic's RLN](https://github.com/kilic/rln/tree/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43) was supported. + +### Fixes + +- Fixed wire encoding of protocol buffers to use proto3. +- Fixed Waku v1 <> Waku v2 bridge losing connection to statically configured v1 nodes. +- Fixed underlying issue causing DNS discovery to fail for records containing multiple strings. + +### Docs + +- Updated [release process](https://github.com/status-im/nwaku/blob/4f93510fc9a938954dd85593f8dc4135a1c367de/docs/contributors/release-process.md) documentation. +- Added [tutorial](https://github.com/status-im/nwaku/blob/4f93510fc9a938954dd85593f8dc4135a1c367de/docs/tutorial/onchain-rln-relay-chat2.md) on how to run a spam-protected chat2 application with on-chain group management. 
+ + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2022-06-15 v0.10 + +Release highlights: +- Support for key exchange using Noise handshakes. +- Support for a SQLite-only historical message `store`. This allows for cheaper, longer-term historical message storage on disk rather than in memory. +- Several fixes for native WebSockets, including slow or hanging connections and connections dropping unexpectedly due to timeouts. +- A fix for a memory leak in nodes running a local SQLite database. + +### Features + +- Support for [`35/WAKU2-NOISE`](https://github.com/waku-org/specs/blob/master/standards/application/noise.md) handshakes as key exchange protocols. +- Support for TOML config files via `--config-file=`. +- Support for `--version` command. This prints the current tagged version (or compiled commit hash, if not on a version). +- Support for running `store` protocol from a `filter` client, storing only the filtered messages. +- Start of an HTTP REST API implementation. +- Support for a memory-efficient SQLite-only `store` configuration. 
+
+### Changes
+
+- Added index on `receiverTimestamp` in the SQLite `store` to improve query performance.
+- GossipSub [Peer Exchange](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#prune-backoff-and-peer-exchange) is now disabled by default. This is a more secure option.
+- Progress towards dynamic group management for the [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation.
+- Nodes with `--keep-alive` enabled now send more regular pings to keep connections more reliably alive.
+- Disabled `swap` protocol by default.
+- Reduced unnecessary and confusing logging, especially during startup.
+- Added discv5 UDP port to the node's main discoverable ENR.
+
+### Fixes
+
+- The in-memory `store` now checks the validity of message timestamps before storing.
+- Fixed underlying bug that caused connection leaks in the HTTP client.
+- Fixed Docker image compilation to use the correct external variable for compile-time flags (`NIMFLAGS` instead of `NIM_PARAMS`).
+- Fixed issue where `--dns4-domain-name` caused an unhandled exception if no external port was available.
+- Avoids unnecessarily calling DB migration if a `--db-path` is set but nothing is persisted in the DB. This led to a misleading warning log.
+- Fixed underlying issues that caused WebSocket connections to hang.
+- Fixed underlying issue that caused WebSocket connections to time out after 10 mins.
+- Fixed memory leak in nodes that implement a SQLite database.
+
+### Docs
+
+- Added [tutorial](https://github.com/status-im/nwaku/blob/16dd267bd9d25ff24c64fc5c92a20eb0d322217c/docs/operators/how-to/configure-key.md) on how to generate and configure a node key.
+- Added first [guide](https://github.com/status-im/nwaku/tree/16dd267bd9d25ff24c64fc5c92a20eb0d322217c/docs/operators) for nwaku operators.
+ +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2022-03-31 v0.9 + +Release highlights: + +- Support for Peer Exchange (PX) when a peer prunes a [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) mesh due to oversubscription. This can significantly increase mesh stability. +- Improved start-up times through managing the size of the underlying persistent message storage. +- New websocket connections are no longer blocked due to parsing failures in other connections. + +The full list of changes is below. 
+ +### Features + +- Support for bootstrapping [`33/WAKU-DISCV5`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/33/discv5.md) via [DNS discovery](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/10/waku2.md#discovery-methods) +- Support for GossipSub [Peer Exchange](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#prune-backoff-and-peer-exchange) + +### Changes + +- Waku v1 <> v2 bridge now supports DNS `multiaddrs` +- Waku v1 <> v2 bridge now validates content topics before attempting to bridge a message from Waku v2 to Waku v1 +- Persistent message storage now auto deletes messages once over specified `--store-capacity`. This can significantly improve node start-up times. +- Renamed Waku v1 <> v2 bridge `make` target and binary to `wakubridge` +- Increased `store` logging to assist with debugging +- Increased `rln-relay` logging to assist with debugging +- Message metrics no longer include the content topic as a dimension to keep Prometheus metric cardinality under control +- Waku v2 `toy-chat` application now sets the sender timestamp when creating messages +- The type of the `proof` field of the `WakuMessage` is changed to `RateLimitProof` +- Added method to the JSON-RPC API that returns the git tag and commit hash of the binary +- The node's ENR is now included in the JSON-RPC API response when requesting node info + +### Fixes + +- Fixed incorrect conversion of seconds to nanosecond timestamps +- Fixed store queries blocking due to failure in resource clean up +- Fixed underlying issue where new websocket connections are blocked due to parsing failures in other connections +- Fixed failure to log the ENR necessary for a discv5 connection to the node + +### Docs + +- Added [RAM requirements](https://github.com/status-im/nim-waku/tree/ee96705c7fbe4063b780ac43b7edee2f6c4e351b/waku/v2#wakunode) to `wakunode2` build instructions +- Added 
[tutorial](https://github.com/status-im/nim-waku/blob/ee96705c7fbe4063b780ac43b7edee2f6c4e351b/docs/tutorial/rln-chat2-live-testnet.md) on communicating with waku2 test fleets via the chat2 `toy-chat` application in spam-protected mode using [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md). +- Added a [section on bug reporting](https://github.com/status-im/nim-waku/blob/ee96705c7fbe4063b780ac43b7edee2f6c4e351b/README.md#bugs-questions--features) to `wakunode2` README +- Fixed broken links in the [JSON-RPC API Tutorial](https://github.com/status-im/nim-waku/blob/5ceef37e15a15c52cbc589f0b366018e81a958ef/docs/tutorial/jsonrpc-api.md) + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. 
+ +## 2022-03-03 v0.8 + +Release highlights: + +- Working demonstration and integration of [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) in the Waku v2 `toy-chat` application +- Beta support for ambient peer discovery using [a version of Discovery v5](https://github.com/vacp2p/rfc/pull/487) +- A fix for the issue that caused a `store` node to run out of memory after serving a number of historical queries +- Ability to configure a `dns4` domain name for a node and resolve other dns-based `multiaddrs` + +The full list of changes is below. + +### Features + +- [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) implementation now supports spam-protection for a specific combination of `pubsubTopic` and `contentTopic` (available under the `rln` compiler flag). +- [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) integrated into chat2 `toy-chat` (available under the `rln` compiler flag) +- Added support for resolving dns-based `multiaddrs` +- A Waku v2 node can now be configured with a domain name and `dns4` `multiaddr` +- Support for ambient peer discovery using [`33/WAKU-DISCV5`](https://github.com/vacp2p/rfc/pull/487) + +### Changes + +- Metrics: now monitoring content topics and the sources of new connections +- Metrics: improved default fleet monitoring dashboard +- Introduced a `Timestamp` type (currently an alias for int64). +- All timestamps changed to nanosecond resolution. 
+- `timestamp` field number in WakuMessage object changed from `4` to `10`
+- [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) identifier updated to `/vac/waku/store/2.0.0-beta4`
+- `toy-chat` application now uses DNS discovery to connect to existing fleets
+
+### Fixes
+
+- Fixed underlying bug that caused occasional failures when reading the certificate for secure websockets
+- Fixed `store` memory usage issues when responding to history queries
+
+### Docs
+
+- Documented [use of domain certificates](https://github.com/status-im/nim-waku/tree/2972a5003568848164033da3fe0d7f52a3d54824/waku/v2#enabling-websocket) for secure websockets
+- Documented [how to configure a `dns4` domain name](https://github.com/status-im/nim-waku/tree/2972a5003568848164033da3fe0d7f52a3d54824/waku/v2#using-dns-discovery-to-connect-to-existing-nodes) for a node
+- Clarified [use of DNS discovery](https://github.com/status-im/nim-waku/tree/2972a5003568848164033da3fe0d7f52a3d54824/waku/v2#using-dns-discovery-to-connect-to-existing-nodes) and provided current URLs for discoverable fleet nodes
+- Added [tutorial](https://github.com/status-im/nim-waku/blob/2972a5003568848164033da3fe0d7f52a3d54824/docs/tutorial/rln-chat2-local-test.md) on using [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) with the chat2 `toy-chat` application
+- Added [tutorial](https://github.com/status-im/nim-waku/blob/2972a5003568848164033da3fe0d7f52a3d54824/docs/tutorial/bridge.md) on how to configure and use a [`15/WAKU-BRIDGE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/15/bridge.md)
+
+This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/):
+| Protocol | Spec status | Protocol id |
+| ---: | :---: | :--- |
+| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` |
+| 
[`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2022-01-19 v0.7 + +Release highlights: + +- Support for secure websockets. +- Ability to remove unreachable clients in a `filter` node. +- Several fixes to improve `store` performance and decrease query times. Query time for large stores decreased from longer than 8 min to under 100 ms. +- Fix for a long-standing bug that prevented proper database migration in some deployed Docker containers. + +The full list of changes is below. + +### Features + +- Support for secure websocket transport + +### Changes + +- Filter nodes can now remove unreachable clients +- The WakuInfo `listenStr` is deprecated and replaced with a sequence of `listenAddresses` to accommodate multiple transports +- Removed cached `peerInfo` on local node. 
Rely on underlying libp2p switch instead +- Metrics: added counters for protocol messages +- Waku v2 node discovery now supports [`31/WAKU2-ENR`](https://github.com/waku-org/specs/blob/master/standards/core/enr.md) +- resuming the history via `resume` now takes the answers of all peers in `peerList` into consideration and consolidates them into one deduplicated list + +### Fixes + +- Fixed database migration failure in the Docker image +- All `HistoryResponse` messages are now auto-paginated to a maximum of 100 messages per response +- Increased maximum length for reading from a libp2p input stream to allow largest possible protocol messages, including `HistoryResponse` messages at max size +- Significantly improved `store` node query performance +- Implemented a GossipSub `MessageIdProvider` for `11/WAKU2-RELAY` messages instead of relying on the unstable default +- Receiver timestamps for message indexing in the `store` now have consistent millisecond resolution + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`17/WAKU-RLN-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) | `raw` | `/vac/waku/waku-rln-relay/2.0.0-alpha1` | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta3` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` 
| `/vac/waku/lightpush/2.0.0-beta1` |
+
+The Waku v1 implementation is stable but not under active development.
+
+## 2021-11-05 v0.6
+
+Some useful features and fixes in this release include:
+- two methods for Waku v2 node discovery
+- support for unsecure websockets, which paves the way for native browser usage
+- a fix for `nim-waku` store nodes running out of memory due to store size: the number of stored messages can now easily be configured
+- a fix for densely connected nodes refusing new connections: the maximum number of allowed connections can now easily be configured
+- support for larger message sizes (up from 64kb to 1Mb per message)
+
+The full list of changes is below.
+
+### Features
+
+- Waku v2 node discovery via DNS following [EIP-1459](https://eips.ethereum.org/EIPS/eip-1459)
+- Waku v2 node discovery via [Node Discovery v5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md)
+
+### Changes
+
+- Pagination of historical queries is now simplified
+- GossipSub [prune backoff period](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#prune-backoff-and-peer-exchange) is now the recommended 1 minute
+- Bridge now uses content topic format according to [23/WAKU2-TOPICS](https://github.com/vacp2p/rfc-index/blob/main/waku/informational/23/topics.md)
+- Better internal differentiation between local and remote peer info
+- Maximum number of libp2p connections is now configurable
+- `udp-port` CLI option has been removed for binaries where it's not used
+- Waku v2 now supports unsecure WebSockets
+- Waku v2 now supports larger message sizes of up to 1 Mb by default
+- Further experimental development of [RLN for spam protection](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md).
+These changes are disabled by default under a compiler flag.
Changes include: + - Per-message rate limit proof defined + - RLN proof generation and verification integrated into Waku v2 + - RLN tree depth changed from 32 to 20 + - Support added for static membership group formation + +#### Docs + +- Added [contributor guidelines](https://github.com/status-im/nim-waku/blob/master/docs/contributors/waku-fleets.md) on Waku v2 fleet monitoring and management +- Added [basic tutorial](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/dns-disc.md) on using Waku v2 DNS-based discovery + +### Fixes + +- Bridge between `toy-chat` and matterbridge now shows correct announced addresses +- Bridge no longer re-encodes already encoded payloads when publishing to V1 +- Bridge now populates WakuMessage timestamps when publishing to V2 +- Store now has a configurable maximum number of stored messages +- Network simulations for Waku v1 and Waku v2 are runnable again + +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`17/WAKU-RLN`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) | `raw` | `/vac/waku/waku-rln-relay/2.0.0-alpha1` | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta3` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is 
stable but not under active development. + +## 2021-07-26 v0.5.1 + +This patch release contains the following fix: +- Support for multiple protocol IDs when reconnecting to previously connected peers: +A bug in `v0.5` caused clients using persistent peer storage to only support the mounted protocol ID. + +This is a patch release that is fully backwards-compatible with release `v0.5`. +It supports the same [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`17/WAKU-RLN`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) | `raw` | `/vac/waku/waku-rln-relay/2.0.0-alpha1` | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta3` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2021-07-23 v0.5 + +This release contains the following: + +### Features +- Support for keep-alives using [libp2p ping protocol](https://docs.libp2p.io/concepts/protocols/#ping). +- DB migration for the message and peer stores. +- Support for multiple protocol IDs. Mounted protocols now match versions of the same protocol that adds a postfix to the stable protocol ID. + +### Changes +- Bridge topics are now configurable. 
+- The `resume` Nim API now eliminates duplicate messages before storing them.
+- The `resume` Nim API now fetches historical messages in page sequence.
+- Added support for stable version of `relay` protocol, with protocol ID `/vac/waku/relay/2.0.0`.
+- Added optional `timestamp` to `WakuRelayMessage`.
+- Removed `PCRE` as a prerequisite for building Waku v1 and Waku v2.
+- Improved [`swap`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) metrics.
+
+#### General refactoring
+- Refactored modules according to [Nim best practices](https://hackmd.io/1imOGULZRsed2HpgmzGleA).
+- Simplified the [way protocols get notified](https://github.com/status-im/nim-waku/issues/574) of new messages.
+- Refactored `wakunode2` setup into 6 distinct phases with improved logging and error handling.
+- Moved `Whisper` types and protocol from the `nim-eth` module to `nim-waku`.
+
+#### Docs
+- Added [database migration tutorial](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/db-migration.md).
+- Added [tutorial to setup `websockify`](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/websocket.md).
+
+#### Schema
+- Updated the `Message` table of the persistent message store:
+  - Added `senderTimestamp` column.
+  - Renamed the `timestamp` column to `receiverTimestamp` and changed its type to `REAL`.
+
+#### API
+- Added optional `timestamp` to [`WakuRelayMessage`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/16/rpc.md) on JSON-RPC API.
+
+### Fixes
+- Conversion between topics for the Waku v1 <-> v2 bridge now follows the [RFC recommendation](https://github.com/vacp2p/rfc-index/blob/main/waku/informational/23/topics.md).
+- Fixed field order of `HistoryResponse` protobuf message: the field numbers of the `HistoryResponse` are shifted up by one to match up the [13/WAKU2-STORE](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) specs.
+ +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`17/WAKU-RLN`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) | `raw` | `/vac/waku/waku-rln-relay/2.0.0-alpha1` | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta3` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. + +## 2021-06-03 v0.4 + +This release contains the following: + +### Features + +- Initial [`toy-chat` implementation](https://github.com/vacp2p/rfc-index/blob/main/waku/informational/22/toy-chat.md) + +### Changes + +- The [toy-chat application](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/chat2.md) can now perform `lightpush` and request content-filtered messages from remote peers. 
+- The [toy-chat application](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/chat2.md) now uses default content topic `/toy-chat/2/huilong/proto`
+- Improve `toy-chat` [bridging to matterbridge](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/chat2.md#bridge-messages-between-chat2-and-matterbridge)
+- Improve [`swap`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) logging and enable soft mode by default
+- Content topics are no longer in a redundant nested structure
+- Improve error handling
+
+#### API
+
+- [JSON-RPC Store API](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/16/rpc.md): Added an optional time-based query to filter historical messages.
+- [Nim API](https://github.com/status-im/nim-waku/blob/master/docs/api/v2/node.md): Added `resume` method.
+
+### Fixes
+
+- Connections between nodes no longer become unstable due to keep-alive errors if mesh grows large
+- Re-enable `lightpush` tests and fix Windows CI failure
+
+The [Waku v2 suite of protocols](https://github.com/waku-org/specs) are still in a raw/draft state.
+This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`17/WAKU-RLN`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) | `raw` | `/vac/waku/waku-rln-relay/2.0.0-alpha1` | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `draft` | `/vac/waku/relay/2.0.0-beta2` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta3` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `draft` | `/vac/waku/swap/2.0.0-beta1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` | + +The Waku v1 implementation is stable but not under active development. 
+ +## 2021-05-11 v0.3 + +This release contains the following: + +### Features + +- Start of [`RLN relay` implementation](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) +- Start of [`swap` implementation](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) +- Start of [fault-tolerant `store` implementation](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/application/21/fault-tolerant-store.md) +- Initial [`bridge` implementation](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/15/bridge.md) between Waku v1 and v2 protocols +- Initial [`lightpush` implementation](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) +- A peer manager for `relay`, `filter`, `store` and `swap` peers +- Persistent storage for peers: A node with this feature enabled will now attempt to reconnect to `relay` peers after a restart. It will respect the gossipsub [PRUNE backoff](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#prune-backoff-and-peer-exchange) period before attempting to do so. +- `--persist-peers` CLI option to persist peers in local storage +- `--persist-messages` CLI option to store historical messages locally +- `--keep-alive` CLI option to maintain a stable connection to `relay` peers on idle topics +- A CLI chat application ([`chat2`](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/chat2.md)) over Waku v2 with [bridging to matterbridge](https://github.com/status-im/nim-waku/blob/master/docs/tutorial/chat2.md#bridge-messages-between-chat2-and-matterbridge) + +### Changes +- Enable `swap` protocol by default and improve logging +#### General refactoring + +- Split out `waku_types` types into the right place; create `utils` folder. +- Change type of `contentTopic` in [`ContentFilter`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md#protobuf) to `string`. 
+- Replace sequence of `contentTopics` in [`ContentFilter`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md#protobuf) with a single `contentTopic`. +- Add `timestamp` field to [`WakuMessage`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/14/message.md#payloads). +- Ensure CLI config parameters use a consistent naming scheme. Summary of changes [here](https://github.com/status-im/nim-waku/pull/543). + +#### Docs + +Several clarifications and additions aimed at contributors, including + - information on [how to query Status test fleet](https://github.com/status-im/nim-waku/blob/master/docs/faq.md) for node addresses, + - [how to view logs](https://github.com/status-im/nim-waku/blob/master/docs/contributors/cluster-logs.md), and + - [how to update submodules](https://github.com/status-im/nim-waku/blob/master/docs/contributors/git-submodules.md). + +#### Schema + +- Add `Message` table to the persistent message store. This table replaces the old `messages` table. It has two additional columns, namely + - `pubsubTopic`, and + - `version`. +- Add `Peer` table for persistent peer storage. + +#### API + +- [JSON-RPC Admin API](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/16/rpc.md): Added a [`post` method](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/16/rpc.md#post_waku_v2_admin_v1_peers) to connect to peers on an ad-hoc basis. +- [Nim API](https://github.com/status-im/nim-waku/blob/master/docs/api/v2/node.md): PubSub topic `subscribe` and `unsubscribe` no longer returns a future (removed `async` designation). +- [`HistoryQuery`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md#historyquery): Added `pubsubTopic` field. Message history can now be filtered and queried based on the `pubsubTopic`. 
+- [`HistoryQuery`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md#historyquery): Added support for querying a time window by specifying start and end times. + +### Fixes + +- Running nodes can now be shut down gracefully +- Content filtering now works on any PubSub topic and not just the `waku` default. +- Nodes can now mount protocols without supporting `relay` as a capability + +The [Waku v2 suite of protocols](https://github.com/waku-org/specs) are still in a raw/draft state. +This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/): +| Protocol | Spec status | Protocol id | +| ---: | :---: | :--- | +| [`17/WAKU-RLN`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/17/rln-relay.md) | `raw` | `/vac/waku/waku-rln-relay/2.0.0-alpha1` | +| [`18/WAKU2-SWAP`](https://github.com/vacp2p/rfc-index/blob/main/waku/deprecated/18/swap.md) | `raw` | `/vac/waku/swap/2.0.0-alpha1` | +| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `raw` | `/vac/waku/lightpush/2.0.0-alpha1` | +| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `draft` | `/vac/waku/relay/2.0.0-beta2` | +| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` | +| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta3` | + +The Waku v1 implementation is stable but not under active development. + +## 2021-01-05 v0.2 + +This release contains the following changes: + +- Calls to `publish` a message on `wakunode2` now `await` instead of `discard` dispatched [`WakuRelay`](https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-relay.md) procedures. 
+- [`StrictNoSign`](https://github.com/libp2p/specs/tree/master/pubsub#message-signing) enabled.
+- Add JSON-RPC API for external access to `wakunode2` functionality:
+  - Admin API retrieves information about peers registered on the `wakunode2`.
+  - Debug API exposes debug information about a `wakunode2`.
+  - Filter API saves bandwidth by allowing light nodes to filter for specific content.
+  - Private API enables symmetric or asymmetric cryptography to encrypt/decrypt message payloads.
+  - Relay API allows basic pub/sub functionality.
+  - Store API retrieves historical messages.
+- Add tutorial on how to use JSON-RPC API.
+- Refactor: Move `waku_filter` protocol into its own module.
+
+The Waku v2 implementation, and [most protocols it consists of](https://specs.vac.dev/specs/waku/),
+are still in a draft/beta state. The Waku v1 implementation is stable but not under active development.
+
+## 2020-11-30 v0.1
+
+Initial beta release.
+
+This release contains:
+
+- A Nim implementation of the [Waku v1 protocol](https://specs.vac.dev/waku/waku.html).
+- A Nim implementation of the [Waku v2 protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
+- CLI applications `wakunode` and `wakunode2` that allow you to run a Waku v1 or v2 node.
+- Examples of Waku v1 and v2 usage.
+- Various tests of above.
+
+Currently the Waku v2 implementation, and [most protocols it consists of](https://specs.vac.dev/specs/waku/),
+are in a draft/beta state. The Waku v1 implementation is stable but not under active development.
+
+Feedback welcome!
diff --git a/third-party/nwaku/Dockerfile b/third-party/nwaku/Dockerfile new file mode 100644 index 0000000..6afb2bc --- /dev/null +++ b/third-party/nwaku/Dockerfile @@ -0,0 +1,93 @@ +# BUILD NIM APP ---------------------------------------------------------------- +FROM rust:1.81.0-alpine3.19 AS nim-build + +ARG NIMFLAGS +ARG MAKE_TARGET=wakunode2 +ARG NIM_COMMIT +ARG LOG_LEVEL=TRACE +ARG HEAPTRACK_BUILD=0 + +# Get build tools and required header files +RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq + +WORKDIR /app +COPY . . + +# workaround for alpine issue: https://github.com/alpinelinux/docker-alpine/issues/383 +RUN apk update && apk upgrade + +# Ran separately from 'make' to avoid re-doing +RUN git submodule update --init --recursive + +RUN if [ "$HEAPTRACK_BUILD" = "1" ]; then \ + git apply --directory=vendor/nimbus-build-system/vendor/Nim docs/tutorial/nim.2.2.4_heaptracker_addon.patch; \ + fi + +# Slowest build step for the sake of caching layers +RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT} + +# Build the final node binary +RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}" + + +# PRODUCTION IMAGE ------------------------------------------------------------- + +FROM alpine:3.18 AS prod + +ARG MAKE_TARGET=wakunode2 + +LABEL maintainer="jakub@status.im" +LABEL source="https://github.com/waku-org/nwaku" +LABEL description="Wakunode: Waku client" +LABEL commit="unknown" +LABEL version="unknown" + +# DevP2P, LibP2P, and JSON RPC ports +EXPOSE 30303 60000 8545 + +# Referenced in the binary +RUN apk add --no-cache libgcc libpq-dev bind-tools + +# Copy to separate location to accomodate different MAKE_TARGET values +COPY --from=nim-build /app/build/$MAKE_TARGET /usr/local/bin/ + +# Copy migration scripts for DB upgrades +COPY --from=nim-build /app/migrations/ /app/migrations/ + +# Symlink the correct wakunode binary +RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode + 
+ENTRYPOINT ["/usr/bin/wakunode"] + +# By default just show help if called without arguments +CMD ["--help"] + + +# DEBUG IMAGE ------------------------------------------------------------------ + +# Build debug tools: heaptrack +FROM alpine:3.18 AS heaptrack-build + +RUN apk update +RUN apk add -- gdb git g++ make cmake zlib-dev boost-dev libunwind-dev +RUN git clone https://github.com/KDE/heaptrack.git /heaptrack + +WORKDIR /heaptrack/build +# going to a commit that builds properly. We will revisit this for new releases +RUN git reset --hard f9cc35ebbdde92a292fe3870fe011ad2874da0ca +RUN cmake -DCMAKE_BUILD_TYPE=Release .. +RUN make -j$(nproc) + + +# Debug image +FROM prod AS debug-with-heaptrack + +RUN apk add --no-cache gdb libunwind + +# Add heaptrack +COPY --from=heaptrack-build /heaptrack/build/ /heaptrack/build/ + +ENV LD_LIBRARY_PATH=/heaptrack/build/lib/heaptrack/ +RUN ln -s /heaptrack/build/bin/heaptrack /usr/local/bin/heaptrack + +ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"] diff --git a/third-party/nwaku/Dockerfile.lightpushWithMix.compile b/third-party/nwaku/Dockerfile.lightpushWithMix.compile new file mode 100644 index 0000000..e39b88d --- /dev/null +++ b/third-party/nwaku/Dockerfile.lightpushWithMix.compile @@ -0,0 +1,58 @@ +# BUILD NIM APP ---------------------------------------------------------------- +FROM rust:1.81.0-alpine3.19 AS nim-build + +ARG NIMFLAGS +ARG MAKE_TARGET=lightpushwithmix +ARG NIM_COMMIT +ARG LOG_LEVEL=TRACE + +# Get build tools and required header files +RUN apk add --no-cache bash git build-base openssl-dev pcre-dev linux-headers curl jq + +WORKDIR /app +COPY . . 
+ +# workaround for alpine issue: https://github.com/alpinelinux/docker-alpine/issues/383 +RUN apk update && apk upgrade + +# Ran separately from 'make' to avoid re-doing +RUN git submodule update --init --recursive + +# Slowest build step for the sake of caching layers +RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT} + +# Build the final node binary +RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}" + + +# REFERENCE IMAGE as BASE for specialized PRODUCTION IMAGES---------------------------------------- +FROM alpine:3.18 AS base_lpt + +ARG MAKE_TARGET=lightpushwithmix + +LABEL maintainer="prem@waku.org" +LABEL source="https://github.com/waku-org/nwaku" +LABEL description="Lite Push With Mix: Waku light-client" +LABEL commit="unknown" +LABEL version="unknown" + +# DevP2P, LibP2P, and JSON RPC ports +EXPOSE 30303 60000 8545 + +# Referenced in the binary +RUN apk add --no-cache libgcc pcre-dev libpq-dev \ + wget \ + iproute2 \ + python3 \ + jq + +# Fix for 'Error loading shared library libpcre.so.3: No such file or directory' +RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3 + +COPY --from=nim-build /app/build/lightpush_publisher_mix /usr/bin/ +RUN chmod +x /usr/bin/lightpush_publisher_mix + +# Standalone image to be used manually and in lpt-runner ------------------------------------------- +FROM base_lpt AS standalone_lpt + +ENTRYPOINT ["/usr/bin/lightpush_publisher_mix"] diff --git a/third-party/nwaku/LICENSE-APACHEv2 b/third-party/nwaku/LICENSE-APACHEv2 new file mode 100644 index 0000000..7b6a3cb --- /dev/null +++ b/third-party/nwaku/LICENSE-APACHEv2 @@ -0,0 +1,205 @@ +nim-waku is licensed under the Apache License version 2 +Copyright (c) 2018 Status Research & Development GmbH +----------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Status Research & Development GmbH + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/third-party/nwaku/LICENSE-MIT b/third-party/nwaku/LICENSE-MIT new file mode 100644 index 0000000..aab8020 --- /dev/null +++ b/third-party/nwaku/LICENSE-MIT @@ -0,0 +1,25 @@ +nim-waku is licensed under the MIT License +Copyright (c) 2018 Status Research & Development GmbH +----------------------------------------------------- + +The MIT License (MIT) + +Copyright (c) 2018 Status Research & Development GmbH + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third-party/nwaku/Makefile b/third-party/nwaku/Makefile new file mode 100644 index 0000000..d5cf088 --- /dev/null +++ b/third-party/nwaku/Makefile @@ -0,0 +1,546 @@ +# Copyright (c) 2022 Status Research & Development GmbH. Licensed under +# either of: +# - Apache License, version 2.0 +# - MIT license +# at your option. This file may not be copied, modified, or distributed except +# according to those terms. 
+export BUILD_SYSTEM_DIR := vendor/nimbus-build-system +export EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor +LINK_PCRE := 0 +FORMAT_MSG := "\\x1B[95mFormatting:\\x1B[39m" +# we don't want an error here, so we can handle things later, in the ".DEFAULT" target +-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk + + +ifeq ($(NIM_PARAMS),) +# "variables.mk" was not included, so we update the submodules. +GIT_SUBMODULE_UPDATE := git submodule update --init --recursive +.DEFAULT: + +@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \ + $(GIT_SUBMODULE_UPDATE); \ + echo +# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself: +# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles +# +# After restarting, it will execute its original goal, so we don't have to start a child Make here +# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great? + +else # "variables.mk" was included. Business as usual until the end of this file. + +# Determine the OS +detected_OS := $(shell uname -s) +ifneq (,$(findstring MINGW,$(detected_OS))) + detected_OS := Windows +endif + +ifeq ($(detected_OS),Windows) + # Update MINGW_PATH to standard MinGW location + MINGW_PATH = /mingw64 + NIM_PARAMS += --passC:"-I$(MINGW_PATH)/include" + NIM_PARAMS += --passL:"-L$(MINGW_PATH)/lib" + NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc" + NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream" + + LIBS = -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq + NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)") +endif + +########## +## Main ## +########## +.PHONY: all test update clean + +# default target, because it's the first one that doesn't start with '.' 
+all: | wakunode2 example2 chat2 chat2bridge libwaku + +test_file := $(word 2,$(MAKECMDGOALS)) +define test_name +$(shell echo '$(MAKECMDGOALS)' | cut -d' ' -f3-) +endef + +test: +ifeq ($(strip $(test_file)),) + $(MAKE) testcommon + $(MAKE) testwaku +else + $(MAKE) compile-test TEST_FILE="$(test_file)" TEST_NAME="$(call test_name)" +endif +# this prevents make from erroring on unknown targets like "Index" +%: + @true + +waku.nims: + ln -s waku.nimble $@ + +update: | update-common + rm -rf waku.nims && \ + $(MAKE) waku.nims $(HANDLE_OUTPUT) + $(MAKE) build-nph + +clean: + rm -rf build + +# must be included after the default target +-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk + +## Possible values: prod; debug +TARGET ?= prod + +## Git version +GIT_VERSION ?= $(shell git describe --abbrev=6 --always --tags) +## Compilation parameters. If defined in the CLI the assignments won't be executed +NIM_PARAMS := $(NIM_PARAMS) -d:git_version=\"$(GIT_VERSION)\" + +## Heaptracker options +HEAPTRACKER ?= 0 +HEAPTRACKER_INJECT ?= 0 +ifeq ($(HEAPTRACKER), 1) +# Assumes Nim's lib/system/alloc.nim is patched! 
+TARGET := debug-with-heaptrack + +ifeq ($(HEAPTRACKER_INJECT), 1) +# the Nim compiler will load 'libheaptrack_inject.so' +HEAPTRACK_PARAMS := -d:heaptracker -d:heaptracker_inject +NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker -d:heaptracker_inject +else +# the Nim compiler will load 'libheaptrack_preload.so' +HEAPTRACK_PARAMS := -d:heaptracker +NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker +endif + +endif +## end of Heaptracker options + +################## +## Dependencies ## +################## +.PHONY: deps libbacktrace + +rustup: +ifeq (, $(shell which cargo)) +# Install Rustup if it's not installed +# -y: Assume "yes" for all prompts +# --default-toolchain stable: Install the stable toolchain + curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable +endif + +rln-deps: rustup + ./scripts/install_rln_tests_dependencies.sh + +deps: | deps-common nat-libs waku.nims + + +### nim-libbacktrace + +# "-d:release" implies "--stacktrace:off" and it cannot be added to config.nims +ifeq ($(DEBUG), 0) +NIM_PARAMS := $(NIM_PARAMS) -d:release +else +NIM_PARAMS := $(NIM_PARAMS) -d:debug +endif + +ifeq ($(USE_LIBBACKTRACE), 0) +NIM_PARAMS := $(NIM_PARAMS) -d:disable_libbacktrace +endif + +libbacktrace: + + $(MAKE) -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0 + +clean-libbacktrace: + + $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT) + +# Extend deps and clean targets +ifneq ($(USE_LIBBACKTRACE), 0) +deps: | libbacktrace +endif + +ifeq ($(POSTGRES), 1) +NIM_PARAMS := $(NIM_PARAMS) -d:postgres -d:nimDebugDlOpen +endif + +ifeq ($(DEBUG_DISCV5), 1) +NIM_PARAMS := $(NIM_PARAMS) -d:debugDiscv5 +endif + +clean: | clean-libbacktrace + +### Create nimble links (used when building with Nix) + +nimbus-build-system-nimble-dir: + NIMBLE_DIR="$(CURDIR)/$(NIMBLE_DIR)" \ + PWD_CMD="$(PWD)" \ + $(CURDIR)/scripts/generate_nimble_links.sh + +################## +## RLN ## +################## +.PHONY: librln + +LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit 
+LIBRLN_VERSION := v0.7.0 + +ifeq ($(detected_OS),Windows) +LIBRLN_FILE := rln.lib +else +LIBRLN_FILE := librln_$(LIBRLN_VERSION).a +endif + +$(LIBRLN_FILE): + echo -e $(BUILD_MSG) "$@" && \ + ./scripts/build_rln.sh $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(LIBRLN_FILE) + +librln: | $(LIBRLN_FILE) + $(eval NIM_PARAMS += --passL:$(LIBRLN_FILE) --passL:-lm) + +clean-librln: + cargo clean --manifest-path vendor/zerokit/rln/Cargo.toml + rm -f $(LIBRLN_FILE) + +# Extend clean target +clean: | clean-librln + +################# +## Waku Common ## +################# +.PHONY: testcommon + +testcommon: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim testcommon $(NIM_PARAMS) waku.nims + + +########## +## Waku ## +########## +.PHONY: testwaku wakunode2 testwakunode2 example2 chat2 chat2bridge liteprotocoltester + +# install rln-deps only for the testwaku target +testwaku: | build deps rln-deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim test -d:os=$(shell uname) $(NIM_PARAMS) waku.nims + +wakunode2: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + \ + $(ENV_SCRIPT) nim wakunode2 $(NIM_PARAMS) waku.nims + +benchmarks: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim benchmarks $(NIM_PARAMS) waku.nims + +testwakunode2: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim testwakunode2 $(NIM_PARAMS) waku.nims + +example2: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim example2 $(NIM_PARAMS) waku.nims + +chat2: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim chat2 $(NIM_PARAMS) waku.nims + +chat2mix: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim chat2mix $(NIM_PARAMS) waku.nims + +rln-db-inspector: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim rln_db_inspector $(NIM_PARAMS) waku.nims + +chat2bridge: | build deps librln + echo -e $(BUILD_MSG) 
"build/$@" && \ + $(ENV_SCRIPT) nim chat2bridge $(NIM_PARAMS) waku.nims + +liteprotocoltester: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim liteprotocoltester $(NIM_PARAMS) waku.nims + +lightpushwithmix: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim lightpushwithmix $(NIM_PARAMS) waku.nims + +build/%: | build deps librln + echo -e $(BUILD_MSG) "build/$*" && \ + $(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $* + +compile-test: | build deps librln + echo -e $(BUILD_MSG) "$(TEST_FILE)" "\"$(TEST_NAME)\"" && \ + $(ENV_SCRIPT) nim buildTest $(NIM_PARAMS) waku.nims $(TEST_FILE) && \ + $(ENV_SCRIPT) nim execTest $(NIM_PARAMS) waku.nims $(TEST_FILE) "\"$(TEST_NAME)\""; \ + +################ +## Waku tools ## +################ +.PHONY: tools wakucanary networkmonitor + +tools: networkmonitor wakucanary + +wakucanary: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim wakucanary $(NIM_PARAMS) waku.nims + +networkmonitor: | build deps librln + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim networkmonitor $(NIM_PARAMS) waku.nims + +############ +## Format ## +############ +.PHONY: build-nph install-nph clean-nph print-nph-path + +# Default location for nph binary shall be next to nim binary to make it available on the path. 
+NPH:=$(shell dirname $(NIM_BINARY))/nph + +build-nph: | build deps +ifeq ("$(wildcard $(NPH))","") + $(ENV_SCRIPT) nim c --skipParentCfg:on vendor/nph/src/nph.nim && \ + mv vendor/nph/src/nph $(shell dirname $(NPH)) + echo "nph utility is available at " $(NPH) +else + echo "nph utility already exists at " $(NPH) +endif + +GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit + +install-nph: build-nph +ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","") + cp ./scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK) +else + echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT override" + exit 1 +endif + +nph/%: | build-nph + echo -e $(FORMAT_MSG) "nph/$*" && \ + $(NPH) $* + +clean-nph: + rm -f $(NPH) + +# To avoid hardcoding nph binary location in several places +print-nph-path: + echo "$(NPH)" + +clean: | clean-nph + +################### +## Documentation ## +################### +.PHONY: docs coverage + +# TODO: Remove unused target +docs: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) nim doc --run --index:on --project --out:.gh-pages waku/waku.nim waku.nims + +coverage: + echo -e $(BUILD_MSG) "build/$@" && \ + $(ENV_SCRIPT) ./scripts/run_cov.sh -y + + +##################### +## Container image ## +##################### +# -d:insecure - Necessary to enable Prometheus HTTP endpoint for metrics +# -d:chronicles_colors:none - Necessary to disable colors in logs for Docker +DOCKER_IMAGE_NIMFLAGS ?= -d:chronicles_colors:none -d:insecure -d:postgres +DOCKER_IMAGE_NIMFLAGS := $(DOCKER_IMAGE_NIMFLAGS) $(HEAPTRACK_PARAMS) + +# build a docker image for the fleet +docker-image: MAKE_TARGET ?= wakunode2 +docker-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION) +docker-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG) +docker-image: + docker build \ + --build-arg="MAKE_TARGET=$(MAKE_TARGET)" \ + --build-arg="NIMFLAGS=$(DOCKER_IMAGE_NIMFLAGS)" \ + --build-arg="NIM_COMMIT=$(DOCKER_NIM_COMMIT)" \ + --build-arg="LOG_LEVEL=$(LOG_LEVEL)" \ + 
--build-arg="HEAPTRACK_BUILD=$(HEAPTRACKER)" \ + --label="commit=$(shell git rev-parse HEAD)" \ + --label="version=$(GIT_VERSION)" \ + --target $(TARGET) \ + --tag $(DOCKER_IMAGE_NAME) . + +docker-quick-image: MAKE_TARGET ?= wakunode2 +docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION) +docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG) +docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm +docker-quick-image: | build deps librln wakunode2 + docker build \ + --build-arg="MAKE_TARGET=$(MAKE_TARGET)" \ + --tag $(DOCKER_IMAGE_NAME) \ + --target $(TARGET) \ + --file docker/binaries/Dockerfile.bn.local \ + . + +docker-push: + docker push $(DOCKER_IMAGE_NAME) + +#################################### +## Container lite-protocol-tester ## +#################################### +# -d:insecure - Necessary to enable Prometheus HTTP endpoint for metrics +# -d:chronicles_colors:none - Necessary to disable colors in logs for Docker +DOCKER_LPT_NIMFLAGS ?= -d:chronicles_colors:none -d:insecure + +# build a docker image for the fleet +docker-liteprotocoltester: DOCKER_LPT_TAG ?= latest +docker-liteprotocoltester: DOCKER_LPT_NAME ?= wakuorg/liteprotocoltester:$(DOCKER_LPT_TAG) +# --no-cache +docker-liteprotocoltester: + docker build \ + --build-arg="MAKE_TARGET=liteprotocoltester" \ + --build-arg="NIMFLAGS=$(DOCKER_LPT_NIMFLAGS)" \ + --build-arg="NIM_COMMIT=$(DOCKER_NIM_COMMIT)" \ + --build-arg="LOG_LEVEL=TRACE" \ + --label="commit=$(shell git rev-parse HEAD)" \ + --label="version=$(GIT_VERSION)" \ + --target $(if $(filter deploy,$(DOCKER_LPT_TAG)),deployment_lpt,standalone_lpt) \ + --tag $(DOCKER_LPT_NAME) \ + --file apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile \ + . 
+ +docker-quick-liteprotocoltester: DOCKER_LPT_TAG ?= latest +docker-quick-liteprotocoltester: DOCKER_LPT_NAME ?= wakuorg/liteprotocoltester:$(DOCKER_LPT_TAG) +docker-quick-liteprotocoltester: | liteprotocoltester + docker build \ + --tag $(DOCKER_LPT_NAME) \ + --file apps/liteprotocoltester/Dockerfile.liteprotocoltester \ + . + +docker-liteprotocoltester-push: + docker push $(DOCKER_LPT_NAME) + + +################ +## C Bindings ## +################ +.PHONY: cbindings cwaku_example libwaku + +STATIC ?= 0 + + +libwaku: | build deps librln + rm -f build/libwaku* + +ifeq ($(STATIC), 1) + echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims +else ifeq ($(detected_OS),Windows) + echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims +else + echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims +endif + +##################### +## Mobile Bindings ## +##################### +.PHONY: libwaku-android \ + libwaku-android-precheck \ + libwaku-android-arm64 \ + libwaku-android-amd64 \ + libwaku-android-x86 \ + libwaku-android-arm \ + rebuild-nat-libs \ + build-libwaku-for-android-arch + +ANDROID_TARGET ?= 30 +ifeq ($(detected_OS),Darwin) + ANDROID_TOOLCHAIN_DIR := $(ANDROID_NDK_HOME)/toolchains/llvm/prebuilt/darwin-x86_64 +else + ANDROID_TOOLCHAIN_DIR := $(ANDROID_NDK_HOME)/toolchains/llvm/prebuilt/linux-x86_64 +endif + +rebuild-nat-libs: | clean-cross nat-libs + +libwaku-android-precheck: +ifndef ANDROID_NDK_HOME + $(error ANDROID_NDK_HOME is not set) +endif + +build-libwaku-for-android-arch: + $(MAKE) rebuild-nat-libs CC=$(ANDROID_TOOLCHAIN_DIR)/bin/$(ANDROID_COMPILER) && \ + ./scripts/build_rln_android.sh $(CURDIR)/build $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(CROSS_TARGET) $(ABIDIR) && \ + CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_ARCH=$(ANDROID_ARCH) ANDROID_COMPILER=$(ANDROID_COMPILER) ANDROID_TOOLCHAIN_DIR=$(ANDROID_TOOLCHAIN_DIR) $(ENV_SCRIPT) nim 
libWakuAndroid $(NIM_PARAMS) waku.nims + +libwaku-android-arm64: ANDROID_ARCH=aarch64-linux-android +libwaku-android-arm64: CPU=arm64 +libwaku-android-arm64: ABIDIR=arm64-v8a +libwaku-android-arm64: | libwaku-android-precheck build deps + $(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=$(ANDROID_ARCH) CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang + +libwaku-android-amd64: ANDROID_ARCH=x86_64-linux-android +libwaku-android-amd64: CPU=amd64 +libwaku-android-amd64: ABIDIR=x86_64 +libwaku-android-amd64: | libwaku-android-precheck build deps + $(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=$(ANDROID_ARCH) CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang + +libwaku-android-x86: ANDROID_ARCH=i686-linux-android +libwaku-android-x86: CPU=i386 +libwaku-android-x86: ABIDIR=x86 +libwaku-android-x86: | libwaku-android-precheck build deps + $(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=$(ANDROID_ARCH) CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang + +libwaku-android-arm: ANDROID_ARCH=armv7a-linux-androideabi +libwaku-android-arm: CPU=arm +libwaku-android-arm: ABIDIR=armeabi-v7a +libwaku-android-arm: | libwaku-android-precheck build deps +# cross-rs target architecture name does not match the one used in android + $(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=armv7-linux-androideabi CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang + +libwaku-android: + $(MAKE) libwaku-android-amd64 + $(MAKE) libwaku-android-arm64 + $(MAKE) libwaku-android-x86 +# This target is disabled because on recent versions of cross-rs complain with the following error +# relocation R_ARM_THM_ALU_PREL_11_0 cannot be used against symbol 'stack_init_trampoline_return'; recompile with -fPIC +# It's likely this architecture is not used 
so we might just not support it. +# $(MAKE) libwaku-android-arm + +cwaku_example: | build libwaku + echo -e $(BUILD_MSG) "build/$@" && \ + cc -o "build/$@" \ + ./examples/cbindings/waku_example.c \ + ./examples/cbindings/base64.c \ + -lwaku -Lbuild/ \ + -pthread -ldl -lm \ + -lminiupnpc -Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build/ \ + -lnatpmp -Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream/ \ + vendor/nim-libbacktrace/libbacktrace_wrapper.o \ + vendor/nim-libbacktrace/install/usr/lib/libbacktrace.a + +cppwaku_example: | build libwaku + echo -e $(BUILD_MSG) "build/$@" && \ + g++ -o "build/$@" \ + ./examples/cpp/waku.cpp \ + ./examples/cpp/base64.cpp \ + -lwaku -Lbuild/ \ + -pthread -ldl -lm \ + -lminiupnpc -Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build/ \ + -lnatpmp -Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream/ \ + vendor/nim-libbacktrace/libbacktrace_wrapper.o \ + vendor/nim-libbacktrace/install/usr/lib/libbacktrace.a + +nodejswaku: | build deps + echo -e $(BUILD_MSG) "build/$@" && \ + node-gyp build --directory=examples/nodejs/ + +endif # "variables.mk" was not included + +################### +# Release Targets # +################### + +release-notes: + docker run \ + -it \ + --rm \ + -v $${PWD}:/opt/sv4git/repo:z \ + -u $(shell id -u) \ + docker.io/wakuorg/sv4git:latest \ + release-notes |\ + sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' +# I could not get the tool to replace issue ids with links, so using sed for now, +# asked here: https://github.com/bvieira/sv4git/discussions/101 + diff --git a/third-party/nwaku/README.md b/third-party/nwaku/README.md new file mode 100644 index 0000000..ce352d6 --- /dev/null +++ b/third-party/nwaku/README.md @@ -0,0 +1,186 @@ +# Nwaku + +## Introduction + +The nwaku repository implements Waku, and provides tools related to it. + +- A Nim implementation of the [Waku (v2) protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html). 
+- CLI application `wakunode2` that allows you to run a Waku node. +- Examples of Waku usage. +- Various tests of above. + +For more details see the [source code](waku/README.md) + +## How to Build & Run ( Linux, MacOS & WSL ) + +These instructions are generic. For more detailed instructions, see the Waku source code above. + +### Prerequisites + +The standard developer tools, including a C compiler, GNU Make, Bash, and Git. More information on these installations can be found [here](https://docs.waku.org/guides/nwaku/build-source#install-dependencies). + +> In some distributions (Fedora linux for example), you may need to install `which` utility separately. Nimbus build system is relying on it. + +You'll also need an installation of Rust and its toolchain (specifically `rustc` and `cargo`). +The easiest way to install these, is using `rustup`: + +```bash +curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +``` + +### Wakunode + +```bash +# The first `make` invocation will update all Git submodules. +# You'll run `make update` after each `git pull` in the future to keep those submodules updated. +make wakunode2 + +# Build with custom compilation flags. Do not use NIM_PARAMS unless you know what you are doing. +# Replace with your own flags +make wakunode2 NIMFLAGS="-d:chronicles_colors:none -d:disableMarchNative" + +# Run with DNS bootstrapping +./build/wakunode2 --dns-discovery --dns-discovery-url=DNS_BOOTSTRAP_NODE_URL + +# See available command line options +./build/wakunode2 --help +``` +To join the network, you need to know the address of at least one bootstrap node. +Please refer to the [Waku README](https://github.com/waku-org/nwaku/blob/master/waku/README.md) for more information. 
+ +For more on how to run `wakunode2`, refer to: +- [Run using binaries](https://docs.waku.org/guides/nwaku/build-source) +- [Run using docker](https://docs.waku.org/guides/nwaku/run-docker) +- [Run using docker-compose](https://docs.waku.org/guides/nwaku/run-docker-compose) + +#### Issues +##### WSL +If you encounter difficulties building the project on WSL, consider placing the project within WSL's filesystem, avoiding the `/mnt/` directory. + +### How to Build & Run ( Windows ) + +### Windows Build Instructions + +#### 1. Install Required Tools +- **Git Bash Terminal**: Download and install from https://git-scm.com/download/win +- **MSYS2**: + a. Download installer from https://www.msys2.org + b. Install at "C:\" (default location). Remove/rename the msys folder in case of previous installation. + c. Use the mingw64 terminal from msys64 directory for package installation. + +#### 2. Install Dependencies +Open MSYS2 mingw64 terminal and run the following one-by-one : +```bash +pacman -Syu --noconfirm +pacman -S --noconfirm --needed mingw-w64-x86_64-toolchain +pacman -S --noconfirm --needed base-devel make cmake upx +pacman -S --noconfirm --needed mingw-w64-x86_64-rust +pacman -S --noconfirm --needed mingw-w64-x86_64-postgresql +pacman -S --noconfirm --needed mingw-w64-x86_64-gcc +pacman -S --noconfirm --needed mingw-w64-x86_64-gcc-libs +pacman -S --noconfirm --needed mingw-w64-x86_64-libwinpthread-git +pacman -S --noconfirm --needed mingw-w64-x86_64-zlib +pacman -S --noconfirm --needed mingw-w64-x86_64-openssl +pacman -S --noconfirm --needed mingw-w64-x86_64-python +``` + +#### 3. Build Wakunode +- Open Git Bash as administrator +- clone nwaku and cd nwaku +- Execute: `./scripts/build_windows.sh` + +#### 4. 
Troubleshooting +If `wakunode2.exe` isn't generated: +- **Missing Dependencies**: Verify with: + `which make cmake gcc g++ rustc cargo python3 upx` + If missing, revisit Step 2 or ensure MSYS2 is at `C:\` +- **Installation Conflicts**: Remove existing MinGW/MSYS2/Git Bash installations and perform fresh install + +### Developing + +#### Nim Runtime +This repository is bundled with a Nim runtime that includes the necessary dependencies for the project. + +Before you can utilize the runtime you'll need to build the project, as detailed in a previous section. +This will generate a `vendor` directory containing various dependencies, including the `nimbus-build-system` which has the bundled nim runtime. + +After successfully building the project, you may bring the bundled runtime into scope by running: +```bash +source env.sh +``` +If everything went well, you should see your prompt suffixed with `[Nimbus env]$`. Now you can run `nim` commands as usual. + +### Test Suite + +```bash +# Run all the Waku tests +make test + +# Run a specific test file +make test +# e.g. : make test tests/wakunode2/test_all.nim + +# Run a specific test name from a specific test file +make test +# e.g. : make test tests/wakunode2/test_all.nim "node setup is successful with default configuration" +``` + +### Building single test files + +During development it is helpful to build and run a single test file. +To support this make has a specific target: + +targets: +- `build/` +- `test/` + +Binary will be created as `.bin` under the `build` directory . + +```bash +# Build and run your test file separately +make test/tests/common/test_enr_builder.nim +``` + +### Testing against `js-waku` +Refer to [js-waku repo](https://github.com/waku-org/js-waku/tree/master/packages/tests) for instructions. + +## Formatting + +Nim files are expected to be formatted using the [`nph`](https://github.com/arnetheduck/nph) version present in `vendor/nph`. 
+ +You can easily format file with the `make nph/ file` command. +For example: + +``` +make nph/waku/waku_core.nim +``` + +A convenient git hook is provided to automatically format file at commit time. +Run the following command to install it: + +```shell +make install-nph +``` + +### Examples + +Examples can be found in the examples folder. +This includes a fully featured chat example. + +### Tools + +Different tools and their corresponding how-to guides can be found in the `tools` folder. + +### Bugs, Questions & Features + +For an inquiry, or if you would like to propose new features, feel free to [open a general issue](https://github.com/waku-org/nwaku/issues/new). + +For bug reports, please [tag your issue with the `bug` label](https://github.com/waku-org/nwaku/issues/new). + +If you believe the reported issue requires critical attention, please [use the `critical` label](https://github.com/waku-org/nwaku/issues/new?labels=critical,bug) to assist with triaging. + +To get help, or participate in the conversation, join the [Waku Discord](https://discord.waku.org/) server. 
+ +### Docs + +* [REST API Documentation](https://waku-org.github.io/waku-rest-api/) diff --git a/third-party/nwaku/apps/benchmarks/benchmarks.nim b/third-party/nwaku/apps/benchmarks/benchmarks.nim new file mode 100644 index 0000000..75686c8 --- /dev/null +++ b/third-party/nwaku/apps/benchmarks/benchmarks.nim @@ -0,0 +1,74 @@ +import + std/[strutils, times, sequtils, osproc], math, results, options, testutils/unittests + +import + waku/[ + waku_rln_relay/protocol_types, + waku_rln_relay/rln, + waku_rln_relay, + waku_rln_relay/conversion_utils, + waku_rln_relay/group_manager/on_chain/group_manager, + ], + tests/waku_rln_relay/utils_onchain + +proc benchmark( + manager: OnChainGroupManager, registerCount: int, messageLimit: int +): Future[string] {.async, gcsafe.} = + # Register a new member so that we can later generate proofs + let idCredentials = generateCredentials(manager.rlnInstance, registerCount) + + var start_time = getTime() + for i in 0 .. registerCount - 1: + try: + await manager.register(idCredentials[i], UserMessageLimit(messageLimit + 1)) + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + debug "registration finished", + iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds + + discard await manager.updateRoots() + let proofResult = await manager.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager.merkleProofCache = proofResult.get() + + let epoch = default(Epoch) + debug "epoch in bytes", epochHex = epoch.inHex() + let data: seq[byte] = newSeq[byte](1024) + + var proofGenTimes: seq[times.Duration] = @[] + var proofVerTimes: seq[times.Duration] = @[] + + start_time = getTime() + for i in 1 .. 
messageLimit: + var generate_time = getTime() + let proof = manager.generateProof(data, epoch, MessageId(i.uint8)).valueOr: + raiseAssert $error + proofGenTimes.add(getTime() - generate_time) + + let verify_time = getTime() + let ok = manager.verifyProof(data, proof).valueOr: + raiseAssert $error + proofVerTimes.add(getTime() - verify_time) + debug "iteration finished", + iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds + + echo "Proof generation times: ", sum(proofGenTimes) div len(proofGenTimes) + echo "Proof verification times: ", sum(proofVerTimes) div len(proofVerTimes) + +proc main() = + # Start a local Ethereum JSON-RPC (Anvil) so that the group-manager setup can connect. + let anvilProc = runAnvil() + defer: + stopAnvil(anvilProc) + + # Set up an On-chain group manager (includes contract deployment) + let manager = waitFor setupOnchainGroupManager() + (waitFor manager.init()).isOkOr: + raiseAssert $error + + discard waitFor benchmark(manager, 200, 20) + +when isMainModule: + main() diff --git a/third-party/nwaku/apps/chat2/chat2.nim b/third-party/nwaku/apps/chat2/chat2.nim new file mode 100644 index 0000000..1531a46 --- /dev/null +++ b/third-party/nwaku/apps/chat2/chat2.nim @@ -0,0 +1,626 @@ +## chat2 is an example of usage of Waku v2. For suggested usage options, please +## see dingpu tutorial in docs folder. + +when not (compileOption("threads")): + {.fatal: "Please, compile this program with the --threads:on option!".} + +{.push raises: [].} + +import std/[strformat, strutils, times, options, random, sequtils] +import + confutils, + chronicles, + chronos, + eth/keys, + bearssl, + stew/[byteutils, results], + metrics, + metrics/chronos_httpserver +import + libp2p/[ + switch, # manage transports, a single entry point for dialing and listening + crypto/crypto, # cryptographic functions + stream/connection, # create and close stream read / write connections + multiaddress, + # encode different addressing schemes. 
For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP + peerinfo, + # manage the information of a peer, such as peer ID and public / private key + peerid, # Implement how peers interact + protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs + nameresolving/dnsresolver, + ] # define DNS resolution +import + waku/[ + waku_core, + waku_lightpush_legacy/common, + waku_lightpush_legacy/rpc, + waku_enr, + discovery/waku_dnsdisc, + waku_store_legacy, + waku_node, + node/waku_metrics, + node/peer_manager, + factory/builder, + common/utils/nat, + waku_relay, + waku_store/common, + ], + ./config_chat2 + +import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub +import ../../waku/waku_rln_relay + +const Help = + """ + Commands: /[?|help|connect|nick|exit] + help: Prints this help + connect: dials a remote peer + nick: change nickname for current chat session + exit: exits chat session +""" + +# XXX Connected is a bit annoying, because incoming connections don't trigger state change +# Could poll connection pool or something here, I suppose +# TODO Ensure connected turns true on incoming connections, or get rid of it +type Chat = ref object + node: WakuNode # waku node for publishing, subscribing, etc + transp: StreamTransport # transport streams between read & write file descriptor + subscribed: bool # indicates if a node is subscribed or not to a topic + connected: bool # if the node is connected to another peer + started: bool # if the node has started + nick: string # nickname for this chat session + prompt: bool # chat prompt is showing + contentTopic: string # default content topic for chat messages + +type + PrivateKey* = crypto.PrivateKey + Topic* = waku_core.PubsubTopic + +##################### +## chat2 protobufs ## +##################### + +type + SelectResult*[T] = Result[T, string] + + Chat2Message* = object + timestamp*: int64 + nick*: string + payload*: seq[byte] + +proc init*(T: type 
Chat2Message, buffer: seq[byte]): ProtoResult[T] = + var msg = Chat2Message() + let pb = initProtoBuffer(buffer) + + var timestamp: uint64 + discard ?pb.getField(1, timestamp) + msg.timestamp = int64(timestamp) + + discard ?pb.getField(2, msg.nick) + discard ?pb.getField(3, msg.payload) + + ok(msg) + +proc encode*(message: Chat2Message): ProtoBuffer = + var serialised = initProtoBuffer() + + serialised.write(1, uint64(message.timestamp)) + serialised.write(2, message.nick) + serialised.write(3, message.payload) + + return serialised + +proc toString*(message: Chat2Message): string = + # Get message date and timestamp in local time + let time = message.timestamp.fromUnix().local().format("'<'MMM' 'dd,' 'HH:mm'>'") + + return time & " " & message.nick & ": " & string.fromBytes(message.payload) + +##################### + +proc connectToNodes(c: Chat, nodes: seq[string]) {.async.} = + echo "Connecting to nodes" + await c.node.connectToNodes(nodes) + c.connected = true + +proc showChatPrompt(c: Chat) = + if not c.prompt: + try: + stdout.write(">> ") + stdout.flushFile() + c.prompt = true + except IOError: + discard + +proc getChatLine(c: Chat, msg: WakuMessage): Result[string, string] = + # No payload encoding/encryption from Waku + let + pb = Chat2Message.init(msg.payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(msg.payload) + return ok(chatline) + +proc printReceivedMessage(c: Chat, msg: WakuMessage) = + let + pb = Chat2Message.init(msg.payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(msg.payload) + try: + echo &"{chatLine}" + except ValueError: + # Formatting fail. Print chat line in any case. 
+ echo chatLine + + c.prompt = false + showChatPrompt(c) + trace "Printing message", + topic = DefaultPubsubTopic, chatLine, contentTopic = msg.contentTopic + +proc readNick(transp: StreamTransport): Future[string] {.async.} = + # Chat prompt + stdout.write("Choose a nickname >> ") + stdout.flushFile() + return await transp.readLine() + +proc startMetricsServer( + serverIp: IpAddress, serverPort: Port +): Result[MetricsHttpServerRef, string] = + info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort + + let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort) + if metricsServerRes.isErr(): + return err("metrics HTTP server start failed: " & $metricsServerRes.error) + + let server = metricsServerRes.value + try: + waitFor server.start() + except CatchableError: + return err("metrics HTTP server start failed: " & getCurrentExceptionMsg()) + + info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort + ok(metricsServerRes.value) + +proc publish(c: Chat, line: string) = + # First create a Chat2Message protobuf with this line of text + let time = getTime().toUnix() + let chat2pb = + Chat2Message(timestamp: time, nick: c.nick, payload: line.toBytes()).encode() + + ## @TODO: error handling on failure + proc handler(response: PushResponse) {.gcsafe, closure.} = + trace "lightpush response received", response = response + + var message = WakuMessage( + payload: chat2pb.buffer, + contentTopic: c.contentTopic, + version: 0, + timestamp: getNanosecondTime(time), + ) + if not isNil(c.node.wakuRlnRelay): + # for future version when we support more than one rln protected content topic, + # we should check the message content topic as well + let appendRes = c.node.wakuRlnRelay.appendRLNProof(message, float64(time)) + if appendRes.isErr(): + debug "could not append rate limit proof to the message" + else: + debug "rate limit proof is appended to the message" + let decodeRes = RateLimitProof.init(message.proof) + if 
decodeRes.isErr(): + error "could not decode the RLN proof" + + let proof = decodeRes.get() + # TODO move it to log after dogfooding + let msgEpoch = fromEpoch(proof.epoch) + if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == msgEpoch: + echo "--rln epoch: ", + msgEpoch, " ⚠️ message rate violation! you are spamming the network!" + else: + echo "--rln epoch: ", msgEpoch + # update the last epoch + c.node.wakuRlnRelay.lastEpoch = proof.epoch + + try: + if not c.node.wakuLegacyLightPush.isNil(): + # Attempt lightpush + (waitFor c.node.legacyLightpushPublish(some(DefaultPubsubTopic), message)).isOkOr: + error "failed to publish lightpush message", error = error + else: + (waitFor c.node.publish(some(DefaultPubsubTopic), message)).isOkOr: + error "failed to publish message", error = error + except CatchableError: + error "caught error publishing message: ", error = getCurrentExceptionMsg() + +# TODO This should read or be subscribe handler subscribe +proc readAndPrint(c: Chat) {.async.} = + while true: + # while p.connected: + # # TODO: echo &"{p.id} -> " + # + # echo cast[string](await p.conn.readLp(1024)) + #echo "readAndPrint subscribe NYI" + await sleepAsync(100.millis) + +# TODO Implement +proc writeAndPrint(c: Chat) {.async.} = + while true: + # Connect state not updated on incoming WakuRelay connections + # if not c.connected: + # echo "type an address or wait for a connection:" + # echo "type /[help|?] 
for help" + + # Chat prompt + showChatPrompt(c) + + let line = await c.transp.readLine() + if line.startsWith("/help") or line.startsWith("/?") or not c.started: + echo Help + continue + + # if line.startsWith("/disconnect"): + # echo "Ending current session" + # if p.connected and p.conn.closed.not: + # await p.conn.close() + # p.connected = false + elif line.startsWith("/connect"): + # TODO Should be able to connect to multiple peers for Waku chat + if c.connected: + echo "already connected to at least one peer" + continue + + echo "enter address of remote peer" + let address = await c.transp.readLine() + if address.len > 0: + await c.connectToNodes(@[address]) + elif line.startsWith("/nick"): + # Set a new nickname + c.nick = await readNick(c.transp) + echo "You are now known as " & c.nick + elif line.startsWith("/exit"): + echo "quitting..." + + try: + await c.node.stop() + except: + echo "exception happened when stopping: " & getCurrentExceptionMsg() + + quit(QuitSuccess) + else: + # XXX connected state problematic + if c.started: + c.publish(line) + # TODO Connect to peer logic? + else: + try: + if line.startsWith("/") and "p2p" in line: + await c.connectToNodes(@[line]) + except: + echo &"unable to dial remote peer {line}" + echo getCurrentExceptionMsg() + +proc readWriteLoop(c: Chat) {.async.} = + asyncSpawn c.writeAndPrint() # execute the async function but does not block + asyncSpawn c.readAndPrint() + +proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} = + ## This procedure performs reading from `stdin` and sends data over + ## pipe to main thread. 
+ let transp = fromPipe(wfd) + + while true: + let line = stdin.readLine() + discard waitFor transp.write(line & "\r\n") + +{.pop.} + # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError +proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = + let + transp = fromPipe(rfd) + conf = Chat2Conf.load() + nodekey = + if conf.nodekey.isSome(): + conf.nodekey.get() + else: + PrivateKey.random(Secp256k1, rng[]).tryGet() + + # set log level + if conf.logLevel != LogLevel.NONE: + setLogLevel(conf.logLevel) + + let natRes = setupNat( + conf.nat, + clientId, + Port(uint16(conf.tcpPort) + conf.portsShift), + Port(uint16(conf.udpPort) + conf.portsShift), + ) + + if natRes.isErr(): + raise newException(ValueError, "setupNat error " & natRes.error) + + let (extIp, extTcpPort, extUdpPort) = natRes.get() + + var enrBuilder = EnrBuilder.init(nodeKey) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + let node = block: + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + + builder + .withNetworkConfigurationDetails( + conf.listenAddress, + Port(uint16(conf.tcpPort) + conf.portsShift), + extIp, + extTcpPort, + wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift), + wsEnabled = conf.websocketSupport, + wssEnabled = conf.websocketSecureSupport, + ) + .tryGet() + builder.build().tryGet() + + await node.start() + + if conf.rlnRelayCredPath == "": + raise newException(ConfigurationError, "rln-relay-cred-path MUST be passed") + + if conf.relay: + let shards = + conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it))) + (await node.mountRelay()).isOkOr: + echo "failed to mount relay: " & error + return + + await node.mountLibp2pPing() + + let nick = await readNick(transp) + echo "Welcome, " & nick & "!" 
+ + var chat = Chat( + node: node, + transp: transp, + subscribed: true, + connected: false, + started: true, + nick: nick, + prompt: false, + contentTopic: conf.contentTopic, + ) + + if conf.staticnodes.len > 0: + echo "Connecting to static peers..." + await connectToNodes(chat, conf.staticnodes) + + var dnsDiscoveryUrl = none(string) + + if conf.fleet != Fleet.none: + # Use DNS discovery to connect to selected fleet + echo "Connecting to " & $conf.fleet & " fleet using DNS discovery..." + + if conf.fleet == Fleet.test: + dnsDiscoveryUrl = some( + "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im" + ) + else: + # Connect to sandbox by default + dnsDiscoveryUrl = some( + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" + ) + elif conf.dnsDiscoveryUrl != "": + # No pre-selected fleet. Discover nodes via DNS using user config + debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl + dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl) + + var discoveredNodes: seq[RemotePeerInfo] + + if dnsDiscoveryUrl.isSome: + var nameServers: seq[TransportAddress] + for ip in conf.dnsAddrsNameServers: + nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53 + + let dnsResolver = DnsResolver.new(nameServers) + + proc resolver(domain: string): Future[string] {.async, gcsafe.} = + trace "resolving", domain = domain + let resolved = await dnsResolver.resolveTxt(domain) + return resolved[0] # Use only first answer + + var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver) + if wakuDnsDiscovery.isOk: + let discoveredPeers = await wakuDnsDiscovery.get().findPeers() + if discoveredPeers.isOk: + info "Connecting to discovered peers" + discoveredNodes = discoveredPeers.get() + echo "Discovered and connecting to " & $discoveredNodes + waitFor chat.node.connectToNodes(discoveredNodes) + else: + warn "Failed to init Waku DNS discovery" + + let 
peerInfo = node.switch.peerInfo + let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId + echo &"Listening on\n {listenStr}" + + if (conf.storenode != "") or (conf.store == true): + await node.mountStore() + + var storenode: Option[RemotePeerInfo] + + if conf.storenode != "": + let peerInfo = parsePeerInfo(conf.storenode) + if peerInfo.isOk(): + storenode = some(peerInfo.value) + else: + error "Incorrect conf.storenode", error = peerInfo.error + elif discoveredNodes.len > 0: + echo "Store enabled, but no store nodes configured. Choosing one at random from discovered peers" + storenode = some(discoveredNodes[rand(0 .. len(discoveredNodes) - 1)]) + + if storenode.isSome(): + # We have a viable storenode. Let's query it for historical messages. + echo "Connecting to storenode: " & $(storenode.get()) + + node.mountStoreClient() + node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec) + + proc storeHandler(response: StoreQueryResponse) {.gcsafe.} = + for msg in response.messages: + let payload = + if msg.message.isSome(): + msg.message.get().payload + else: + newSeq[byte](0) + + let + pb = Chat2Message.init(payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(payload) + echo &"{chatLine}" + info "Hit store handler" + + let queryRes = await node.query( + StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get() + ) + if queryRes.isOk(): + storeHandler(queryRes.value) + + # NOTE Must be mounted after relay + if conf.lightpushnode != "": + let peerInfo = parsePeerInfo(conf.lightpushnode) + if peerInfo.isOk(): + await mountLegacyLightPush(node) + node.mountLegacyLightPushClient() + node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec) + else: + error "LightPush not mounted. 
Couldn't parse conf.lightpushnode", + error = peerInfo.error + + if conf.filternode != "": + let peerInfo = parsePeerInfo(conf.filternode) + if peerInfo.isOk(): + await node.mountFilter() + await node.mountFilterClient() + + proc filterHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = + trace "Hit filter handler", contentTopic = msg.contentTopic + chat.printReceivedMessage(msg) + + # TODO: Here to support FilterV2 relevant subscription. + else: + error "Filter not mounted. Couldn't parse conf.filternode", error = peerInfo.error + + # Subscribe to a topic, if relay is mounted + if conf.relay: + proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + trace "Hit subscribe handler", topic + + if msg.contentTopic == chat.contentTopic: + chat.printReceivedMessage(msg) + + node.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), WakuRelayHandler(handler) + ).isOkOr: + error "failed to subscribe to pubsub topic", + topic = DefaultPubsubTopic, error = error + + if conf.rlnRelay: + info "WakuRLNRelay is enabled" + + proc spamHandler(wakuMessage: WakuMessage) {.gcsafe, closure.} = + debug "spam handler is called" + let chatLineResult = chat.getChatLine(wakuMessage) + if chatLineResult.isOk(): + echo "A spam message is found and discarded : ", chatLineResult.value + else: + echo "A spam message is found and discarded" + chat.prompt = false + showChatPrompt(chat) + + echo "rln-relay preparation is in progress..." 
+ + let rlnConf = WakuRlnConfig( + dynamic: conf.rlnRelayDynamic, + credIndex: conf.rlnRelayCredIndex, + chainId: UInt256.fromBytesBE(conf.rlnRelayChainId.toBytesBE()), + ethClientUrls: conf.ethClientUrls.mapIt(string(it)), + creds: some( + RlnRelayCreds( + path: conf.rlnRelayCredPath, password: conf.rlnRelayCredPassword + ) + ), + userMessageLimit: conf.rlnRelayUserMessageLimit, + epochSizeSec: conf.rlnEpochSizeSec, + ) + + waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler)) + + let membershipIndex = node.wakuRlnRelay.groupManager.membershipIndex.get() + let identityCredential = node.wakuRlnRelay.groupManager.idCredentials.get() + echo "your membership index is: ", membershipIndex + echo "your rln identity commitment key is: ", + identityCredential.idCommitment.inHex() + else: + info "WakuRLNRelay is disabled" + echo "WakuRLNRelay is disabled, please enable it by passing in the --rln-relay flag" + if conf.metricsLogging: + startMetricsLog() + + if conf.metricsServer: + let metricsServer = startMetricsServer( + conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift) + ) + + await chat.readWriteLoop() + + runForever() + +proc main(rng: ref HmacDrbgContext) {.async.} = + let (rfd, wfd) = createAsyncPipe() + if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe: + raise newException(ValueError, "Could not initialize pipe!") + + var thread: Thread[AsyncFD] + thread.createThread(readInput, wfd) + try: + await processInput(rfd, rng) + # Handle only ConfigurationError for now + # TODO: Throw other errors from the mounting procedure + except ConfigurationError as e: + raise e + +when isMainModule: # isMainModule = true when the module is compiled as the main file + let rng = crypto.newRng() + try: + waitFor(main(rng)) + except CatchableError as e: + raise e + +## Dump of things that can be improved: +## +## - Incoming dialed peer does not change connected state (not relying on it for now) +## - Unclear if staticnode argument works (can 
enter manually) +## - Don't trigger self / double publish own messages +## - Integrate store protocol (fetch messages in beginning) +## - Integrate filter protocol (default/option to be light node, connect to filter node) +## - Test/default to cluster node connection (diff protocol version) +## - Redirect logs to separate file +## - Expose basic publish/subscribe etc commands with /syntax +## - Show part of peerid to know who sent message +## - Deal with protobuf messages (e.g. other chat protocol, or encrypted) diff --git a/third-party/nwaku/apps/chat2/config_chat2.nim b/third-party/nwaku/apps/chat2/config_chat2.nim new file mode 100644 index 0000000..fe7865c --- /dev/null +++ b/third-party/nwaku/apps/chat2/config_chat2.nim @@ -0,0 +1,351 @@ +import + chronicles, + chronos, + confutils, + confutils/defs, + confutils/std/net, + eth/keys, + libp2p/crypto/crypto, + libp2p/crypto/secp, + nimcrypto/utils, + std/strutils, + regex +import waku/waku_core + +type + Fleet* = enum + none + prod + test + + EthRpcUrl* = distinct string + + Chat2Conf* = object ## General node config + logLevel* {. + desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level" + .}: LogLevel + + nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}: + Option[crypto.PrivateKey] + + listenAddress* {. + defaultValue: defaultListenAddress(config), + desc: "Listening address for the LibP2P traffic.", + name: "listen-address" + .}: IpAddress + + tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}: + Port + + udpPort* {.desc: "UDP listening port.", defaultValue: 60000, name: "udp-port".}: + Port + + portsShift* {. + desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift" + .}: uint16 + + nat* {. + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:.", + defaultValue: "any" + .}: string + + ## Persistence config + dbPath* {. 
+ desc: "The database path for peristent storage", defaultValue: "", name: "db-path" + .}: string + + persistPeers* {. + desc: "Enable peer persistence: true|false", + defaultValue: false, + name: "persist-peers" + .}: bool + + persistMessages* {. + desc: "Enable message persistence: true|false", + defaultValue: false, + name: "persist-messages" + .}: bool + + ## Relay config + relay* {. + desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay" + .}: bool + + staticnodes* {. + desc: "Peer multiaddr to directly connect with. Argument may be repeated.", + name: "staticnode" + .}: seq[string] + + keepAlive* {. + desc: "Enable keep-alive for idle connections: true|false", + defaultValue: false, + name: "keep-alive" + .}: bool + + clusterId* {. + desc: + "Cluster id that the node is running in. Node in a different cluster id is disconnected.", + defaultValue: 0, + name: "cluster-id" + .}: uint16 + + shards* {. + desc: + "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.", + defaultValue: @[uint16(0)], + name: "shard" + .}: seq[uint16] + + ## Store config + store* {. + desc: "Enable store protocol: true|false", defaultValue: true, name: "store" + .}: bool + + storenode* {. + desc: "Peer multiaddr to query for storage.", defaultValue: "", name: "storenode" + .}: string + + ## Filter config + filter* {. + desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter" + .}: bool + + filternode* {. + desc: "Peer multiaddr to request content filtering of messages.", + defaultValue: "", + name: "filternode" + .}: string + + ## Lightpush config + lightpush* {. + desc: "Enable lightpush protocol: true|false", + defaultValue: false, + name: "lightpush" + .}: bool + + lightpushnode* {. + desc: "Peer multiaddr to request lightpush of published messages.", + defaultValue: "", + name: "lightpushnode" + .}: string + + ## Metrics config + metricsServer* {. 
+ desc: "Enable the metrics server: true|false", + defaultValue: false, + name: "metrics-server" + .}: bool + + metricsServerAddress* {. + desc: "Listening address of the metrics server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress + + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server.", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 + + metricsLogging* {. + desc: "Enable metrics logging: true|false", + defaultValue: true, + name: "metrics-logging" + .}: bool + + ## DNS discovery config + dnsDiscovery* {. + desc: + "Deprecated, please set dns-discovery-url instead. Enable discovering nodes via DNS", + defaultValue: false, + name: "dns-discovery" + .}: bool + + dnsDiscoveryUrl* {. + desc: "URL for DNS node list in format 'enrtree://@'", + defaultValue: "", + name: "dns-discovery-url" + .}: string + + dnsAddrsNameServers* {. + desc: + "DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated.", + defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], + name: "dns-addrs-name-server" + .}: seq[IpAddress] + + ## Chat2 configuration + fleet* {. + desc: + "Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.", + defaultValue: Fleet.prod, + name: "fleet" + .}: Fleet + + contentTopic* {. + desc: "Content topic for chat messages.", + defaultValue: "/toy-chat/2/huilong/proto", + name: "content-topic" + .}: string + + ## Websocket Configuration + websocketSupport* {. + desc: "Enable websocket: true|false", + defaultValue: false, + name: "websocket-support" + .}: bool + + websocketPort* {. + desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port" + .}: Port + + websocketSecureSupport* {. + desc: "WebSocket Secure Support.", + defaultValue: false, + name: "websocket-secure-support" + .}: bool + + ## rln-relay configuration + rlnRelay* {. 
+ desc: "Enable spam protection through rln-relay: true|false", + defaultValue: false, + name: "rln-relay" + .}: bool + + rlnRelayChainId* {. + desc: + "Chain ID of the provided contract (optional, will fetch from RPC provider if not used)", + defaultValue: 0, + name: "rln-relay-chain-id" + .}: uint + + rlnRelayCredPath* {. + desc: "The path for peristing rln-relay credential", + defaultValue: "", + name: "rln-relay-cred-path" + .}: string + + rlnRelayCredIndex* {. + desc: "the index of the onchain commitment to use", name: "rln-relay-cred-index" + .}: Option[uint] + + rlnRelayDynamic* {. + desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false", + defaultValue: false, + name: "rln-relay-dynamic" + .}: bool + + rlnRelayIdKey* {. + desc: "Rln relay identity secret key as a Hex string", + defaultValue: "", + name: "rln-relay-id-key" + .}: string + + rlnRelayIdCommitmentKey* {. + desc: "Rln relay identity commitment key as a Hex string", + defaultValue: "", + name: "rln-relay-id-commitment-key" + .}: string + + ethClientUrls* {. + desc: + "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.", + defaultValue: newSeq[EthRpcUrl](0), + name: "rln-relay-eth-client-address" + .}: seq[EthRpcUrl] + + rlnRelayEthContractAddress* {. + desc: "Address of membership contract on an Ethereum testnet", + defaultValue: "", + name: "rln-relay-eth-contract-address" + .}: string + + rlnRelayCredPassword* {. + desc: "Password for encrypting RLN credentials", + defaultValue: "", + name: "rln-relay-cred-password" + .}: string + + rlnRelayUserMessageLimit* {. + desc: + "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", + defaultValue: 1, + name: "rln-relay-user-message-limit" + .}: uint64 + + rlnEpochSizeSec* {. + desc: + "Epoch size in seconds used to rate limit RLN memberships. 
Default is 1 second.", + defaultValue: 1, + name: "rln-relay-epoch-sec" + .}: uint64 + +# NOTE: Keys are different in nim-libp2p +proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T = + try: + let key = SkPrivateKey.init(utils.fromHex(p)).tryGet() + # XXX: Here at the moment + result = crypto.PrivateKey(scheme: Secp256k1, skkey: key) + except CatchableError as e: + raise newException(ValueError, "Invalid private key") + +proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type IpAddress, p: string): T = + try: + result = parseIpAddress(p) + except CatchableError as e: + raise newException(ValueError, "Invalid IP address") + +proc completeCmdArg*(T: type IpAddress, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type Port, p: string): T = + try: + result = Port(parseInt(p)) + except CatchableError as e: + raise newException(ValueError, "Invalid Port number") + +proc completeCmdArg*(T: type Port, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type Option[uint], p: string): T = + try: + some(parseUint(p)) + except CatchableError: + raise newException(ValueError, "Invalid unsigned integer") + +proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type EthRpcUrl, s: string): T = + ## allowed patterns: + ## http://url:port + ## https://url:port + ## http://url:port/path + ## https://url:port/path + ## http://url/with/path + ## http://url:port/path?query + ## https://url:port/path?query + ## disallowed patterns: + ## any valid/invalid ws or wss url + var httpPattern = + re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var wsPattern = + re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + if regex.match(s, wsPattern): + raise newException( + ValueError, "Websocket RPC URL is not supported, Please 
use an HTTP URL" + ) + if not regex.match(s, httpPattern): + raise newException(ValueError, "Invalid HTTP RPC URL") + return EthRpcUrl(s) + +func defaultListenAddress*(conf: Chat2Conf): IpAddress = + # TODO: How should we select between IPv4 and IPv6 + # Maybe there should be a config option for this. + (static parseIpAddress("0.0.0.0")) diff --git a/third-party/nwaku/apps/chat2/nim.cfg b/third-party/nwaku/apps/chat2/nim.cfg new file mode 100644 index 0000000..2231f2e --- /dev/null +++ b/third-party/nwaku/apps/chat2/nim.cfg @@ -0,0 +1,4 @@ +-d:chronicles_line_numbers +-d:chronicles_runtime_filtering:on +-d:discv5_protocol_id:d5waku +path = "../.." diff --git a/third-party/nwaku/apps/chat2bridge/chat2bridge.nim b/third-party/nwaku/apps/chat2bridge/chat2bridge.nim new file mode 100644 index 0000000..c2bf9c0 --- /dev/null +++ b/third-party/nwaku/apps/chat2bridge/chat2bridge.nim @@ -0,0 +1,328 @@ +{.push raises: [].} + +import + std/[tables, times, strutils, hashes, sequtils, json], + chronos, + confutils, + chronicles, + chronicles/topics_registry, + chronos/streams/tlsstream, + metrics, + metrics/chronos_httpserver, + stew/byteutils, + eth/net/nat, + # Matterbridge client imports + # Waku v2 imports + libp2p/crypto/crypto, + libp2p/errors, + waku/[ + waku_core, + waku_node, + node/peer_manager, + waku_filter_v2, + waku_store, + factory/builder, + common/utils/matterbridge_client, + common/rate_limit/setting, + ], + # Chat 2 imports + ../chat2/chat2, + # Common cli config + ./config_chat2bridge + +declarePublicCounter chat2_mb_transfers, + "Number of messages transferred between chat2 and Matterbridge", ["type"] +declarePublicCounter chat2_mb_dropped, "Number of messages dropped", ["reason"] + +logScope: + topics = "chat2bridge" + +################## +# Default values # +################## + +const DeduplQSize = 20 # Maximum number of seen messages to keep in deduplication queue + +######### +# Types # +######### + +type + Chat2MatterBridge* = ref object of RootObj + 
mbClient*: MatterbridgeClient + nodev2*: WakuNode + running: bool + pollPeriod: chronos.Duration + seen: seq[Hash] #FIFO queue + contentTopic: string + + MbMessageHandler = proc(jsonNode: JsonNode) {.async.} + +################### +# Helper functions # +###################S + +proc containsOrAdd(sequence: var seq[Hash], hash: Hash): bool = + if sequence.contains(hash): + return true + + if sequence.len >= DeduplQSize: + trace "Deduplication queue full. Removing oldest item." + sequence.delete 0, 0 # Remove first item in queue + + sequence.add(hash) + + return false + +proc toWakuMessage( + cmb: Chat2MatterBridge, jsonNode: JsonNode +): WakuMessage {.raises: [Defect, KeyError].} = + # Translates a Matterbridge API JSON response to a Waku v2 message + let msgFields = jsonNode.getFields() + + # @TODO error handling here - verify expected fields + + let chat2pb = Chat2Message( + timestamp: getTime().toUnix(), # @TODO use provided timestamp + nick: msgFields["username"].getStr(), + payload: msgFields["text"].getStr().toBytes(), + ).encode() + + WakuMessage(payload: chat2pb.buffer, contentTopic: cmb.contentTopic, version: 0) + +proc toChat2(cmb: Chat2MatterBridge, jsonNode: JsonNode) {.async.} = + let msg = cmb.toWakuMessage(jsonNode) + + if cmb.seen.containsOrAdd(msg.payload.hash()): + # This is a duplicate message. Return. + chat2_mb_dropped.inc(labelValues = ["duplicate"]) + return + + trace "Post Matterbridge message to chat2" + + chat2_mb_transfers.inc(labelValues = ["mb_to_chat2"]) + + (await cmb.nodev2.publish(some(DefaultPubsubTopic), msg)).isOkOr: + error "failed to publish message", error = error + +proc toMatterbridge( + cmb: Chat2MatterBridge, msg: WakuMessage +) {.gcsafe, raises: [Exception].} = + if cmb.seen.containsOrAdd(msg.payload.hash()): + # This is a duplicate message. Return. 
+ chat2_mb_dropped.inc(labelValues = ["duplicate"]) + return + + if msg.contentTopic != cmb.contentTopic: + # Only bridge messages on the configured content topic + chat2_mb_dropped.inc(labelValues = ["filtered"]) + return + + trace "Post chat2 message to Matterbridge" + + chat2_mb_transfers.inc(labelValues = ["chat2_to_mb"]) + + let chat2Msg = Chat2Message.init(msg.payload) + + assert chat2Msg.isOk + + let postRes = cmb.mbClient.postMessage( + text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick + ) + + if postRes.isErr() or (postRes[] == false): + chat2_mb_dropped.inc(labelValues = ["duplicate"]) + error "Matterbridge host unreachable. Dropping message." + +proc pollMatterbridge(cmb: Chat2MatterBridge, handler: MbMessageHandler) {.async.} = + while cmb.running: + let getRes = cmb.mbClient.getMessages() + + if getRes.isOk(): + for jsonNode in getRes[]: + await handler(jsonNode) + else: + error "Matterbridge host unreachable. Sleeping before retrying." + await sleepAsync(chronos.seconds(10)) + + await sleepAsync(cmb.pollPeriod) + +############## +# Public API # +############## +proc new*( + T: type Chat2MatterBridge, + # Matterbridge initialisation + mbHostUri: string, + mbGateway: string, + # NodeV2 initialisation + nodev2Key: crypto.PrivateKey, + nodev2BindIp: IpAddress, + nodev2BindPort: Port, + nodev2ExtIp = none[IpAddress](), + nodev2ExtPort = none[Port](), + contentTopic: string, +): T {. 
+ raises: [Defect, ValueError, KeyError, TLSStreamProtocolError, IOError, LPError] +.} = + # Setup Matterbridge + let mbClient = MatterbridgeClient.new(mbHostUri, mbGateway) + + # Let's verify the Matterbridge configuration before continuing + let clientHealth = mbClient.isHealthy() + + if clientHealth.isOk() and clientHealth[]: + info "Reached Matterbridge host", host = mbClient.host + else: + raise newException(ValueError, "Matterbridge client not reachable/healthy") + + # Setup Waku v2 node + let nodev2 = block: + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodev2Key) + + builder + .withNetworkConfigurationDetails( + nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort + ) + .tryGet() + builder.build().tryGet() + + return Chat2MatterBridge( + mbClient: mbClient, + nodev2: nodev2, + running: false, + pollPeriod: chronos.seconds(1), + contentTopic: contentTopic, + ) + +proc start*(cmb: Chat2MatterBridge) {.async.} = + info "Starting Chat2MatterBridge" + + cmb.running = true + + debug "Start polling Matterbridge" + + # Start Matterbridge polling (@TODO: use streaming interface) + proc mbHandler(jsonNode: JsonNode) {.async.} = + trace "Bridging message from Matterbridge to chat2", jsonNode = jsonNode + waitFor cmb.toChat2(jsonNode) + + asyncSpawn cmb.pollMatterbridge(mbHandler) + + # Start Waku v2 node + debug "Start listening on Waku v2" + await cmb.nodev2.start() + + # Always mount relay for bridge + # `triggerSelf` is false on a `bridge` to avoid duplicates + (await cmb.nodev2.mountRelay()).isOkOr: + error "failed to mount relay", error = error + return + + cmb.nodev2.wakuRelay.triggerSelf = false + + # Bridging + # Handle messages on Waku v2 and bridge to Matterbridge + proc relayHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async.} = + trace "Bridging message from Chat2 to Matterbridge", msg = msg + try: + cmb.toMatterbridge(msg) + except: + error "exception in relayHandler: " & getCurrentExceptionMsg() + + 
cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: + error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error + return + +proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} = + info "Stopping Chat2MatterBridge" + + cmb.running = false + + await cmb.nodev2.stop() + +{.pop.} + # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError +when isMainModule: + import waku/common/utils/nat, waku/waku_api/message_cache + + let + rng = newRng() + conf = Chat2MatterbridgeConf.load() + + if conf.logLevel != LogLevel.NONE: + setLogLevel(conf.logLevel) + + let natRes = setupNat( + conf.nat, + clientId, + Port(uint16(conf.libp2pTcpPort) + conf.portsShift), + Port(uint16(conf.udpPort) + conf.portsShift), + ) + if natRes.isErr(): + error "Error in setupNat", error = natRes.error + + # Load address configuration + let + (nodev2ExtIp, nodev2ExtPort, _) = natRes.get() + ## The following heuristic assumes that, in absence of manual + ## config, the external port is the same as the bind port. 
+ extPort = + if nodev2ExtIp.isSome() and nodev2ExtPort.isNone(): + some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift)) + else: + nodev2ExtPort + + let bridge = Chat2Matterbridge.new( + mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)), + mbGateway = conf.mbGateway, + nodev2Key = conf.nodekey, + nodev2BindIp = conf.listenAddress, + nodev2BindPort = Port(uint16(conf.libp2pTcpPort) + conf.portsShift), + nodev2ExtIp = nodev2ExtIp, + nodev2ExtPort = extPort, + contentTopic = conf.contentTopic, + ) + + waitFor bridge.start() + + # Now load rest of config + # Mount configured Waku v2 protocols + waitFor mountLibp2pPing(bridge.nodev2) + + if conf.store: + waitFor mountStore(bridge.nodev2) + + if conf.filter: + waitFor mountFilter(bridge.nodev2) + + if conf.staticnodes.len > 0: + waitFor connectToNodes(bridge.nodev2, conf.staticnodes) + + if conf.storenode != "": + let storePeer = parsePeerInfo(conf.storenode) + if storePeer.isOk(): + bridge.nodev2.peerManager.addServicePeer(storePeer.value, WakuStoreCodec) + else: + error "Error parsing conf.storenode", error = storePeer.error + + if conf.filternode != "": + let filterPeer = parsePeerInfo(conf.filternode) + if filterPeer.isOk(): + bridge.nodev2.peerManager.addServicePeer( + filterPeer.value, WakuFilterSubscribeCodec + ) + else: + error "Error parsing conf.filternode", error = filterPeer.error + + if conf.metricsServer: + let + address = conf.metricsServerAddress + port = conf.metricsServerPort + conf.portsShift + info "Starting metrics HTTP server", address, port + startMetricsHttpServer($address, Port(port)) + + runForever() diff --git a/third-party/nwaku/apps/chat2bridge/config_chat2bridge.nim b/third-party/nwaku/apps/chat2bridge/config_chat2bridge.nim new file mode 100644 index 0000000..c7d8bb5 --- /dev/null +++ b/third-party/nwaku/apps/chat2bridge/config_chat2bridge.nim @@ -0,0 +1,148 @@ +import + confutils, + confutils/defs, + confutils/std/net, + chronicles, + chronos, + 
libp2p/crypto/[crypto, secp], + eth/keys + +type Chat2MatterbridgeConf* = object + logLevel* {. + desc: "Sets the log level", defaultValue: LogLevel.INFO, name: "log-level" + .}: LogLevel + + listenAddress* {. + defaultValue: defaultListenAddress(config), + desc: "Listening address for the LibP2P traffic", + name: "listen-address" + .}: IpAddress + + libp2pTcpPort* {. + desc: "Libp2p TCP listening port (for Waku v2)", + defaultValue: 9000, + name: "libp2p-tcp-port" + .}: uint16 + + udpPort* {.desc: "UDP listening port", defaultValue: 9000, name: "udp-port".}: uint16 + + portsShift* {. + desc: "Add a shift to all default port numbers", + defaultValue: 0, + name: "ports-shift" + .}: uint16 + + nat* {. + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:", + defaultValue: "any" + .}: string + + metricsServer* {. + desc: "Enable the metrics server", defaultValue: false, name: "metrics-server" + .}: bool + + metricsServerAddress* {. + desc: "Listening address of the metrics server", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress + + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 + + ### Waku v2 options + staticnodes* {. + desc: "Multiaddr of peer to directly connect with. Argument may be repeated", + name: "staticnode" + .}: seq[string] + + nodekey* {. + desc: "P2P node private key as hex", + defaultValue: crypto.PrivateKey.random(Secp256k1, newRng()[]).tryGet(), + name: "nodekey" + .}: crypto.PrivateKey + + store* {. + desc: "Flag whether to start store protocol", defaultValue: true, name: "store" + .}: bool + + filter* {. + desc: "Flag whether to start filter protocol", defaultValue: false, name: "filter" + .}: bool + + relay* {. + desc: "Flag whether to start relay protocol", defaultValue: true, name: "relay" + .}: bool + + storenode* {. 
+ desc: "Multiaddr of peer to connect with for waku store protocol", + defaultValue: "", + name: "storenode" + .}: string + + filternode* {. + desc: "Multiaddr of peer to connect with for waku filter protocol", + defaultValue: "", + name: "filternode" + .}: string + + # Matterbridge options + mbHostAddress* {. + desc: "Listening address of the Matterbridge host", + defaultValue: parseIpAddress("127.0.0.1"), + name: "mb-host-address" + .}: IpAddress + + mbHostPort* {. + desc: "Listening port of the Matterbridge host", + defaultValue: 4242, + name: "mb-host-port" + .}: uint16 + + mbGateway* {. + desc: "Matterbridge gateway", defaultValue: "gateway1", name: "mb-gateway" + .}: string + + ## Chat2 options + contentTopic* {. + desc: "Content topic to bridge chat messages to.", + defaultValue: "/toy-chat/2/huilong/proto", + name: "content-topic" + .}: string + +proc parseCmdArg*(T: type keys.KeyPair, p: string): T = + try: + let privkey = keys.PrivateKey.fromHex(string(p)).tryGet() + result = privkey.toKeyPair() + except CatchableError: + raise newException(ValueError, "Invalid private key") + +proc completeCmdArg*(T: type keys.KeyPair, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T = + let key = SkPrivateKey.init(p) + if key.isOk(): + crypto.PrivateKey(scheme: Secp256k1, skkey: key.get()) + else: + raise newException(ValueError, "Invalid private key") + +proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type IpAddress, p: string): T = + try: + result = parseIpAddress(p) + except CatchableError: + raise newException(ValueError, "Invalid IP address") + +proc completeCmdArg*(T: type IpAddress, val: string): seq[string] = + return @[] + +func defaultListenAddress*(conf: Chat2MatterbridgeConf): IpAddress = + (parseIpAddress("0.0.0.0")) diff --git a/third-party/nwaku/apps/chat2bridge/nim.cfg b/third-party/nwaku/apps/chat2bridge/nim.cfg new file mode 100644 
index 0000000..2231f2e --- /dev/null +++ b/third-party/nwaku/apps/chat2bridge/nim.cfg @@ -0,0 +1,4 @@ +-d:chronicles_line_numbers +-d:chronicles_runtime_filtering:on +-d:discv5_protocol_id:d5waku +path = "../.." diff --git a/third-party/nwaku/apps/chat2mix/chat2mix.nim b/third-party/nwaku/apps/chat2mix/chat2mix.nim new file mode 100644 index 0000000..e75ca75 --- /dev/null +++ b/third-party/nwaku/apps/chat2mix/chat2mix.nim @@ -0,0 +1,703 @@ +## chat2 is an example of usage of Waku v2. For suggested usage options, please +## see dingpu tutorial in docs folder. + +when not (compileOption("threads")): + {.fatal: "Please, compile this program with the --threads:on option!".} + +{.push raises: [].} + +import std/[strformat, strutils, times, options, random, sequtils] +import + confutils, + chronicles, + chronos, + eth/keys, + bearssl, + results, + stew/[byteutils], + metrics, + metrics/chronos_httpserver +import + libp2p/[ + switch, # manage transports, a single entry point for dialing and listening + crypto/crypto, # cryptographic functions + stream/connection, # create and close stream read / write connections + multiaddress, + # encode different addressing schemes. 
For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP + peerinfo, + # manage the information of a peer, such as peer ID and public / private key + peerid, # Implement how peers interact + protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs + nameresolving/dnsresolver, + ] # define DNS resolution +import mix/curve25519 +import + waku/[ + waku_core, + waku_lightpush/common, + waku_lightpush/rpc, + waku_enr, + discovery/waku_dnsdisc, + waku_node, + node/waku_metrics, + node/peer_manager, + factory/builder, + common/utils/nat, + waku_store/common, + waku_filter_v2/client, + common/logging, + ], + ./config_chat2mix + +import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub +import ../../waku/waku_rln_relay + +logScope: + topics = "chat2 mix" + +const Help = + """ + Commands: /[?|help|connect|nick|exit] + help: Prints this help + connect: dials a remote peer + nick: change nickname for current chat session + exit: exits chat session +""" + +# XXX Connected is a bit annoying, because incoming connections don't trigger state change +# Could poll connection pool or something here, I suppose +# TODO Ensure connected turns true on incoming connections, or get rid of it +type Chat = ref object + node: WakuNode # waku node for publishing, subscribing, etc + transp: StreamTransport # transport streams between read & write file descriptor + subscribed: bool # indicates if a node is subscribed or not to a topic + connected: bool # if the node is connected to another peer + started: bool # if the node has started + nick: string # nickname for this chat session + prompt: bool # chat prompt is showing + contentTopic: string # default content topic for chat messages + conf: Chat2Conf # configuration for chat2 + +type + PrivateKey* = crypto.PrivateKey + Topic* = waku_core.PubsubTopic + +##################### +## chat2 protobufs ## +##################### + +type + SelectResult*[T] = Result[T, string] + + 
Chat2Message* = object + timestamp*: int64 + nick*: string + payload*: seq[byte] + +proc getPubsubTopic*( + conf: Chat2Conf, node: WakuNode, contentTopic: string +): PubsubTopic = + let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr: + echo "Could not parse content topic: " & error + return "" #TODO: fix this. + return $RelayShard(clusterId: conf.clusterId, shardId: shard.shardId) + +proc init*(T: type Chat2Message, buffer: seq[byte]): ProtoResult[T] = + var msg = Chat2Message() + let pb = initProtoBuffer(buffer) + + var timestamp: uint64 + discard ?pb.getField(1, timestamp) + msg.timestamp = int64(timestamp) + + discard ?pb.getField(2, msg.nick) + discard ?pb.getField(3, msg.payload) + + ok(msg) + +proc encode*(message: Chat2Message): ProtoBuffer = + var serialised = initProtoBuffer() + + serialised.write(1, uint64(message.timestamp)) + serialised.write(2, message.nick) + serialised.write(3, message.payload) + + return serialised + +proc toString*(message: Chat2Message): string = + # Get message date and timestamp in local time + let time = message.timestamp.fromUnix().local().format("'<'MMM' 'dd,' 'HH:mm'>'") + + return time & " " & message.nick & ": " & string.fromBytes(message.payload) + +##################### + +proc connectToNodes(c: Chat, nodes: seq[string]) {.async.} = + echo "Connecting to nodes" + await c.node.connectToNodes(nodes) + c.connected = true + +proc showChatPrompt(c: Chat) = + if not c.prompt: + try: + stdout.write(">> ") + stdout.flushFile() + c.prompt = true + except IOError: + discard + +proc getChatLine(c: Chat, msg: WakuMessage): Result[string, string] = + # No payload encoding/encryption from Waku + let + pb = Chat2Message.init(msg.payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(msg.payload) + return ok(chatline) + +proc printReceivedMessage(c: Chat, msg: WakuMessage) = + let + pb = Chat2Message.init(msg.payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + 
string.fromBytes(msg.payload) + try: + echo &"{chatLine}" + except ValueError: + # Formatting fail. Print chat line in any case. + echo chatLine + + c.prompt = false + showChatPrompt(c) + trace "Printing message", chatLine, contentTopic = msg.contentTopic + +proc readNick(transp: StreamTransport): Future[string] {.async.} = + # Chat prompt + stdout.write("Choose a nickname >> ") + stdout.flushFile() + return await transp.readLine() + +proc startMetricsServer( + serverIp: IpAddress, serverPort: Port +): Result[MetricsHttpServerRef, string] = + info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort + + let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort) + if metricsServerRes.isErr(): + return err("metrics HTTP server start failed: " & $metricsServerRes.error) + + let server = metricsServerRes.value + try: + waitFor server.start() + except CatchableError: + return err("metrics HTTP server start failed: " & getCurrentExceptionMsg()) + + info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort + ok(metricsServerRes.value) + +proc publish(c: Chat, line: string) {.async.} = + # First create a Chat2Message protobuf with this line of text + let time = getTime().toUnix() + let chat2pb = + Chat2Message(timestamp: time, nick: c.nick, payload: line.toBytes()).encode() + + ## @TODO: error handling on failure + proc handler(response: LightPushResponse) {.gcsafe, closure.} = + trace "lightpush response received", response = response + + var message = WakuMessage( + payload: chat2pb.buffer, + contentTopic: c.contentTopic, + version: 0, + timestamp: getNanosecondTime(time), + ) + + try: + if not c.node.wakuLightpushClient.isNil(): + # Attempt lightpush with mix + + ( + waitFor c.node.lightpushPublish( + some(c.conf.getPubsubTopic(c.node, c.contentTopic)), + message, + none(RemotePeerInfo), + true, + ) + ).isOkOr: + error "failed to publish lightpush message", error = error + else: + error "failed to publish 
message as lightpush client is not initialized" + except CatchableError: + error "caught error publishing message: ", error = getCurrentExceptionMsg() + +# TODO This should read or be subscribe handler subscribe +proc readAndPrint(c: Chat) {.async.} = + while true: + # while p.connected: + # # TODO: echo &"{p.id} -> " + # + # echo cast[string](await p.conn.readLp(1024)) + #echo "readAndPrint subscribe NYI" + await sleepAsync(100) + +# TODO Implement +proc writeAndPrint(c: Chat) {.async.} = + while true: + # Connect state not updated on incoming WakuRelay connections + # if not c.connected: + # echo "type an address or wait for a connection:" + # echo "type /[help|?] for help" + + # Chat prompt + showChatPrompt(c) + + let line = await c.transp.readLine() + if line.startsWith("/help") or line.startsWith("/?") or not c.started: + echo Help + continue + + # if line.startsWith("/disconnect"): + # echo "Ending current session" + # if p.connected and p.conn.closed.not: + # await p.conn.close() + # p.connected = false + elif line.startsWith("/connect"): + # TODO Should be able to connect to multiple peers for Waku chat + if c.connected: + echo "already connected to at least one peer" + continue + + echo "enter address of remote peer" + let address = await c.transp.readLine() + if address.len > 0: + await c.connectToNodes(@[address]) + elif line.startsWith("/nick"): + # Set a new nickname + c.nick = await readNick(c.transp) + echo "You are now known as " & c.nick + elif line.startsWith("/exit"): + echo "quitting..." + + try: + await c.node.stop() + except: + echo "exception happened when stopping: " & getCurrentExceptionMsg() + + quit(QuitSuccess) + else: + # XXX connected state problematic + if c.started: + echo "publishing message: " & line + await c.publish(line) + # TODO Connect to peer logic? 
+ else: + try: + if line.startsWith("/") and "p2p" in line: + await c.connectToNodes(@[line]) + except: + echo &"unable to dial remote peer {line}" + echo getCurrentExceptionMsg() + +proc readWriteLoop(c: Chat) {.async.} = + asyncSpawn c.writeAndPrint() # execute the async function but does not block + asyncSpawn c.readAndPrint() + +proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} = + ## This procedure performs reading from `stdin` and sends data over + ## pipe to main thread. + let transp = fromPipe(wfd) + + while true: + let line = stdin.readLine() + discard waitFor transp.write(line & "\r\n") + +var alreadyUsedServicePeers {.threadvar.}: seq[RemotePeerInfo] + +proc selectRandomServicePeer*( + pm: PeerManager, actualPeer: Option[RemotePeerInfo], codec: string +): Result[RemotePeerInfo, void] = + if actualPeer.isSome(): + alreadyUsedServicePeers.add(actualPeer.get()) + + let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt( + it notin alreadyUsedServicePeers + ) + if supportivePeers.len == 0: + return err() + + let rndPeerIndex = rand(0 .. supportivePeers.len - 1) + return ok(supportivePeers[rndPeerIndex]) + +proc maintainSubscription( + wakuNode: WakuNode, + filterPubsubTopic: PubsubTopic, + filterContentTopic: ContentTopic, + filterPeer: RemotePeerInfo, + preventPeerSwitch: bool, +) {.async.} = + var actualFilterPeer = filterPeer + const maxFailedSubscribes = 3 + const maxFailedServiceNodeSwitches = 10 + var noFailedSubscribes = 0 + var noFailedServiceNodeSwitches = 0 + while true: + info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer) + # First use filter-ping to check if we have an active subscription + let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer) + if pingRes.isErr(): + # No subscription found. Let's subscribe. + error "ping failed.", err = pingRes.error + trace "no subscription found. 
Sending subscribe request" + + let subscribeRes = await wakuNode.filterSubscribe( + some(filterPubsubTopic), filterContentTopic, actualFilterPeer + ) + + if subscribeRes.isErr(): + noFailedSubscribes += 1 + error "Subscribe request failed.", + err = subscribeRes.error, + peer = actualFilterPeer, + failCount = noFailedSubscribes + + # TODO: disconnet from failed actualFilterPeer + # asyncSpawn(wakuNode.peerManager.switch.disconnect(p)) + # wakunode.peerManager.peerStore.delete(actualFilterPeer) + + if noFailedSubscribes < maxFailedSubscribes: + await sleepAsync(2000) # Wait a bit before retrying + continue + elif not preventPeerSwitch: + let peerOpt = selectRandomServicePeer( + wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec + ) + if peerOpt.isOk(): + actualFilterPeer = peerOpt.get() + + info "Found new peer for codec", + codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer) + + noFailedSubscribes = 0 + continue # try again with new peer without delay + else: + error "Failed to find new service peer. Exiting." + noFailedServiceNodeSwitches += 1 + break + else: + if noFailedSubscribes > 0: + noFailedSubscribes -= 1 + + notice "subscribe request successful." + else: + info "subscription is live." 
+ + await sleepAsync(30000) # Subscription maintenance interval + +proc processMixNodes(localnode: WakuNode, nodes: seq[string]) {.async.} = + if nodes.len == 0: + return + + info "Processing mix nodes: ", nodes = $nodes + for node in nodes: + var enrRec: enr.Record + if enrRec.fromURI(node): + let peerInfo = enrRec.toRemotePeerInfo().valueOr: + error "Failed to parse mix node", error = error + continue + localnode.peermanager.addPeer(peerInfo, Discv5) + info "Added mix node", peer = peerInfo + else: + error "Failed to parse mix node ENR", node = node + +{.pop.} + # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError +proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} = + let + transp = fromPipe(rfd) + conf = Chat2Conf.load() + nodekey = + if conf.nodekey.isSome(): + conf.nodekey.get() + else: + PrivateKey.random(Secp256k1, rng[]).tryGet() + + # set log level + if conf.logLevel != LogLevel.NONE: + setLogLevel(conf.logLevel) + + let natRes = setupNat( + conf.nat, + clientId, + Port(uint16(conf.tcpPort) + conf.portsShift), + Port(uint16(conf.udpPort) + conf.portsShift), + ) + + if natRes.isErr(): + raise newException(ValueError, "setupNat error " & natRes.error) + + let (extIp, extTcpPort, extUdpPort) = natRes.get() + + var enrBuilder = EnrBuilder.init(nodeKey) + + enrBuilder.withWakuRelaySharding( + RelayShards(clusterId: conf.clusterId, shardIds: conf.shards) + ).isOkOr: + error "failed to add sharded topics to ENR", error = error + quit(QuitFailure) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + let node = block: + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + + builder + .withNetworkConfigurationDetails( + conf.listenAddress, + Port(uint16(conf.tcpPort) + conf.portsShift), + extIp, + extTcpPort, + wsBindPort = 
Port(uint16(conf.websocketPort) + conf.portsShift), + wsEnabled = conf.websocketSupport, + wssEnabled = conf.websocketSecureSupport, + ) + .tryGet() + builder.build().tryGet() + + node.mountAutoSharding(conf.clusterId, conf.numShardsInNetwork).isOkOr: + error "failed to mount waku sharding: ", error = error + quit(QuitFailure) + node.mountMetadata(conf.clusterId, conf.shards).isOkOr: + error "failed to mount waku metadata protocol: ", err = error + quit(QuitFailure) + + let (mixPrivKey, mixPubKey) = generateKeyPair().valueOr: + error "failed to generate mix key pair", error = error + return + + (await node.mountMix(conf.clusterId, mixPrivKey)).isOkOr: + error "failed to mount waku mix protocol: ", error = $error + quit(QuitFailure) + if conf.mixnodes.len > 0: + await processMixNodes(node, conf.mixnodes) + await node.start() + + node.peerManager.start() + + await node.mountLibp2pPing() + await node.mountPeerExchangeClient() + let pubsubTopic = conf.getPubsubTopic(node, conf.contentTopic) + echo "pubsub topic is: " & pubsubTopic + let nick = await readNick(transp) + echo "Welcome, " & nick & "!" + + var chat = Chat( + node: node, + transp: transp, + subscribed: true, + connected: false, + started: true, + nick: nick, + prompt: false, + contentTopic: conf.contentTopic, + conf: conf, + ) + + var dnsDiscoveryUrl = none(string) + + if conf.fleet != Fleet.none: + # Use DNS discovery to connect to selected fleet + echo "Connecting to " & $conf.fleet & " fleet using DNS discovery..." + + if conf.fleet == Fleet.test: + dnsDiscoveryUrl = some( + "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im" + ) + else: + # Connect to sandbox by default + dnsDiscoveryUrl = some( + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" + ) + elif conf.dnsDiscoveryUrl != "": + # No pre-selected fleet. 
Discover nodes via DNS using user config + debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl + dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl) + + var discoveredNodes: seq[RemotePeerInfo] + + if dnsDiscoveryUrl.isSome: + var nameServers: seq[TransportAddress] + for ip in conf.dnsDiscoveryNameServers: + nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53 + + let dnsResolver = DnsResolver.new(nameServers) + + proc resolver(domain: string): Future[string] {.async, gcsafe.} = + trace "resolving", domain = domain + let resolved = await dnsResolver.resolveTxt(domain) + return resolved[0] # Use only first answer + + var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver) + if wakuDnsDiscovery.isOk: + let discoveredPeers = await wakuDnsDiscovery.get().findPeers() + if discoveredPeers.isOk: + info "Connecting to discovered peers" + discoveredNodes = discoveredPeers.get() + echo "Discovered and connecting to " & $discoveredNodes + waitFor chat.node.connectToNodes(discoveredNodes) + else: + warn "Failed to init Waku DNS discovery" + + let peerInfo = node.switch.peerInfo + let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId + echo &"Listening on\n {listenStr}" + + if (conf.storenode != "") or (conf.store == true): + await node.mountStore() + + var storenode: Option[RemotePeerInfo] + + if conf.storenode != "": + let peerInfo = parsePeerInfo(conf.storenode) + if peerInfo.isOk(): + storenode = some(peerInfo.value) + else: + error "Incorrect conf.storenode", error = peerInfo.error + elif discoveredNodes.len > 0: + echo "Store enabled, but no store nodes configured. Choosing one at random from discovered peers" + storenode = some(discoveredNodes[rand(0 .. len(discoveredNodes) - 1)]) + + if storenode.isSome(): + # We have a viable storenode. Let's query it for historical messages. 
+ echo "Connecting to storenode: " & $(storenode.get()) + + node.mountStoreClient() + node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec) + + proc storeHandler(response: StoreQueryResponse) {.gcsafe.} = + for msg in response.messages: + let payload = + if msg.message.isSome(): + msg.message.get().payload + else: + newSeq[byte](0) + + let + pb = Chat2Message.init(payload) + chatLine = + if pb.isOk: + pb[].toString() + else: + string.fromBytes(payload) + echo &"{chatLine}" + info "Hit store handler" + + let queryRes = await node.query( + StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get() + ) + if queryRes.isOk(): + storeHandler(queryRes.value) + + if conf.edgemode: #Mount light protocol clients + node.mountLightPushClient() + await node.mountFilterClient() + let filterHandler = proc( + pubsubTopic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, closure.} = + trace "Hit filter handler", contentTopic = msg.contentTopic + chat.printReceivedMessage(msg) + + node.wakuFilterClient.registerPushHandler(filterHandler) + var servicePeerInfo: RemotePeerInfo + if conf.serviceNode != "": + servicePeerInfo = parsePeerInfo(conf.serviceNode).valueOr: + error "Couldn't parse conf.serviceNode", error = error + RemotePeerInfo() + if $servicePeerInfo.peerId == "": + # Assuming that service node supports all services + servicePeerInfo = selectRandomServicePeer( + node.peerManager, none(RemotePeerInfo), WakuLightpushCodec + ).valueOr: + error "Couldn't find any service peer" + quit(QuitFailure) + + #await mountLegacyLightPush(node) + node.peerManager.addServicePeer(servicePeerInfo, WakuLightpushCodec) + node.peerManager.addServicePeer(servicePeerInfo, WakuPeerExchangeCodec) + + # Start maintaining subscription + asyncSpawn maintainSubscription( + node, pubsubTopic, conf.contentTopic, servicePeerInfo, false + ) + echo "waiting for mix nodes to be discovered..." 
+ while true: + if node.getMixNodePoolSize() >= 3: + break + discard await node.fetchPeerExchangePeers() + await sleepAsync(1000) + + while node.getMixNodePoolSize() < 3: + info "waiting for mix nodes to be discovered", + currentpoolSize = node.getMixNodePoolSize() + await sleepAsync(1000) + notice "ready to publish with mix node pool size ", + currentpoolSize = node.getMixNodePoolSize() + echo "ready to publish messages now" + + # Once min mixnodes are discovered loop as per default setting + node.startPeerExchangeLoop() + + if conf.metricsLogging: + startMetricsLog() + + if conf.metricsServer: + let metricsServer = startMetricsServer( + conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift) + ) + + await chat.readWriteLoop() + + runForever() + +proc main(rng: ref HmacDrbgContext) {.async.} = + let (rfd, wfd) = createAsyncPipe() + if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe: + raise newException(ValueError, "Could not initialize pipe!") + + var thread: Thread[AsyncFD] + thread.createThread(readInput, wfd) + try: + await processInput(rfd, rng) + # Handle only ConfigurationError for now + # TODO: Throw other errors from the mounting procedure + except ConfigurationError as e: + raise e + +when isMainModule: # isMainModule = true when the module is compiled as the main file + let rng = crypto.newRng() + try: + waitFor(main(rng)) + except CatchableError as e: + raise e + +## Dump of things that can be improved: +## +## - Incoming dialed peer does not change connected state (not relying on it for now) +## - Unclear if staticnode argument works (can enter manually) +## - Don't trigger self / double publish own messages +## - Test/default to cluster node connection (diff protocol version) +## - Redirect logs to separate file +## - Expose basic publish/subscribe etc commands with /syntax +## - Show part of peerid to know who sent message +## - Deal with protobuf messages (e.g. 
other chat protocol, or encrypted) diff --git a/third-party/nwaku/apps/chat2mix/config_chat2mix.nim b/third-party/nwaku/apps/chat2mix/config_chat2mix.nim new file mode 100644 index 0000000..1d28149 --- /dev/null +++ b/third-party/nwaku/apps/chat2mix/config_chat2mix.nim @@ -0,0 +1,293 @@ +import chronicles, chronos, std/strutils, regex + +import + eth/keys, + libp2p/crypto/crypto, + libp2p/crypto/secp, + nimcrypto/utils, + confutils, + confutils/defs, + confutils/std/net + +import waku/waku_core + +type + Fleet* = enum + none + sandbox + test + + EthRpcUrl* = distinct string + + Chat2Conf* = object ## General node config + edgemode* {. + defaultValue: true, desc: "Run the app in edge mode", name: "edge-mode" + .}: bool + + logLevel* {. + desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level" + .}: LogLevel + + nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}: + Option[crypto.PrivateKey] + + listenAddress* {. + defaultValue: defaultListenAddress(config), + desc: "Listening address for the LibP2P traffic.", + name: "listen-address" + .}: IpAddress + + tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}: + Port + + udpPort* {.desc: "UDP listening port.", defaultValue: 60000, name: "udp-port".}: + Port + + portsShift* {. + desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift" + .}: uint16 + + nat* {. + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:.", + defaultValue: "any" + .}: string + + ## Persistence config + dbPath* {. + desc: "The database path for peristent storage", defaultValue: "", name: "db-path" + .}: string + + persistPeers* {. + desc: "Enable peer persistence: true|false", + defaultValue: false, + name: "persist-peers" + .}: bool + + persistMessages* {. 
+ desc: "Enable message persistence: true|false", + defaultValue: false, + name: "persist-messages" + .}: bool + + ## Relay config + relay* {. + desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay" + .}: bool + + staticnodes* {. + desc: "Peer multiaddr to directly connect with. Argument may be repeated.", + name: "staticnode", + defaultValue: @[] + .}: seq[string] + + mixnodes* {. + desc: "Peer ENR to add as a mixnode. Argument may be repeated.", name: "mixnode" + .}: seq[string] + + keepAlive* {. + desc: "Enable keep-alive for idle connections: true|false", + defaultValue: false, + name: "keep-alive" + .}: bool + + clusterId* {. + desc: + "Cluster id that the node is running in. Node in a different cluster id is disconnected.", + defaultValue: 1, + name: "cluster-id" + .}: uint16 + + numShardsInNetwork* {. + desc: "Number of shards in the network", + defaultValue: 8, + name: "num-shards-in-network" + .}: uint32 + + shards* {. + desc: + "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.", + defaultValue: + @[ + uint16(0), + uint16(1), + uint16(2), + uint16(3), + uint16(4), + uint16(5), + uint16(6), + uint16(7), + ], + name: "shard" + .}: seq[uint16] + + ## Store config + store* {. + desc: "Enable store protocol: true|false", defaultValue: false, name: "store" + .}: bool + + storenode* {. + desc: "Peer multiaddr to query for storage.", defaultValue: "", name: "storenode" + .}: string + + ## Filter config + filter* {. + desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter" + .}: bool + + ## Lightpush config + lightpush* {. + desc: "Enable lightpush protocol: true|false", + defaultValue: false, + name: "lightpush" + .}: bool + + servicenode* {. + desc: "Peer multiaddr to request lightpush and filter services", + defaultValue: "", + name: "servicenode" + .}: string + + ## Metrics config + metricsServer* {. 
+ desc: "Enable the metrics server: true|false", + defaultValue: false, + name: "metrics-server" + .}: bool + + metricsServerAddress* {. + desc: "Listening address of the metrics server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress + + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server.", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 + + metricsLogging* {. + desc: "Enable metrics logging: true|false", + defaultValue: true, + name: "metrics-logging" + .}: bool + + ## DNS discovery config + dnsDiscovery* {. + desc: + "Deprecated, please set dns-discovery-url instead. Enable discovering nodes via DNS", + defaultValue: false, + name: "dns-discovery" + .}: bool + + dnsDiscoveryUrl* {. + desc: "URL for DNS node list in format 'enrtree://@'", + defaultValue: "", + name: "dns-discovery-url" + .}: string + + dnsDiscoveryNameServers* {. + desc: "DNS name server IPs to query. Argument may be repeated.", + defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], + name: "dns-discovery-name-server" + .}: seq[IpAddress] + + ## Chat2 configuration + fleet* {. + desc: + "Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.", + defaultValue: Fleet.test, + name: "fleet" + .}: Fleet + + contentTopic* {. + desc: "Content topic for chat messages.", + defaultValue: "/toy-chat-mix/2/huilong/proto", + name: "content-topic" + .}: string + + ## Websocket Configuration + websocketSupport* {. + desc: "Enable websocket: true|false", + defaultValue: false, + name: "websocket-support" + .}: bool + + websocketPort* {. + desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port" + .}: Port + + websocketSecureSupport* {. 
+ desc: "WebSocket Secure Support.", + defaultValue: false, + name: "websocket-secure-support" + .}: bool ## rln-relay configuration + +# NOTE: Keys are different in nim-libp2p +proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T = + try: + let key = SkPrivateKey.init(utils.fromHex(p)).tryGet() + # XXX: Here at the moment + result = crypto.PrivateKey(scheme: Secp256k1, skkey: key) + except CatchableError as e: + raise newException(ValueError, "Invalid private key") + +proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type IpAddress, p: string): T = + try: + result = parseIpAddress(p) + except CatchableError as e: + raise newException(ValueError, "Invalid IP address") + +proc completeCmdArg*(T: type IpAddress, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type Port, p: string): T = + try: + result = Port(parseInt(p)) + except CatchableError as e: + raise newException(ValueError, "Invalid Port number") + +proc completeCmdArg*(T: type Port, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type Option[uint], p: string): T = + try: + some(parseUint(p)) + except CatchableError: + raise newException(ValueError, "Invalid unsigned integer") + +proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type EthRpcUrl, s: string): T = + ## allowed patterns: + ## http://url:port + ## https://url:port + ## http://url:port/path + ## https://url:port/path + ## http://url/with/path + ## http://url:port/path?query + ## https://url:port/path?query + ## disallowed patterns: + ## any valid/invalid ws or wss url + var httpPattern = + re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var wsPattern = + re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + if regex.match(s, wsPattern): + raise newException( + 
ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL" + ) + if not regex.match(s, httpPattern): + raise newException(ValueError, "Invalid HTTP RPC URL") + return EthRpcUrl(s) + +func defaultListenAddress*(conf: Chat2Conf): IpAddress = + # TODO: How should we select between IPv4 and IPv6 + # Maybe there should be a config option for this. + (static parseIpAddress("0.0.0.0")) diff --git a/third-party/nwaku/apps/chat2mix/nim.cfg b/third-party/nwaku/apps/chat2mix/nim.cfg new file mode 100644 index 0000000..2231f2e --- /dev/null +++ b/third-party/nwaku/apps/chat2mix/nim.cfg @@ -0,0 +1,4 @@ +-d:chronicles_line_numbers +-d:chronicles_runtime_filtering:on +-d:discv5_protocol_id:d5waku +path = "../.." diff --git a/third-party/nwaku/apps/liteprotocoltester/.env b/third-party/nwaku/apps/liteprotocoltester/.env new file mode 100644 index 0000000..0330284 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/.env @@ -0,0 +1,27 @@ +START_PUBLISHING_AFTER_SECS=45 +# can add some seconds delay before SENDER starts publishing + +NUM_MESSAGES=0 +# 0 for infinite number of messages + +MESSAGE_INTERVAL_MILLIS=8000 +# ms delay between messages + + +MIN_MESSAGE_SIZE=15Kb +MAX_MESSAGE_SIZE=145Kb + +## for wakusim +#SHARD=0 +#CONTENT_TOPIC=/tester/2/light-pubsub-test/wakusim +#CLUSTER_ID=66 + +## for status.prod +#SHARDS=32 +CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet +CLUSTER_ID=16 + +## for TWN +#SHARD=4 +#CONTENT_TOPIC=/tester/2/light-pubsub-test/twn +#CLUSTER_ID=1 diff --git a/third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester b/third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester new file mode 100644 index 0000000..1948300 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester @@ -0,0 +1,37 @@ + # TESTING IMAGE -------------------------------------------------------------- + + ## NOTICE: This is a short cut build file for ubuntu users who compiles nwaku in ubuntu distro. 
+ ## This is used for faster turnaround time for testing the compiled binary. + ## Prerequisites: compiled liteprotocoltester binary in build/ directory + + FROM ubuntu:noble AS prod + + LABEL maintainer="zoltan@status.im" + LABEL source="https://github.com/waku-org/nwaku" + LABEL description="Lite Protocol Tester: Waku light-client" + LABEL commit="unknown" + LABEL version="unknown" + + # DevP2P, LibP2P, and JSON RPC ports + EXPOSE 30303 60000 8545 + + # Referenced in the binary + RUN apt-get update && apt-get install -y --no-install-recommends \ + libgcc1 \ + libpcre3 \ + libpq-dev \ + wget \ + iproute2 \ + && rm -rf /var/lib/apt/lists/* + + # Fix for 'Error loading shared library libpcre.so.3: No such file or directory' + RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3 + + COPY build/liteprotocoltester /usr/bin/ + COPY apps/liteprotocoltester/run_tester_node.sh /usr/bin/ + COPY apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/ + + ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"] + + # # By default just show help if called without arguments + CMD ["--help"] diff --git a/third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile b/third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile new file mode 100644 index 0000000..497570c --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile @@ -0,0 +1,76 @@ +# BUILD NIM APP ---------------------------------------------------------------- +FROM rust:1.77.1-alpine3.18 AS nim-build + +ARG NIMFLAGS +ARG MAKE_TARGET=liteprotocoltester +ARG NIM_COMMIT +ARG LOG_LEVEL=TRACE + +# Get build tools and required header files +RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq + +WORKDIR /app +COPY . . 
+ +# workaround for alpine issue: https://github.com/alpinelinux/docker-alpine/issues/383 +RUN apk update && apk upgrade + +# Ran separately from 'make' to avoid re-doing +RUN git submodule update --init --recursive + +# Slowest build step for the sake of caching layers +RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT} + +# Build the final node binary +RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}" + + +# REFERENCE IMAGE as BASE for specialized PRODUCTION IMAGES---------------------------------------- +FROM alpine:3.18 AS base_lpt + +ARG MAKE_TARGET=liteprotocoltester + +LABEL maintainer="zoltan@status.im" +LABEL source="https://github.com/waku-org/nwaku" +LABEL description="Lite Protocol Tester: Waku light-client" +LABEL commit="unknown" +LABEL version="unknown" + +# DevP2P, LibP2P, and JSON RPC ports +EXPOSE 30303 60000 8545 + +# Referenced in the binary +RUN apk add --no-cache libgcc libpq-dev \ + wget \ + iproute2 \ + python3 + +# Fix for 'Error loading shared library libpcre.so.3: No such file or directory' +RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3 + +COPY --from=nim-build /app/build/liteprotocoltester /usr/bin/ +RUN chmod +x /usr/bin/liteprotocoltester + +# Standalone image to be used manually and in lpt-runner ------------------------------------------- +FROM base_lpt AS standalone_lpt + +COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node.sh /usr/bin/ +COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/ + +RUN chmod +x /usr/bin/run_tester_node.sh + +ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"] + +# Image for infra deployment ------------------------------------------- +FROM base_lpt AS deployment_lpt + +# let supervisor python script flush logs immediately +ENV PYTHONUNBUFFERED="1" + +COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node_at_infra.sh /usr/bin/ +COPY --from=nim-build 
/app/apps/liteprotocoltester/infra.env /usr/bin/ +COPY --from=nim-build /app/apps/liteprotocoltester/lpt_supervisor.py /usr/bin/ +RUN chmod +x /usr/bin/run_tester_node_at_infra.sh +RUN chmod +x /usr/bin/lpt_supervisor.py + +ENTRYPOINT ["/usr/bin/lpt_supervisor.py"] diff --git a/third-party/nwaku/apps/liteprotocoltester/README.md b/third-party/nwaku/apps/liteprotocoltester/README.md new file mode 100644 index 0000000..ea02ec1 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/README.md @@ -0,0 +1,329 @@ +# Waku - Lite Protocol Tester + +## Aim + +Testing reliability of light client protocols at different scales. +Measure message delivery reliability and latency between light push client(s) and a filter client(s) node(s). + +## Concept of testing + +A tester node is configured either 'publisher' or 'receiver' and connects to a certain service node. +All service protocols are disabled except for lightpush client or filter client. This way we would like to simulate +a light client application. +Each publisher pumps messages to the network in a preconfigured way (number of messages, frequency) while on the receiver side +we would like to track and measure message losses, mis-ordered receives, late-arriving messages and latencies. +Ideally the tester nodes will connect to different edges of the network where we can gather more results from multiple publishers +and multiple receivers. + +Publishers fill all message payloads with information about the test message and sender, helping the receiver side to calculate results. + +## Usage + +### Using lpt-runner + +For ease of use, you can clone the lpt-runner repository. That will utilize the previously pushed liteprotocoltester docker image. +It is recommended to use this method for fleet testing. 
+ +```bash +git clone https://github.com/waku-org/lpt-runner.git +cd lpt-runner + +# check README.md for more information +# edit .env file to your needs + +docker compose up -d + +# navigate localhost:3033 to see the lite-protocol-tester dashboard +``` + +> See more detailed examples below. + +### Integration with waku-simulator! + +- For convenience, integration is done in cooperation with the waku-simulator repository, but nothing is tightly coupled. +- waku-simulator must be started separately with its own configuration. +- To enable waku-simulator to work without RLN, currently a separate branch is to be used. +- When waku-simulator is configured and up and running, the lite-protocol-tester composite docker setup can be started. + +```bash + +# Start waku-simulator + +git clone https://github.com/waku-org/waku-simulator.git ../waku-simulator +cd ../waku-simulator +git checkout chore-integrate-liteprotocoltester + +# optionally edit .env file + +docker compose -f docker-compose-norln.yml up -d + +# navigate localhost:30001 to see the waku-simulator dashboard + +cd ../{your-repository} + +make LOG_LEVEL=DEBUG liteprotocoltester + +cd apps/liteprotocoltester + +# optionally edit .env file + +docker compose -f docker-compose-on-simularor.yml build +docker compose -f docker-compose-on-simularor.yml up -d +docker compose -f docker-compose-on-simularor.yml logs -f receivernode +``` +#### Current setup + +- waku-simulator is configured to run with 25 full nodes +- liteprotocoltester is configured to run with 3 publishers and 1 receiver +- liteprotocoltester is configured to run 1 lightpush service and a filter service node + - light clients are connected accordingly +- publishers will send 250 messages every 200ms with size between 1KiB and 120KiB +- Notice there is a configurable wait before starting to publish messages, as time is needed for the service nodes to get connected to full nodes from the simulator +- light clients will print a report on their and the connected 
service node's connectivity to the network in every 20 secs. + +#### Test monitoring + +Navigate to http://localhost:3033 to see the lite-protocol-tester dashboard. + +### Run independently on a chosen waku fleet + +This option is simple as is just to run the built liteprotocoltester binary with run_tester_node.sh script. + +Syntax: +`./run_tester_node.sh ` + +How to run from you nwaku repository: +```bash +cd ../{your-repository} + +make LOG_LEVEL=DEBUG liteprotocoltester + +cd apps/liteprotocoltester + +# optionally edit .env file + +# run publisher side +./run_tester_node.sh ../../build/liteprotocoltester SENDER [chosen service node address that support lightpush] + +# or run receiver side +./run_tester_node.sh ../../build/liteprotocoltester RECEIVER [chosen service node address that support filter service] +``` + +#### Recommendations + +In order to run on any kind of network, it is recommended to deploy the built `liteprotocoltester` binary with the `.env` file and the `run_tester_node.sh` script to the desired machine. + +Select a lightpush service node and a filter service node from the targeted network, or you can run your own. Note down the selected peers peer_id. + +Run a SENDER role liteprotocoltester and a RECEIVER role one on different terminals. Depending on the test aim, you may want to redirect the output to a file. + +> RECEIVER side will periodically print statistics to standard output. 
+ +## Configuration + +### Environment variables for docker compose runs + +| Variable | Description | Default | +| ---: | :--- | :--- | +| NUM_MESSAGES | Number of message to publish, 0 means infinite | 120 | +| MESSAGE_INTERVAL_MILLIS | Frequency of messages in milliseconds | 1000 | +| SHARD | Used shard for testing | 0 | +| CONTENT_TOPIC | content_topic for testing | /tester/1/light-pubsub-example/proto | +| CLUSTER_ID | cluster_id of the network | 16 | +| START_PUBLISHING_AFTER_SECS | Delay in seconds before starting to publish to let service node connected | 5 | +| MIN_MESSAGE_SIZE | Minimum message size in bytes | 1KiB | +| MAX_MESSAGE_SIZE | Maximum message size in bytes | 120KiB | + + +### Lite Protocol Tester application cli options + +| Option | Description | Default | +| :--- | :--- | :--- | +| --test_func | separation of PUBLISHER or RECEIVER mode | RECEIVER | +| --service-node| Address of the service node to use for lightpush and/or filter service | - | +| --bootstrap-node| Address of the fleet's bootstrap node to use to determine service peer randomly choosen from the network. `--service-node` switch has precedence over this | - | +| --num-messages | Number of message to publish | 120 | +| --message-interval | Frequency of messages in milliseconds | 1000 | +| --min-message-size | Minimum message size in bytes | 1KiB | +| --max-message-size | Maximum message size in bytes | 120KiB | +| --start-publishing-after | Delay in seconds before starting to publish to let service node connected in seconds | 5 | +| --pubsub-topic | Used pubsub_topic for testing | /waku/2/default-waku/proto | +| --content_topic | content_topic for testing | /tester/1/light-pubsub-example/proto | +| --cluster-id | Cluster id for the test | 0 | +| --config-file | TOML configuration file to fine tune the light waku node
Note that some configurations (full node services) are not taken into account | - | +| --nat |Same as wakunode "nat" configuration, appears here to ease test setup | any | +| --rest-address | For convenience rest configuration can be done here | 127.0.0.1 | +| --rest-port | For convenience rest configuration can be done here | 8654 | +| --rest-allow-origin | For convenience rest configuration can be done here | * | +| --log-level | Log level for the application | DEBUG | +| --log-format | Logging output format (TEXT or JSON) | TEXT | +| --metrics-port | Metrics scrape port | 8003 | + +### Specifying peer addresses + +Service node or bootstrap addresses can be specified in multiaddress or ENR form. + +### Using bootstrap nodes + +There are multiple benefits of using bootstrap nodes. By using them liteprotocoltester will use the Peer Exchange protocol to get possible peers from the network that are capable of serving as service peers for testing. Additionally it will test-dial them to verify their connectivity - this will be reported in the logs and on dashboard metrics. +Also by using a bootstrap node and peer exchange discovery, liteprotocoltester will be able to simulate a service peer switch in case of failures. There is a built-in threshold for service peer failures (3), after which the service peer will be switched during the test. Also there will be a maximum of 10 trials of switching peers before the test is declared failed and quits. +These service peer failures are reported, thus extending network reliability measures. + +### Building docker image + +Easiest way to build the docker image is to use the provided Makefile target. + +```bash +cd +make docker-liteprotocoltester +``` +This will build liteprotocoltester from the ground up and create a docker image with the binary copied to it under image name and tag `wakuorg/liteprotocoltester:latest`. + +#### Building public image + +If you want to push the image to a public registry, you can use the jenkins job to do so. 
+The job is available at https://ci.status.im/job/waku/job/liteprotocoltester/job/build-liteprotocoltester-image + +#### Building and deployment for infra testing + +For specific and continuous testing purposes we have a deployment of the `liteprotocoltester` test suite to our infra appliances. +This has its own configuration, constraints and requirements. To ease this job, the image shall be built and pushed with the `deploy` tag. +This can be done by the jenkins job mentioned above. + +or manually by: +```bash +cd +make DOCKER_LPT_TAG=deploy docker-liteprotocoltester +``` + +The image created with this method will be different from the one under any other tag. It is prepared to run a preconfigured test suite continuously. +It will also lack the prometheus metrics scraping endpoint and grafana, thus it is not recommended to use it for general testing. + +#### Manually building for docker compose runs on simulator or standalone +Please note that currently, to ease testing and development, the tester application docker image is based on ubuntu and uses the externally pre-built binary of 'liteprotocoltester'. +This speeds up image creation. Another docker build file is provided for a proper build of a bundled image. + +> `Dockerfile.liteprotocoltester` will create an ubuntu based image with the binary copied from the build directory. + +> `Dockerfile.liteprotocoltester.compile` will create an alpine based image completely compiled from source. This can be slow. + +#### Creating standalone runner docker image + +To ease the work with lite-protocol-tester, a docker image can be built. +With that image it is easy to run the application in a container. + +> `Dockerfile.liteprotocoltester` will create an ubuntu image with the binary copied from the build directory. You need to pre-build the application. + +Here is how to build and run: +```bash +cd +make liteprotocoltester + +cd apps/liteprotocoltester +docker build -t liteprotocoltester:latest -f Dockerfile.liteprotocoltester ../.. 
+ +# alternatively you can push it to a registry + +# edit and adjust .env file to your needs and for the network configuration + +docker run --env-file .env liteprotocoltester:latest RECEIVER + +docker run --env-file .env liteprotocoltester:latest SENDER +``` + +#### Run test with auto service peer selection from a fleet using bootstrap node + +```bash + +docker run --env-file .env liteprotocoltester:latest RECEIVER BOOTSTRAP + +docker run --env-file .env liteprotocoltester:latest SENDER BOOTSTRAP +``` + +> Notice that official image is also available at harbor.status.im/wakuorg/liteprotocoltester:latest + +## Examples + +### Bootstrap or Service node selection + +The easiest way to get the proper bootstrap nodes for the tests from https://fleets.status.im page. +Adjust on which fleets you would like to run the tests. + +> Please note that not all of them configured to support Peer Exchange protocol, those ones cannot be for bootstrap nodes for `liteprotocoltester`. + +### Environment variables +You need not necessary to use .env file, although it can be more convenient. +Anytime you can override all or part of the environment variables defined in the .env file. + +### Run standalone + +Example of running the liteprotocoltester in standalone mode on status.stagin network. +Testing includes using bootstrap nodes to gather service peers from the network via Peer Exchange protocol. +Both parties will test-dial all the peers retrieved with the corresponding protocol. +Sender will start publishing messages after 60 seconds, sending 200 messages with 1 second delay between them. +Message size will be between 15KiB and 145KiB. +Cluster id and Pubsub-topic must be accurately set according to the network configuration. + +The example shows that either multiaddress or ENR form accepted. 
+ +```bash +export START_PUBLISHING_AFTER_SECS=60 +export NUM_MESSAGES=200 +export MESSAGE_INTERVAL_MILLIS=1000 +export MIN_MESSAGE_SIZE=15Kb +export MAX_MESSAGE_SIZE=145Kb +export SHARD=32 +export CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet +export CLUSTER_ID=16 + +docker run harbor.status.im/wakuorg/liteprotocoltester:latest RECEIVER /dns4/boot-01.do-ams3.status.staging.status.im/tcp/30303/p2p/16Uiu2HAmQE7FXQc6iZHdBzYfw3qCSDa9dLc1wsBJKoP4aZvztq2d BOOTSTRAP + +# in different terminal session, repeat the exports and run the other party of the test. +docker run harbor.status.im/wakuorg/liteprotocoltester:latest SENDER enr:-QEiuECJPv2vL00Jp5sTEMAFyW7qXkK2cFgphlU_G8-FJuJqoW_D5aWIy3ylGdv2K8DkiG7PWgng4Ql_VI7Qc2RhBdwfAYJpZIJ2NIJpcIQvTKi6im11bHRpYWRkcnO4cgA2NjFib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc3RhdHVzLnN0YWdpbmcuc3RhdHVzLmltBnZfADg2MWJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zdGF0dXMuc3RhZ2luZy5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDkbgV7oqPNmFtX5FzSPi9WH8kkmrPB1R3n9xRXge91M-DdGNwgnZfg3VkcIIjKIV3YWt1Mg0 BOOTSTRAP + +``` + +### Use of lpt-runner + +Another method is to use [lpt-runner repository](https://github.com/waku-org/lpt-runner/tree/master). +This extends testing with grafana dashboard and ease the test setup. +Please read the corresponding [README](https://github.com/waku-org/lpt-runner/blob/master/README.md) there as well. + +In this example we will run similar test as above but there will be 3 instances of publisher nodes and 1 receiver node. +This test uses waku.sandbox fleet which is connected to TWN. This implies lower message rates due to the RLN rate limation. +Also leave a gap of 120 seconds before starting to publish messages to let receiver side fully finish peer test-dialing. +For TWN network it is always wise to use bootstrap nodes with Peer Exchange support. 
+ +> Theoritically we can use the same bootstrap nodes for both parties, but it is recommended to use different ones to simulate different network edges, thus getting more meaningful results. + +```bash +git clone https://github.com/waku-org/lpt-runner.git +cd lpt-runner + +export NUM_PUBLISHER_NODES=3 +export NUM_RECEIVER_NODES=1 +export START_PUBLISHING_AFTER_SECS=120 +export NUM_MESSAGES=300 +export MESSAGE_INTERVAL_MILLIS=7000 +export MIN_MESSAGE_SIZE=15Kb +export MAX_MESSAGE_SIZE=145Kb +export SHARD=4 +export CONTENT_TOPIC=/tester/2/light-pubsub-test/twn +export CLUSTER_ID=1 + +export FILTER_BOOTSTRAP=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmQYiojgZ8APsh9wqbWNyCstVhnp9gbeNrxSEQnLJchC92 +export LIGHTPUSH_BOOTSTRAP=/dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmNaeL4p3WEYzC9mgXBmBWSgWjPHRvatZTXnp8Jgv3iKsb + +docker compose up -d + +# we can check logs from one or all SENDER +docker compose logs -f --index 1 publishernode + +# for checking receiver side performance +docker compose logs -f receivernode + +# when test completed +docker compose down +``` + +For dashboard navigate to http://localhost:3033 diff --git a/third-party/nwaku/apps/liteprotocoltester/diagnose_connections.nim b/third-party/nwaku/apps/liteprotocoltester/diagnose_connections.nim new file mode 100644 index 0000000..f595b4e --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/diagnose_connections.nim @@ -0,0 +1,65 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[options, net, strformat], + chronicles, + chronos, + metrics, + libbacktrace, + libp2p/crypto/crypto, + confutils, + libp2p/wire + +import + ../../tools/confutils/cli_args, + waku/[ + node/peer_manager, + waku_lightpush/common, + waku_relay, + waku_filter_v2, + waku_peer_exchange/protocol, + waku_core/multiaddrstr, + waku_enr/capabilities, + ] +logScope: + topics = "diagnose connections" + +proc allPeers(pm: 
PeerManager): string = + var allStr: string = "" + for idx, peer in pm.switch.peerStore.peers(): + allStr.add( + " " & $idx & ". | " & constructMultiaddrStr(peer) & " | agent: " & + peer.getAgent() & " | protos: " & $peer.protocols & " | caps: " & + $peer.enr.map(getCapabilities) & "\n" + ) + return allStr + +proc logSelfPeers*(pm: PeerManager) = + let selfLighpushPeers = pm.switch.peerStore.getPeersByProtocol(WakuLightPushCodec) + let selfRelayPeers = pm.switch.peerStore.getPeersByProtocol(WakuRelayCodec) + let selfFilterPeers = pm.switch.peerStore.getPeersByProtocol(WakuFilterSubscribeCodec) + let selfPxPeers = pm.switch.peerStore.getPeersByProtocol(WakuPeerExchangeCodec) + + let printable = catch: + """*------------------------------------------------------------------------------------------* +| Self ({constructMultiaddrStr(pm.switch.peerInfo)}) peers: +*------------------------------------------------------------------------------------------* +| Lightpush peers({selfLighpushPeers.len()}): ${selfLighpushPeers} +*------------------------------------------------------------------------------------------* +| Filter peers({selfFilterPeers.len()}): ${selfFilterPeers} +*------------------------------------------------------------------------------------------* +| Relay peers({selfRelayPeers.len()}): ${selfRelayPeers} +*------------------------------------------------------------------------------------------* +| PX peers({selfPxPeers.len()}): ${selfPxPeers} +*------------------------------------------------------------------------------------------* +| All peers with protocol support: +{allPeers(pm)} +*------------------------------------------------------------------------------------------*""".fmt() + + if printable.isErr(): + echo "Error while printing statistics: " & printable.error().msg + else: + echo printable.get() diff --git a/third-party/nwaku/apps/liteprotocoltester/docker-compose-on-simularor.yml 
b/third-party/nwaku/apps/liteprotocoltester/docker-compose-on-simularor.yml new file mode 100644 index 0000000..9e899f7 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/docker-compose-on-simularor.yml @@ -0,0 +1,227 @@ +version: "3.7" +x-logging: &logging + logging: + driver: json-file + options: + max-size: 1000m + +# Environment variable definitions +x-eth-client-address: ð_client_address ${ETH_CLIENT_ADDRESS:-} # Add your ETH_CLIENT_ADDRESS after the "-" + +x-rln-environment: &rln_env + RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4} + RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-" + RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-" + +x-test-running-conditions: &test_running_conditions + NUM_MESSAGES: ${NUM_MESSAGES:-120} + MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}" + SHARD: ${SHARD:-0} + CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim} + CLUSTER_ID: ${CLUSTER_ID:-66} + MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb} + MAX_MESSAGE_SIZE: ${MAX_MESSAGE_SIZE:-150Kb} + START_PUBLISHING_AFTER_SECS: ${START_PUBLISHING_AFTER_SECS:-5} # seconds + + +# Services definitions +services: + lightpush-service: + image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:latest-release} + # ports: + # - 30304:30304/tcp + # - 30304:30304/udp + # - 9005:9005/udp + # - 127.0.0.1:8003:8003 + # - 80:80 #Let's Encrypt + # - 8000:8000/tcp #WSS + # - 127.0.0.1:8645:8645 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + ETH_CLIENT_ADDRESS: *eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + <<: + - *rln_env + - *test_running_conditions + volumes: + - ./run_service_node.sh:/opt/run_service_node.sh:Z + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + 
command: + - /opt/run_service_node.sh + - LIGHTPUSH + networks: + - waku-simulator_simulation + + publishernode: + image: waku.liteprotocoltester:latest + build: + context: ../.. + dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester + deploy: + replicas: ${NUM_PUBLISHER_NODES:-3} + # ports: + # - 30304:30304/tcp + # - 30304:30304/udp + # - 9005:9005/udp + # - 127.0.0.1:8003:8003 + # - 80:80 #Let's Encrypt + # - 8000:8000/tcp #WSS + # - 127.0.0.1:8646:8646 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + ETH_CLIENT_ADDRESS: *eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + <<: + - *rln_env + - *test_running_conditions + volumes: + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + command: + - /usr/bin/run_tester_node.sh + - /usr/bin/liteprotocoltester + - SENDER + - waku-sim + depends_on: + - lightpush-service + configs: + - source: cfg_tester_node.toml + target: config.toml + networks: + - waku-simulator_simulation + + filter-service: + image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:latest-release} + # ports: + # - 30304:30305/tcp + # - 30304:30305/udp + # - 9005:9005/udp + # - 127.0.0.1:8003:8003 + # - 80:80 #Let's Encrypt + # - 8000:8000/tcp #WSS + # - 127.0.0.1:8645:8645 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + ETH_CLIENT_ADDRESS: *eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + <<: + - *rln_env + - *test_running_conditions + volumes: + - ./run_service_node.sh:/opt/run_service_node.sh:Z + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + command: + - /opt/run_service_node.sh + - FILTER + networks: + - waku-simulator_simulation + + + receivernode: + image: waku.liteprotocoltester:latest + build: + context: ../.. 
+ dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester + deploy: + replicas: ${NUM_RECEIVER_NODES:-1} + # ports: + # - 30304:30304/tcp + # - 30304:30304/udp + # - 9005:9005/udp + # - 127.0.0.1:8003:8003 + # - 80:80 #Let's Encrypt + # - 8000:8000/tcp #WSS + # - 127.0.0.1:8647:8647 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + ETH_CLIENT_ADDRESS: *eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + <<: + - *rln_env + - *test_running_conditions + volumes: + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + command: + - /usr/bin/run_tester_node.sh + - /usr/bin/liteprotocoltester + - RECEIVER + - waku-sim + depends_on: + - filter-service + - publishernode + configs: + - source: cfg_tester_node.toml + target: config.toml + networks: + - waku-simulator_simulation + + # We have prometheus and grafana defined in waku-simulator already + prometheus: + image: docker.io/prom/prometheus:latest + volumes: + - ./monitoring/prometheus-config.yml:/etc/prometheus/prometheus.yml:Z + command: + - --config.file=/etc/prometheus/prometheus.yml + - --web.listen-address=:9099 + # ports: + # - 127.0.0.1:9090:9090 + restart: on-failure:5 + depends_on: + - filter-service + - lightpush-service + - publishernode + - receivernode + networks: + - waku-simulator_simulation + + grafana: + image: docker.io/grafana/grafana:latest + env_file: + - ./monitoring/configuration/grafana-plugins.env + volumes: + - ./monitoring/configuration/grafana.ini:/etc/grafana/grafana.ini:Z + - ./monitoring/configuration/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:Z + - ./monitoring/configuration/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:Z + - ./monitoring/configuration/dashboards:/var/lib/grafana/dashboards/:Z + - 
./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_icon.svg:Z + - ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_typelogo.svg:Z + - ./monitoring/configuration/customizations/custom-logo.png:/usr/share/grafana/public/img/fav32.png:Z + ports: + - 0.0.0.0:3033:3033 + restart: on-failure:5 + depends_on: + - prometheus + networks: + - waku-simulator_simulation + +configs: + cfg_tester_node.toml: + content: | + max-connections = 100 + +networks: + waku-simulator_simulation: + external: true diff --git a/third-party/nwaku/apps/liteprotocoltester/docker-compose.yml b/third-party/nwaku/apps/liteprotocoltester/docker-compose.yml new file mode 100644 index 0000000..16b5446 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/docker-compose.yml @@ -0,0 +1,172 @@ +version: "3.7" +x-logging: &logging + logging: + driver: json-file + options: + max-size: 1000m + +# Environment variable definitions +x-eth-client-address: ð_client_address ${ETH_CLIENT_ADDRESS:-} # Add your ETH_CLIENT_ADDRESS after the "-" + +x-rln-environment: &rln_env + RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6} + RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-" + RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-" + +x-test-running-conditions: &test_running_conditions + NUM_MESSAGES: ${NUM_MESSAGES:-120} + MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}" + SHARD: ${SHARD:-0} + CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim} + CLUSTER_ID: ${CLUSTER_ID:-66} + MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb} + MAX_MESSAGE_SIZE: ${MAX_MESSAGE_SIZE:-150Kb} + START_PUBLISHING_AFTER_SECS: ${START_PUBLISHING_AFTER_SECS:-5} # seconds + STANDALONE: ${STANDALONE:-1} + RECEIVER_METRICS_PORT: 8003 + PUBLISHER_METRICS_PORT: 8003 + 
+ +# Services definitions +services: + servicenode: + image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:latest-release} + ports: + - 30304:30304/tcp + - 30304:30304/udp + - 9005:9005/udp + - 127.0.0.1:8003:8003 + - 80:80 #Let's Encrypt + - 8000:8000/tcp #WSS + - 127.0.0.1:8645:8645 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + ETH_CLIENT_ADDRESS: *eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + <<: + - *rln_env + - *test_running_conditions + volumes: + - ./run_service_node.sh:/opt/run_service_node.sh:Z + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + command: + - /opt/run_service_node.sh + + publishernode: + image: waku.liteprotocoltester:latest + build: + context: ../.. + dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester + ports: + # - 30304:30304/tcp + # - 30304:30304/udp + # - 9005:9005/udp + # - 127.0.0.1:8003:8003 + # - 80:80 #Let's Encrypt + # - 8000:8000/tcp #WSS + - 127.0.0.1:8646:8646 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + ETH_CLIENT_ADDRESS: *eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + <<: + - *rln_env + - *test_running_conditions + volumes: + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + command: + - /usr/bin/run_tester_node.sh + - /usr/bin/liteprotocoltester + - SENDER + - servicenode + depends_on: + - servicenode + configs: + - source: cfg_tester_node.toml + target: config.toml + + receivernode: + image: waku.liteprotocoltester:latest + build: + context: ../.. 
+ dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester + ports: + # - 30304:30304/tcp + # - 30304:30304/udp + # - 9005:9005/udp + # - 127.0.0.1:8003:8003 + # - 80:80 #Let's Encrypt + # - 8000:8000/tcp #WSS + - 127.0.0.1:8647:8647 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + ETH_CLIENT_ADDRESS: *eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + <<: + - *rln_env + - *test_running_conditions + volumes: + - ./run_tester_node.sh:/opt/run_tester_node.sh:Z + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + command: + - /usr/bin/run_tester_node.sh + - /usr/bin/liteprotocoltester + - RECEIVER + - servicenode + depends_on: + - servicenode + - publishernode + configs: + - source: cfg_tester_node.toml + target: config.toml + + prometheus: + image: docker.io/prom/prometheus:latest + volumes: + - ./monitoring/prometheus-config.yml:/etc/prometheus/prometheus.yml:Z + command: + - --config.file=/etc/prometheus/prometheus.yml + ports: + - 127.0.0.1:9090:9090 + depends_on: + - servicenode + + grafana: + image: docker.io/grafana/grafana:latest + env_file: + - ./monitoring/configuration/grafana-plugins.env + volumes: + - ./monitoring/configuration/grafana.ini:/etc/grafana/grafana.ini:Z + - ./monitoring/configuration/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:Z + - ./monitoring/configuration/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:Z + - ./monitoring/configuration/dashboards:/var/lib/grafana/dashboards/:Z + - ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_icon.svg:Z + - ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_typelogo.svg:Z + - ./monitoring/configuration/customizations/custom-logo.png:/usr/share/grafana/public/img/fav32.png:Z + ports: + - 0.0.0.0:3000:3000 + depends_on: + - 
prometheus + +configs: + cfg_tester_node.toml: + content: | + max-connections = 100 diff --git a/third-party/nwaku/apps/liteprotocoltester/infra.env b/third-party/nwaku/apps/liteprotocoltester/infra.env new file mode 100644 index 0000000..ebf6147 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/infra.env @@ -0,0 +1,11 @@ +TEST_INTERVAL_MINUTES=180 +START_PUBLISHING_AFTER_SECS=120 +NUM_MESSAGES=300 +MESSAGE_INTERVAL_MILLIS=1000 +MIN_MESSAGE_SIZE=15Kb +MAX_MESSAGE_SIZE=145Kb +SHARD=32 +CONTENT_TOPIC=/tester/2/light-pubsub-test-at-infra/status-prod +CLUSTER_ID=16 +LIGHTPUSH_BOOTSTRAP=enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0 +FILTER_BOOTSTRAP=enr:-QEcuED7ww5vo2rKc1pyBp7fubBUH-8STHEZHo7InjVjLblEVyDGkjdTI9VdqmYQOn95vuQH-Htku17WSTzEufx-Wg4mAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0 diff --git a/third-party/nwaku/apps/liteprotocoltester/legacy_publisher.nim b/third-party/nwaku/apps/liteprotocoltester/legacy_publisher.nim new file mode 100644 index 0000000..12733ad --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/legacy_publisher.nim @@ -0,0 +1,24 @@ +import chronos, results, options +import waku/[waku_node, waku_core] +import publisher_base + +type LegacyPublisher* = ref object of PublisherBase + +proc new*(T: type LegacyPublisher, wakuNode: WakuNode): T = + if isNil(wakuNode.wakuLegacyLightpushClient): + wakuNode.mountLegacyLightPushClient() + + return LegacyPublisher(wakuNode: 
wakuNode) + +method send*( + self: LegacyPublisher, + topic: PubsubTopic, + message: WakuMessage, + servicePeer: RemotePeerInfo, +): Future[Result[void, string]] {.async.} = + # when error it must return original error desc due the text is used for distinction between error types in metrics. + discard ( + await self.wakuNode.legacyLightpushPublish(some(topic), message, servicePeer) + ).valueOr: + return err(error) + return ok() diff --git a/third-party/nwaku/apps/liteprotocoltester/liteprotocoltester.nim b/third-party/nwaku/apps/liteprotocoltester/liteprotocoltester.nim new file mode 100644 index 0000000..2db9bf5 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/liteprotocoltester.nim @@ -0,0 +1,217 @@ +{.push raises: [].} + +import + std/[options, strutils, os, sequtils, net], + chronicles, + chronos, + metrics, + libbacktrace, + system/ansi_c, + libp2p/crypto/crypto, + confutils + +import + ../../tools/confutils/cli_args, + waku/[ + common/enr, + common/logging, + factory/waku as waku_factory, + waku_node, + node/waku_metrics, + node/peer_manager, + waku_lightpush/common, + waku_filter_v2, + waku_peer_exchange/protocol, + waku_core/peers, + waku_core/multiaddrstr, + ], + ./tester_config, + ./publisher, + ./receiver, + ./diagnose_connections, + ./service_peer_management + +logScope: + topics = "liteprotocoltester main" + +proc logConfig(conf: LiteProtocolTesterConf) = + info "Configuration: Lite protocol tester", conf = $conf + +{.pop.} +when isMainModule: + ## Node setup happens in 6 phases: + ## 1. Set up storage + ## 2. Initialize node + ## 3. Mount and initialize configured protocols + ## 4. Start node and mounted protocols + ## 5. Start monitoring tools and external interfaces + ## 6. 
Setup graceful shutdown hooks + + const versionString = "version / git commit hash: " & waku_factory.git_version + + let confRes = LiteProtocolTesterConf.load(version = versionString) + if confRes.isErr(): + error "failure while loading the configuration", error = confRes.error + quit(QuitFailure) + + var conf = confRes.get() + + ## Logging setup + logging.setupLog(conf.logLevel, conf.logFormat) + + info "Running Lite Protocol Tester node", version = waku_factory.git_version + logConfig(conf) + + ##Prepare Waku configuration + ## - load from config file + ## - override according to tester functionality + ## + + var wakuNodeConf: WakuNodeConf + + if conf.configFile.isSome(): + try: + var configFile {.threadvar.}: InputFile + configFile = conf.configFile.get() + wakuNodeConf = WakuNodeConf.load( + version = versionString, + printUsage = false, + secondarySources = proc( + wnconf: WakuNodeConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = + echo "Loading secondary configuration file into WakuNodeConf" + sources.addConfigFile(Toml, configFile), + ) + except CatchableError: + error "Loading Waku configuration failed", error = getCurrentExceptionMsg() + quit(QuitFailure) + + wakuNodeConf.logLevel = conf.logLevel + wakuNodeConf.logFormat = conf.logFormat + wakuNodeConf.nat = conf.nat + wakuNodeConf.maxConnections = 500 + wakuNodeConf.restAddress = conf.restAddress + wakuNodeConf.restPort = conf.restPort + wakuNodeConf.restAllowOrigin = conf.restAllowOrigin + + wakuNodeConf.dnsAddrsNameServers = + @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")] + + wakuNodeConf.shards = @[conf.shard] + wakuNodeConf.contentTopics = conf.contentTopics + wakuNodeConf.clusterId = conf.clusterId + ## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc... 
+ + wakuNodeConf.metricsServer = true + wakuNodeConf.metricsServerAddress = parseIpAddress("0.0.0.0") + wakuNodeConf.metricsServerPort = conf.metricsPort + + # If bootstrap option is chosen we expect our clients will not mounted + # so we will mount PeerExchange manually to gather possible service peers, + # if got some we will mount the client protocols afterward. + wakuNodeConf.peerExchange = false + wakuNodeConf.relay = false + wakuNodeConf.filter = false + wakuNodeConf.lightpush = false + wakuNodeConf.store = false + + wakuNodeConf.rest = false + wakuNodeConf.relayServiceRatio = "40:60" + + let wakuConf = wakuNodeConf.toWakuConf().valueOr: + error "Issue converting toWakuConf", error = $error + quit(QuitFailure) + + var waku = (waitFor Waku.new(wakuConf)).valueOr: + error "Waku initialization failed", error = error + quit(QuitFailure) + + (waitFor startWaku(addr waku)).isOkOr: + error "Starting waku failed", error = error + quit(QuitFailure) + + debug "Setting up shutdown hooks" + + proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} = + await waku.stop() + quit(QuitSuccess) + + # Handle Ctrl-C SIGINT + proc handleCtrlC() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + setupForeignThreadGc() + notice "Shutting down after receiving SIGINT" + asyncSpawn asyncStopper(waku) + + setControlCHook(handleCtrlC) + + # Handle SIGTERM + when defined(posix): + proc handleSigterm(signal: cint) {.noconv.} = + notice "Shutting down after receiving SIGTERM" + asyncSpawn asyncStopper(waku) + + c_signal(ansi_c.SIGTERM, handleSigterm) + + # Handle SIGSEGV + when defined(posix): + proc handleSigsegv(signal: cint) {.noconv.} = + # Require --debugger:native + fatal "Shutting down after receiving SIGSEGV", stacktrace = getBacktrace() + + # Not available in -d:release mode + writeStackTrace() + + waitFor waku.stop() + quit(QuitFailure) + + c_signal(ansi_c.SIGSEGV, handleSigsegv) + + info "Node setup complete" + + var 
codec = WakuLightPushCodec + # mounting relevant client, for PX filter client must be mounted ahead + if conf.testFunc == TesterFunctionality.SENDER: + codec = WakuLightPushCodec + else: + codec = WakuFilterSubscribeCodec + + var lookForServiceNode = false + var serviceNodePeerInfo: RemotePeerInfo + if conf.serviceNode.len == 0: + if conf.bootstrapNode.len > 0: + info "Bootstrapping with PeerExchange to gather random service node" + let futForServiceNode = pxLookupServiceNode(waku.node, conf) + if not (waitFor futForServiceNode.withTimeout(20.minutes)): + error "Service node not found in time via PX" + quit(QuitFailure) + + if futForServiceNode.read().isErr(): + error "Service node for test not found via PX" + quit(QuitFailure) + + serviceNodePeerInfo = selectRandomServicePeer( + waku.node.peerManager, none(RemotePeerInfo), codec + ).valueOr: + error "Service node selection failed" + quit(QuitFailure) + else: + error "No service or bootstrap node provided" + quit(QuitFailure) + else: + # support for both ENR and URI formatted service node addresses + serviceNodePeerInfo = translateToRemotePeerInfo(conf.serviceNode).valueOr: + error "failed to parse service-node", node = conf.serviceNode + quit(QuitFailure) + + info "Service node to be used", serviceNode = $serviceNodePeerInfo + + logSelfPeers(waku.node.peerManager) + + if conf.testFunc == TesterFunctionality.SENDER: + setupAndPublish(waku.node, conf, serviceNodePeerInfo) + else: + setupAndListen(waku.node, conf, serviceNodePeerInfo) + + runForever() diff --git a/third-party/nwaku/apps/liteprotocoltester/lpt_metrics.nim b/third-party/nwaku/apps/liteprotocoltester/lpt_metrics.nim new file mode 100644 index 0000000..8b30619 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/lpt_metrics.nim @@ -0,0 +1,56 @@ +## Example showing how a resource restricted client may +## subscribe to messages without relay + +import metrics + +export metrics + +declarePublicGauge lpt_receiver_sender_peer_count, "count of sender 
peers" + +declarePublicCounter lpt_receiver_received_messages_count, + "number of messages received per peer", ["peer"] + +declarePublicCounter lpt_receiver_received_bytes, + "number of received bytes per peer", ["peer"] + +declarePublicGauge lpt_receiver_missing_messages_count, + "number of missing messages per peer", ["peer"] + +declarePublicCounter lpt_receiver_duplicate_messages_count, + "number of duplicate messages per peer", ["peer"] + +declarePublicGauge lpt_receiver_distinct_duplicate_messages_count, + "number of distinct duplicate messages per peer", ["peer"] + +declarePublicGauge lpt_receiver_latencies, + "Message delivery latency per peer (min-avg-max)", ["peer", "latency"] + +declarePublicCounter lpt_receiver_lost_subscription_count, + "number of filter service peer failed PING requests - lost subscription" + +declarePublicCounter lpt_publisher_sent_messages_count, "number of messages published" + +declarePublicCounter lpt_publisher_failed_messages_count, + "number of messages failed to publish per failure cause", ["cause"] + +declarePublicCounter lpt_publisher_sent_bytes, "number of total bytes sent" + +declarePublicCounter lpt_service_peer_failure_count, + "number of failure during using service peer [publisher/receiever]", ["role", "agent"] + +declarePublicCounter lpt_change_service_peer_count, + "number of times [publisher/receiver] had to change service peer", ["role"] + +declarePublicGauge lpt_px_peers, + "Number of peers PeerExchange discovered and can be dialed" + +declarePublicGauge lpt_dialed_peers, "Number of peers successfully dialed", ["agent"] + +declarePublicGauge lpt_dial_failures, "Number of dial failures by cause", ["agent"] + +declarePublicHistogram lpt_publish_duration_seconds, + "duration to lightpush messages", + buckets = [ + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, + 15.0, 20.0, 30.0, Inf, + ] diff --git a/third-party/nwaku/apps/liteprotocoltester/lpt_supervisor.py 
b/third-party/nwaku/apps/liteprotocoltester/lpt_supervisor.py new file mode 100755 index 0000000..7d882af --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/lpt_supervisor.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 + +import os +import time +from subprocess import Popen +import sys + +def load_env(file_path): + predefined_test_env = {} + with open(file_path) as f: + for line in f: + if line.strip() and not line.startswith('#'): + key, value = line.strip().split('=', 1) + predefined_test_env[key] = value + return predefined_test_env + +def run_tester_node(predefined_test_env): + role = sys.argv[1] + # override incoming environment variables with the ones from the file to prefer predefined testing environment. + for key, value in predefined_test_env.items(): + os.environ[key] = value + + script_cmd = "/usr/bin/run_tester_node_at_infra.sh /usr/bin/liteprotocoltester {role}".format(role=role) + return os.system(script_cmd) + +if __name__ == "__main__": + if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER", "SENDERV3"]: + print("Error: First argument must be either 'RECEIVER' or 'SENDER' or 'SENDERV3'") + sys.exit(1) + + predefined_test_env_file = '/usr/bin/infra.env' + predefined_test_env = load_env(predefined_test_env_file) + + test_interval_minutes = int(predefined_test_env.get('TEST_INTERVAL_MINUTES', 60)) # Default to 60 minutes if not set + print(f"supervisor: Start testing loop. 
Interval is {test_interval_minutes} minutes") + counter = 0 + + while True: + counter += 1 + start_time = time.time() + print(f"supervisor: Run #{counter} started at {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))}") + print(f"supervisor: with arguments: {predefined_test_env}") + + exit_code = run_tester_node(predefined_test_env) + + end_time = time.time() + run_time = end_time - start_time + sleep_time = max(5 * 60, (test_interval_minutes * 60) - run_time) + + print(f"supervisor: Tester node finished at {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))}") + print(f"supervisor: Runtime was {run_time:.2f} seconds") + print(f"supervisor: Next run scheduled in {sleep_time // 60:.2f} minutes") + + time.sleep(sleep_time) diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.png b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.png new file mode 100644 index 0000000..dcf13b9 Binary files /dev/null and b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.png differ diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.svg b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.svg new file mode 100644 index 0000000..3c9a6da --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.svg @@ -0,0 +1,3 @@ + + + diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards.yaml b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards.yaml new file mode 100644 index 0000000..e59ac96 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards.yaml @@ -0,0 +1,9 @@ +apiVersion: 1 + +providers: +- name: 'Prometheus' + orgId: 1 + folder: '' + type: file + options: + path: 
/var/lib/grafana/dashboards \ No newline at end of file diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards/liter-protocol-test-monitoring.json b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards/liter-protocol-test-monitoring.json new file mode 100644 index 0000000..22770e2 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards/liter-protocol-test-monitoring.json @@ -0,0 +1,1949 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "Monitoring of lite-protocol-tester's send/receiver performance and failure counters.", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 13, + "panels": [], + "title": "Peer statistics", + "type": "row" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "fieldMinMax": false, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 5, + "x": 0, + "y": 1 + }, + "id": 15, + "options": { + "displayMode": "lcd", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "11.2.0", + "targets": [ + { + 
"datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_px_peers{instance=~\".*publisher.*\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Lightpush capable peers found via PX", + "type": "bargauge" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 7, + "x": 5, + "y": 1 + }, + "id": 22, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_dialed_peers{instance=~\".*publisher.*\"}", + "fullMetaSearch": false, + "hide": false, 
+ "includeNullMetadata": true, + "instant": false, + "legendFormat": "Working filter peers {{instance}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_dial_failures{instance=~\".*publisher.*\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Failed to dial {{instance}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Tested lightpush peers", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "fieldMinMax": false, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 5, + "x": 12, + "y": 1 + }, + "id": 21, + "options": { + "displayMode": "lcd", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_px_peers{instance=~\".*receiver.*\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Filter capable peers found via PX", + "type": "bargauge" + }, + { + "datasource": { + "default": true, + 
"type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 7, + "x": 17, + "y": 1 + }, + "id": 14, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_dialed_peers{instance=~\".*receivernode.*\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Working filter peers {{instance}}", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_dial_failures{instance=~\".*receivernode.*\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": 
"Failed to dial {{instance}}", + "range": true, + "refId": "C", + "useBackend": false + } + ], + "title": "Tested filter peers", + "type": "timeseries" + }, + { + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 16 + }, + "id": 12, + "title": "Test publisher monitor", + "type": "row" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 23, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 16, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(instace) (lpt_service_peer_failure_count_total{instance=~\".*publishernode.*\", role=\"publisher\"})", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Push 
failed", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "sum by(instace) (lpt_change_service_peer_count_total{instance=~\".*publishernode.*\", role=\"publisher\"})", + "hide": false, + "instant": false, + "legendFormat": "Peer switch", + "range": true, + "refId": "B" + } + ], + "title": "Lightpush service peer failures and switches", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 23, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 17, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(instace) 
(lpt_service_peer_failure_count_total{instance=~\".*receivernode.*\", role=\"receiver\"})", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Subscribe failed", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "sum by(instace) (lpt_change_service_peer_count_total{instance=~\".*receivernode.*\", role=\"receiver\"})", + "hide": false, + "instant": false, + "legendFormat": "Peer switch", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "sum by(instace) (lpt_receiver_lost_subscription_count_total{instance=~\".*receivernode.*\"})", + "hide": false, + "instant": false, + "legendFormat": "Subscription loss - ping fail", + "range": true, + "refId": "C" + } + ], + "title": "Filter service peer failures and switches", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "orange", + "value": 70 + }, + { + "color": "red", + "value": 85 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 18, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": false, + "sizing": "auto" + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "count(\n group(\n 
last_over_time(lpt_px_peers{instance=~\".*publishernode.*\"}[24h])\n ) by (instance)\n)", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Number or publishers", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "count(\n group(\n last_over_time(lpt_px_peers{instance=~\".*receivernode.*\"}[24h])\n ) by (instance)\n)", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Number or receivers", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Number of tester nodes", + "type": "gauge" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 25 + }, + "id": 8, + "options": { + "displayMode": "lcd", + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "top", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "11.2.0", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_receiver_sender_peer_count{instance=~\".*receivernode.*\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": 
"Receiver detected message from number of publisher peers", + "type": "bargauge" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 37 + }, + "id": 11, + "panels": [], + "title": "Test performance", + "type": "row" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Publishing rate" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.gradientMode", + "value": "hue" + }, + { + "id": "custom.fillOpacity", + "value": 15 + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "normal" + } + }, + { + "id": "unit", + "value": "reqps" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 38 + }, + "id": 1, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + 
"uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(instace) (lpt_publisher_sent_messages_count_total{instance=~\".*publishernode.*\"})", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total published count", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(instance) (rate(lpt_publisher_sent_messages_count_total{instance=~\".*publishernode.*\"}[$__rate_interval]))", + "format": "time_series", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Publishing rate", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Published test messages", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "series", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + 
{ + "matcher": { + "id": "byName", + "options": "Received message rate" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.scaleDistribution", + "value": { + "type": "linear" + } + }, + { + "id": "custom.fillOpacity", + "value": 9 + }, + { + "id": "custom.gradientMode", + "value": "hue" + }, + { + "id": "unit", + "value": "reqps" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 38 + }, + "id": 2, + "options": { + "legend": { + "calcs": [ + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "sum by(instance) (lpt_receiver_received_messages_count_total{instance=~\".*receivernode.*\"})", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total message received", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "sum by(instance) (rate(lpt_receiver_received_messages_count_total{instance=~\".*receivernode.*\"}[$__rate_interval]))", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Received message rate", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Received test messages", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "series", + "axisLabel": "", + "axisPlacement": "left", + 
"barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "kbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Send message transfer rate" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.fillOpacity", + "value": 22 + }, + { + "id": "custom.scaleDistribution", + "value": { + "type": "linear" + } + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "normal" + } + }, + { + "id": "unit", + "value": "KiBs" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 46 + }, + "id": 5, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(instance) (lpt_publisher_sent_bytes_total{instance=~\".*publishernode.*\"})", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total sent bytes", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + 
"editorMode": "code", + "exemplar": false, + "expr": "sum by(instance) (rate(lpt_publisher_sent_bytes_total{instance=~\".*publishernode.*\"}[$__rate_interval]))", + "format": "time_series", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Send message transfer rate", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Sent bytes", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "series", + "axisLabel": "", + "axisPlacement": "left", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "kbytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Test message transfer rate" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "custom.fillOpacity", + "value": 22 + }, + { + "id": "custom.scaleDistribution", + "value": { + "type": "linear" + } + }, + { + "id": "custom.stacking", + "value": { + "group": "A", + "mode": "normal" + } + }, + { + "id": "unit", + "value": "KiBs" + } + ] + } + ] + }, + "gridPos": { + 
"h": 8, + "w": 12, + "x": 12, + "y": 46 + }, + "id": 4, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(instance) (lpt_receiver_received_bytes_total{instance=~\".*receivernode.*\"})", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total received bytes", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "sum by(instance) (rate(lpt_receiver_received_bytes_total{instance=~\".*receivernode.*\"}[$__rate_interval]))", + "format": "time_series", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Test message transfer rate", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Received bytes", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 54 + }, + "id": 10, + "panels": [], + "title": "Failure statistics", + "type": "row" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 23, + "gradientMode": "hue", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Published message rate" + }, + "properties": [ + { + "id": "custom.axisPlacement", + "value": "right" + }, + { + "id": "custom.gradientMode", + "value": "hue" + }, + { + "id": "custom.fillOpacity", + "value": 15 + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 55 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "lpt_publisher_failed_messages_count_total{instance=~\".*publishernode.*\"}", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{instance}} - {{cause}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Failed publish count per cause", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "series", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": 
"none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 55 + }, + "id": 7, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_receiver_duplicate_messages_count_total{instance=~\".*receivernode.*\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total duplicates at {{instance}}", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_receiver_distinct_duplicate_messages_count{instance=~\".*receivernode.*\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Distinct duplicates at {{instance}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Received duplicated messages", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + 
"custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "series", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 21, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 63 + }, + "id": 9, + "options": { + "legend": { + "calcs": [ + "lastNotNull" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "lpt_receiver_missing_messages_count{instance=~\".*receivernode.*\"}", + "format": "time_series", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Receiver {{instance}}:Publisher {{peer}}", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Not arrived messages", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "lpt-runner-publishernode-1:8003", + "value": "lpt-runner-publishernode-1:8003" + }, + "definition": "label_values({instance=~\".*publishernode.*\"},instance)", + "hide": 0, + "includeAll": false, + "multi": 
false, + "name": "publisher", + "options": [], + "query": { + "qryType": 1, + "query": "label_values({instance=~\".*publishernode.*\"},instance)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": true, + "text": "lpt-runner-receivernode-1:8003", + "value": "lpt-runner-receivernode-1:8003" + }, + "definition": "label_values({instance=~\".*receivernode.*\"},instance)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "receiver", + "options": [], + "query": { + "qryType": 1, + "query": "label_values({instance=~\".*receivernode.*\"},instance)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "2024-10-02T22:07:37.000Z", + "to": "2024-10-02T22:23:21.000Z" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Liteprotocoltester monitoring", + "uid": "fdw6pgh9odszkd", + "version": 1, + "weekStart": "" +} diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/datasources.yaml b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/datasources.yaml new file mode 100644 index 0000000..2cc211f --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/datasources.yaml @@ -0,0 +1,11 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + org_id: 1 + url: http://prometheus:9099 + is_default: true + version: 1 + editable: true diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana-plugins.env b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana-plugins.env new file mode 100644 index 0000000..2780809 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana-plugins.env @@ -0,0 +1,2 @@ 
+#GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,digrich-bubblechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,jdbranham-diagram-panel,agenty-flowcharting-panel,citilogics-geoloop-panel,savantly-heatmap-panel,mtanda-histogram-panel,pierosavi-imageit-panel,michaeldmoore-multistat-panel,zuburqan-parity-report-panel,natel-plotly-panel,bessler-pictureit-panel,grafana-polystat-panel,corpglory-progresslist-panel,snuids-radar-panel,fzakaria-simple-config.config.annotations-datasource,vonage-status-panel,snuids-trafficlights-panel,pr0ps-trackmap-panel,alexandra-trackmap-panel,btplc-trend-box-panel +GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,pierosavi-imageit-panel,bessler-pictureit-panel,vonage-status-panel diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana.ini b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana.ini new file mode 100644 index 0000000..631fbb7 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana.ini @@ -0,0 +1,53 @@ +instance_name = liteprotocoltester dashboard + +;[dashboards.json] +;enabled = true +;path = /home/git/grafana/grafana-dashboards/dashboards + +[server] +http_port = 3033 + +#################################### Auth ########################## +[auth] +disable_login_form = false + +#################################### Anonymous Auth ########################## +[auth.anonymous] +# enable anonymous access +enabled = true + +# specify organization name that should be used for unauthenticated users +;org_name = Public + +# specify role for unauthenticated users +org_role = Admin +; org_role = Viewer + +;[security] +;admin_user = ocr +;admin_password = ocr + +;[users] +# disable user signup / registration +;allow_sign_up = false + +# Set to true to automatically assign new users to the default organization (id 1) +;auto_assign_org = true + +# 
Default role new users will be automatically assigned (if disabled above is set to true) +;auto_assign_org_role = Viewer + +#################################### SMTP / Emailing ########################## +;[smtp] +;enabled = false +;host = localhost:25 +;user = +;password = +;cert_file = +;key_file = +;skip_verify = false +;from_address = admin@grafana.localhost + +;[emails] +;welcome_email_on_sign_up = false + diff --git a/third-party/nwaku/apps/liteprotocoltester/monitoring/prometheus-config.yml b/third-party/nwaku/apps/liteprotocoltester/monitoring/prometheus-config.yml new file mode 100644 index 0000000..d04eaf0 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/monitoring/prometheus-config.yml @@ -0,0 +1,35 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + external_labels: + monitor: "Monitoring" + +scrape_configs: + - job_name: "liteprotocoltester" + static_configs: + - targets: ["liteprotocoltester-publishernode-1:8003", + "liteprotocoltester-publishernode-2:8003", + "liteprotocoltester-publishernode-3:8003", + "liteprotocoltester-publishernode-4:8003", + "liteprotocoltester-publishernode-5:8003", + "liteprotocoltester-publishernode-6:8003", + "liteprotocoltester-receivernode-1:8003", + "liteprotocoltester-receivernode-2:8003", + "liteprotocoltester-receivernode-3:8003", + "liteprotocoltester-receivernode-4:8003", + "liteprotocoltester-receivernode-5:8003", + "liteprotocoltester-receivernode-6:8003", + "publishernode:8003", + "publishernode-1:8003", + "publishernode-2:8003", + "publishernode-3:8003", + "publishernode-4:8003", + "publishernode-5:8003", + "publishernode-6:8003", + "receivernode:8003", + "receivernode-1:8003", + "receivernode-2:8003", + "receivernode-3:8003", + "receivernode-4:8003", + "receivernode-5:8003", + "receivernode-6:8003",] diff --git a/third-party/nwaku/apps/liteprotocoltester/nim.cfg b/third-party/nwaku/apps/liteprotocoltester/nim.cfg new file mode 100644 index 0000000..2231f2e --- /dev/null +++ 
b/third-party/nwaku/apps/liteprotocoltester/nim.cfg @@ -0,0 +1,4 @@ +-d:chronicles_line_numbers +-d:chronicles_runtime_filtering:on +-d:discv5_protocol_id:d5waku +path = "../.." diff --git a/third-party/nwaku/apps/liteprotocoltester/publisher.nim b/third-party/nwaku/apps/liteprotocoltester/publisher.nim new file mode 100644 index 0000000..d803147 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/publisher.nim @@ -0,0 +1,272 @@ +import + std/[strformat, sysrand, random, strutils, sequtils], + system/ansi_c, + chronicles, + chronos, + chronos/timer as chtimer, + stew/byteutils, + results, + json_serialization as js +import + waku/[ + common/logging, + waku_node, + node/peer_manager, + waku_core, + waku_lightpush/client, + waku_lightpush/common, + common/utils/parse_size_units, + ], + ./tester_config, + ./tester_message, + ./lpt_metrics, + ./diagnose_connections, + ./service_peer_management, + ./publisher_base, + ./legacy_publisher, + ./v3_publisher + +randomize() + +type SizeRange* = tuple[min: uint64, max: uint64] + +var RANDOM_PAYLOAD {.threadvar.}: seq[byte] +RANDOM_PAYLOAD = urandom(1024 * 1024) + # 1MiB of random payload to be used to extend message + +proc prepareMessage( + sender: string, + messageIndex, numMessages: uint32, + startedAt: TimeStamp, + prevMessageAt: var Timestamp, + contentTopic: ContentTopic, + size: SizeRange, +): (WakuMessage, uint64) = + var renderSize = rand(size.min .. size.max) + let current = getNowInNanosecondTime() + let payload = ProtocolTesterMessage( + sender: sender, + index: messageIndex, + count: numMessages, + startedAt: startedAt, + sinceStart: current - startedAt, + sincePrev: current - prevMessageAt, + size: renderSize, + ) + + prevMessageAt = current + + let text = js.Json.encode(payload) + let contentPayload = toBytes(text & " \0") + + if renderSize < len(contentPayload).uint64: + renderSize = len(contentPayload).uint64 + + let finalPayload = + concat(contentPayload, RANDOM_PAYLOAD[0 .. 
renderSize - len(contentPayload).uint64]) + let message = WakuMessage( + payload: finalPayload, # content of the message + contentTopic: contentTopic, # content topic to publish to + ephemeral: true, # tell store nodes to not store it + timestamp: current, # current timestamp + ) + + return (message, renderSize) + +var sentMessages {.threadvar.}: OrderedTable[uint32, tuple[hash: string, relayed: bool]] +var failedToSendCause {.threadvar.}: Table[string, uint32] +var failedToSendCount {.threadvar.}: uint32 +var numMessagesToSend {.threadvar.}: uint32 +var messagesSent {.threadvar.}: uint32 +var noOfServicePeerSwitches {.threadvar.}: uint32 + +proc reportSentMessages() = + let report = catch: + """*----------------------------------------* +| Service Peer Switches: {noOfServicePeerSwitches:>15} | +*----------------------------------------* +| Expected | Sent | Failed | +|{numMessagesToSend+failedToSendCount:>11} |{messagesSent:>11} |{failedToSendCount:>11} | +*----------------------------------------*""".fmt() + + if report.isErr: + echo "Error while printing statistics" + else: + echo report.get() + + echo "*--------------------------------------------------------------------------------------------------*" + echo "| Failure cause | count |" + for (cause, count) in failedToSendCause.pairs: + echo fmt"|{cause:<87}|{count:>10}|" + echo "*--------------------------------------------------------------------------------------------------*" + + echo "*--------------------------------------------------------------------------------------------------*" + echo "| Index | Relayed | Hash |" + for (index, info) in sentMessages.pairs: + echo fmt"|{index+1:>10}|{info.relayed:<9}| {info.hash:<76}|" + echo "*--------------------------------------------------------------------------------------------------*" + # evere sent message hash should logged once + sentMessages.clear() + +proc publishMessages( + wakuNode: WakuNode, + publisher: PublisherBase, + servicePeer: RemotePeerInfo, + 
lightpushPubsubTopic: PubsubTopic, + lightpushContentTopic: ContentTopic, + numMessages: uint32, + messageSizeRange: SizeRange, + messageInterval: Duration, + preventPeerSwitch: bool, +) {.async.} = + var actualServicePeer = servicePeer + let startedAt = getNowInNanosecondTime() + var prevMessageAt = startedAt + var renderMsgSize = messageSizeRange + # sets some default of min max message size to avoid conflict with meaningful payload size + renderMsgSize.min = max(1024.uint64, renderMsgSize.min) # do not use less than 1KB + renderMsgSize.max = max(2048.uint64, renderMsgSize.max) # minimum of max is 2KB + renderMsgSize.min = min(renderMsgSize.min, renderMsgSize.max) + renderMsgSize.max = max(renderMsgSize.min, renderMsgSize.max) + + const maxFailedPush = 3 + var noFailedPush = 0 + var noFailedServiceNodeSwitches = 0 + + let selfPeerId = $wakuNode.switch.peerInfo.peerId + failedToSendCount = 0 + numMessagesToSend = if numMessages == 0: uint32.high else: numMessages + messagesSent = 0 + + while messagesSent < numMessagesToSend: + let (message, msgSize) = prepareMessage( + selfPeerId, + messagesSent + 1, + numMessagesToSend, + startedAt, + prevMessageAt, + lightpushContentTopic, + renderMsgSize, + ) + + let publishStartTime = Moment.now() + + let wlpRes = await publisher.send(lightpushPubsubTopic, message, actualServicePeer) + + let publishDuration = Moment.now() - publishStartTime + + let msgHash = computeMessageHash(lightpushPubsubTopic, message).to0xHex + + if wlpRes.isOk(): + lpt_publish_duration_seconds.observe(publishDuration.milliseconds.float / 1000) + + sentMessages[messagesSent] = (hash: msgHash, relayed: true) + notice "published message using lightpush", + index = messagesSent + 1, + count = numMessagesToSend, + size = msgSize, + pubsubTopic = lightpushPubsubTopic, + hash = msgHash + inc(messagesSent) + lpt_publisher_sent_messages_count.inc() + lpt_publisher_sent_bytes.inc(amount = msgSize.int64) + if noFailedPush > 0: + noFailedPush -= 1 + else: + 
sentMessages[messagesSent] = (hash: msgHash, relayed: false) + failedToSendCause.mgetOrPut(wlpRes.error, 1).inc() + error "failed to publish message using lightpush", + err = wlpRes.error, hash = msgHash + inc(failedToSendCount) + lpt_publisher_failed_messages_count.inc(labelValues = [wlpRes.error]) + if not wlpRes.error.toLower().contains("dial"): + # retry sending after shorter wait + await sleepAsync(2.seconds) + continue + else: + noFailedPush += 1 + lpt_service_peer_failure_count.inc( + labelValues = ["publisher", actualServicePeer.getAgent()] + ) + if not preventPeerSwitch and noFailedPush > maxFailedPush: + info "Max push failure limit reached, Try switching peer." + let peerOpt = selectRandomServicePeer( + wakuNode.peerManager, some(actualServicePeer), WakuLightPushCodec + ) + if peerOpt.isOk(): + actualServicePeer = peerOpt.get() + + info "New service peer in use", + codec = lightpushPubsubTopic, + peer = constructMultiaddrStr(actualServicePeer) + + noFailedPush = 0 + noOfServicePeerSwitches += 1 + lpt_change_service_peer_count.inc(labelValues = ["publisher"]) + continue # try again with new peer without delay + else: + error "Failed to find new service peer. Exiting." 
+ noFailedServiceNodeSwitches += 1 + break + + await sleepAsync(messageInterval) + +proc setupAndPublish*( + wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo +) = + var publisher: PublisherBase + if conf.lightpushVersion == LightpushVersion.LEGACY: + info "Using legacy lightpush protocol for publishing messages" + publisher = LegacyPublisher.new(wakuNode) + else: + info "Using lightpush v3 protocol for publishing messages" + publisher = V3Publisher.new(wakuNode) + + # give some time to receiver side to set up + let waitTillStartTesting = conf.startPublishingAfter.seconds + + let parsedMinMsgSize = parseMsgSize(conf.minTestMessageSize).valueOr: + error "failed to parse 'min-test-msg-size' param: ", error = error + return + + let parsedMaxMsgSize = parseMsgSize(conf.maxTestMessageSize).valueOr: + error "failed to parse 'max-test-msg-size' param: ", error = error + return + + info "Sending test messages in", wait = waitTillStartTesting + waitFor sleepAsync(waitTillStartTesting) + + info "Start sending messages to service node using lightpush" + + sentMessages.sort(system.cmp) + + let interval = secs(60) + var printStats: CallbackFunc + + printStats = CallbackFunc( + proc(udata: pointer) {.gcsafe.} = + reportSentMessages() + + if messagesSent >= numMessagesToSend: + info "All messages are sent. Exiting." 
+ + ## for gracefull shutdown through signal hooks + discard c_raise(ansi_c.SIGTERM) + else: + discard setTimer(Moment.fromNow(interval), printStats) + ) + + discard setTimer(Moment.fromNow(interval), printStats) + + # Start maintaining subscription + asyncSpawn publishMessages( + wakuNode, + publisher, + servicePeer, + conf.getPubsubTopic(), + conf.contentTopics[0], + conf.numMessages, + (min: parsedMinMsgSize, max: parsedMaxMsgSize), + conf.messageInterval.milliseconds, + conf.fixedServicePeer, + ) diff --git a/third-party/nwaku/apps/liteprotocoltester/publisher_base.nim b/third-party/nwaku/apps/liteprotocoltester/publisher_base.nim new file mode 100644 index 0000000..de88d82 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/publisher_base.nim @@ -0,0 +1,14 @@ +import chronos, results +import waku/[waku_node, waku_core] + +type PublisherBase* = ref object of RootObj + wakuNode*: WakuNode + +method send*( + self: PublisherBase, + topic: PubsubTopic, + message: WakuMessage, + servicePeer: RemotePeerInfo, +): Future[Result[void, string]] {.base, async.} = + discard + # when error it must return original error desc due the text is used for distinction between error types in metrics. 
diff --git a/third-party/nwaku/apps/liteprotocoltester/receiver.nim b/third-party/nwaku/apps/liteprotocoltester/receiver.nim new file mode 100644 index 0000000..f0f41b1 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/receiver.nim @@ -0,0 +1,182 @@ +## Example showing how a resource restricted client may +## subscribe to messages without relay + +import + std/options, + system/ansi_c, + chronicles, + chronos, + chronos/timer as chtimer, + stew/byteutils, + results, + serialization, + json_serialization as js + +import + waku/[ + common/logging, + node/peer_manager, + waku_node, + waku_core, + waku_filter_v2/client, + waku_filter_v2/common, + waku_core/multiaddrstr, + ], + ./tester_config, + ./tester_message, + ./statistics, + ./diagnose_connections, + ./service_peer_management, + ./lpt_metrics + +var actualFilterPeer {.threadvar.}: RemotePeerInfo + +proc unsubscribe( + wakuNode: WakuNode, filterPubsubTopic: PubsubTopic, filterContentTopic: ContentTopic +) {.async.} = + notice "unsubscribing from filter" + let unsubscribeRes = await wakuNode.wakuFilterClient.unsubscribe( + actualFilterPeer, filterPubsubTopic, @[filterContentTopic] + ) + if unsubscribeRes.isErr: + notice "unsubscribe request failed", err = unsubscribeRes.error + else: + notice "unsubscribe request successful" + +proc maintainSubscription( + wakuNode: WakuNode, + filterPubsubTopic: PubsubTopic, + filterContentTopic: ContentTopic, + preventPeerSwitch: bool, +) {.async.} = + const maxFailedSubscribes = 3 + const maxFailedServiceNodeSwitches = 10 + var noFailedSubscribes = 0 + var noFailedServiceNodeSwitches = 0 + var isFirstPingOnNewPeer = true + while true: + info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer) + # First use filter-ping to check if we have an active subscription + let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer) + if pingRes.isErr(): + if isFirstPingOnNewPeer == false: + # Very first ping expected to fail as we have not yet 
subscribed at all + lpt_receiver_lost_subscription_count.inc() + isFirstPingOnNewPeer = false + # No subscription found. Let's subscribe. + error "ping failed.", err = pingRes.error + trace "no subscription found. Sending subscribe request" + + let subscribeRes = await wakuNode.filterSubscribe( + some(filterPubsubTopic), filterContentTopic, actualFilterPeer + ) + + if subscribeRes.isErr(): + noFailedSubscribes += 1 + lpt_service_peer_failure_count.inc( + labelValues = ["receiver", actualFilterPeer.getAgent()] + ) + error "Subscribe request failed.", + err = subscribeRes.error, + peer = actualFilterPeer, + failCount = noFailedSubscribes + + # TODO: disconnet from failed actualFilterPeer + # asyncSpawn(wakuNode.peerManager.switch.disconnect(p)) + # wakunode.peerManager.peerStore.delete(actualFilterPeer) + + if noFailedSubscribes < maxFailedSubscribes: + await sleepAsync(2.seconds) # Wait a bit before retrying + continue + elif not preventPeerSwitch: + let peerOpt = selectRandomServicePeer( + wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec + ) + if peerOpt.isOk(): + actualFilterPeer = peerOpt.get() + + info "Found new peer for codec", + codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer) + + noFailedSubscribes = 0 + lpt_change_service_peer_count.inc(labelValues = ["receiver"]) + isFirstPingOnNewPeer = true + continue # try again with new peer without delay + else: + error "Failed to find new service peer. Exiting." + noFailedServiceNodeSwitches += 1 + break + else: + if noFailedSubscribes > 0: + noFailedSubscribes -= 1 + + notice "subscribe request successful." + else: + info "subscription is live." 
+ + await sleepAsync(30.seconds) # Subscription maintenance interval + +proc setupAndListen*( + wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo +) = + if isNil(wakuNode.wakuFilterClient): + # if we have not yet initialized lightpush client, then do it as the only way we can get here is + # by having a service peer discovered. + waitFor wakuNode.mountFilterClient() + + info "Start receiving messages to service node using filter", + servicePeer = servicePeer + + var stats: PerPeerStatistics + actualFilterPeer = servicePeer + + let pushHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure.} = + let payloadStr = string.fromBytes(message.payload) + let testerMessage = js.Json.decode(payloadStr, ProtocolTesterMessage) + let msgHash = computeMessageHash(pubsubTopic, message).to0xHex + + stats.addMessage(testerMessage.sender, testerMessage, msgHash) + + notice "message received", + index = testerMessage.index, + count = testerMessage.count, + startedAt = $testerMessage.startedAt, + sinceStart = $testerMessage.sinceStart, + sincePrev = $testerMessage.sincePrev, + size = $testerMessage.size, + pubsubTopic = pubsubTopic, + hash = msgHash + + wakuNode.wakuFilterClient.registerPushHandler(pushHandler) + + let interval = millis(20000) + var printStats: CallbackFunc + + # calculate max wait after the last known message arrived before exiting + # 20% of expected messages times the expected interval but capped to 10min + let maxWaitForLastMessage: Duration = + min(conf.messageInterval.milliseconds * (conf.numMessages div 5), 10.minutes) + + printStats = CallbackFunc( + proc(udata: pointer) {.gcsafe.} = + stats.echoStats() + + if conf.numMessages > 0 and + waitFor stats.checkIfAllMessagesReceived(maxWaitForLastMessage): + waitFor unsubscribe(wakuNode, conf.getPubsubTopic(), conf.contentTopics[0]) + info "All messages received. Exiting." 
+ + ## for gracefull shutdown through signal hooks + discard c_raise(ansi_c.SIGTERM) + else: + discard setTimer(Moment.fromNow(interval), printStats) + ) + + discard setTimer(Moment.fromNow(interval), printStats) + + # Start maintaining subscription + asyncSpawn maintainSubscription( + wakuNode, conf.getPubsubTopic(), conf.contentTopics[0], conf.fixedServicePeer + ) diff --git a/third-party/nwaku/apps/liteprotocoltester/run_service_node.sh b/third-party/nwaku/apps/liteprotocoltester/run_service_node.sh new file mode 100755 index 0000000..07fdbe9 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/run_service_node.sh @@ -0,0 +1,63 @@ +#!/bin/sh + +echo "I am a service node" +IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/') + +echo "Service node IP: ${IP}" + +if [ -n "${SHARD}" ]; then + SHARD=--shard="${SHARD}" +else + SHARD=--shard="0" +fi + +if [ -n "${CLUSTER_ID}" ]; then + CLUSTER_ID=--cluster-id="${CLUSTER_ID}" +fi + +echo "STANDALONE: ${STANDALONE}" + +if [ -z "${STANDALONE}" ]; then + + RETRIES=${RETRIES:=20} + + while [ -z "${BOOTSTRAP_ENR}" ] && [ ${RETRIES} -ge 0 ]; do + BOOTSTRAP_ENR=$(wget -qO- http://bootstrap:8645/debug/v1/info --header='Content-Type:application/json' 2> /dev/null | sed 's/.*"enrUri":"\([^"]*\)".*/\1/'); + echo "Bootstrap node not ready, retrying (retries left: ${RETRIES})" + sleep 3 + RETRIES=$(( $RETRIES - 1 )) + done + + if [ -z "${BOOTSTRAP_ENR}" ]; then + echo "Could not get BOOTSTRAP_ENR and none provided. 
Failing" + exit 1 + fi + + echo "Using bootstrap node: ${BOOTSTRAP_ENR}" + +fi + + +exec /usr/bin/wakunode\ + --relay=true\ + --filter=true\ + --lightpush=true\ + --store=false\ + --rest=true\ + --rest-admin=true\ + --rest-private=true\ + --rest-address=0.0.0.0\ + --rest-allow-origin="*"\ + --keep-alive=true\ + --max-connections=300\ + --dns-discovery=true\ + --discv5-discovery=true\ + --discv5-enr-auto-update=True\ + --discv5-bootstrap-node=${BOOTSTRAP_ENR}\ + --log-level=INFO\ + --metrics-server=True\ + --metrics-server-port=8003\ + --metrics-server-address=0.0.0.0\ + --nat=extip:${IP}\ + ${SHARD}\ + ${CLUSTER_ID} diff --git a/third-party/nwaku/apps/liteprotocoltester/run_tester_node.sh b/third-party/nwaku/apps/liteprotocoltester/run_tester_node.sh new file mode 100755 index 0000000..3c2d60e --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/run_tester_node.sh @@ -0,0 +1,161 @@ +#!/bin/sh + +#set -x + +if test -f .env; then + echo "Using .env file" + . $(pwd)/.env +fi + + +echo "I am a lite-protocol-tester node" + +BINARY_PATH=$1 + +if [ ! -x "${BINARY_PATH}" ]; then + echo "Invalid binary path '${BINARY_PATH}'. Failing" + exit 1 +fi + +if [ "${2}" = "--help" ]; then + echo "You might want to check nwaku/apps/liteprotocoltester/README.md" + exec "${BINARY_PATH}" --help + exit 0 +fi + +FUNCTION=$2 +if [ "${FUNCTION}" = "SENDER" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=LEGACY" + SERVICENAME=lightpush-service +fi + +if [ "${FUNCTION}" = "SENDERV3" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=V3" + SERVICENAME=lightpush-service +fi + +if [ "${FUNCTION}" = "RECEIVER" ]; then + FUNCTION=--test-func=RECEIVER + SERVICENAME=filter-service +fi + +SERIVCE_NODE_ADDR=$3 +if [ -z "${SERIVCE_NODE_ADDR}" ]; then + echo "Service node peer_id provided. 
Failing" + exit 1 +fi + +SELECTOR=$4 +if [ -z "${SELECTOR}" ] || [ "${SELECTOR}" = "SERVICE" ]; then + SERVICE_NODE_DIRECT=true +elif [ "${SELECTOR}" = "BOOTSTRAP" ]; then + SERVICE_NODE_DIRECT=false +else + echo "Invalid selector '${SELECTOR}'. Failing" + exit 1 +fi + +DO_DETECT_SERVICENODE=0 + +if [ "${SERIVCE_NODE_ADDR}" = "servicenode" ]; then + DO_DETECT_SERVICENODE=1 + SERIVCE_NODE_ADDR="" + SERVICENAME=servicenode +fi + +if [ "${SERIVCE_NODE_ADDR}" = "waku-sim" ]; then + DO_DETECT_SERVICENODE=1 + SERIVCE_NODE_ADDR="" + MY_EXT_IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/') +else + MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org) +fi + + +if [ $DO_DETECT_SERVICENODE -eq 1 ]; then + RETRIES=${RETRIES:=20} + + while [ -z "${SERIVCE_NODE_ADDR}" ] && [ ${RETRIES} -ge 0 ]; do + SERVICE_DEBUG_INFO=$(wget -qO- http://${SERVICENAME}:8645/debug/v1/info --header='Content-Type:application/json' 2> /dev/null); + echo "SERVICE_DEBUG_INFO: ${SERVICE_DEBUG_INFO}" + + SERIVCE_NODE_ADDR=$(wget -qO- http://${SERVICENAME}:8645/debug/v1/info --header='Content-Type:application/json' 2> /dev/null | sed 's/.*"listenAddresses":\["\([^"]*\)".*/\1/'); + echo "Service node not ready, retrying (retries left: ${RETRIES})" + sleep 3 + RETRIES=$(( $RETRIES - 1 )) + done + +fi + +if [ -z "${SERIVCE_NODE_ADDR}" ]; then + echo "Could not get SERIVCE_NODE_ADDR and none provided. 
Failing" + exit 1 +fi + +if $SERVICE_NODE_DIRECT; then + FULL_NODE=--service-node="${SERIVCE_NODE_ADDR} --fixed-service-peer" +else + FULL_NODE=--bootstrap-node="${SERIVCE_NODE_ADDR}" +fi + +if [ -n "${SHARD}" ]; then + SHARD=--shard="${SHARD}" +else + SHARD=--shard="0" +fi + +if [ -n "${CONTENT_TOPIC}" ]; then + CONTENT_TOPIC=--content-topic="${CONTENT_TOPIC}" +fi + +if [ -n "${CLUSTER_ID}" ]; then + CLUSTER_ID=--cluster-id="${CLUSTER_ID}" +fi + +if [ -n "${START_PUBLISHING_AFTER_SECS}" ]; then + START_PUBLISHING_AFTER_SECS=--start-publishing-after="${START_PUBLISHING_AFTER_SECS}" +fi + +if [ -n "${MIN_MESSAGE_SIZE}" ]; then + MIN_MESSAGE_SIZE=--min-test-msg-size="${MIN_MESSAGE_SIZE}" +fi + +if [ -n "${MAX_MESSAGE_SIZE}" ]; then + MAX_MESSAGE_SIZE=--max-test-msg-size="${MAX_MESSAGE_SIZE}" +fi + + +if [ -n "${NUM_MESSAGES}" ]; then + NUM_MESSAGES=--num-messages="${NUM_MESSAGES}" +fi + +if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then + MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}" +fi + +if [ -n "${LOG_LEVEL}" ]; then + LOG_LEVEL=--log-level=${LOG_LEVEL} +else + LOG_LEVEL=--log-level=INFO +fi + +echo "Running binary: ${BINARY_PATH}" +echo "Tester node: ${FUNCTION}" +echo "Using service node: ${SERIVCE_NODE_ADDR}" +echo "My external IP: ${MY_EXT_IP}" + +exec "${BINARY_PATH}"\ + --nat=extip:${MY_EXT_IP}\ + --test-peers\ + ${LOG_LEVEL}\ + ${FULL_NODE}\ + ${MESSAGE_INTERVAL_MILLIS}\ + ${NUM_MESSAGES}\ + ${SHARD}\ + ${CONTENT_TOPIC}\ + ${CLUSTER_ID}\ + ${FUNCTION}\ + ${START_PUBLISHING_AFTER_SECS}\ + ${MIN_MESSAGE_SIZE}\ + ${MAX_MESSAGE_SIZE} + # --config-file=config.toml\ diff --git a/third-party/nwaku/apps/liteprotocoltester/run_tester_node_at_infra.sh b/third-party/nwaku/apps/liteprotocoltester/run_tester_node_at_infra.sh new file mode 100644 index 0000000..db26eb0 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/run_tester_node_at_infra.sh @@ -0,0 +1,119 @@ +#!/bin/sh + +#set -x +#echo "$@" + +if test -f .env; then + echo "Using 
.env file" + . $(pwd)/.env +fi + + +echo "I am a lite-protocol-tester node" + +BINARY_PATH=$1 + +if [ ! -x "${BINARY_PATH}" ]; then + echo "Invalid binary path '${BINARY_PATH}'. Failing" + exit 1 +fi + +if [ "${2}" = "--help" ]; then + echo "You might want to check nwaku/apps/liteprotocoltester/README.md" + exec "${BINARY_PATH}" --help + exit 0 +fi + +FUNCTION=$2 +if [ "${FUNCTION}" = "SENDER" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=LEGACY" + SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} + NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}" +fi + +if [ "${FUNCTION}" = "SENDERV3" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=V3" + SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} + NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}" +fi + +if [ "${FUNCTION}" = "RECEIVER" ]; then + FUNCTION=--test-func=RECEIVER + SERIVCE_NODE_ADDR=${FILTER_SERVICE_PEER:-${FILTER_BOOTSTRAP:-}} + NODE_ARG=${FILTER_SERVICE_PEER:+--service-node="${FILTER_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${FILTER_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${RECEIVER_METRICS_PORT:-8003}" +fi + +if [ -z "${SERIVCE_NODE_ADDR}" ]; then + echo "Service/Bootsrap node peer_id or enr is not provided. 
Failing" + exit 1 +fi + +MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org) + +if [ -n "${SHARD}" ]; then + SHARD=--shard="${SHARD}" +else + SHARD=--shard="0" +fi + +if [ -n "${CONTENT_TOPIC}" ]; then + CONTENT_TOPIC=--content-topic="${CONTENT_TOPIC}" +fi + +if [ -n "${CLUSTER_ID}" ]; then + CLUSTER_ID=--cluster-id="${CLUSTER_ID}" +fi + +if [ -n "${START_PUBLISHING_AFTER_SECS}" ]; then + START_PUBLISHING_AFTER_SECS=--start-publishing-after="${START_PUBLISHING_AFTER_SECS}" +fi + +if [ -n "${MIN_MESSAGE_SIZE}" ]; then + MIN_MESSAGE_SIZE=--min-test-msg-size="${MIN_MESSAGE_SIZE}" +fi + +if [ -n "${MAX_MESSAGE_SIZE}" ]; then + MAX_MESSAGE_SIZE=--max-test-msg-size="${MAX_MESSAGE_SIZE}" +fi + + +if [ -n "${NUM_MESSAGES}" ]; then + NUM_MESSAGES=--num-messages="${NUM_MESSAGES}" +fi + +if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then + MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}" +fi + +if [ -n "${LOG_LEVEL}" ]; then + LOG_LEVEL=--log-level=${LOG_LEVEL} +else + LOG_LEVEL=--log-level=INFO +fi + +echo "Running binary: ${BINARY_PATH}" +echo "Node function is: ${FUNCTION}" +echo "Using service/bootstrap node as: ${NODE_ARG}" +echo "My external IP: ${MY_EXT_IP}" + +exec "${BINARY_PATH}"\ + --nat=extip:${MY_EXT_IP}\ + --test-peers\ + ${LOG_LEVEL}\ + ${NODE_ARG}\ + ${MESSAGE_INTERVAL_MILLIS}\ + ${NUM_MESSAGES}\ + ${SHARD}\ + ${CONTENT_TOPIC}\ + ${CLUSTER_ID}\ + ${FUNCTION}\ + ${START_PUBLISHING_AFTER_SECS}\ + ${MIN_MESSAGE_SIZE}\ + ${MAX_MESSAGE_SIZE}\ + ${METRICS_PORT} diff --git a/third-party/nwaku/apps/liteprotocoltester/run_tester_node_on_fleet.sh b/third-party/nwaku/apps/liteprotocoltester/run_tester_node_on_fleet.sh new file mode 100644 index 0000000..533f5b1 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/run_tester_node_on_fleet.sh @@ -0,0 +1,118 @@ +#!/bin/sh + +#set -x +#echo "$@" + +if test -f .env; then + echo "Using .env file" + . 
$(pwd)/.env +fi + + +echo "I am a lite-protocol-tester node" + +BINARY_PATH=$1 + +if [ ! -x "${BINARY_PATH}" ]; then + echo "Invalid binary path '${BINARY_PATH}'. Failing" + exit 1 +fi + +if [ "${2}" = "--help" ]; then + echo "You might want to check nwaku/apps/liteprotocoltester/README.md" + exec "${BINARY_PATH}" --help + exit 0 +fi + +FUNCTION=$2 +if [ "${FUNCTION}" = "SENDER" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=LEGACY" + SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} + NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}" +fi + +if [ "${FUNCTION}" = "SENDERV3" ]; then + FUNCTION="--test-func=SENDER --lightpush-version=V3" + SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}} + NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}" +fi + +if [ "${FUNCTION}" = "RECEIVER" ]; then + FUNCTION=--test-func=RECEIVER + SERIVCE_NODE_ADDR=${FILTER_SERVICE_PEER:-${FILTER_BOOTSTRAP:-}} + NODE_ARG=${FILTER_SERVICE_PEER:+--service-node="${FILTER_SERVICE_PEER}"} + NODE_ARG=${NODE_ARG:---bootstrap-node="${FILTER_BOOTSTRAP}"} + METRICS_PORT=--metrics-port="${RECEIVER_METRICS_PORT:-8003}" +fi + +if [ -z "${SERIVCE_NODE_ADDR}" ]; then + echo "Service/Bootsrap node peer_id or enr is not provided. 
Failing" + exit 1 +fi + +MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org) + +if [ -n "${SHARD}" ]; then + SHARD=--shard=${SHARD} +else + SHARD=--shard=0 +fi + +if [ -n "${CONTENT_TOPIC}" ]; then + CONTENT_TOPIC=--content-topic="${CONTENT_TOPIC}" +fi + +if [ -n "${CLUSTER_ID}" ]; then + CLUSTER_ID=--cluster-id="${CLUSTER_ID}" +fi + +if [ -n "${START_PUBLISHING_AFTER}" ]; then + START_PUBLISHING_AFTER=--start-publishing-after="${START_PUBLISHING_AFTER}" +fi + +if [ -n "${MIN_MESSAGE_SIZE}" ]; then + MIN_MESSAGE_SIZE=--min-test-msg-size="${MIN_MESSAGE_SIZE}" +fi + +if [ -n "${MAX_MESSAGE_SIZE}" ]; then + MAX_MESSAGE_SIZE=--max-test-msg-size="${MAX_MESSAGE_SIZE}" +fi + + +if [ -n "${NUM_MESSAGES}" ]; then + NUM_MESSAGES=--num-messages="${NUM_MESSAGES}" +fi + +if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then + MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}" +fi + +if [ -n "${LOG_LEVEL}" ]; then + LOG_LEVEL=--log-level=${LOG_LEVEL} +else + LOG_LEVEL=--log-level=INFO +fi + +echo "Running binary: ${BINARY_PATH}" +echo "Node function is: ${FUNCTION}" +echo "Using service/bootstrap node as: ${NODE_ARG}" +echo "My external IP: ${MY_EXT_IP}" + +exec "${BINARY_PATH}"\ + --nat=extip:${MY_EXT_IP}\ + ${LOG_LEVEL}\ + ${NODE_ARG}\ + ${MESSAGE_INTERVAL_MILLIS}\ + ${NUM_MESSAGES}\ + ${SHARD}\ + ${CONTENT_TOPIC}\ + ${CLUSTER_ID}\ + ${FUNCTION}\ + ${START_PUBLISHING_AFTER}\ + ${MIN_MESSAGE_SIZE}\ + ${MAX_MESSAGE_SIZE}\ + ${METRICS_PORT} diff --git a/third-party/nwaku/apps/liteprotocoltester/service_peer_management.nim b/third-party/nwaku/apps/liteprotocoltester/service_peer_management.nim new file mode 100644 index 0000000..a72daa2 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/service_peer_management.nim @@ -0,0 +1,223 @@ +{.push raises: [].} + +import + std/[options, net, sysrand, random, strformat, strutils, sequtils], + chronicles, + chronos, + metrics, + libbacktrace, + libp2p/crypto/crypto, + confutils, + libp2p/wire + +import 
+ ../wakunode2/cli_args, + waku/[ + common/enr, + waku_node, + node/peer_manager, + waku_lightpush/common, + waku_relay, + waku_filter_v2, + waku_peer_exchange/protocol, + waku_core/multiaddrstr, + waku_core/topics/pubsub_topic, + waku_enr/capabilities, + waku_enr/sharding, + ], + ./tester_config, + ./diagnose_connections, + ./lpt_metrics + +logScope: + topics = "service peer mgmt" + +randomize() + +proc translateToRemotePeerInfo*(peerAddress: string): Result[RemotePeerInfo, void] = + var peerInfo: RemotePeerInfo + var enrRec: enr.Record + if enrRec.fromURI(peerAddress): + trace "Parsed ENR", enrRec = $enrRec + peerInfo = enrRec.toRemotePeerInfo().valueOr: + error "failed to convert ENR to RemotePeerInfo", error = error + return err() + else: + peerInfo = parsePeerInfo(peerAddress).valueOr: + error "failed to parse node waku peer-exchange peerId", error = error + return err() + + return ok(peerInfo) + +## To retrieve peers from PeerExchange partner and return one randomly selected one +## among the ones successfully dialed +## Note: This is kept for future use. +proc selectRandomCapablePeer*( + pm: PeerManager, codec: string, pubsubTopic: PubsubTopic +): Future[Option[RemotePeerInfo]] {.async.} = + var cap = Capabilities.Filter + if codec.contains("lightpush"): + cap = Capabilities.Lightpush + elif codec.contains("filter"): + cap = Capabilities.Filter + + var supportivePeers = pm.switch.peerStore.getPeersByCapability(cap) + + trace "Found supportive peers count", count = supportivePeers.len() + trace "Found supportive peers", supportivePeers = $supportivePeers + if supportivePeers.len == 0: + return none(RemotePeerInfo) + + var found = none(RemotePeerInfo) + while found.isNone() and supportivePeers.len > 0: + let rndPeerIndex = rand(0 .. supportivePeers.len - 1) + let randomPeer = supportivePeers[rndPeerIndex] + + debug "Dialing random peer", + idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer) + + supportivePeers.delete(rndPeerIndex .. 
rndPeerIndex) + + let connOpt = pm.dialPeer(randomPeer, codec) + if (await connOpt.withTimeout(10.seconds)): + if connOpt.value().isSome(): + found = some(randomPeer) + debug "Dialing successful", + peer = constructMultiaddrStr(randomPeer), codec = codec + else: + debug "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec + else: + debug "Timeout dialing service peer", + peer = constructMultiaddrStr(randomPeer), codec = codec + + return found + +# Debugging PX gathered peers connectivity +proc tryCallAllPxPeers*( + pm: PeerManager, codec: string, pubsubTopic: PubsubTopic +): Future[Option[seq[RemotePeerInfo]]] {.async.} = + var capability = Capabilities.Filter + if codec.contains("lightpush"): + capability = Capabilities.Lightpush + elif codec.contains("filter"): + capability = Capabilities.Filter + + var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability) + + lpt_px_peers.set(supportivePeers.len) + debug "Found supportive peers count", count = supportivePeers.len() + debug "Found supportive peers", supportivePeers = $supportivePeers + if supportivePeers.len == 0: + return none(seq[RemotePeerInfo]) + + var okPeers: seq[RemotePeerInfo] = @[] + + while supportivePeers.len > 0: + let rndPeerIndex = rand(0 .. 
supportivePeers.len - 1) + let randomPeer = supportivePeers[rndPeerIndex] + + debug "Dialing random peer", + idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer) + + supportivePeers.delete(rndPeerIndex, rndPeerIndex) + + let connOpt = pm.dialPeer(randomPeer, codec) + if (await connOpt.withTimeout(10.seconds)): + if connOpt.value().isSome(): + okPeers.add(randomPeer) + info "Dialing successful", + peer = constructMultiaddrStr(randomPeer), + agent = randomPeer.getAgent(), + codec = codec + lpt_dialed_peers.inc(labelValues = [randomPeer.getAgent()]) + else: + lpt_dial_failures.inc(labelValues = [randomPeer.getAgent()]) + error "Dialing failed", + peer = constructMultiaddrStr(randomPeer), + agent = randomPeer.getAgent(), + codec = codec + else: + lpt_dial_failures.inc(labelValues = [randomPeer.getAgent()]) + error "Timeout dialing service peer", + peer = constructMultiaddrStr(randomPeer), + agent = randomPeer.getAgent(), + codec = codec + + var okPeersStr: string = "" + for idx, peer in okPeers: + okPeersStr.add( + " " & $idx & ". 
| " & constructMultiaddrStr(peer) & " | agent: " & + peer.getAgent() & " | protos: " & $peer.protocols & " | caps: " & + $peer.enr.map(getCapabilities) & "\n" + ) + echo "PX returned peers found callable for " & codec & " / " & $capability & ":\n" + echo okPeersStr + + return some(okPeers) + +proc pxLookupServiceNode*( + node: WakuNode, conf: LiteProtocolTesterConf +): Future[Result[bool, void]] {.async.} = + let codec: string = conf.getCodec() + + if node.wakuPeerExchange.isNil(): + let peerExchangeNode = translateToRemotePeerInfo(conf.bootstrapNode).valueOr: + error "Failed to parse bootstrap node - cannot use PeerExchange.", + node = conf.bootstrapNode + return err() + info "PeerExchange node", peer = constructMultiaddrStr(peerExchangeNode) + node.peerManager.addServicePeer(peerExchangeNode, WakuPeerExchangeCodec) + + try: + await node.mountPeerExchange(some(conf.clusterId)) + except CatchableError: + error "failed to mount waku peer-exchange protocol", + error = getCurrentExceptionMsg() + return err() + + var trialCount = 5 + while trialCount > 0: + let futPeers = node.fetchPeerExchangePeers(conf.reqPxPeers) + if not await futPeers.withTimeout(30.seconds): + notice "Cannot get peers from PX", round = 5 - trialCount + else: + if futPeers.value().isErr(): + info "PeerExchange reported error", error = futPeers.read().error + return err() + + if conf.testPeers: + let peersOpt = + await tryCallAllPxPeers(node.peerManager, codec, conf.getPubsubTopic()) + if peersOpt.isSome(): + info "Found service peers for codec", + codec = codec, peer_count = peersOpt.get().len() + return ok(peersOpt.get().len > 0) + else: + let peerOpt = + await selectRandomCapablePeer(node.peerManager, codec, conf.getPubsubTopic()) + if peerOpt.isSome(): + info "Found service peer for codec", codec = codec, peer = peerOpt.get() + return ok(true) + + await sleepAsync(5.seconds) + trialCount -= 1 + + return err() + +var alreadyUsedServicePeers {.threadvar.}: seq[RemotePeerInfo] + +## Select service 
peers by codec from peer store randomly. +proc selectRandomServicePeer*( + pm: PeerManager, actualPeer: Option[RemotePeerInfo], codec: string +): Result[RemotePeerInfo, void] = + if actualPeer.isSome(): + alreadyUsedServicePeers.add(actualPeer.get()) + + let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt( + it notin alreadyUsedServicePeers + ) + if supportivePeers.len == 0: + return err() + + let rndPeerIndex = rand(0 .. supportivePeers.len - 1) + return ok(supportivePeers[rndPeerIndex]) diff --git a/third-party/nwaku/apps/liteprotocoltester/statistics.nim b/third-party/nwaku/apps/liteprotocoltester/statistics.nim new file mode 100644 index 0000000..8322edd --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/statistics.nim @@ -0,0 +1,336 @@ +{.push raises: [].} + +import + std/[sets, tables, sequtils, options, strformat], + chronos/timer as chtimer, + chronicles, + chronos, + results, + libp2p/peerid + +import ./tester_message, ./lpt_metrics + +type + ArrivalInfo = object + arrivedAt: Moment + prevArrivedAt: Moment + prevIndex: uint32 + + MessageInfo = tuple[msg: ProtocolTesterMessage, info: ArrivalInfo] + DupStat = tuple[hash: string, dupCount: int, size: uint64] + + StatHelper = object + prevIndex: uint32 + prevArrivedAt: Moment + lostIndices: HashSet[uint32] + seenIndices: HashSet[uint32] + maxIndex: uint32 + duplicates: OrderedTable[uint32, DupStat] + + Statistics* = object + received: Table[uint32, MessageInfo] + firstReceivedIdx*: uint32 + allMessageCount*: uint32 + receivedMessages*: uint32 + misorderCount*: uint32 + lateCount*: uint32 + duplicateCount*: uint32 + helper: StatHelper + + PerPeerStatistics* = Table[string, Statistics] + +func `$`*(a: Duration): string {.inline.} = + ## Original stringify implementation from chronos/timer.nim is not capable of printing 0ns + ## Returns string representation of Duration ``a`` as nanoseconds value. 
+ + if a.isZero: + return "0ns" + + return chtimer.`$`(a) + +proc init*(T: type Statistics, expectedMessageCount: int = 1000): T = + result.helper.prevIndex = 0 + result.helper.maxIndex = 0 + result.helper.seenIndices.init(expectedMessageCount) + result.received = initTable[uint32, MessageInfo](expectedMessageCount) + return result + +proc addMessage*( + self: var Statistics, sender: string, msg: ProtocolTesterMessage, msgHash: string +) = + if self.allMessageCount == 0: + self.allMessageCount = msg.count + self.firstReceivedIdx = msg.index + elif self.allMessageCount != msg.count: + error "Message count mismatch at message", + index = msg.index, expected = self.allMessageCount, got = msg.count + + let currentArrived: MessageInfo = ( + msg: msg, + info: ArrivalInfo( + arrivedAt: Moment.now(), + prevArrivedAt: self.helper.prevArrivedAt, + prevIndex: self.helper.prevIndex, + ), + ) + lpt_receiver_received_bytes.inc(labelValues = [sender], amount = msg.size.int64) + if self.received.hasKeyOrPut(msg.index, currentArrived): + inc(self.duplicateCount) + self.helper.duplicates.mgetOrPut(msg.index, (msgHash, 0, msg.size)).dupCount.inc() + warn "Duplicate message", + index = msg.index, + hash = msgHash, + times_duplicated = self.helper.duplicates[msg.index].dupCount + lpt_receiver_duplicate_messages_count.inc(labelValues = [sender]) + lpt_receiver_distinct_duplicate_messages_count.set( + labelValues = [sender], value = self.helper.duplicates.len() + ) + return + + ## detect misorder arrival and possible lost messages + if self.helper.prevIndex + 1 < msg.index: + inc(self.misorderCount) + warn "Misordered message arrival", + index = msg.index, expected = self.helper.prevIndex + 1 + elif self.helper.prevIndex > msg.index: + inc(self.lateCount) + warn "Late message arrival", index = msg.index, expected = self.helper.prevIndex + 1 + + self.helper.maxIndex = max(self.helper.maxIndex, msg.index) + self.helper.prevIndex = msg.index + self.helper.prevArrivedAt = 
currentArrived.info.arrivedAt + inc(self.receivedMessages) + lpt_receiver_received_messages_count.inc(labelValues = [sender]) + lpt_receiver_missing_messages_count.set( + labelValues = [sender], value = (self.helper.maxIndex - self.receivedMessages).int64 + ) + +proc addMessage*( + self: var PerPeerStatistics, + peerId: string, + msg: ProtocolTesterMessage, + msgHash: string, +) = + if not self.contains(peerId): + self[peerId] = Statistics.init() + + let shortSenderId = block: + let senderPeer = PeerId.init(msg.sender) + if senderPeer.isErr(): + msg.sender + else: + senderPeer.get().shortLog() + + discard catch: + self[peerId].addMessage(shortSenderId, msg, msgHash) + + lpt_receiver_sender_peer_count.set(value = self.len) + +proc lastMessageArrivedAt*(self: Statistics): Option[Moment] = + if self.receivedMessages > 0: + return some(self.helper.prevArrivedAt) + return none(Moment) + +proc lossCount*(self: Statistics): uint32 = + self.helper.maxIndex - self.receivedMessages + +proc calcLatency*(self: Statistics): tuple[min, max, avg: Duration] = + var + minLatency = nanos(0) + maxLatency = nanos(0) + avgLatency = nanos(0) + + if self.receivedMessages > 2: + try: + var prevArrivedAt = self.received[self.firstReceivedIdx].info.arrivedAt + + for idx, (msg, arrival) in self.received.pairs: + if idx <= 1: + continue + let expectedDelay = nanos(msg.sincePrev) + + ## latency will be 0 if arrived in shorter time than expected + var latency = arrival.arrivedAt - arrival.prevArrivedAt - expectedDelay + + ## will not measure zero latency, it is unlikely to happen but in case happens could + ## ditort the min latency calulculation as we want to calculate the feasible minimum. 
+ if latency > nanos(0): + if minLatency == nanos(0): + minLatency = latency + else: + minLatency = min(minLatency, latency) + + maxLatency = max(maxLatency, latency) + avgLatency += latency + + avgLatency = avgLatency div (self.receivedMessages - 1) + except KeyError: + error "Error while calculating latency: " & getCurrentExceptionMsg() + + return (minLatency, maxLatency, avgLatency) + +proc missingIndices*(self: Statistics): seq[uint32] = + var missing: seq[uint32] = @[] + for idx in 1 .. self.helper.maxIndex: + if not self.received.hasKey(idx): + missing.add(idx) + return missing + +proc distinctDupCount(self: Statistics): int {.inline.} = + return self.helper.duplicates.len() + +proc allDuplicates(self: Statistics): int {.inline.} = + var total = 0 + for _, (_, dupCount, _) in self.helper.duplicates.pairs: + total += dupCount + return total + +proc dupMsgs(self: Statistics): string = + var dupMsgs: string = "" + for idx, (hash, dupCount, size) in self.helper.duplicates.pairs: + dupMsgs.add( + " index: " & $idx & " | hash: " & hash & " | count: " & $dupCount & " | size: " & + $size & "\n" + ) + return dupMsgs + +proc echoStat*(self: Statistics, peerId: string) = + let (minL, maxL, avgL) = self.calcLatency() + lpt_receiver_latencies.set(labelValues = [peerId, "min"], value = minL.nanos()) + lpt_receiver_latencies.set(labelValues = [peerId, "avg"], value = avgL.nanos()) + lpt_receiver_latencies.set(labelValues = [peerId, "max"], value = maxL.nanos()) + + let printable = catch: + """*------------------------------------------------------------------------------------------* +| Expected | Received | Target | Loss | Misorder | Late | | +|{self.helper.maxIndex:>11} |{self.receivedMessages:>11} |{self.allMessageCount:>11} |{self.lossCount():>11} |{self.misorderCount:>11} |{self.lateCount:>11} | | +*------------------------------------------------------------------------------------------* +| Latency stat: | +| min latency: {$minL:<73}| +| avg latency: {$avgL:<73}| +| 
max latency: {$maxL:<73}| +*------------------------------------------------------------------------------------------* +| Duplicate stat: | +| distinct duplicate messages: {$self.distinctDupCount():<57}| +| sum duplicates : {$self.allDuplicates():<57}| + Duplicated messages: + {self.dupMsgs()} +*------------------------------------------------------------------------------------------* +| Lost indices: | +| {self.missingIndices()} | +*------------------------------------------------------------------------------------------*""".fmt() + + if printable.isErr(): + echo "Error while printing statistics: " & printable.error().msg + else: + echo printable.get() + +proc jsonStat*(self: Statistics): string = + let minL, maxL, avgL = self.calcLatency() + + let json = catch: + """{{"expected":{self.helper.maxIndex}, + "received": {self.receivedMessages}, + "target": {self.allMessageCount}, + "loss": {self.lossCount()}, + "misorder": {self.misorderCount}, + "late": {self.lateCount}, + "duplicate": {self.duplicateCount}, + "latency": + {{"avg": "{avgL}", + "min": "{minL}", + "max": "{maxL}" + }}, + "lostIndices": {self.missingIndices()} + }}""".fmt() + if json.isErr: + return "{\"result:\": \"" & json.error.msg & "\"}" + + return json.get() + +proc echoStats*(self: var PerPeerStatistics) = + for peerId, stats in self.pairs: + let peerLine = catch: + "Receiver statistics from peer {peerId}".fmt() + if peerLine.isErr: + echo "Error while printing statistics" + else: + echo peerLine.get() + stats.echoStat(peerId) + +proc jsonStats*(self: PerPeerStatistics): string = + try: + #!fmt: off + var json = "{\"statistics\": [" + var first = true + for peerId, stats in self.pairs: + if first: + first = false + else: + json.add(", ") + json.add("{{\"sender\": \"{peerId}\", \"stat\":".fmt()) + json.add(stats.jsonStat()) + json.add("}") + json.add("]}") + return json + #!fmt: on + except CatchableError: + return + "{\"result:\": \"Error while generating json stats: " & 
getCurrentExceptionMsg() & + "\"}" + +proc lastMessageArrivedAt*(self: PerPeerStatistics): Option[Moment] = + var lastArrivedAt = Moment.init(0, Millisecond) + for stat in self.values: + let lastMsgFromPeerAt = stat.lastMessageArrivedAt().valueOr: + continue + + if lastMsgFromPeerAt > lastArrivedAt: + lastArrivedAt = lastMsgFromPeerAt + + if lastArrivedAt == Moment.init(0, Millisecond): + return none(Moment) + + return some(lastArrivedAt) + +proc checkIfAllMessagesReceived*( + self: PerPeerStatistics, maxWaitForLastMessage: Duration +): Future[bool] {.async.} = + # if there are no peers have sent messages, assume we just have started. + if self.len == 0: + return false + + # check if numerically all messages are received. + # this suggest we received at least one message already from one peer + var isAlllMessageReceived = true + for stat in self.values: + if (stat.allMessageCount == 0 and stat.receivedMessages == 0) or + stat.helper.maxIndex < stat.allMessageCount: + isAlllMessageReceived = false + break + + if not isAlllMessageReceived: + # if not all message received we still need to check if last message arrived within a time frame + # to avoid endless waiting while publishers are already quit. + let lastMessageAt = self.lastMessageArrivedAt() + if lastMessageAt.isNone(): + return false + + # last message shall arrived within time limit + if Moment.now() - lastMessageAt.get() < maxWaitForLastMessage: + return false + else: + info "No message since max wait time", maxWait = $maxWaitForLastMessage + + ## Ok, we see last message arrived from all peers, + ## lets check if all messages are received + ## and if not let's wait another 20 secs to give chance the system will send them. 
+ var shallWait = false + for stat in self.values: + if stat.receivedMessages < stat.allMessageCount: + shallWait = true + + if shallWait: + await sleepAsync(20.seconds) + + return true diff --git a/third-party/nwaku/apps/liteprotocoltester/tester_config.nim b/third-party/nwaku/apps/liteprotocoltester/tester_config.nim new file mode 100644 index 0000000..dee918b --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/tester_config.nim @@ -0,0 +1,208 @@ +import + results, + chronos, + confutils, + confutils/defs, + confutils/std/net, + confutils/toml/defs as confTomlDefs, + confutils/toml/std/net as confTomlNet, + libp2p/crypto/crypto, + libp2p/crypto/secp, + libp2p/multiaddress, + secp256k1 + +import + ../../tools/confutils/ + [cli_args, envvar as confEnvvarDefs, envvar_net as confEnvvarNet], + waku/[common/logging, waku_core, waku_core/topics/pubsub_topic] + +export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet + +const + LitePubsubTopic* = PubsubTopic("/waku/2/rs/66/0") + LiteContentTopic* = ContentTopic("/tester/1/light-pubsub-example/proto") + DefaultMinTestMessageSizeStr* = "1KiB" + DefaultMaxTestMessageSizeStr* = "150KiB" + +type TesterFunctionality* = enum + SENDER # pumps messages to the network + RECEIVER # gather and analyze messages from the network + +type LightpushVersion* = enum + LEGACY # legacy lightpush protocol + V3 # lightpush v3 protocol + +type LiteProtocolTesterConf* = object + configFile* {. + desc: + "Loads configuration from a TOML file (cmd-line parameters take precedence) for the light waku node", + name: "config-file" + .}: Option[InputFile] + + ## Log configuration + logLevel* {. + desc: + "Sets the log level for process. Supported levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL", + defaultValue: logging.LogLevel.DEBUG, + name: "log-level" + .}: logging.LogLevel + + logFormat* {. + desc: + "Specifies what kind of logs should be written to stdout. 
Supported formats: TEXT, JSON", + defaultValue: logging.LogFormat.TEXT, + name: "log-format" + .}: logging.LogFormat + + ## Test configuration + serviceNode* {. + desc: "Peer multiaddr of the service node.", defaultValue: "", name: "service-node" + .}: string + + bootstrapNode* {. + desc: + "Peer multiaddr of the bootstrap node. If `service-node` not set, it is used to retrieve potential service nodes of the network.", + defaultValue: "", + name: "bootstrap-node" + .}: string + + nat* {. + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:.", + defaultValue: "any" + .}: string + + testFunc* {. + desc: "Specifies the lite protocol tester side. Supported values: sender, receiver.", + defaultValue: TesterFunctionality.RECEIVER, + name: "test-func" + .}: TesterFunctionality + + lightpushVersion* {. + desc: "Version of the sender to use. Supported values: legacy, v3.", + defaultValue: LightpushVersion.LEGACY, + name: "lightpush-version" + .}: LightpushVersion + + numMessages* {. + desc: "Number of messages to send.", defaultValue: 120, name: "num-messages" + .}: uint32 + + startPublishingAfter* {. + desc: "Wait number of seconds before start publishing messages.", + defaultValue: 5, + name: "start-publishing-after" + .}: uint32 + + messageInterval* {. + desc: "Delay between messages in milliseconds.", + defaultValue: 1000, + name: "message-interval" + .}: uint32 + + shard* {.desc: "Shards index to subscribe to. ", defaultValue: 0, name: "shard".}: + uint16 + + contentTopics* {. + desc: "Default content topic to subscribe to. Argument may be repeated.", + defaultValue: @[LiteContentTopic], + name: "content-topic" + .}: seq[ContentTopic] + + clusterId* {. + desc: + "Cluster id that the node is running in. Node in a different cluster id is disconnected.", + defaultValue: 0, + name: "cluster-id" + .}: uint16 + + minTestMessageSize* {. + desc: + "Minimum message size. Accepted units: KiB, KB, and B. e.g. 
1024KiB; 1500 B; etc.", + defaultValue: DefaultMinTestMessageSizeStr, + name: "min-test-msg-size" + .}: string + + maxTestMessageSize* {. + desc: + "Maximum message size. Accepted units: KiB, KB, and B. e.g. 1024KiB; 1500 B; etc.", + defaultValue: DefaultMaxTestMessageSizeStr, + name: "max-test-msg-size" + .}: string + ## Tester REST service configuration + restAddress* {. + desc: "Listening address of the REST HTTP server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "rest-address" + .}: IpAddress + + testPeers* {. + desc: "Run dial test on gathered PeerExchange peers.", + defaultValue: false, + name: "test-peers" + .}: bool + + reqPxPeers* {. + desc: "Number of peers to request on PeerExchange.", + defaultValue: 100, + name: "req-px-peers" + .}: uint16 + + restPort* {. + desc: "Listening port of the REST HTTP server.", + defaultValue: 8654, + name: "rest-port" + .}: uint16 + + fixedServicePeer* {. + desc: + "Prevent changing the service peer in case of failures, the full test will stict to the first service peer in use.", + defaultValue: false, + name: "fixed-service-peer" + .}: bool + + restAllowOrigin* {. + desc: + "Allow cross-origin requests from the specified origin." & + "Argument may be repeated." & "Wildcards: * or ? allowed." & + "Ex.: \"localhost:*\" or \"127.0.0.1:8080\"", + defaultValue: @["*"], + name: "rest-allow-origin" + .}: seq[string] + + metricsPort* {. 
+ desc: "Listening port of the Metrics HTTP server.", + defaultValue: 8003, + name: "metrics-port" + .}: uint16 + +{.push warning[ProveInit]: off.} + +proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] = + try: + let conf = LiteProtocolTesterConf.load( + version = version, + secondarySources = proc( + conf: LiteProtocolTesterConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = + sources.addConfigFile(Envvar, InputFile("liteprotocoltester")), + ) + ok(conf) + except CatchableError: + err(getCurrentExceptionMsg()) + +proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic = + return $RelayShard(clusterId: conf.clusterId, shardId: conf.shard) + +proc getCodec*(conf: LiteProtocolTesterConf): string = + return + if conf.testFunc == TesterFunctionality.RECEIVER: + WakuFilterSubscribeCodec + else: + if conf.lightpushVersion == LightpushVersion.LEGACY: + WakuLegacyLightPushCodec + else: + WakuLightPushCodec + +{.pop.} diff --git a/third-party/nwaku/apps/liteprotocoltester/tester_message.nim b/third-party/nwaku/apps/liteprotocoltester/tester_message.nim new file mode 100644 index 0000000..eeff7b5 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/tester_message.nim @@ -0,0 +1,121 @@ +{.push raises: [].} + +import + chronicles, + json_serialization, + json_serialization/std/options, + json_serialization/lexer + +import ../../waku/waku_api/rest/serdes + +type ProtocolTesterMessage* = object + sender*: string + index*: uint32 + count*: uint32 + startedAt*: int64 + sinceStart*: int64 + sincePrev*: int64 + size*: uint64 + +proc writeValue*( + writer: var JsonWriter[RestJson], value: ProtocolTesterMessage +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("sender", value.sender) + writer.writeField("index", value.index) + writer.writeField("count", value.count) + writer.writeField("startedAt", value.startedAt) + writer.writeField("sinceStart", value.sinceStart) + writer.writeField("sincePrev", 
value.sincePrev) + writer.writeField("size", value.size) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var ProtocolTesterMessage +) {.gcsafe, raises: [SerializationError, IOError].} = + var + sender: Option[string] + index: Option[uint32] + count: Option[uint32] + startedAt: Option[int64] + sinceStart: Option[int64] + sincePrev: Option[int64] + size: Option[uint64] + + for fieldName in readObjectFields(reader): + case fieldName + of "sender": + if sender.isSome(): + reader.raiseUnexpectedField( + "Multiple `sender` fields found", "ProtocolTesterMessage" + ) + sender = some(reader.readValue(string)) + of "index": + if index.isSome(): + reader.raiseUnexpectedField( + "Multiple `index` fields found", "ProtocolTesterMessage" + ) + index = some(reader.readValue(uint32)) + of "count": + if count.isSome(): + reader.raiseUnexpectedField( + "Multiple `count` fields found", "ProtocolTesterMessage" + ) + count = some(reader.readValue(uint32)) + of "startedAt": + if startedAt.isSome(): + reader.raiseUnexpectedField( + "Multiple `startedAt` fields found", "ProtocolTesterMessage" + ) + startedAt = some(reader.readValue(int64)) + of "sinceStart": + if sinceStart.isSome(): + reader.raiseUnexpectedField( + "Multiple `sinceStart` fields found", "ProtocolTesterMessage" + ) + sinceStart = some(reader.readValue(int64)) + of "sincePrev": + if sincePrev.isSome(): + reader.raiseUnexpectedField( + "Multiple `sincePrev` fields found", "ProtocolTesterMessage" + ) + sincePrev = some(reader.readValue(int64)) + of "size": + if size.isSome(): + reader.raiseUnexpectedField( + "Multiple `size` fields found", "ProtocolTesterMessage" + ) + size = some(reader.readValue(uint64)) + else: + unrecognizedFieldWarning(value) + + if sender.isNone(): + reader.raiseUnexpectedValue("Field `sender` is missing") + + if index.isNone(): + reader.raiseUnexpectedValue("Field `index` is missing") + + if count.isNone(): + reader.raiseUnexpectedValue("Field `count` is missing") + + 
if startedAt.isNone(): + reader.raiseUnexpectedValue("Field `startedAt` is missing") + + if sinceStart.isNone(): + reader.raiseUnexpectedValue("Field `sinceStart` is missing") + + if sincePrev.isNone(): + reader.raiseUnexpectedValue("Field `sincePrev` is missing") + + if size.isNone(): + reader.raiseUnexpectedValue("Field `size` is missing") + + value = ProtocolTesterMessage( + sender: sender.get(), + index: index.get(), + count: count.get(), + startedAt: startedAt.get(), + sinceStart: sinceStart.get(), + sincePrev: sincePrev.get(), + size: size.get(), + ) diff --git a/third-party/nwaku/apps/liteprotocoltester/v3_publisher.nim b/third-party/nwaku/apps/liteprotocoltester/v3_publisher.nim new file mode 100644 index 0000000..c8353b5 --- /dev/null +++ b/third-party/nwaku/apps/liteprotocoltester/v3_publisher.nim @@ -0,0 +1,29 @@ +import results, options, chronos +import waku/[waku_node, waku_core, waku_lightpush, waku_lightpush/common] +import publisher_base + +type V3Publisher* = ref object of PublisherBase + +proc new*(T: type V3Publisher, wakuNode: WakuNode): T = + if isNil(wakuNode.wakuLightpushClient): + wakuNode.mountLightPushClient() + + return V3Publisher(wakuNode: wakuNode) + +method send*( + self: V3Publisher, + topic: PubsubTopic, + message: WakuMessage, + servicePeer: RemotePeerInfo, +): Future[Result[void, string]] {.async.} = + # when error it must return original error desc due the text is used for distinction between error types in metrics. 
+ discard ( + await self.wakuNode.lightpushPublish(some(topic), message, some(servicePeer)) + ).valueOr: + if error.code == LightPushErrorCode.NO_PEERS_TO_RELAY and + error.desc != some("No peers for topic, skipping publish"): + # TODO: We need better separation of errors happening on the client side or the server side.- + return err("dial_failure") + else: + return err($error.code) + return ok() diff --git a/third-party/nwaku/apps/networkmonitor/README.md b/third-party/nwaku/apps/networkmonitor/README.md new file mode 100644 index 0000000..3e10367 --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/README.md @@ -0,0 +1,84 @@ +# networkmonitor + +Monitoring tool to run in an existing `waku` network with the following features: + +* Keeps discovering new peers using `discv5` +* Tracks advertised capabilities of each node as per stored in the ENR `waku` field +* Attempts to connect to all nodes, tracking which protocols each node supports +* Presents grafana-ready metrics showing the state of the network in terms of locations, ips, number discovered peers, number of peers we could connect to, user-agent that each peer contains, content topics and the amount of rx messages in each one. +* Metrics are exposed through prometheus metrics but also with a custom rest api, presenting detailed information about each peer. These metrics are exposed via a rest api. + +## Usage + +```console +./build/networkmonitor --help +Usage: + +networkmonitor [OPTIONS]... + +The following options are available: + + -l, --log-level Sets the log level [=LogLevel.INFO]. + -t, --timeout Timeout to consider that the connection failed [=chronos.seconds(10)]. + -b, --bootstrap-node Bootstrap ENR node. Argument may be repeated. [=@[""]]. + --dns-discovery-url URL for DNS node list in format 'enrtree://@'. + --pubsub-topic Default pubsub topic to subscribe to. Argument may be repeated.. + -r, --refresh-interval How often new peers are discovered and connected to (in seconds) [=5]. 
+ --cluster-id Cluster id that the node is running in. Node in a different cluster id is + disconnected. [=1]. + --rln-relay Enable spam protection through rln-relay: true|false [=true]. + --rln-relay-dynamic Enable waku-rln-relay with on-chain dynamic group management: true|false + [=true]. + --rln-relay-eth-client-address HTTP address of an Ethereum testnet client e.g., http://localhost:8540/ + [=http://localhost:8540/]. + --rln-relay-eth-contract-address Address of membership contract on an Ethereum testnet. + --rln-relay-epoch-sec Epoch size in seconds used to rate limit RLN memberships. Default is 1 second. + [=1]. + --rln-relay-user-message-limit Set a user message limit for the rln membership registration. Must be a positive + integer. Default is 1. [=1]. + --metrics-server Enable the metrics server: true|false [=true]. + --metrics-server-address Listening address of the metrics server. [=parseIpAddress("127.0.0.1")]. + --metrics-server-port Listening HTTP port of the metrics server. [=8008]. + --metrics-rest-address Listening address of the metrics rest server. [=127.0.0.1]. + --metrics-rest-port Listening HTTP port of the metrics rest server. [=8009]. +``` + +## Example + +Connect to the network through a given bootstrap node, with default parameters. See metrics section for the data that it exposes. 
+ +```console +./build/networkmonitor --log-level=INFO --b="enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw" +``` + +```console +./build/networkmonitor --log-level=INFO --dns-discovery-url=enrtree://AL65EKLJAUXKKPG43HVTML5EFFWEZ7L4LOKTLZCLJASG4DSESQZEC@prod.status.nodes.status.im +``` + +## Metrics + +Metrics are divided into two categories: + +* Prometheus metrics, exposed as i.e. gauges. +* Custom metrics, used for unconstrained labels such as peer information or content topics. + - These metrics are not exposed through prometheus because since they are unconstrained, they can end up breaking the backend, as a new datapoint is generated for each one and it can reach up a point where is too much to handle. + +### Prometheus Metrics + +The following metrics are available. 
See `http://localhost:8008/metrics` + +* `peer_type_as_per_enr`: Number of peers supporting each capability according to the ENR (Relay, Store, Lightpush, Filter) +* `peer_type_as_per_protocol`: Number of peers supporting each protocol, after a successful connection) +* `peer_user_agents`: List of useragents found in the network and their count + +Other relevant metrics reused from `nim-eth`: + +* `routing_table_nodes`: Inherited from nim-eth, number of nodes in the routing table +* `discovery_message_requests_outgoing_total`: Inherited from nim-eth, number of outgoing discovery requests, useful to know if the node is actively looking for new peers + +### Custom Metrics + +The following endpoints are available: + +* `http://localhost:8009/allpeersinfo`: json list of all peers with extra information such as ip, location, supported protocols and last connection time. +* `http://localhost:8009/contenttopics`: content topic messages and its message count. diff --git a/third-party/nwaku/apps/networkmonitor/docker-compose.yml b/third-party/nwaku/apps/networkmonitor/docker-compose.yml new file mode 100644 index 0000000..d7bf661 --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/docker-compose.yml @@ -0,0 +1,34 @@ +version: '3.8' +networks: + monitoring: + driver: bridge + +volumes: + prometheus-data: + driver: local + grafana-data: + driver: local + +# Services definitions +services: + + prometheus: + image: docker.io/prom/prometheus:latest + container_name: prometheus + ports: + - 9090:9090 + command: + - '--config.file=/etc/prometheus/prometheus.yaml' + volumes: + - ./prometheus.yaml:/etc/prometheus/prometheus.yaml:ro + - ./data:/prometheus + restart: unless-stopped + + grafana: + image: grafana/grafana-oss:latest + container_name: grafana + ports: + - '3000:3000' + volumes: + - grafana-data:/var/lib/grafana + restart: unless-stopped diff --git a/third-party/nwaku/apps/networkmonitor/networkmonitor.nim b/third-party/nwaku/apps/networkmonitor/networkmonitor.nim 
new file mode 100644 index 0000000..a9144ae --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/networkmonitor.nim @@ -0,0 +1,668 @@ +{.push raises: [].} + +import + std/[net, tables, strutils, times, sequtils, random], + results, + chronicles, + chronicles/topics_registry, + chronos, + chronos/timer as ctime, + confutils, + eth/keys, + eth/p2p/discoveryv5/enr, + libp2p/crypto/crypto, + libp2p/nameresolving/dnsresolver, + libp2p/protocols/ping, + metrics, + metrics/chronos_httpserver, + presto/[route, server, client] +import + waku/[ + waku_core, + node/peer_manager, + waku_node, + waku_enr, + discovery/waku_discv5, + discovery/waku_dnsdisc, + waku_relay, + waku_rln_relay, + factory/builder, + factory/networks_config, + ], + ./networkmonitor_metrics, + ./networkmonitor_config, + ./networkmonitor_utils + +logScope: + topics = "networkmonitor" + +const ReconnectTime = 60 +const MaxConnectionRetries = 5 +const ResetRetriesAfter = 1200 +const PingSmoothing = 0.3 +const MaxConnectedPeers = 150 + +const git_version* {.strdefine.} = "n/a" + +proc setDiscoveredPeersCapabilities(routingTableNodes: seq[waku_enr.Record]) = + for capability in @[Relay, Store, Filter, Lightpush]: + let nOfNodesWithCapability = + routingTableNodes.countIt(it.supportsCapability(capability)) + info "capabilities as per ENR waku flag", + capability = capability, amount = nOfNodesWithCapability + networkmonitor_peer_type_as_per_enr.set( + int64(nOfNodesWithCapability), labelValues = [$capability] + ) + +proc setDiscoveredPeersCluster(routingTableNodes: seq[Node]) = + var clusters: CountTable[uint16] + + for node in routingTableNodes: + let typedRec = node.record.toTyped().valueOr: + clusters.inc(0) + continue + + let relayShard = typedRec.relaySharding().valueOr: + clusters.inc(0) + continue + + clusters.inc(relayShard.clusterId) + + for (key, value) in clusters.pairs: + networkmonitor_peer_cluster_as_per_enr.set(int64(value), labelValues = [$key]) + +proc analyzePeer( + customPeerInfo: 
CustomPeerInfoRef, + peerInfo: RemotePeerInfo, + node: WakuNode, + timeout: chronos.Duration, +): Future[Result[string, string]] {.async.} = + var pingDelay: chronos.Duration + + proc ping(): Future[Result[void, string]] {.async, gcsafe.} = + try: + let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) + pingDelay = await node.libp2pPing.ping(conn) + return ok() + except CatchableError: + var msg = getCurrentExceptionMsg() + if msg == "Future operation cancelled!": + msg = "timedout" + warn "failed to ping the peer", peer = peerInfo, err = msg + + customPeerInfo.connError = msg + return err("could not ping peer: " & msg) + + let timedOut = not await ping().withTimeout(timeout) + # need this check for pingDelat == 0 because there may be a conn error before timeout + if timedOut or pingDelay == 0.millis: + customPeerInfo.retries += 1 + return err(customPeerInfo.connError) + + customPeerInfo.connError = "" + info "successfully pinged peer", peer = peerInfo, duration = pingDelay.millis + networkmonitor_peer_ping.observe(pingDelay.millis) + + # We are using a smoothed moving average + customPeerInfo.avgPingDuration = + if customPeerInfo.avgPingDuration.millis == 0: + pingDelay + else: + let newAvg = + (float64(pingDelay.millis) * PingSmoothing) + + float64(customPeerInfo.avgPingDuration.millis) * (1.0 - PingSmoothing) + + int64(newAvg).millis + + customPeerInfo.lastPingDuration = pingDelay + + return ok(customPeerInfo.peerId) + +proc shouldReconnect(customPeerInfo: CustomPeerInfoRef): bool = + let reconnetIntervalCheck = + getTime().toUnix() >= customPeerInfo.lastTimeConnected + ReconnectTime + var retriesCheck = customPeerInfo.retries < MaxConnectionRetries + + if not retriesCheck and + getTime().toUnix() >= customPeerInfo.lastTimeConnected + ResetRetriesAfter: + customPeerInfo.retries = 0 + retriesCheck = true + info "resetting retries counter", peerId = customPeerInfo.peerId + + return reconnetIntervalCheck and retriesCheck + +# TODO: Split in 
discover, connect +proc setConnectedPeersMetrics( + discoveredNodes: seq[waku_enr.Record], + node: WakuNode, + timeout: chronos.Duration, + restClient: RestClientRef, + allPeers: CustomPeersTableRef, +) {.async.} = + let currentTime = getTime().toUnix() + + var newPeers = 0 + var successfulConnections = 0 + + var analyzeFuts: seq[Future[Result[string, string]]] + + var (inConns, outConns) = node.peer_manager.connectedPeers(WakuRelayCodec) + info "connected peers", inConns = inConns.len, outConns = outConns.len + + shuffle(outConns) + + if outConns.len >= toInt(MaxConnectedPeers / 2): + for p in outConns[0 ..< toInt(outConns.len / 2)]: + trace "Pruning Peer", Peer = $p + asyncSpawn(node.switch.disconnect(p)) + + # iterate all newly discovered nodes + for discNode in discoveredNodes: + let peerRes = toRemotePeerInfo(discNode) + + let peerInfo = peerRes.valueOr: + warn "error converting record to remote peer info", record = discNode + continue + + # create new entry if new peerId found + let peerId = $peerInfo.peerId + + if not allPeers.hasKey(peerId): + allPeers[peerId] = CustomPeerInfoRef(peerId: peerId) + newPeers += 1 + else: + info "already seen", peerId = peerId + + let customPeerInfo = allPeers[peerId] + + customPeerInfo.lastTimeDiscovered = currentTime + customPeerInfo.enr = discNode.toURI() + customPeerInfo.enrCapabilities = discNode.getCapabilities().mapIt($it) + customPeerInfo.discovered += 1 + + for maddr in peerInfo.addrs: + if $maddr notin customPeerInfo.maddrs: + customPeerInfo.maddrs.add $maddr + let typedRecord = discNode.toTypedRecord() + if not typedRecord.isOk(): + warn "could not convert record to typed record", record = discNode + continue + if not typedRecord.get().ip.isSome(): + warn "ip field is not set", record = typedRecord.get() + continue + + let ip = $typedRecord.get().ip.get().join(".") + customPeerInfo.ip = ip + + # try to ping the peer + if shouldReconnect(customPeerInfo): + if customPeerInfo.retries > 0: + warn "trying to dial failed 
peer again", + peerId = peerId, retry = customPeerInfo.retries + analyzeFuts.add(analyzePeer(customPeerInfo, peerInfo, node, timeout)) + + # Wait for all connection attempts to finish + let analyzedPeers = await allFinished(analyzeFuts) + + for peerIdFut in analyzedPeers: + let peerIdRes = await peerIdFut + let peerIdStr = peerIdRes.valueOr: + continue + + successfulConnections += 1 + let peerId = PeerId.init(peerIdStr).valueOr: + warn "failed to parse peerId", peerId = peerIdStr + continue + var customPeerInfo = allPeers[peerIdStr] + + debug "connected to peer", peer = customPeerInfo[] + + # after connection, get supported protocols + let lp2pPeerStore = node.switch.peerStore + let nodeProtocols = lp2pPeerStore[ProtoBook][peerId] + customPeerInfo.supportedProtocols = nodeProtocols + customPeerInfo.lastTimeConnected = currentTime + + # after connection, get user-agent + let nodeUserAgent = lp2pPeerStore[AgentBook][peerId] + customPeerInfo.userAgent = nodeUserAgent + + info "number of newly discovered peers", amount = newPeers + # inform the total connections that we did in this round + info "number of successful connections", amount = successfulConnections + +proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} = + var allProtocols: Table[string, int] + var allAgentStrings: Table[string, int] + var countries: Table[string, int] + var connectedPeers = 0 + var failedPeers = 0 + + for peerInfo in allPeersRef.values: + if peerInfo.connError == "": + for protocol in peerInfo.supportedProtocols: + allProtocols[protocol] = allProtocols.mgetOrPut(protocol, 0) + 1 + + # store available user-agents in the network + allAgentStrings[peerInfo.userAgent] = + allAgentStrings.mgetOrPut(peerInfo.userAgent, 0) + 1 + + if peerInfo.country != "": + countries[peerInfo.country] = countries.mgetOrPut(peerInfo.country, 0) + 1 + + connectedPeers += 1 + else: + failedPeers += 1 + + networkmonitor_peer_count.set(int64(connectedPeers), labelValues = ["true"]) + 
networkmonitor_peer_count.set(int64(failedPeers), labelValues = ["false"]) + # update count on each protocol + for protocol in allProtocols.keys(): + let countOfProtocols = allProtocols.mgetOrPut(protocol, 0) + networkmonitor_peer_type_as_per_protocol.set( + int64(countOfProtocols), labelValues = [protocol] + ) + info "supported protocols in the network", + protocol = protocol, count = countOfProtocols + + # update count on each user-agent + for userAgent in allAgentStrings.keys(): + let countOfUserAgent = allAgentStrings.mgetOrPut(userAgent, 0) + networkmonitor_peer_user_agents.set( + int64(countOfUserAgent), labelValues = [userAgent] + ) + info "user agents participating in the network", + userAgent = userAgent, count = countOfUserAgent + + for country in countries.keys(): + let peerCount = countries.mgetOrPut(country, 0) + networkmonitor_peer_country_count.set(int64(peerCount), labelValues = [country]) + info "number of peers per country", country = country, count = peerCount + +proc populateInfoFromIp( + allPeersRef: CustomPeersTableRef, restClient: RestClientRef +) {.async.} = + for peer in allPeersRef.keys(): + if allPeersRef[peer].country != "" and allPeersRef[peer].city != "": + continue + # TODO: Update also if last update > x + if allPeersRef[peer].ip == "": + continue + # get more info the peers from its ip address + var location: NodeLocation + try: + # IP-API endpoints are now limited to 45 HTTP requests per minute + await sleepAsync(1400.millis) + let response = await restClient.ipToLocation(allPeersRef[peer].ip) + location = response.data + except CatchableError: + warn "could not get location", ip = allPeersRef[peer].ip + continue + allPeersRef[peer].country = location.country + allPeersRef[peer].city = location.city + +# TODO: Split in discovery, connections, and ip2location +# crawls the network discovering peers and trying to connect to them +# metrics are processed and exposed +proc crawlNetwork( + node: WakuNode, + wakuDiscv5: WakuDiscoveryV5, 
+ restClient: RestClientRef, + conf: NetworkMonitorConf, + allPeersRef: CustomPeersTableRef, +) {.async.} = + let crawlInterval = conf.refreshInterval * 1000 + while true: + let startTime = Moment.now() + # discover new random nodes + let discoveredNodes = await wakuDiscv5.findRandomPeers() + + # nodes are nested into bucket, flat it + let flatNodes = wakuDiscv5.protocol.routingTable.buckets.mapIt(it.nodes).flatten() + + # populate metrics related to capabilities as advertised by the ENR (see waku field) + setDiscoveredPeersCapabilities(discoveredNodes) + + # populate cluster metrics as advertised by the ENR + setDiscoveredPeersCluster(flatNodes) + + # tries to connect to all newly discovered nodes + # and populates metrics related to peers we could connect + # note random discovered nodes can be already known + await setConnectedPeersMetrics( + discoveredNodes, node, conf.timeout, restClient, allPeersRef + ) + + updateMetrics(allPeersRef) + + # populate info from ip addresses + await populateInfoFromIp(allPeersRef, restClient) + + let totalNodes = discoveredNodes.len + #let seenNodes = totalNodes + + info "discovered nodes: ", total = totalNodes #, seen = seenNodes + + # Notes: + # we dont run ipMajorityLoop + # we dont run revalidateLoop + let endTime = Moment.now() + let elapsed = (endTime - startTime).nanos + + info "crawl duration", time = elapsed.millis + + await sleepAsync(crawlInterval.millis - elapsed.millis) + +proc retrieveDynamicBootstrapNodes( + dnsDiscoveryUrl: string, dnsAddrsNameServers: seq[IpAddress] +): Future[Result[seq[RemotePeerInfo], string]] {.async.} = + ## Retrieve dynamic bootstrap nodes (DNS discovery) + + if dnsDiscoveryUrl != "": + # DNS discovery + debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl + + var nameServers: seq[TransportAddress] + for ip in dnsAddrsNameServers: + nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53 + + let dnsResolver = DnsResolver.new(nameServers) + + proc 
resolver(domain: string): Future[string] {.async, gcsafe.} = + trace "resolving", domain = domain + let resolved = await dnsResolver.resolveTxt(domain) + if resolved.len > 0: + return resolved[0] # Use only first answer + + var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver) + if wakuDnsDiscovery.isOk(): + return (await wakuDnsDiscovery.get().findPeers()).mapErr( + proc(e: cstring): string = + $e + ) + else: + warn "Failed to init Waku DNS discovery" + + debug "No method for retrieving dynamic bootstrap nodes specified." + ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default + +proc getBootstrapFromDiscDns( + conf: NetworkMonitorConf +): Future[Result[seq[enr.Record], string]] {.async.} = + try: + let dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] + let dynamicBootstrapNodesRes = + await retrieveDynamicBootstrapNodes(conf.dnsDiscoveryUrl, dnsNameServers) + if not dynamicBootstrapNodesRes.isOk(): + error("failed discovering peers from DNS") + let dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() + + # select dynamic bootstrap nodes that have an ENR containing a udp port. 
+ # Discv5 only supports UDP https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md) + var discv5BootstrapEnrs: seq[enr.Record] + for n in dynamicBootstrapNodes: + if n.enr.isSome(): + let + enr = n.enr.get() + tenrRes = enr.toTypedRecord() + if tenrRes.isOk() and ( + tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome() + ): + discv5BootstrapEnrs.add(enr) + return ok(discv5BootstrapEnrs) + except CatchableError: + error("failed discovering peers from DNS") + +proc initAndStartApp( + conf: NetworkMonitorConf +): Future[Result[(WakuNode, WakuDiscoveryV5), string]] {.async.} = + let bindIp = + try: + parseIpAddress("0.0.0.0") + except CatchableError: + return err("could not start node: " & getCurrentExceptionMsg()) + + let extIp = + try: + parseIpAddress("127.0.0.1") + except CatchableError: + return err("could not start node: " & getCurrentExceptionMsg()) + + let + # some hardcoded parameters + rng = keys.newRng() + key = crypto.PrivateKey.random(Secp256k1, rng[])[] + nodeTcpPort = Port(60000) + nodeUdpPort = Port(9000) + flags = CapabilitiesBitfield.init( + lightpush = false, filter = false, store = false, relay = true + ) + + var builder = EnrBuilder.init(key) + + builder.withIpAddressAndPorts( + ipAddr = some(extIp), tcpPort = some(nodeTcpPort), udpPort = some(nodeUdpPort) + ) + builder.withWakuCapabilities(flags) + + builder.withWakuRelaySharding( + RelayShards(clusterId: conf.clusterId, shardIds: conf.shards) + ).isOkOr: + error "failed to add sharded topics to ENR", error = error + return err("failed to add sharded topics to ENR: " & $error) + + let recordRes = builder.build() + let record = + if recordRes.isErr(): + return err("cannot build record: " & $recordRes.error) + else: + recordRes.get() + + var nodeBuilder = WakuNodeBuilder.init() + + nodeBuilder.withNodeKey(key) + nodeBuilder.withRecord(record) + nodeBuilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers)) + + nodeBuilder.withPeerManagerConfig( + maxConnections = 
MaxConnectedPeers, + relayServiceRatio = "13.33:86.67", + shardAware = true, + ) + let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort) + if res.isErr(): + return err("node building error" & $res.error) + + let nodeRes = nodeBuilder.build() + let node = + if nodeRes.isErr(): + return err("node building error" & $res.error) + else: + nodeRes.get() + + var discv5BootstrapEnrsRes = await getBootstrapFromDiscDns(conf) + if discv5BootstrapEnrsRes.isErr(): + error("failed discovering peers from DNS") + var discv5BootstrapEnrs = discv5BootstrapEnrsRes.get() + + # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq + for enrUri in conf.bootstrapNodes: + addBootstrapNode(enrUri, discv5BootstrapEnrs) + + # discv5 + let discv5Conf = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: bindIp, + port: nodeUdpPort, + privateKey: keys.PrivateKey(key.skkey), + bootstrapRecords: discv5BootstrapEnrs, + autoupdateRecord: false, + ) + + let wakuDiscv5 = WakuDiscoveryV5.new(node.rng, discv5Conf, some(record)) + + try: + wakuDiscv5.protocol.open() + except CatchableError: + return err("could not start node: " & getCurrentExceptionMsg()) + + ok((node, wakuDiscv5)) + +proc startRestApiServer( + conf: NetworkMonitorConf, + allPeersInfo: CustomPeersTableRef, + numMessagesPerContentTopic: ContentTopicMessageTableRef, +): Result[void, string] = + try: + let serverAddress = + initTAddress(conf.metricsRestAddress & ":" & $conf.metricsRestPort) + proc validate(pattern: string, value: string): int = + if pattern.startsWith("{") and pattern.endsWith("}"): 0 else: 1 + + var router = RestRouter.init(validate) + router.installHandler(allPeersInfo, numMessagesPerContentTopic) + var sres = RestServerRef.new(router, serverAddress) + let restServer = sres.get() + restServer.start() + except CatchableError: + error("could not start rest api server") + ok() + +# handles rx of messages over a topic (see subscribe) +# 
counts the number of messages per content topic +proc subscribeAndHandleMessages( + node: WakuNode, + pubsubTopic: PubsubTopic, + msgPerContentTopic: ContentTopicMessageTableRef, +) = + # handle function + proc handler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + trace "rx message", pubsubTopic = pubsubTopic, contentTopic = msg.contentTopic + + # If we reach a table limit size, remove c topics with the least messages. + let tableSize = 100 + if msgPerContentTopic.len > (tableSize - 1): + let minIndex = toSeq(msgPerContentTopic.values()).minIndex() + msgPerContentTopic.del(toSeq(msgPerContentTopic.keys())[minIndex]) + + # TODO: Will overflow at some point + # +1 if content topic existed, init to 1 otherwise + if msgPerContentTopic.hasKey(msg.contentTopic): + msgPerContentTopic[msg.contentTopic] += 1 + else: + msgPerContentTopic[msg.contentTopic] = 1 + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr: + error "failed to subscribe to pubsub topic", pubsubTopic, error + quit(1) + +when isMainModule: + # known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError + {.pop.} + let confRes = NetworkMonitorConf.loadConfig() + if confRes.isErr(): + error "could not load cli variables", err = confRes.error + quit(1) + + var conf = confRes.get() + info "cli flags", conf = conf + + if conf.clusterId == 1: + let twnNetworkConf = NetworkConf.TheWakuNetworkConf() + + conf.bootstrapNodes = twnNetworkConf.discv5BootstrapNodes + conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic + conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress + conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec + conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit + conf.numShardsInNetwork = twnNetworkConf.shardingConf.numShardsInCluster + + if conf.shards.len == 0: + conf.shards = + toSeq(uint16(0) .. 
uint16(twnNetworkConf.shardingConf.numShardsInCluster - 1)) + + if conf.logLevel != LogLevel.NONE: + setLogLevel(conf.logLevel) + + # list of peers that we have discovered/connected + var allPeersInfo = CustomPeersTableRef() + + # content topic and the number of messages that were received + var msgPerContentTopic = ContentTopicMessageTableRef() + + # start metrics server + if conf.metricsServer: + let res = + startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort)) + if res.isErr(): + error "could not start metrics server", err = res.error + quit(1) + + # start rest server for custom metrics + let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic) + if res.isErr(): + error "could not start rest api server", err = res.error + quit(1) + + # create a rest client + let clientRest = + RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2)) + if clientRest.isErr(): + error "could not start rest api client", err = res.error + quit(1) + let restClient = clientRest.get() + + # start waku node + let nodeRes = waitFor initAndStartApp(conf) + if nodeRes.isErr(): + error "could not start node" + quit 1 + + let (node, discv5) = nodeRes.get() + + (waitFor node.mountRelay()).isOkOr: + error "failed to mount waku relay protocol: ", err = error + quit 1 + + waitFor node.mountLibp2pPing() + + var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} = + ## Action to be taken when an internal error occurs during the node run. + ## e.g. the connection with the database is lost and not recovered. 
+ error "Unrecoverable error occurred", error = msg + quit(QuitFailure) + + if conf.rlnRelay and conf.rlnRelayEthContractAddress != "": + let rlnConf = WakuRlnConfig( + dynamic: conf.rlnRelayDynamic, + credIndex: some(uint(0)), + ethContractAddress: conf.rlnRelayEthContractAddress, + ethClientUrls: conf.ethClientUrls.mapIt(string(it)), + epochSizeSec: conf.rlnEpochSizeSec, + creds: none(RlnRelayCreds), + onFatalErrorAction: onFatalErrorAction, + ) + + try: + waitFor node.mountRlnRelay(rlnConf) + except CatchableError: + error "failed to setup RLN", err = getCurrentExceptionMsg() + quit 1 + + node.mountMetadata(conf.clusterId, conf.shards).isOkOr: + error "failed to mount waku metadata protocol: ", err = error + quit 1 + + for shard in conf.shards: + # Subscribe the node to the shards, to count messages + subscribeAndHandleMessages( + node, $RelayShard(shardId: shard, clusterId: conf.clusterId), msgPerContentTopic + ) + + # spawn the routine that crawls the network + # TODO: split into 3 routines (discovery, connections, ip2location) + asyncSpawn crawlNetwork(node, discv5, restClient, conf, allPeersInfo) + + runForever() diff --git a/third-party/nwaku/apps/networkmonitor/networkmonitor_config.nim b/third-party/nwaku/apps/networkmonitor/networkmonitor_config.nim new file mode 100644 index 0000000..f67fb09 --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/networkmonitor_config.nim @@ -0,0 +1,190 @@ +import + chronicles, + chronicles/topics_registry, + confutils, + chronos, + std/strutils, + results, + regex + +const git_version* {.strdefine.} = "n/a" + +type EthRpcUrl* = distinct string + +proc `$`*(u: EthRpcUrl): string = + string(u) + +type NetworkMonitorConf* = object + logLevel* {. + desc: "Sets the log level", + defaultValue: LogLevel.INFO, + name: "log-level", + abbr: "l" + .}: LogLevel + + timeout* {. 
+ desc: "Timeout to consider that the connection failed", + defaultValue: chronos.seconds(10), + name: "timeout", + abbr: "t" + .}: chronos.Duration + + bootstrapNodes* {. + desc: "Bootstrap ENR node. Argument may be repeated.", + defaultValue: @[""], + name: "bootstrap-node", + abbr: "b" + .}: seq[string] + + dnsDiscoveryUrl* {. + desc: "URL for DNS node list in format 'enrtree://@'", + defaultValue: "", + name: "dns-discovery-url" + .}: string + + shards* {. + desc: + "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.", + name: "shard" + .}: seq[uint16] + + numShardsInNetwork* {. + desc: "Number of shards in the network", + name: "num-shards-in-network", + defaultValue: 8 + .}: uint32 + + refreshInterval* {. + desc: "How often new peers are discovered and connected to (in seconds)", + defaultValue: 5, + name: "refresh-interval", + abbr: "r" + .}: int + + clusterId* {. + desc: + "Cluster id that the node is running in. Node in a different cluster id is disconnected.", + defaultValue: 1, + name: "cluster-id" + .}: uint16 + + rlnRelay* {. + desc: "Enable spam protection through rln-relay: true|false", + defaultValue: true, + name: "rln-relay" + .}: bool + + rlnRelayDynamic* {. + desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false", + defaultValue: true, + name: "rln-relay-dynamic" + .}: bool + + ethClientUrls* {. + desc: + "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.", + defaultValue: newSeq[EthRpcUrl](0), + name: "rln-relay-eth-client-address" + .}: seq[EthRpcUrl] + + rlnRelayEthContractAddress* {. + desc: "Address of membership contract on an Ethereum testnet", + defaultValue: "", + name: "rln-relay-eth-contract-address" + .}: string + + rlnEpochSizeSec* {. + desc: + "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.", + defaultValue: 1, + name: "rln-relay-epoch-sec" + .}: uint64 + + rlnRelayUserMessageLimit* {. 
+ desc: + "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", + defaultValue: 1, + name: "rln-relay-user-message-limit" + .}: uint64 + + ## Prometheus metrics config + metricsServer* {. + desc: "Enable the metrics server: true|false", + defaultValue: true, + name: "metrics-server" + .}: bool + + metricsServerAddress* {. + desc: "Listening address of the metrics server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress + + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server.", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 + + ## Custom metrics rest server + metricsRestAddress* {. + desc: "Listening address of the metrics rest server.", + defaultValue: "127.0.0.1", + name: "metrics-rest-address" + .}: string + metricsRestPort* {. + desc: "Listening HTTP port of the metrics rest server.", + defaultValue: 8009, + name: "metrics-rest-port" + .}: uint16 + +proc parseCmdArg*(T: type IpAddress, p: string): T = + try: + result = parseIpAddress(p) + except CatchableError as e: + raise newException(ValueError, "Invalid IP address") + +proc completeCmdArg*(T: type IpAddress, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type chronos.Duration, p: string): T = + try: + result = chronos.seconds(parseInt(p)) + except CatchableError as e: + raise newException(ValueError, "Invalid duration value") + +proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] = + return @[] + +proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type EthRpcUrl, s: string): T = + ## allowed patterns: + ## http://url:port + ## https://url:port + ## http://url:port/path + ## https://url:port/path + ## http://url/with/path + ## http://url:port/path?query + ## https://url:port/path?query + ## disallowed patterns: + ## any valid/invalid ws or wss url + var httpPattern = + 
re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + var wsPattern = + re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*" + if regex.match(s, wsPattern): + raise newException( + ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL" + ) + if not regex.match(s, httpPattern): + raise newException(ValueError, "Invalid HTTP RPC URL") + return EthRpcUrl(s) + +proc loadConfig*(T: type NetworkMonitorConf): Result[T, string] = + try: + let conf = NetworkMonitorConf.load(version = git_version) + ok(conf) + except CatchableError: + err(getCurrentExceptionMsg()) diff --git a/third-party/nwaku/apps/networkmonitor/networkmonitor_metrics.nim b/third-party/nwaku/apps/networkmonitor/networkmonitor_metrics.nim new file mode 100644 index 0000000..dda3e57 --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/networkmonitor_metrics.nim @@ -0,0 +1,107 @@ +{.push raises: [].} + +import + std/[net, json, tables, sequtils], + chronicles, + chronicles/topics_registry, + chronos, + json_serialization, + metrics, + metrics/chronos_httpserver, + presto/route, + presto/server, + results + +logScope: + topics = "networkmonitor_metrics" + +# On top of our custom metrics, the following are reused from nim-eth +#routing_table_nodes{state=""} +#routing_table_nodes{state="seen"} +#discovery_message_requests_outgoing_total{response=""} +#discovery_message_requests_outgoing_total{response="no_response"} + +declarePublicGauge networkmonitor_peer_type_as_per_enr, + "Number of peers supporting each capability according to the ENR", + labels = ["capability"] + +declarePublicGauge networkmonitor_peer_cluster_as_per_enr, + "Number of peers on each cluster according to the ENR", labels = ["cluster"] + +declarePublicGauge networkmonitor_peer_type_as_per_protocol, + "Number of peers supporting each protocol, after a successful connection) ", + labels = ["protocols"] 
+ +declarePublicGauge networkmonitor_peer_user_agents, + "Number of peers with each user agent", labels = ["user_agent"] + +declarePublicHistogram networkmonitor_peer_ping, + "Histogram tracking ping durations for discovered peers", + buckets = [10.0, 20.0, 50.0, 100.0, 200.0, 300.0, 500.0, 800.0, 1000.0, 2000.0, Inf] + +declarePublicGauge networkmonitor_peer_count, + "Number of discovered peers", labels = ["connected"] + +declarePublicGauge networkmonitor_peer_country_count, + "Number of peers per country", labels = ["country"] + +type + CustomPeerInfo* = object # populated after discovery + lastTimeDiscovered*: int64 + discovered*: int64 + peerId*: string + enr*: string + ip*: string + enrCapabilities*: seq[string] + country*: string + city*: string + maddrs*: seq[string] + + # only after ok connection + lastTimeConnected*: int64 + retries*: int64 + supportedProtocols*: seq[string] + userAgent*: string + lastPingDuration*: Duration + avgPingDuration*: Duration + + # only after a ok/nok connection + connError*: string + + CustomPeerInfoRef* = ref CustomPeerInfo + + # Stores information about all discovered/connected peers + CustomPeersTableRef* = TableRef[string, CustomPeerInfoRef] + + # stores the content topic and the count of rx messages + ContentTopicMessageTableRef* = TableRef[string, int] + +proc installHandler*( + router: var RestRouter, + allPeers: CustomPeersTableRef, + numMessagesPerContentTopic: ContentTopicMessageTableRef, +) = + router.api(MethodGet, "/allpeersinfo") do() -> RestApiResponse: + let values = toSeq(allPeers.values()) + return RestApiResponse.response(values.toJson(), contentType = "application/json") + router.api(MethodGet, "/contenttopics") do() -> RestApiResponse: + # TODO: toJson() includes the hash + return RestApiResponse.response( + $(%numMessagesPerContentTopic), contentType = "application/json" + ) + +proc startMetricsServer*(serverIp: IpAddress, serverPort: Port): Result[void, string] = + info "Starting metrics HTTP server", 
serverIp, serverPort + + try: + startMetricsHttpServer($serverIp, serverPort) + except Exception as e: + error( + "Failed to start metrics HTTP server", + serverIp = serverIp, + serverPort = serverPort, + msg = e.msg, + ) + + info "Metrics HTTP server started", serverIp, serverPort + ok() diff --git a/third-party/nwaku/apps/networkmonitor/networkmonitor_utils.nim b/third-party/nwaku/apps/networkmonitor/networkmonitor_utils.nim new file mode 100644 index 0000000..0e89c4a --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/networkmonitor_utils.nim @@ -0,0 +1,53 @@ +{.push raises: [].} + +import + std/json, + results, + chronicles, + chronicles/topics_registry, + chronos, + presto/[client, common] + +type NodeLocation* = object + country*: string + city*: string + lat*: string + long*: string + isp*: string + +proc flatten*[T](a: seq[seq[T]]): seq[T] = + var aFlat = newSeq[T](0) + for subseq in a: + aFlat &= subseq + return aFlat + +proc decodeBytes*( + t: typedesc[NodeLocation], value: openArray[byte], contentType: Opt[ContentTypeData] +): RestResult[NodeLocation] = + var res: string + if len(value) > 0: + res = newString(len(value)) + copyMem(addr res[0], unsafeAddr value[0], len(value)) + try: + let jsonContent = parseJson(res) + if $jsonContent["status"].getStr() != "success": + error "query failed", result = $jsonContent + return err("query failed") + return ok( + NodeLocation( + country: jsonContent["country"].getStr(), + city: jsonContent["city"].getStr(), + lat: $jsonContent["lat"].getFloat(), + long: $jsonContent["lon"].getFloat(), + isp: jsonContent["isp"].getStr(), + ) + ) + except Exception: + return err("failed to get the location: " & getCurrentExceptionMsg()) + +proc encodeString*(value: string): RestResult[string] = + ok(value) + +proc ipToLocation*( + ip: string +): RestResponse[NodeLocation] {.rest, endpoint: "json/{ip}", meth: MethodGet.} diff --git a/third-party/nwaku/apps/networkmonitor/nim.cfg b/third-party/nwaku/apps/networkmonitor/nim.cfg 
new file mode 100644 index 0000000..2231f2e --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/nim.cfg @@ -0,0 +1,4 @@ +-d:chronicles_line_numbers +-d:chronicles_runtime_filtering:on +-d:discv5_protocol_id:d5waku +path = "../.." diff --git a/third-party/nwaku/apps/networkmonitor/prometheus.yaml b/third-party/nwaku/apps/networkmonitor/prometheus.yaml new file mode 100644 index 0000000..c7af03f --- /dev/null +++ b/third-party/nwaku/apps/networkmonitor/prometheus.yaml @@ -0,0 +1,9 @@ +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'prometheus' + scrape_interval: 5s + static_configs: + - targets: ['host.docker.internal:8008'] + metrics_path: '/metrics' \ No newline at end of file diff --git a/third-party/nwaku/apps/sonda/.env.example b/third-party/nwaku/apps/sonda/.env.example new file mode 100644 index 0000000..ea769b3 --- /dev/null +++ b/third-party/nwaku/apps/sonda/.env.example @@ -0,0 +1,44 @@ +# RPC URL for accessing testnet via HTTP. +# e.g. https://linea-sepolia.infura.io/v3/123aa110320f4aec179150fba1e1b1b1 +RLN_RELAY_ETH_CLIENT_ADDRESS= + +# Account of testnet where you have Linea Sepolia ETH that would be staked into RLN contract. +ETH_TESTNET_ACCOUNT= + +# Private key of testnet where you have Linea Sepolia ETH that would be staked into RLN contract. +# Note: make sure you don't use the '0x' prefix. +# e.g. 0116196e9a8abed42dd1a22eb63fa2a5a17b0c27d716b87ded2c54f1bf192a0b +ETH_TESTNET_KEY= + +# Address of the RLN contract on Linea Sepolia. +RLN_CONTRACT_ADDRESS=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 +# Address of the RLN Membership Token contract on Linea Sepolia used to pay for membership. +TOKEN_CONTRACT_ADDRESS=0x185A0015aC462a0aECb81beCc0497b649a64B9ea + +# Password you would like to use to protect your RLN membership. +RLN_RELAY_CRED_PASSWORD= + +# Advanced. Can be left empty in normal use cases. 
+NWAKU_IMAGE= +NODEKEY= +DOMAIN= +EXTRA_ARGS= +STORAGE_SIZE= + + +# -------------------- SONDA CONFIG ------------------ +METRICS_PORT=8004 +NODE_REST_ADDRESS="http://nwaku:8645" +CLUSTER_ID=16 +SHARD=32 +# Comma separated list of store nodes to poll +STORE_NODES="/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT, +/dns4/store-02.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAm9aDJPkhGxc2SFcEACTFdZ91Q5TJjp76qZEhq9iF59x7R, +/dns4/store-01.gc-us-central1-a.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmMELCo218hncCtTvC2Dwbej3rbyHQcR8erXNnKGei7WPZ, +/dns4/store-02.gc-us-central1-a.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmJnVR7ZzFaYvciPVafUXuYGLHPzSUigqAmeNw9nJUVGeM, +/dns4/store-01.ac-cn-hongkong-c.shards.test.status.im/tcp/30303/p2p/16Uiu2HAm2M7xs7cLPc3jamawkEqbr7cUJX11uvY7LxQ6WFUdUKUT, +/dns4/store-02.ac-cn-hongkong-c.shards.test.status.im/tcp/30303/p2p/16Uiu2HAm9CQhsuwPR54q27kNj9iaQVfyRzTGKrhFmr94oD8ujU6P" +# Wait time in seconds between two consecutive queries +QUERY_DELAY=60 +# Consecutive successful store requests to consider a store node healthy +HEALTH_THRESHOLD=5 \ No newline at end of file diff --git a/third-party/nwaku/apps/sonda/.gitignore b/third-party/nwaku/apps/sonda/.gitignore new file mode 100644 index 0000000..f366b94 --- /dev/null +++ b/third-party/nwaku/apps/sonda/.gitignore @@ -0,0 +1,4 @@ +.env +keystore +rln_tree +.env diff --git a/third-party/nwaku/apps/sonda/Dockerfile.sonda b/third-party/nwaku/apps/sonda/Dockerfile.sonda new file mode 100644 index 0000000..0e5a606 --- /dev/null +++ b/third-party/nwaku/apps/sonda/Dockerfile.sonda @@ -0,0 +1,23 @@ +FROM python:3.9.18-alpine3.18 + +ENV METRICS_PORT=8004 +ENV NODE_REST_ADDRESS="http://nwaku:8645" +ENV QUERY_DELAY=60 +ENV STORE_NODES="" +ENV CLUSTER_ID=1 +ENV SHARD=1 +ENV HEALTH_THRESHOLD=5 + +WORKDIR /opt + +COPY sonda.py /opt/sonda.py + +RUN pip install requests argparse prometheus_client + +CMD python -u /opt/sonda.py \ + 
--metrics-port=$METRICS_PORT \ + --node-rest-address="${NODE_REST_ADDRESS}" \ + --delay-seconds=$QUERY_DELAY \ + --pubsub-topic="/waku/2/rs/${CLUSTER_ID}/${SHARD}" \ + --store-nodes="${STORE_NODES}" \ + --health-threshold=$HEALTH_THRESHOLD diff --git a/third-party/nwaku/apps/sonda/README.md b/third-party/nwaku/apps/sonda/README.md new file mode 100644 index 0000000..459f6fe --- /dev/null +++ b/third-party/nwaku/apps/sonda/README.md @@ -0,0 +1,52 @@ +# Sonda + +Sonda is a tool to monitor store nodes and measure their performance. + +It works by running a `nwaku` node, publishing a message from it every fixed interval and performing a store query to all the store nodes we want to monitor to check they respond with the last message we published. + +## Instructions + +1. Create an `.env` file which will contain the configuration parameters. + You can start by copying `.env.example` and adapting it for your use case + + ``` + cp .env.example .env + ${EDITOR} .env + ``` + + The variables that have to be filled for Sonda are + + ``` + CLUSTER_ID= + SHARD= + # Comma separated list of store nodes to poll + STORE_NODES= + # Wait time in seconds between two consecutive queries + QUERY_DELAY= + # Consecutive successful store requests to consider a store node healthy + HEALTH_THRESHOLD= + ``` + +2. If you want to query nodes in `cluster-id` 1, then you have to follow the steps of registering an RLN membership. Otherwise, you can skip this step. + + For it, you need: + * Ethereum Linea Sepolia WebSocket endpoint. Get one free from [Infura](https://linea-sepolia.infura.io/). + * Ethereum Linea Sepolia account with minimum 0.01ETH. Get some [here](https://docs.metamask.io/developer-tools/faucet/). + * A password to protect your rln membership. + + Fill the `RLN_RELAY_ETH_CLIENT_ADDRESS`, `ETH_TESTNET_KEY` and `RLN_RELAY_CRED_PASSWORD` env variables and run + + ``` + ./register_rln.sh + ``` + +3. Start Sonda by running + + ``` + docker-compose up -d + ``` + +4. 
Browse to http://localhost:3000/dashboards and monitor the performance + + There's two Grafana dashboards: `nwaku-monitoring` to track the stats of your node that is publishing messages and performing queries, and `sonda-monitoring` to monitor the responses of the store nodes. + diff --git a/third-party/nwaku/apps/sonda/docker-compose.yml b/third-party/nwaku/apps/sonda/docker-compose.yml new file mode 100644 index 0000000..d659442 --- /dev/null +++ b/third-party/nwaku/apps/sonda/docker-compose.yml @@ -0,0 +1,114 @@ + +x-logging: &logging + logging: + driver: json-file + options: + max-size: 1000m + +# Environment variable definitions +x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-" + +x-rln-environment: &rln_env + RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6} + RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-" + RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-" + +x-sonda-env: &sonda_env + METRICS_PORT: ${METRICS_PORT:-8004} + NODE_REST_ADDRESS: ${NODE_REST_ADDRESS:-"http://nwaku:8645"} + CLUSTER_ID: ${CLUSTER_ID:-1} + SHARD: ${SHARD:-0} + STORE_NODES: ${STORE_NODES:-} + QUERY_DELAY: ${QUERY_DELAY-60} + HEALTH_THRESHOLD: ${HEALTH_THRESHOLD-5} + +# Services definitions +services: + nwaku: + image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:deploy-status-prod} + container_name: nwaku + restart: on-failure + ports: + - 30304:30304/tcp + - 30304:30304/udp + - 9005:9005/udp + - 127.0.0.1:8003:8003 + - 80:80 #Let's Encrypt + - 8000:8000/tcp #WSS + - 127.0.0.1:8645:8645 + <<: + - *logging + environment: + DOMAIN: ${DOMAIN} + NODEKEY: ${NODEKEY} + RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}" + RLN_RELAY_ETH_CLIENT_ADDRESS: *rln_relay_eth_client_address + EXTRA_ARGS: ${EXTRA_ARGS} + 
STORAGE_SIZE: ${STORAGE_SIZE} + <<: + - *rln_env + - *sonda_env + volumes: + - ./run_node.sh:/opt/run_node.sh:Z + - ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z + - ./rln_tree:/etc/rln_tree/:Z + - ./keystore:/keystore:Z + entrypoint: sh + command: + - /opt/run_node.sh + networks: + - nwaku-sonda + + sonda: + build: + context: . + dockerfile: Dockerfile.sonda + container_name: sonda + ports: + - 127.0.0.1:${METRICS_PORT}:${METRICS_PORT} + environment: + <<: + - *sonda_env + depends_on: + - nwaku + networks: + - nwaku-sonda + + prometheus: + image: docker.io/prom/prometheus:latest + container_name: prometheus + volumes: + - ./monitoring/prometheus-config.yml:/etc/prometheus/prometheus.yml:Z + command: + - --config.file=/etc/prometheus/prometheus.yml + # ports: + # - 127.0.0.1:9090:9090 + restart: on-failure:5 + depends_on: + - nwaku + networks: + - nwaku-sonda + + grafana: + image: docker.io/grafana/grafana:latest + container_name: grafana + env_file: + - ./monitoring/configuration/grafana-plugins.env + volumes: + - ./monitoring/configuration/grafana.ini:/etc/grafana/grafana.ini:Z + - ./monitoring/configuration/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:Z + - ./monitoring/configuration/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:Z + - ./monitoring/configuration/dashboards:/var/lib/grafana/dashboards/:Z + - ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_icon.svg:Z + - ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_typelogo.svg:Z + - ./monitoring/configuration/customizations/custom-logo.png:/usr/share/grafana/public/img/fav32.png:Z + ports: + - 0.0.0.0:3000:3000 + restart: on-failure:5 + depends_on: + - prometheus + networks: + - nwaku-sonda + +networks: + nwaku-sonda: \ No newline at end of file diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.png 
b/third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.png new file mode 100644 index 0000000..dcf13b9 Binary files /dev/null and b/third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.png differ diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.svg b/third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.svg new file mode 100644 index 0000000..3c9a6da --- /dev/null +++ b/third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.svg @@ -0,0 +1,3 @@ + + + diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards.yaml b/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards.yaml new file mode 100644 index 0000000..e59ac96 --- /dev/null +++ b/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards.yaml @@ -0,0 +1,9 @@ +apiVersion: 1 + +providers: +- name: 'Prometheus' + orgId: 1 + folder: '' + type: file + options: + path: /var/lib/grafana/dashboards \ No newline at end of file diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/nwaku-monitoring.json b/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/nwaku-monitoring.json new file mode 100644 index 0000000..2b024e3 --- /dev/null +++ b/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/nwaku-monitoring.json @@ -0,0 +1,5303 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 12485, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 
0, + "y": 0 + }, + "id": 45, + "panels": [], + "title": "Waku Node", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 9, + "x": 0, + "y": 1 + }, + "id": 41, + "options": { + "displayMode": "gradient", + "maxVizHeight": 300, + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "rate(waku_histogram_message_size_bucket[1h])/scalar(rate(waku_histogram_message_size_count[1h]))*100", + "format": "heatmap", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Message distrubution %/kBytes (Last Hour)", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 9, + "x": 9, + "y": 1 + }, + "id": 38, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + 
"targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_histogram_message_size_sum[1h])/rate(waku_histogram_message_size_count[1h])", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Average Msg Size (Last Hour)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 5, + "x": 9, + "y": 5 + }, + "id": 42, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.75, rate(waku_histogram_message_size_bucket[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "75% Percentile (Last hour)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 14, + "y": 5 + }, + "id": 39, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + 
"values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, rate(waku_histogram_message_size_bucket[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "99% Percentile (Last Hour)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 0, + "y": 9 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "(increase(waku_node_messages_total[1m]))/60", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Messages/second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 9, + "y": 9 + }, + "id": 43, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.4.7", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_histogram_message_size_sum/waku_histogram_message_size_count", + "format": "heatmap", + "instant": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Average msg size (kBytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + 
"overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 16 + }, + "id": 2, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "waku_version{instance=\"nwaku:8003\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "version" + ] + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 3, + "y": 16 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "libp2p_autonat_reachability_confidence", + "legendFormat": "{{reachability}}", + "range": true, + "refId": "A" + } + ], + "title": "Reachability", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "max" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" 
+ }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 6, + "y": 16 + }, + "id": 32, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "routing_table_nodes{state=\"seen\"}", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Discv5 (Seen Nodes)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 9, + "y": 16 + }, + "id": 33, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "routing_table_nodes", + "legendFormat": "{{label_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Discv5 (Nodes)", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Time", + 
"{__name__=\"routing_table_nodes\", instance=\"nwaku:8003\", job=\"nwaku\"}" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 12, + "y": 16 + }, + "id": 25, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "libp2p_peers", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Connected Peers", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 15, + "y": 16 + }, + "id": 28, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "libp2p_pubsub_topics", + "legendFormat": "__auto", + "range": true, + 
"refId": "A" + } + ], + "title": "Number Pubsub Topics", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "dateTimeAsIso" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 21 + }, + "id": 10, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "process_start_time_seconds{job=\"nwaku\"}*1000", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Start Times (UTC)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 15, + "x": 3, + "y": 21 + }, + "id": 44, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "waku_connected_peers", + "legendFormat": "{{direction}}_{{protocol}}", + "range": true, + 
"refId": "A" + } + ], + "title": "Connected Peers (Direction/Protocol)", + "transformations": [], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 0, + "y": 26 + }, + "id": 36, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "waku_peer_store_size", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Peer Store Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": 
[] + }, + "gridPos": { + "h": 9, + "w": 6, + "x": 0, + "y": 31 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.3.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "libp2p_peers", + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A" + } + ], + "title": "Connected Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 6, + "x": 6, + "y": 31 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(libp2p_network_bytes_total{direction=\"in\"}[$__rate_interval])", + 
"legendFormat": "traffic_{{direction}}", + "range": true, + "refId": "A" + } + ], + "title": "libp2p traffic (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 6, + "x": 12, + "y": 31 + }, + "id": 29, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(libp2p_network_bytes_total{direction=\"out\"}[$__rate_interval])", + "legendFormat": "traffic_{{direction}}", + "range": true, + "refId": "A" + } + ], + "title": "libp2p traffic (out)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 0, + "y": 40 + }, + "id": 20, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "nim_gc_heap_instance_occupied_bytes{}", + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A" + } + ], + "title": "Heap allocation", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" 
+ } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 6, + "y": 40 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "nim_gc_mem_bytes{}", + "hide": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A" + } + ], + "title": "Nim Memory Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 40 + }, + "id": 128, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ 
+ { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_number_registered_memberships", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Registered Memberships", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 48 + }, + "id": 127, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_proof_generation_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + 
"useBackend": false + } + ], + "title": "RLN Proof Generation (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 48 + }, + "id": 126, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_proof_verification_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Proof Verification (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + 
"axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 48 + }, + "id": 135, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_membership_insertion_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Membership Insertion (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 54 + }, + "id": 134, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_membership_credentials_import_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Credentials Import (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + 
"color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 54 + }, + "id": 137, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_messages_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Messages Total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 54 + }, + "id": 136, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + 
"type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_proof_verification_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Proof Verification Total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 60 + }, + "id": 133, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_invalid_messages_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + 
"title": "RLN Invalid Messages Total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 60 + }, + "id": 130, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_spam_messages_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Spam Messages Total", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 60 + }, + "id": 138, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_invalid_messages_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{type}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Invalid Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of messages currently stored in the database", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 9, + "x": 0, + "y": 66 + }, + "id": 141, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "pg_tb_stats_messages{}", + "instant": false, + "legendFormat": "{{ pubsubtopic }}", + "range": true, + "refId": "A" + } + ], + "title": "# messages per shard", + "type": "timeseries" + }, + { + "datasource": { + "type": "postgres", + "uid": "e5d2e0c2-371d-4178-ac71-edc122fb459c" + }, + "description": "Messages in local database per app name, as extracted from the content topic.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [ + { + "options": { + "/waku/2/rs/1/0": { + "index": 0, + "text": "0" + }, + "/waku/2/rs/1/1": { + "index": 1, + "text": "1" + }, + "/waku/2/rs/1/2": { + "index": 2, + "text": "2" + }, + "/waku/2/rs/1/3": { + "index": 3, + "text": "3" + }, + "/waku/2/rs/1/4": { + "index": 4, + "text": "4" + }, + "/waku/2/rs/1/5": { + "index": 5, + "text": "5" + }, + "/waku/2/rs/1/6": { + "index": 6, + "text": "6" + }, + "/waku/2/rs/1/7": { + "index": 7, + "text": "7" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 
+ } + ] + }, + "unit": "string" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Number of Messages (sum)" + }, + "properties": [ + { + "id": "unit", + "value": "none" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Total Payload Size (sum)" + }, + "properties": [ + { + "id": "unit", + "value": "decbytes" + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 9, + "x": 9, + "y": 66 + }, + "id": 144, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "frameIndex": 1, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "e5d2e0c2-371d-4178-ac71-edc122fb459c" + }, + "editorMode": "code", + "format": "table", + "hide": false, + "rawQuery": true, + "rawSql": "SELECT REGEXP_REPLACE(contenttopic,'^\\/(.+)\\/(\\d+)\\/(.+)\\/(.+)$','\\1') as \"App name\", COUNT(id), pg_column_size(payload)\nFROM messages\nGROUP BY contenttopic, payload", + "refId": "A", + "sql": { + "columns": [ + { + "parameters": [ + { + "name": "pubsubtopic", + "type": "functionParameter" + } + ], + "type": "function" + } + ], + "groupBy": [ + { + "property": { + "name": "pubsubtopic", + "type": "string" + }, + "type": "groupBy" + } + ], + "limit": 50 + }, + "table": "messages" + } + ], + "title": "Stored Message by Content Topic App Name", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "contenttopic": "App name", + "count": "Number of Messages", + "pg_column_size": "Total Payload Size" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "App name": { + "aggregations": [ + "uniqueValues" + ], + "operation": "groupby" + }, + "Number of Messages": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + }, + "Total Payload Size": { + "aggregations": [ + "sum" + ], + 
"operation": "aggregate" + }, + "pg_column_size": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Number of Messages (sum)" + } + ] + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of messages currently stored in the database", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 9, + "x": 0, + "y": 77 + }, + "id": 146, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pg_tb_messages_count{}", + "instant": false, + "interval": "", + "legendFormat": "messages", + "range": true, + "refId": "A" + } + ], + "title": "Unique stored messages (Postgres)", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + 
"y": 84 + }, + "id": 46, + "panels": [], + "title": "Postgres", + "type": "row" + }, + { + "colorBackground": false, + "colorValue": false, + "datasource": "Prometheus", + "description": "Source: server_version_num", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 0, + "y": 85 + }, + "id": 11, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(50, 168, 82)", + "show": false + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(pg_settings_server_version_num)", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "PostgreSQL Version", + "type": "stat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "Prometheus", + "description": "Transactions committed + roolback per minute\n\nSource: 
pg_stat_database,xact_commit + xact_rollback", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 4, + "y": 85 + }, + "id": 14, + "interval": "", + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum((rate(pg_stat_database_xact_commit{instance=\"$Instance\"}[$Interval])))+sum((rate(pg_stat_database_xact_rollback{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Transaction rate (Postgres)", + "type": "stat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "Prometheus", + "description": "Statements executed per 
Minute.\n\nSource: pg_stat_statements.calls", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 8, + "y": 85 + }, + "id": 93, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum((rate(pg_stat_statements_calls{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Query rate (Postgres)", + "type": "stat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "Prometheus", + "description": "Source: pg_stat_statements.total_time / pg_stat_statements.calls", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": 
{ + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 85 + }, + "id": 102, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum((delta(pg_stat_statements_total_time_seconds{instance=\"$Instance\"}[$Interval])))/sum((delta(pg_stat_statements_calls{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Average query runtime (Postgres)", + "type": "stat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "Prometheus", + "decimals": 2, + "description": "Size of all databases in $Instance.\n\nSource: pg_database_size()", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": 
{ + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "format": "bytes", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 85 + }, + "id": 37, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(pg_database_size_bytes{instance=\"$Instance\"})", + "refId": "A" + } + ], + "thresholds": "", + "title": "Total database size (Postgres)", + "type": "stat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "Prometheus", + "description": "Max Replication lag behind master in seconds\n\nOnly available on a standby system.\n\nSource: pg_last_xact_replay_timestamp\n\nUse: pg_stat_replication for Details.", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + 
{ + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 20, + "y": 85 + }, + "id": 84, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(pg_replication_lag{instance=\"$Instance\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "thresholds": "", + "title": "Max Replication Lag (Postgres)", + "type": "stat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "max" + }, + { + "datasource": "Prometheus", + "description": "Shared buffer hits vs reads from disc", + "fieldConfig": { + "defaults": { + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-red" + }, + { + "color": "semi-dark-yellow", + "value": 80 + }, + { + "color": "semi-dark-green", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + 
"gridPos": { + "h": 7, + "w": 3, + "x": 0, + "y": 88 + }, + "id": 16, + "links": [], + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "expr": "sum(pg_stat_database_blks_hit{instance=~\"$Instance\"})/(sum(pg_stat_database_blks_hit{instance=~\"$Instance\"})+sum(pg_stat_database_blks_read{instance=~\"$Instance\"}))*100", + "refId": "A" + } + ], + "title": "Shared Buffer Hits (Postgres)", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "Percentage of max_connections used", + "fieldConfig": { + "defaults": { + "decimals": 0, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-green", + "value": null + }, + { + "color": "semi-dark-yellow", + "value": 0.75 + }, + { + "color": "semi-dark-red", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 3, + "y": 88 + }, + "id": 9, + "links": [], + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "expr": "sum(pg_stat_database_numbackends)/max(pg_settings_max_connections)", + "refId": "A" + } + ], + "title": "Connections used (Postgres)", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "Transaction committed vs rollbacked", + "fieldConfig": { + "defaults": { + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + 
"type": 1, + "value": "null" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-red", + "value": null + }, + { + "color": "#EAB839", + "value": 0.75 + }, + { + "color": "semi-dark-green", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 3, + "x": 6, + "y": 88 + }, + "id": 15, + "links": [], + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "expr": "sum(pg_stat_database_xact_commit{instance=\"$Instance\"})/(sum(pg_stat_database_xact_commit{instance=\"$Instance\"}) + sum(pg_stat_database_xact_rollback{instance=\"$Instance\"}))", + "refId": "A" + } + ], + "title": "Commit Ratio (Postgres)", + "type": "gauge" + }, + { + "colorBackground": false, + "colorValue": false, + "colors": [ + "#299c46", + "rgba(237, 129, 40, 0.89)", + "#d44a3a" + ], + "datasource": "Prometheus", + "description": "Clients executing Statements.\n\nSource: pg_stat_activity", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 12, + "y": 88 + }, + "id": 23, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + 
"orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "sum(pg_stat_activity_count{state=\"active\",instance=\"$Instance\"})", + "refId": "A" + } + ], + "thresholds": "", + "title": "Active clients (Postgres)", + "type": "stat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "dateTimeAsIso" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 16, + "y": 88 + }, + "id": 125, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "10.2.3", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "pg_postmaster_start_time_seconds*1000", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Postgres start time", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 51, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 6, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 91 + }, + "id": 142, + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "pg_stat_user_tables_n_live_tup{datname=\"postgres\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Live", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "pg_stat_user_tables_n_dead_tup", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Dead", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Estimated number of rows (Postgres)", + "type": "timeseries" + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + 
"dashes": false, + "datasource": "Prometheus", + "decimals": 0, + "description": "View: pg_stat_activity", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 95 + }, + "hiddenSeries": false, + "id": 24, + "interval": "$Interval", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + { + "targetBlank": true, + "title": "PostgreSQL Documentation", + "url": "https://www.postgresql.org/docs/current/monitoring-stats.html" + } + ], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (state) (pg_stat_activity_count{instance=\"$Instance\"})", + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Connections by state (stacked) (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "logBase": 1, + "show": true + }, + { + "decimals": 0, + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": true, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "decimals": 0, + "description": "View: pg_stat_activity", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + 
"y": 99 + }, + "hiddenSeries": false, + "id": 121, + "interval": "$Interval", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": false, + "linewidth": 1, + "links": [ + { + "targetBlank": true, + "title": "PostgreSQL Documentation", + "url": "https://www.postgresql.org/docs/current/monitoring-stats.html" + } + ], + "nullPointMode": "null as zero", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (datname) (pg_stat_activity_count{instance=\"$Instance\"})", + "legendFormat": "{{datname}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Connections by database (stacked) (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": 0, + "format": "short", + "logBase": 1, + "show": true + }, + { + "decimals": 0, + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "decimals": 2, + "description": "1 Minute rate of transactions committed or rollback.", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 104 + }, + "hiddenSeries": false, + "id": 122, + "interval": "", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "hideZero": false, + "max": true, + "min": false, + 
"rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum ((rate(pg_stat_database_xact_commit[$Interval])))", + "interval": "", + "legendFormat": "committed", + "refId": "A" + }, + { + "expr": "sum ((rate(pg_stat_database_xact_rollback[$Interval])))", + "hide": false, + "interval": "", + "legendFormat": "rollback", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Transactions (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Source: pg_stat_database", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 108 + }, + "hiddenSeries": false, + "id": 27, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideZero": false, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + 
"expr": "sum((rate(pg_stat_database_tup_inserted{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Inserts", + "refId": "A" + }, + { + "expr": "sum((rate(pg_stat_database_tup_updated{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Updates", + "refId": "B" + }, + { + "expr": "sum((rate(pg_stat_database_tup_deleted{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Deletes", + "refId": "C" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Tuples inserts/updates/deletes (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "* blk_read_time: Time spent reading data file blocks by backends in this database, in milliseconds\n* blk_write_time: Time spent writing data file blocks by backends in this database, in milliseconds\n\ntrack_io_timings needs to be activated", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 113 + }, + "hiddenSeries": false, + "id": 26, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, 
+ "targets": [ + { + "expr": "sum ((rate(pg_stat_database_blk_read_time{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "blk_read_time", + "refId": "A" + }, + { + "expr": "sum ((rate(pg_stat_database_blk_write_time{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "blk_read_time", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "I/O Read/Write time (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "ms", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Source: pg_stat_database\n\n* tup_fetched: rows needed to satisfy queries\n* tup_returned: rows read/scanned", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 117 + }, + "hiddenSeries": false, + "id": 111, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideZero": false, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum((rate(pg_stat_database_tup_fetched{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Fetched", + "refId": "A" + }, + { + "expr": 
"sum((rate(pg_stat_database_tup_returned{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Returned", + "refId": "B" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Tuples fetched/returned (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "min": "0", + "show": false + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Source: pg_locks", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 122 + }, + "hiddenSeries": false, + "id": 123, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "PostgreSQL Lock Modes", + "url": "https://www.postgresql.org/docs/12/explicit-locking.html#LOCKING-TABLES" + } + ], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (mode) (pg_locks_count{instance=\"$Instance\"})", + "legendFormat": "{{mode}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Locks by state (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + 
"format": "none", + "logBase": 1, + "show": true + }, + { + "format": "short", + "logBase": 1, + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Should be 0 \n\nSource: pg_stat_database\n\nWith log_lock_waits turned on, deadlocks will be logged to the PostgreSQL Logfiles.", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 126 + }, + "hiddenSeries": false, + "id": 30, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "PostgreSQL Locking", + "url": "https://www.postgresql.org/docs/12/explicit-locking.html" + } + ], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (datname) ((rate(pg_stat_database_deadlocks{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "{{datname}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Deadlocks by database (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, 
+ "datasource": "Prometheus", + "description": "Should be 0. If temporary files are created, it can indicate insufficient work_mem. With log_temp_files the creation of temporary files are logged to the PostgreSQL Logfiles.", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 131 + }, + "hiddenSeries": false, + "id": 31, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": true, + "hideZero": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [ + { + "title": "PostgreSQL Ressources", + "url": "https://www.postgresql.org/docs/current/runtime-config-resource.html" + } + ], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum by (datname) ((rate(pg_stat_database_temp_files{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "{{datname}}", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Temporary files by database (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Lag behind master in seconds.\n\nOnly available on a standby System.", + "fieldConfig": { + "defaults": { + "links": [] + 
}, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 132 + }, + "hiddenSeries": false, + "id": 120, + "interval": "1m", + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "10.2.3", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "max(pg_replication_lag{instance=\"$Instance\"})", + "instant": false, + "intervalFactor": 1, + "legendFormat": "lag ", + "refId": "A" + } + ], + "thresholds": [], + "timeRegions": [], + "title": "Replication lag (Postgres)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "mode": "time", + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "s", + "logBase": 1, + "min": "0", + "show": true + }, + { + "format": "short", + "logBase": 1, + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false + } + } + ], + "refresh": "1m", + "revision": 1, + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "postgres-exporter:9187", + "value": "postgres-exporter:9187" + }, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "definition": "label_values({job=\"postgres-exporter\"}, instance)", + "hide": 0, + "includeAll": false, + "multi": false, + "name": "Instance", + "options": [], + "query": "label_values({job=\"postgres-exporter\"}, instance)", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + 
"current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "definition": "label_values(datname)", + "hide": 0, + "includeAll": true, + "multi": true, + "name": "Database", + "options": [], + "query": "label_values(datname)", + "refresh": 1, + "regex": "/^(?!template*|postgres).*$/", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "auto": true, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "10m", + "value": "10m" + }, + "hide": 0, + "name": "Interval", + "options": [ + { + "selected": false, + "text": "auto", + "value": "$__auto_interval_Interval" + }, + { + "selected": false, + "text": "30sec", + "value": "30sec" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": true, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "30sec,1m,10m,30m,1h,6h,12h,1d", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "nwaku-monitoring", + "uid": "yns_4vFVk", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/sonda-monitoring.json b/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/sonda-monitoring.json new file mode 100644 index 0000000..1d87f2b --- /dev/null +++ 
b/third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/sonda-monitoring.json @@ -0,0 +1,1571 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Sonda messages successfully sent to the network", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "successful_sonda_msgs_total", + "fullMetaSearch": false, + 
"includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Sent Sonda Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Sonda messages that failed to be sent to the network", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"failed_sonda_msgs_total\", instance=\"sonda:8004\", job=\"nwaku\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": 
false, + "editorMode": "builder", + "expr": "failed_sonda_msgs_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Failed Sonda Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Sonda messages successfully sent to the network", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(successful_sonda_msgs_total[5m])/5", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "interval": "", + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Sent Sonda Messages 
Rate per Minute", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Sonda messages that failed to be sent to the network", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{instance=\"sonda:8004\", job=\"nwaku\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(failed_sonda_msgs_total[5m]) / 5", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "__auto", + 
"range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Failed Sonda Messages per Minute", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Store responses including the latest Sonda message ", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "successful_store_queries_total{node=~\"^$node$\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Successful Store Responses", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": 
"Store queries with a non-200 response", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"failed_store_queries_total\" , error=\"504 PEER_DIAL_FAILURE: 16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\", node=\"/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "failed_store_queries_total{node=~\"^$node$\"}", + "fullMetaSearch": false, + "includeNullMetadata": 
true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , error=\"{{error}}\", node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Failed Store Queries", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Store responses including the latest Sonda message ", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"\" , node=\"/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { 
+ "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(successful_store_queries_total{node=~\"^$node$\"}[5m]) / 5", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Successful Store Responses per Minute", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Store queries with a non-200 response", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(failed_store_queries_total{node=~\"^$node$\"}[5m]) / 5", + 
"fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , error=\"{{error}}\", node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Failed Store Queries per Minute", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Store responses that didn't include our latest Sonda message", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"empty_store_responses_total\" , node=\"/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + 
"showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "empty_store_responses_total{node=~\"^$node$\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Empty Store Responses", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Store responses that didn't include our latest Sonda message", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"\" , node=\"/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, 
+ "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "increase(empty_store_responses_total{node=~\"^$node$\"}[5m]) / 5", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Empty Store Responses per Minute", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Latency of store queries", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1 + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"store_query_latency\" , node=\"/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 40 + }, + "id": 7, + "options": { + "bucketOffset": 0, + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": true + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "store_query_latency{node=~\"^$node$\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Store Query Latency (seconds)", + "type": "histogram" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Latency of each store query", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"store_query_latency\" , node=\"/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + 
"viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "store_query_latency{node=~\"^$node$\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Store Query Latency (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Node health according to the configured health threshold. 1 means healthy, 0 not.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepAfter", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "{__name__=\"node_health\" , 
node=\"/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT\"}" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 48 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "node_health{node=~\"^$node$\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{__name__=\"{{__name__}}\" , node=\"{{node}}\"}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Node health", + "type": "timeseries" + } + ], + "refresh": "", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "definition": "label_values(node)", + "hide": 0, + "includeAll": true, + "label": "node", + "multi": false, + "name": "node", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(node)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "sonda-monitoring", + "uid": "cbd1b6c8-63d2-41f3-b57b-a776ec8fa23e", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/datasources.yaml 
b/third-party/nwaku/apps/sonda/monitoring/configuration/datasources.yaml new file mode 100644 index 0000000..9f4f51f --- /dev/null +++ b/third-party/nwaku/apps/sonda/monitoring/configuration/datasources.yaml @@ -0,0 +1,11 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + org_id: 1 + url: http://prometheus:9090 + is_default: true + version: 1 + editable: true \ No newline at end of file diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/grafana-plugins.env b/third-party/nwaku/apps/sonda/monitoring/configuration/grafana-plugins.env new file mode 100644 index 0000000..2780809 --- /dev/null +++ b/third-party/nwaku/apps/sonda/monitoring/configuration/grafana-plugins.env @@ -0,0 +1,2 @@ +#GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,digrich-bubblechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,jdbranham-diagram-panel,agenty-flowcharting-panel,citilogics-geoloop-panel,savantly-heatmap-panel,mtanda-histogram-panel,pierosavi-imageit-panel,michaeldmoore-multistat-panel,zuburqan-parity-report-panel,natel-plotly-panel,bessler-pictureit-panel,grafana-polystat-panel,corpglory-progresslist-panel,snuids-radar-panel,fzakaria-simple-config.config.annotations-datasource,vonage-status-panel,snuids-trafficlights-panel,pr0ps-trackmap-panel,alexandra-trackmap-panel,btplc-trend-box-panel +GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,pierosavi-imageit-panel,bessler-pictureit-panel,vonage-status-panel diff --git a/third-party/nwaku/apps/sonda/monitoring/configuration/grafana.ini b/third-party/nwaku/apps/sonda/monitoring/configuration/grafana.ini new file mode 100644 index 0000000..f237726 --- /dev/null +++ b/third-party/nwaku/apps/sonda/monitoring/configuration/grafana.ini @@ -0,0 +1,51 @@ +instance_name = nwaku dashboard + +;[dashboards.json] +;enabled = true +;path = /home/git/grafana/grafana-dashboards/dashboards + + 
+#################################### Auth ########################## +[auth] +disable_login_form = false + +#################################### Anonymous Auth ########################## +[auth.anonymous] +# enable anonymous access +enabled = true + +# specify organization name that should be used for unauthenticated users +;org_name = Public + +# specify role for unauthenticated users +org_role = Admin +; org_role = Viewer + +;[security] +;admin_user = ocr +;admin_password = ocr + +;[users] +# disable user signup / registration +;allow_sign_up = false + +# Set to true to automatically assign new users to the default organization (id 1) +;auto_assign_org = true + +# Default role new users will be automatically assigned (if disabled above is set to true) +;auto_assign_org_role = Viewer + +#################################### SMTP / Emailing ########################## +;[smtp] +;enabled = false +;host = localhost:25 +;user = +;password = +;cert_file = +;key_file = +;skip_verify = false +;from_address = admin@grafana.localhost + +;[emails] +;welcome_email_on_sign_up = false + diff --git a/third-party/nwaku/apps/sonda/monitoring/prometheus-config.yml b/third-party/nwaku/apps/sonda/monitoring/prometheus-config.yml new file mode 100644 index 0000000..51e2b50 --- /dev/null +++ b/third-party/nwaku/apps/sonda/monitoring/prometheus-config.yml @@ -0,0 +1,10 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + external_labels: + monitor: "Monitoring" + +scrape_configs: + - job_name: "nwaku" + static_configs: + - targets: ["nwaku:8003", "sonda:8004"] diff --git a/third-party/nwaku/apps/sonda/register_rln.sh b/third-party/nwaku/apps/sonda/register_rln.sh new file mode 100755 index 0000000..4fb373b --- /dev/null +++ b/third-party/nwaku/apps/sonda/register_rln.sh @@ -0,0 +1,31 @@ +#!/bin/sh + + +if test -f ./keystore/keystore.json; then + echo "keystore/keystore.json already exists. Use it instead of creating a new one." 
+ echo "Exiting" + exit 1 +fi + + +if test -f .env; then + echo "Using .env file" + . $(pwd)/.env +fi + +# TODO: Set nwaku release when ready instead of quay + +if test -n "${ETH_CLIENT_ADDRESS}"; then + echo "ETH_CLIENT_ADDRESS variable was renamed to RLN_RELAY_ETH_CLIENT_ADDRESS" + echo "Please update your .env file" + exit 1 +fi + +docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \ +--rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \ +--rln-relay-eth-private-key=${ETH_TESTNET_KEY} \ +--rln-relay-eth-contract-address=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \ +--rln-relay-cred-path=/keystore/keystore.json \ +--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \ +--rln-relay-user-message-limit=20 \ +--execute diff --git a/third-party/nwaku/apps/sonda/run_node.sh b/third-party/nwaku/apps/sonda/run_node.sh new file mode 100644 index 0000000..e130019 --- /dev/null +++ b/third-party/nwaku/apps/sonda/run_node.sh @@ -0,0 +1,110 @@ +#!/bin/sh + +echo "I am a nwaku node" + +if test -n "${ETH_CLIENT_ADDRESS}" -o ; then + echo "ETH_CLIENT_ADDRESS variable was renamed to RLN_RELAY_ETH_CLIENT_ADDRESS" + echo "Please update your .env file" + exit 1 +fi + +if [ -z "${RLN_RELAY_ETH_CLIENT_ADDRESS}" ] && [ "${CLUSTER_ID}" -eq 1 ]; then + echo "Missing Eth client address, please refer to README.md for detailed instructions" + exit 1 +fi + +if [ "${CLUSTER_ID}" -ne 1 ]; then + echo "CLUSTER_ID is not equal to 1, clearing RLN configurations" + RLN_RELAY_CRED_PATH="" + RLN_RELAY_ETH_CLIENT_ADDRESS="" + RLN_RELAY_CRED_PASSWORD="" +fi + +MY_EXT_IP=$(wget -qO- https://api4.ipify.org) +DNS_WSS_CMD= + +if [ -n "${DOMAIN}" ]; then + + LETSENCRYPT_PATH=/etc/letsencrypt/live/${DOMAIN} + + if ! [ -d "${LETSENCRYPT_PATH}" ]; then + apk add --no-cache certbot + + certbot certonly\ + --non-interactive\ + --agree-tos\ + --no-eff-email\ + --no-redirect\ + --email admin@${DOMAIN}\ + -d ${DOMAIN}\ + --standalone + fi + + if ! 
[ -e "${LETSENCRYPT_PATH}/privkey.pem" ]; then + echo "The certificate does not exist" + sleep 60 + exit 1 + fi + + WS_SUPPORT="--websocket-support=true" + WSS_SUPPORT="--websocket-secure-support=true" + WSS_KEY="--websocket-secure-key-path=${LETSENCRYPT_PATH}/privkey.pem" + WSS_CERT="--websocket-secure-cert-path=${LETSENCRYPT_PATH}/cert.pem" + DNS4_DOMAIN="--dns4-domain-name=${DOMAIN}" + + DNS_WSS_CMD="${WS_SUPPORT} ${WSS_SUPPORT} ${WSS_CERT} ${WSS_KEY} ${DNS4_DOMAIN}" +fi + +if [ -n "${NODEKEY}" ]; then + NODEKEY=--nodekey=${NODEKEY} +fi + +if [ "${CLUSTER_ID}" -eq 1 ]; then + RLN_RELAY_CRED_PATH=--rln-relay-cred-path=${RLN_RELAY_CRED_PATH:-/keystore/keystore.json} +fi + +if [ -n "${RLN_RELAY_CRED_PASSWORD}" ]; then + RLN_RELAY_CRED_PASSWORD=--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" +fi + +if [ -n "${RLN_RELAY_ETH_CLIENT_ADDRESS}" ]; then + RLN_RELAY_ETH_CLIENT_ADDRESS=--rln-relay-eth-client-address="${RLN_RELAY_ETH_CLIENT_ADDRESS}" +fi + +# TO DO: configure bootstrap nodes in env + +exec /usr/bin/wakunode\ + --relay=true\ + --filter=false\ + --lightpush=false\ + --keep-alive=true\ + --max-connections=150\ + --cluster-id="${CLUSTER_ID}"\ + --discv5-discovery=true\ + --discv5-udp-port=9005\ + --discv5-enr-auto-update=True\ + --log-level=DEBUG\ + --tcp-port=30304\ + --metrics-server=True\ + --metrics-server-port=8003\ + --metrics-server-address=0.0.0.0\ + --rest=true\ + --rest-admin=true\ + --rest-address=0.0.0.0\ + --rest-port=8645\ + --rest-allow-origin="waku-org.github.io"\ + --rest-allow-origin="localhost:*"\ + --nat=extip:"${MY_EXT_IP}"\ + --store=false\ + --pubsub-topic="/waku/2/rs/${CLUSTER_ID}/${SHARD}"\ + 
--discv5-bootstrap-node="enr:-QEKuECA0zhRJej2eaOoOPddNcYr7-5NdRwuoLCe2EE4wfEYkAZhFotg6Kkr8K15pMAGyUyt0smHkZCjLeld0BUzogNtAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc2hhcmRzLnRlc3Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zaGFyZHMudGVzdC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0"\ + --discv5-bootstrap-node="enr:-QEcuEAgXDqrYd_TrpUWtn3zmxZ9XPm7O3GS6lV7aMJJOTsbOAAeQwSd_eoHcCXqVzTUtwTyB4855qtbd8DARnExyqHPAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc2hhcmRzLnRlc3Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zaGFyZHMudGVzdC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0"\ + --discv5-bootstrap-node="enr:-QEcuEAX6Qk-vVAoJLxR4A_4UVogGhvQrqKW4DFKlf8MA1PmCjgowL-LBtSC9BLjXbb8gf42FdDHGtSjEvvWKD10erxqAYJpZIJ2NIJpcIQI2hdMim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc2hhcmRzLnRlc3Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zaGFyZHMudGVzdC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDP7CbRk-YKJwOFFM4Z9ney0GPc7WPJaCwGkpNRyla7mCDdGNwgnZfg3VkcIIjKIV3YWt1Mg0"\ + ${RLN_RELAY_CRED_PATH}\ + ${RLN_RELAY_CRED_PASSWORD}\ + ${RLN_RELAY_TREE_PATH}\ + ${RLN_RELAY_ETH_CLIENT_ADDRESS}\ + ${DNS_WSS_CMD}\ + ${NODEKEY}\ + ${EXTRA_ARGS} + diff --git a/third-party/nwaku/apps/sonda/sonda.py b/third-party/nwaku/apps/sonda/sonda.py new file mode 100644 index 0000000..8b74bd0 --- /dev/null +++ b/third-party/nwaku/apps/sonda/sonda.py @@ -0,0 +1,207 @@ +import requests +import time +import json +import os +import base64 +import sys +import urllib.parse +import requests +import argparse +from datetime import datetime +from prometheus_client import Counter, Gauge, start_http_server + +# Content topic where Sona messages are going to be sent +SONDA_CONTENT_TOPIC = '/sonda/2/polls/proto' + +# Prometheus metrics +successful_sonda_msgs = 
Counter('successful_sonda_msgs', 'Number of successful Sonda messages sent') +failed_sonda_msgs = Counter('failed_sonda_msgs', 'Number of failed Sonda messages attempts') +successful_store_queries = Counter('successful_store_queries', 'Number of successful store queries', ['node']) +failed_store_queries = Counter('failed_store_queries', 'Number of failed store queries', ['node', 'error']) +empty_store_responses = Counter('empty_store_responses', "Number of store responses without the latest Sonda message", ['node']) +store_query_latency = Gauge('store_query_latency', 'Latency of the last store query in seconds', ['node']) +consecutive_successful_responses = Gauge('consecutive_successful_responses', 'Consecutive successful store responses', ['node']) +node_health = Gauge('node_health', "Binary indicator of a node's health. 1 is healthy, 0 is not", ['node']) + + +# Argparser configuration +parser = argparse.ArgumentParser(description='') +parser.add_argument('-m', '--metrics-port', type=int, default=8004, help='Port to expose prometheus metrics.') +parser.add_argument('-a', '--node-rest-address', type=str, default="http://nwaku:8645", help='Address of the waku node to send messages to.') +parser.add_argument('-p', '--pubsub-topic', type=str, default='/waku/2/rs/1/0', help='PubSub topic.') +parser.add_argument('-d', '--delay-seconds', type=int, default=60, help='Delay in seconds between messages.') +parser.add_argument('-n', '--store-nodes', type=str, required=True, help='Comma separated list of store nodes to query.') +parser.add_argument('-t', '--health-threshold', type=int, default=5, help='Consecutive successful store requests to consider a store node healthy.') +args = parser.parse_args() + + +# Logs message including current UTC time +def log_with_utc(message): + utc_time = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S") + print(f"[{utc_time} UTC] {message}") + + +# Sends Sonda message. 
Returns True if successful, False otherwise +def send_sonda_msg(rest_address, pubsub_topic, content_topic, timestamp): + message = "Hi, I'm Sonda" + base64_message = base64.b64encode(message.encode('utf-8')).decode('ascii') + body = { + 'payload': base64_message, + 'contentTopic': content_topic, + 'version': 1, + 'timestamp': timestamp + } + + encoded_pubsub_topic = urllib.parse.quote(pubsub_topic, safe='') + url = f'{rest_address}/relay/v1/messages/{encoded_pubsub_topic}' + headers = {'content-type': 'application/json'} + + log_with_utc(f'Sending Sonda message via REST: {url} PubSubTopic: {pubsub_topic}, ContentTopic: {content_topic}, timestamp: {timestamp}') + + try: + start_time = time.time() + response = requests.post(url, json=body, headers=headers, timeout=10) + elapsed_seconds = time.time() - start_time + + log_with_utc(f'Response from {rest_address}: status:{response.status_code} content:{response.text} [{elapsed_seconds:.4f} s.]') + + if response.status_code == 200: + successful_sonda_msgs.inc() + return True + else: + response.raise_for_status() + except requests.RequestException as e: + log_with_utc(f'Error sending request: {e}') + + failed_sonda_msgs.inc() + return False + + +# We return true if both our node and the queried Store node returned a 200 +# If our message isn't found but we did get a store 200 response, this function still returns true +def check_store_response(json_response, store_node, timestamp): + # Check for the store node status code + if json_response.get('statusCode') != 200: + error = f"{json_response.get('statusCode')} {json_response.get('statusDesc')}" + log_with_utc(f'Failed performing store query {error}') + failed_store_queries.labels(node=store_node, error=error).inc() + consecutive_successful_responses.labels(node=store_node).set(0) + + return False + + messages = json_response.get('messages') + # If there's no message in the response, increase counters and return + if not messages: + log_with_utc("No messages in store 
response") + empty_store_responses.labels(node=store_node).inc() + consecutive_successful_responses.labels(node=store_node).set(0) + return True + + # Search for the Sonda message in the returned messages + for message in messages: + # If message field is missing in current message, continue + if not message.get("message"): + log_with_utc("Could not retrieve message") + continue + + # If a message is found with the same timestamp as sonda message, increase counters and return + if timestamp == message.get('message').get('timestamp'): + log_with_utc(f'Found Sonda message in store response node={store_node}') + successful_store_queries.labels(node=store_node).inc() + consecutive_successful_responses.labels(node=store_node).inc() + return True + + # If our message wasn't found in the returned messages, increase counter and return + empty_store_responses.labels(node=store_node).inc() + consecutive_successful_responses.labels(node=store_node).set(0) + return True + + +def send_store_query(rest_address, store_node, encoded_pubsub_topic, encoded_content_topic, timestamp): + url = f'{rest_address}/store/v3/messages' + params = { + 'peerAddr': urllib.parse.quote(store_node, safe=''), + 'pubsubTopic': encoded_pubsub_topic, + 'contentTopics': encoded_content_topic, + 'includeData': 'true', + 'startTime': timestamp + } + + s_time = time.time() + + try: + log_with_utc(f'Sending store request to {store_node}') + response = requests.get(url, params=params) + except Exception as e: + log_with_utc(f'Error sending request: {e}') + failed_store_queries.labels(node=store_node, error=str(e)).inc() + consecutive_successful_responses.labels(node=store_node).set(0) + return False + + elapsed_seconds = time.time() - s_time + log_with_utc(f'Response from {rest_address}: status:{response.status_code} [{elapsed_seconds:.4f} s.]') + + if response.status_code != 200: + failed_store_queries.labels(node=store_node, error=f'{response.status_code} {response.content}').inc() + 
consecutive_successful_responses.labels(node=store_node).set(0) + return False + + # Parse REST response into JSON + try: + json_response = response.json() + except Exception as e: + log_with_utc(f'Error parsing response JSON: {e}') + failed_store_queries.labels(node=store_node, error="JSON parse error").inc() + consecutive_successful_responses.labels(node=store_node).set(0) + return False + + # Analyze Store response. Return false if response is incorrect or has an error status + if not check_store_response(json_response, store_node, timestamp): + return False + + store_query_latency.labels(node=store_node).set(elapsed_seconds) + return True + + +def send_store_queries(rest_address, store_nodes, pubsub_topic, content_topic, timestamp): + log_with_utc(f'Sending store queries. nodes = {store_nodes} timestamp = {timestamp}') + encoded_pubsub_topic = urllib.parse.quote(pubsub_topic, safe='') + encoded_content_topic = urllib.parse.quote(content_topic, safe='') + + for node in store_nodes: + send_store_query(rest_address, node, encoded_pubsub_topic, encoded_content_topic, timestamp) + + +def main(): + log_with_utc(f'Running Sonda with args={args}') + + store_nodes = [] + if args.store_nodes is not None: + store_nodes = [s.strip() for s in args.store_nodes.split(",")] + log_with_utc(f'Store nodes to query: {store_nodes}') + + # Start Prometheus HTTP server at port set by the CLI(default 8004) + start_http_server(args.metrics_port) + + while True: + timestamp = time.time_ns() + + # Send Sonda message + res = send_sonda_msg(args.node_rest_address, args.pubsub_topic, SONDA_CONTENT_TOPIC, timestamp) + + log_with_utc(f'sleeping: {args.delay_seconds} seconds') + time.sleep(args.delay_seconds) + + # Only send store query if message was successfully published + if(res): + send_store_queries(args.node_rest_address, store_nodes, args.pubsub_topic, SONDA_CONTENT_TOPIC, timestamp) + + # Update node health metrics + for store_node in store_nodes: + if 
consecutive_successful_responses.labels(node=store_node)._value.get() >= args.health_threshold: + node_health.labels(node=store_node).set(1) + else: + node_health.labels(node=store_node).set(0) + + +main() diff --git a/third-party/nwaku/apps/wakucanary/README.md b/third-party/nwaku/apps/wakucanary/README.md new file mode 100644 index 0000000..1140823 --- /dev/null +++ b/third-party/nwaku/apps/wakucanary/README.md @@ -0,0 +1,58 @@ +# waku canary tool + +Attempts to dial a peer and asserts it supports a given set of protocols. + +```console +./build/wakucanary --help +Usage: + +wakucanary [OPTIONS]... + +The following options are available: + + -a, --address Multiaddress of the peer node to attempt to dial. + -t, --timeout Timeout to consider that the connection failed [=chronos.seconds(10)]. + -p, --protocol Protocol required to be supported: store,relay,lightpush,filter (can be used + multiple times). + -l, --log-level Sets the log level [=LogLevel.DEBUG]. + -np, --node-port Listening port for waku node [=60000]. + --websocket-secure-key-path Secure websocket key path: '/path/to/key.txt' . + --websocket-secure-cert-path Secure websocket Certificate path: '/path/to/cert.txt' . + -c, --cluster-id Cluster ID of the fleet node to check status [Default=1] + -s, --shard Shards index to subscribe to topics [ Argument may be repeated ] + +``` + +The tool can be built as: + +```console +$ make wakucanary +``` + +And used as follows. A reachable node that supports both `store` and `filter` protocols. + +```console +$ ./build/wakucanary --address=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV --protocol=store --protocol=filter +$ echo $? +0 +``` + +A node that can't be reached. +```console +$ ./build/wakucanary --address=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/1000/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV --protocol=store --protocol=filter +$ echo $? 
+1 +``` + +Note that a domain name can also be used. +```console +$ ./build/wakucanary --address=/dns4/node-01.do-ams3.status.test.status.im/tcp/30303/p2p/16Uiu2HAkukebeXjTQ9QDBeNDWuGfbaSg79wkkhK4vPocLgR6QFDf --protocol=store --protocol=filter +$ echo $? +0 +``` + +Websockets are also supported. The websocket port opened by waku canary is calculated as `$(--node-port) + 1000` (e.g. when you set `-np 60000`, the WS port will be `61000`) +```console +$ ./build/wakucanary --address=/ip4/127.0.0.1/tcp/7777/ws/p2p/16Uiu2HAm4ng2DaLPniRoZtMQbLdjYYWnXjrrJkGoXWCoBWAdn1tu --protocol=store --protocol=filter +$ ./build/wakucanary --address=/ip4/127.0.0.1/tcp/7777/wss/p2p/16Uiu2HAmB6JQpewXScGoQ2syqmimbe4GviLxRwfsR8dCpwaGBPSE --protocol=store --websocket-secure-key-path=MyKey.key --websocket-secure-cert-path=MyCertificate.crt +``` diff --git a/third-party/nwaku/apps/wakucanary/certsgenerator.nim b/third-party/nwaku/apps/wakucanary/certsgenerator.nim new file mode 100644 index 0000000..b8a9e9d --- /dev/null +++ b/third-party/nwaku/apps/wakucanary/certsgenerator.nim @@ -0,0 +1,37 @@ +import osproc, os, httpclient, strutils + +proc getPublicIP(): string = + let client = newHttpClient() + try: + let response = client.get("http://api.ipify.org") + return response.body + except Exception as e: + echo "Could not fetch public IP: " & e.msg + return "127.0.0.1" + +# Function to generate a self-signed certificate +proc generateSelfSignedCertificate*(certPath: string, keyPath: string): int = + # Ensure the OpenSSL is installed + if findExe("openssl") == "": + echo "OpenSSL is not installed or not in the PATH."
+ return 1 + + let publicIP = getPublicIP() + + if publicIP != "127.0.0.1": + echo "Your public IP address is: ", publicIP + + # Command to generate private key and cert + let + cmd = + "openssl req -x509 -newkey rsa:4096 -keyout " & keyPath & " -out " & certPath & + " -sha256 -days 3650 -nodes -subj '/C=XX/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=" & + publicIP & "'" + res = execCmd(cmd) + + if res == 0: + echo "Successfully generated self-signed certificate and key." + else: + echo "Failed to generate certificate and key." + + return res diff --git a/third-party/nwaku/apps/wakucanary/nim.cfg b/third-party/nwaku/apps/wakucanary/nim.cfg new file mode 100644 index 0000000..2231f2e --- /dev/null +++ b/third-party/nwaku/apps/wakucanary/nim.cfg @@ -0,0 +1,4 @@ +-d:chronicles_line_numbers +-d:chronicles_runtime_filtering:on +-d:discv5_protocol_id:d5waku +path = "../.." diff --git a/third-party/nwaku/apps/wakucanary/wakucanary.nim b/third-party/nwaku/apps/wakucanary/wakucanary.nim new file mode 100644 index 0000000..e770028 --- /dev/null +++ b/third-party/nwaku/apps/wakucanary/wakucanary.nim @@ -0,0 +1,300 @@ +import + std/[strutils, sequtils, tables, strformat], + confutils, + chronos, + chronicles/topics_registry, + os +import + libp2p/protocols/ping, + libp2p/crypto/[crypto, secp], + libp2p/nameresolving/dnsresolver, + libp2p/multicodec +import + ./certsgenerator, + waku/[waku_enr, node/peer_manager, waku_core, waku_node, factory/builder] + +# protocols and their tag +const ProtocolsTable = { + "store": "/vac/waku/store/", + "storev3": "/vac/waku/store-query/3", + "relay": "/vac/waku/relay/", + "lightpush": "/vac/waku/lightpush/", + "filter": "/vac/waku/filter-subscribe/2", + "filter-push": "/vac/waku/filter-push/", + "ipfs-id": "/ipfs/id/", + "autonat": "/libp2p/autonat/", + "circuit-relay": "/libp2p/circuit/relay/", + "metadata": "/vac/waku/metadata/", + "rendezvous": "/rendezvous/", + "ipfs-ping": "/ipfs/ping/", + "peer-exchange": 
"/vac/waku/peer-exchange/", + "mix": "mix/1.0.0", +}.toTable + +const WebSocketPortOffset = 1000 +const CertsDirectory = "./certs" + +# cli flags +type WakuCanaryConf* = object + address* {. + desc: "Multiaddress of the peer node to attempt to dial", + defaultValue: "", + name: "address", + abbr: "a" + .}: string + + timeout* {. + desc: "Timeout to consider that the connection failed", + defaultValue: chronos.seconds(10), + name: "timeout", + abbr: "t" + .}: chronos.Duration + + protocols* {. + desc: + "Protocol required to be supported: store,relay,lightpush,filter (can be used multiple times)", + name: "protocol", + abbr: "p" + .}: seq[string] + + logLevel* {. + desc: "Sets the log level", + defaultValue: LogLevel.INFO, + name: "log-level", + abbr: "l" + .}: LogLevel + + nodePort* {. + desc: "Listening port for waku node", + defaultValue: 60000, + name: "node-port", + abbr: "np" + .}: uint16 + + ## websocket secure config + websocketSecureKeyPath* {. + desc: "Secure websocket key path: '/path/to/key.txt' ", + defaultValue: "", + name: "websocket-secure-key-path" + .}: string + + websocketSecureCertPath* {. + desc: "Secure websocket Certificate path: '/path/to/cert.txt' ", + defaultValue: "", + name: "websocket-secure-cert-path" + .}: string + + ping* {. + desc: "Ping the peer node to measure latency", defaultValue: true, name: "ping" + .}: bool + + shards* {. + desc: + "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.", + defaultValue: @[], + name: "shard", + abbr: "s" + .}: seq[uint16] + + clusterId* {. + desc: + "Cluster id that the node is running in. 
Node in a different cluster id is disconnected.", + defaultValue: 1, + name: "cluster-id", + abbr: "c" + .}: uint16 + +proc parseCmdArg*(T: type chronos.Duration, p: string): T = + try: + result = chronos.seconds(parseInt(p)) + except CatchableError: + raise newException(ValueError, "Invalid timeout value") + +proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] = + return @[] + +proc areProtocolsSupported( + toValidateProtocols: seq[string], nodeProtocols: seq[string] +): bool = + ## Checks if all toValidateProtocols are contained in nodeProtocols. + ## nodeProtocols contains the full list of protocols currently informed by the node under analysis. + ## toValidateProtocols contains the protocols, without version number, that we want to check if they are supported by the node. + var numOfSupportedProt: int = 0 + + for rawProtocol in toValidateProtocols: + let protocolTag = ProtocolsTable[rawProtocol] + debug "Checking if protocol is supported", expected_protocol_tag = protocolTag + + var protocolSupported = false + for nodeProtocol in nodeProtocols: + if nodeProtocol.startsWith(protocolTag): + info "The node supports the protocol", supported_protocol = nodeProtocol + numOfSupportedProt += 1 + protocolSupported = true + break + + if not protocolSupported: + error "The node does not support the protocol", expected_protocol = protocolTag + + if numOfSupportedProt == toValidateProtocols.len: + return true + + return false + +proc pingNode( + node: WakuNode, peerInfo: RemotePeerInfo +): Future[void] {.async, gcsafe.} = + try: + let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) + let pingDelay = await node.libp2pPing.ping(conn) + info "Peer response time (ms)", peerId = peerInfo.peerId, ping = pingDelay.millis + except CatchableError: + var msg = getCurrentExceptionMsg() + if msg == "Future operation cancelled!": + msg = "timedout" + error "Failed to ping the peer", peer = peerInfo, err = msg + +proc main(rng: ref 
HmacDrbgContext): Future[int] {.async.} = + let conf: WakuCanaryConf = WakuCanaryConf.load() + + # create dns resolver + let + nameServers = + @[ + initTAddress(parseIpAddress("1.1.1.1"), Port(53)), + initTAddress(parseIpAddress("1.0.0.1"), Port(53)), + ] + resolver: DnsResolver = DnsResolver.new(nameServers) + + if conf.logLevel != LogLevel.NONE: + setLogLevel(conf.logLevel) + + # ensure input protocols are valid + for p in conf.protocols: + if p notin ProtocolsTable: + error "invalid protocol", protocol = p, valid = ProtocolsTable + raise newException(ConfigurationError, "Invalid cli flag values" & p) + + info "Cli flags", + address = conf.address, + timeout = conf.timeout, + protocols = conf.protocols, + logLevel = conf.logLevel + + let peerRes = parsePeerInfo(conf.address) + if peerRes.isErr(): + error "Couldn't parse 'conf.address'", error = peerRes.error + quit(QuitFailure) + + let peer = peerRes.value + + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[] + bindIp = parseIpAddress("0.0.0.0") + wsBindPort = Port(conf.nodePort + WebSocketPortOffset) + nodeTcpPort = Port(conf.nodePort) + isWs = peer.addrs[0].contains(multiCodec("ws")).get() + isWss = peer.addrs[0].contains(multiCodec("wss")).get() + keyPath = + if conf.websocketSecureKeyPath.len > 0: + conf.websocketSecureKeyPath + else: + CertsDirectory & "/key.pem" + certPath = + if conf.websocketSecureCertPath.len > 0: + conf.websocketSecureCertPath + else: + CertsDirectory & "/cert.pem" + + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + + let netConfig = NetConfig.init( + bindIp = bindIp, + bindPort = nodeTcpPort, + wsBindPort = some(wsBindPort), + wsEnabled = isWs, + wssEnabled = isWss, + ) + + var enrBuilder = EnrBuilder.init(nodeKey) + + enrBuilder.withWakuRelaySharding( + RelayShards(clusterId: conf.clusterId, shardIds: conf.shards) + ).isOkOr: + error "could not initialize ENR with shards", error + quit(QuitFailure) + + let recordRes = enrBuilder.build() + let record = 
+ if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + if isWss and + (conf.websocketSecureKeyPath.len == 0 or conf.websocketSecureCertPath.len == 0): + info "WebSocket Secure requires key and certificate. Generating them" + if not dirExists(CertsDirectory): + createDir(CertsDirectory) + if generateSelfSignedCertificate(certPath, keyPath) != 0: + error "Error generating key and certificate" + quit(QuitFailure) + + builder.withRecord(record) + builder.withNetworkConfiguration(netConfig.tryGet()) + builder.withSwitchConfiguration( + secureKey = some(keyPath), secureCert = some(certPath), nameResolver = resolver + ) + + let node = builder.build().tryGet() + + if conf.ping: + try: + await mountLibp2pPing(node) + except CatchableError: + error "failed to mount libp2p ping protocol: " & getCurrentExceptionMsg() + quit(QuitFailure) + + node.mountMetadata(conf.clusterId, conf.shards).isOkOr: + error "failed to mount metadata protocol", error + quit(QuitFailure) + + await node.start() + + var pingFut: Future[bool] + if conf.ping: + pingFut = pingNode(node, peer).withTimeout(conf.timeout) + + let timedOut = not await node.connectToNodes(@[peer]).withTimeout(conf.timeout) + if timedOut: + error "Timedout after", timeout = conf.timeout + quit(QuitFailure) + + let lp2pPeerStore = node.switch.peerStore + let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId] + + if conf.ping: + discard await pingFut + + if conStatus in [Connected, CanConnect]: + let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId] + + if not areProtocolsSupported(conf.protocols, nodeProtocols): + error "Not all protocols are supported", + expected = conf.protocols, supported = nodeProtocols + quit(QuitFailure) + elif conStatus == CannotConnect: + error "Could not connect", peerId = peer.peerId + quit(QuitFailure) + return 0 + +when isMainModule: + let rng = crypto.newRng() + let status = waitFor 
main(rng) + if status == 0: + info "The node is reachable and supports all specified protocols" + else: + error "The node has some problems (see logs)" + quit status diff --git a/third-party/nwaku/apps/wakunode2/nim.cfg b/third-party/nwaku/apps/wakunode2/nim.cfg new file mode 100644 index 0000000..a6fab9c --- /dev/null +++ b/third-party/nwaku/apps/wakunode2/nim.cfg @@ -0,0 +1,10 @@ +-d:chronicles_line_numbers +-d:discv5_protocol_id="d5waku" +-d:chronicles_runtime_filtering=on +-d:chronicles_sinks="textlines,json" +-d:chronicles_default_output_device=dynamic +# Disabling the following topics from nim-eth and nim-dnsdisc since some types cannot be serialized +-d:chronicles_disabled_topics="eth,dnsdisc.client" +# Results in empty output for some reason +#-d:"chronicles_enabled_topics=GossipSub:TRACE,WakuRelay:TRACE" +path = "../.." diff --git a/third-party/nwaku/apps/wakunode2/wakunode2.nim b/third-party/nwaku/apps/wakunode2/wakunode2.nim new file mode 100644 index 0000000..ac6b38a --- /dev/null +++ b/third-party/nwaku/apps/wakunode2/wakunode2.nim @@ -0,0 +1,102 @@ +{.push raises: [].} + +import + std/[options, strutils, sequtils, net], + chronicles, + chronos, + metrics, + libbacktrace, + system/ansi_c, + libp2p/crypto/crypto +import + ../../tools/[rln_keystore_generator/rln_keystore_generator, confutils/cli_args], + waku/[ + common/logging, + factory/waku, + node/health_monitor, + waku_api/rest/builder as rest_server_builder, + waku_core/message/default_values, + ] + +logScope: + topics = "wakunode main" + +const git_version* {.strdefine.} = "n/a" + +{.pop.} + # @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError +when isMainModule: + ## Node setup happens in 6 phases: + ## 1. Set up storage + ## 2. Initialize node + ## 3. Mount and initialize configured protocols + ## 4. Start node and mounted protocols + ## 5. Start monitoring tools and external interfaces + ## 6. 
Setup graceful shutdown hooks + + const versionString = "version / git commit hash: " & waku.git_version + + var wakuNodeConf = WakuNodeConf.load(version = versionString).valueOr: + error "failure while loading the configuration", error = error + quit(QuitFailure) + + ## Also called within Waku.new. The call to startRestServerEssentials needs the following line + logging.setupLog(wakuNodeConf.logLevel, wakuNodeConf.logFormat) + + case wakuNodeConf.cmd + of generateRlnKeystore: + let conf = wakuNodeConf.toKeystoreGeneratorConf() + doRlnKeystoreGenerator(conf) + of noCommand: + let conf = wakuNodeConf.toWakuConf().valueOr: + error "Waku configuration failed", error = error + quit(QuitFailure) + + var waku = (waitFor Waku.new(conf)).valueOr: + error "Waku initialization failed", error = error + quit(QuitFailure) + + (waitFor startWaku(addr waku)).isOkOr: + error "Starting waku failed", error = error + quit(QuitFailure) + + debug "Setting up shutdown hooks" + proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} = + await waku.stop() + quit(QuitSuccess) + + # Handle Ctrl-C SIGINT + proc handleCtrlC() {.noconv.} = + when defined(windows): + # workaround for https://github.com/nim-lang/Nim/issues/4057 + setupForeignThreadGc() + notice "Shutting down after receiving SIGINT" + asyncSpawn asyncStopper(waku) + + setControlCHook(handleCtrlC) + + # Handle SIGTERM + when defined(posix): + proc handleSigterm(signal: cint) {.noconv.} = + notice "Shutting down after receiving SIGTERM" + asyncSpawn asyncStopper(waku) + + c_signal(ansi_c.SIGTERM, handleSigterm) + + # Handle SIGSEGV + when defined(posix): + proc handleSigsegv(signal: cint) {.noconv.} = + # Require --debugger:native + fatal "Shutting down after receiving SIGSEGV", stacktrace = getBacktrace() + + # Not available in -d:release mode + writeStackTrace() + + waitFor waku.stop() + quit(QuitFailure) + + c_signal(ansi_c.SIGSEGV, handleSigsegv) + + info "Node setup complete" + + runForever() diff --git 
a/third-party/nwaku/ci/Jenkinsfile.lpt b/third-party/nwaku/ci/Jenkinsfile.lpt new file mode 100644 index 0000000..c81a21b --- /dev/null +++ b/third-party/nwaku/ci/Jenkinsfile.lpt @@ -0,0 +1,95 @@ +#!/usr/bin/env groovy +library 'status-jenkins-lib@v1.8.17' + +pipeline { + agent { label 'linux' } + + options { + timestamps() + timeout(time: 20, unit: 'MINUTES') + disableRestartFromStage() + buildDiscarder(logRotator( + numToKeepStr: '10', + daysToKeepStr: '30', + )) + } + + parameters { + string( + name: 'IMAGE_TAG', + description: 'Name of Docker tag to push. Optional Parameter.', + defaultValue: 'latest' + ) + string( + name: 'IMAGE_NAME', + description: 'Name of Docker image to push.', + defaultValue: params.IMAGE_NAME ?: 'wakuorg/liteprotocoltester', + ) + string( + name: 'DOCKER_CRED', + description: 'Name of Docker Registry credential.', + defaultValue: params.DOCKER_CRED ?: 'harbor-telemetry-robot', + ) + string( + name: 'DOCKER_REGISTRY', + description: 'URL of the Docker Registry', + defaultValue: params.DOCKER_REGISTRY ?: 'harbor.status.im' + ) + string( + name: 'NIMFLAGS', + description: 'Flags for Nim compilation.', + defaultValue: params.NIMFLAGS ?: [ + '--colors:off', + '-d:disableMarchNative', + '-d:chronicles_colors:none', + '-d:insecure', + ].join(' ') + ) + choice( + name: "LOWEST_LOG_LEVEL_ALLOWED", + choices: ['TRACE', 'DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR', 'FATAL'], + description: "Defines the log level, which will be available at runtime (Chronicles log level)" + ) + } + + stages { + stage('Build') { + steps { script { + image = docker.build( + "${DOCKER_REGISTRY}/${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}", + "--label=commit='${git.commit()}' " + + "--label=version='${git.describe('--tags')}' " + + "--build-arg=MAKE_TARGET='liteprotocoltester' " + + "--build-arg=NIMFLAGS='${params.NIMFLAGS}' " + + "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " + + "--target ${params.IMAGE_TAG == 'deploy' ?
'deployment_lpt' : 'standalone_lpt'} " + + "--file=apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile " + + " ." + ) + } } + } + + stage('Check') { + steps { script { + image.inside('--entrypoint=""') { c -> + sh '/usr/bin/liteprotocoltester --version' + } + } } + } + + stage('Push') { + when { expression { params.IMAGE_TAG != '' } } + steps { script { + withDockerRegistry([ + credentialsId: params.DOCKER_CRED, url: "https://${DOCKER_REGISTRY}" + ]) { + image.push(params.IMAGE_TAG) + } + } } + } + } // stages + + post { + cleanup { cleanWs() } + } // post +} // pipeline diff --git a/third-party/nwaku/ci/Jenkinsfile.prs b/third-party/nwaku/ci/Jenkinsfile.prs new file mode 100644 index 0000000..2aa5654 --- /dev/null +++ b/third-party/nwaku/ci/Jenkinsfile.prs @@ -0,0 +1,137 @@ +#!/usr/bin/env groovy + +library 'status-jenkins-lib@v1.6.0' + +pipeline { + agent { label "${getAgentLabel()} && x86_64" } + + parameters { + string( + name: 'NIMFLAGS', + description: 'Flags for Nim compilation.', + defaultValue: params.NIMFLAGS ?: [ + '--colors:off', + '-d:insecure', + '-d:disableMarchNative', + '--parallelBuild:6', + '-d:postgres', + ].join(' ') + ) + string( + name: 'LOG_LEVEL', + description: 'Build logging level. (DEBUG, TRACE)', + defaultValue: params.LOG_LEVEL ?: 'DEBUG' + ) + string( + name: 'VERBOSITY', + description: 'Makefile verbosity level.(0-2)', + defaultValue: params.VERBOSITY ?: '1' + ) + string( + name: 'MAKEFLAGS', + description: 'Makefile flags.', + defaultValue: params.MAKEFLAGS ?: '-j6' + ) + } + + options { + timestamps() + disableRestartFromStage() + /* Prevent Jenkins jobs from running forever */ + timeout(time: 30, unit: 'MINUTES') + /* Limit builds retained. */ + buildDiscarder(logRotator( + numToKeepStr: '3', + daysToKeepStr: '30', + artifactNumToKeepStr: '1', + )) + } + + environment { + TARGET = getAgentLabel() + } + + stages { + stage('Deps') { steps { script { + /* Avoid checking multiple times. 
*/ + v2changed = versionWasChanged('v2') + /* TODO: Re-add caching of Nim compiler. */ + nix.shell("make ${params.MAKEFLAGS} V=${params.VERBOSITY} update", pure: false) + nix.shell("make ${params.MAKEFLAGS} V=${params.VERBOSITY} deps", pure: false) + } } } + + stage('Binaries') { + parallel { + stage('V2') { + when { expression { v2changed } } + steps { script { + nix.shell("make ${params.MAKEFLAGS} NIMFLAGS=\"${params.NIMFLAGS}\" V=${params.VERBOSITY} all") + } } + } + } + } + + stage('Run Tests') { + parallel { + stage('V2') { + when { expression { v2changed } } + steps { script { + nix.shell("make ${params.MAKEFLAGS} NIMFLAGS=\"${params.NIMFLAGS}\" V=${params.VERBOSITY} test") + } } + } + } + } + + stage('Upload') { + when { expression { v2changed } } + steps { script { + def out = genOutputFilename() + sh "mv build/wakunode2 ${out}" + env.PKG_URL = s3.uploadArtifact(out) + jenkins.setBuildDesc(Waku: env.PKG_URL) + } } + } + } // stages + post { + success { script { github.notifyPR(true) } } + failure { script { github.notifyPR(false) } } + always { cleanWs() } + } // post +} // pipeline + + +/* This allows us to use one Jenkinsfile and run + * jobs on different platforms based on job name. */ +def getAgentLabel() { + if (params.AGENT_LABEL) { + return params.AGENT_LABEL + } + def tokens = env.JOB_NAME.split('/') + for (platform in ['linux', 'macos', 'windows']) { + if (tokens.contains(platform)) { return platform } + } + throw new Exception('No agent provided or found in job path!') +} + +def genOutputFilename() { + return [ + "wakunode2", utils.timestamp(), utils.gitCommit(), getAgentLabel() + ].join('-') + (env.NODE_NAME.startsWith('windows') ? 
'.exe' : '.bin') +} + +def versionWasChanged(version) { + def changes = sh( + script: "git diff --name-only origin/${env.CHANGE_TARGET}", + returnStdout: true + ) + if (changes =~ "(?m)^(Makefile|waku.nimble|config.nims|vendor|ci|shell.nix).*") { + return true + } + if (version == 'v2' && changes =~ "(?m)^(apps|tools)/.*") { + return true + } + if (changes =~ "(?m)^(waku|tests|examples)/(${version}|common)/.*") { + return true + } + return false +} diff --git a/third-party/nwaku/ci/Jenkinsfile.release b/third-party/nwaku/ci/Jenkinsfile.release new file mode 100644 index 0000000..4a0cd0d --- /dev/null +++ b/third-party/nwaku/ci/Jenkinsfile.release @@ -0,0 +1,146 @@ +#!/usr/bin/env groovy +library 'status-jenkins-lib@v1.8.17' + +pipeline { + agent { label 'linux' } + + options { + timestamps() + disableRestartFromStage() + timeout(time: 20, unit: 'MINUTES') + buildDiscarder(logRotator( + numToKeepStr: '10', + daysToKeepStr: '30', + )) + } + + parameters { + string( + name: 'MAKE_TARGET', + description: 'Makefile target to build. Optional Parameter.', + defaultValue: params.MAKE_TARGET ?: 'wakunode2', + ) + string( + name: 'IMAGE_TAG', + description: 'Name of Docker tag to push. 
Optional Parameter.', + defaultValue: getDefaultImageTag() + ) + string( + name: 'IMAGE_NAME', + description: 'Name of Docker image to push.', + defaultValue: params.IMAGE_NAME ?: 'harbor.status.im/wakuorg/nwaku', + ) + string( + name: 'DOCKER_CRED', + description: 'Name of Docker Registry credential.', + defaultValue: params.DOCKER_CRED ?: 'harbor-wakuorg-robot', + ) + string( + name: 'DOCKER_REGISTRY_URL', + description: 'URL of the Docker Registry', + defaultValue: params.DOCKER_REGISTRY_URL ?: 'https://harbor.status.im' + ) + string( + name: 'NIMFLAGS', + description: 'Flags for Nim compilation.', + defaultValue: params.NIMFLAGS ?: [ + '--colors:off', + '-d:disableMarchNative', + '-d:chronicles_colors:none', + '-d:insecure', + ].join(' ') + ) + choice( + name: "LOWEST_LOG_LEVEL_ALLOWED", + choices: ['TRACE', 'DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR', 'FATAL'], + description: "Defines the log level, which will be available at runtime (Chronicles log level)", + ) + booleanParam( + name: 'DEBUG', + description: 'Enable debug features', + defaultValue: false + ) + booleanParam( + name: 'HEAPTRACK', + description: 'Enable heaptrack build', + defaultValue: false + ) + } + + stages { + stage('Build') { + steps { script { + if (params.HEAPTRACK) { + echo 'Building with heaptrack support' + image = docker.build( + "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}", + "--label=build='${env.BUILD_URL}' " + + "--label=commit='${git.commit()}' " + + "--label=version='${git.describe('--tags')}' " + + "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " + + "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres -d:heaptracker ' " + + "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " + + "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " + + "--build-arg=NIM_COMMIT='NIM_COMMIT=heaptrack_support_v2.0.12' " + + "--target='debug-with-heaptrack' ."
+ ) + } else { + image = docker.build( + "${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}", + "--label=build='${env.BUILD_URL}' " + + "--label=commit='${git.commit()}' " + + "--label=version='${git.describe('--tags')}' " + + "--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " + + "--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " + + "--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " + + "--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " + + "--target='prod' ." + ) + } + } } + } + + stage('Check') { + steps { script { + image.inside('--entrypoint=""') { c -> + sh '/usr/bin/wakunode --version' + } + } } + } + + stage('Push') { + when { expression { params.IMAGE_TAG != '' } } + steps { script { + withDockerRegistry([ + credentialsId: params.DOCKER_CRED, url: params.DOCKER_REGISTRY_URL + ]) { + image.push() + /* If Git ref is a tag push it as Docker tag too. */ + if (params.GIT_REF ==~ /v\d+\.\d+\.\d+.*/) { + image.push(params.GIT_REF) + image.push('latest-release') + } + } + } } + } + } // stages + + post { + success { script { + discord.send( + header: '**Nim-Waku deployment successful!**', + cred: 'discord-waku-deployments-webhook', + descPrefix: "Image: [`${IMAGE_NAME}:${IMAGE_TAG}`](https://hub.docker.com/r/${IMAGE_NAME}/tags?name=${IMAGE_TAG})" + ) + } } + always { sh 'docker image prune -f' } + } // post +} // pipeline + +def getDefaultImageTag() { + switch (env.JOB_BASE_NAME) { + case 'docker-latest': return 'latest' + case 'docker-release': return 'stable' + default: return env.JOB_BASE_NAME + } +} diff --git a/third-party/nwaku/config.nims b/third-party/nwaku/config.nims new file mode 100644 index 0000000..f74fe18 --- /dev/null +++ b/third-party/nwaku/config.nims @@ -0,0 +1,127 @@ +import os + +if defined(release): + switch("nimcache", "nimcache/release/$projectName") +else: + switch("nimcache", "nimcache/debug/$projectName") + +if defined(windows): + switch("passL", "rln.lib") + switch("define", "postgres=false") + + # 
Automatically add all vendor subdirectories + for dir in walkDir("./vendor"): + if dir.kind == pcDir: + switch("path", dir.path) + switch("path", dir.path / "src") + + # disable timestamps in Windows PE headers - https://wiki.debian.org/ReproducibleBuilds/TimestampsInPEBinaries + switch("passL", "-Wl,--no-insert-timestamp") + # increase stack size + switch("passL", "-Wl,--stack,8388608") + # https://github.com/nim-lang/Nim/issues/4057 + --tlsEmulation: + off + if defined(i386): + # set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM + switch("passL", "-Wl,--large-address-aware") + + # The dynamic Chronicles output currently prevents us from using colors on Windows + # because these require direct manipulations of the stdout File object. + switch("define", "chronicles_colors=off") + +# https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#ssse3-supplemental-sse3 +# suggests that SHA256 hashing with SSSE3 is 20% faster than without SSSE3, so +# given its near-ubiquity in the x86 installed base, it renders a distribution +# build more viable on an overall broader range of hardware. +# +if defined(disableMarchNative): + if defined(i386) or defined(amd64): + if defined(macosx): + # macOS Catalina is EOL as of 2022-09 + # https://support.apple.com/kb/sp833 + # "macOS Big Sur - Technical Specifications" lists current oldest + # supported models: MacBook (2015 or later), MacBook Air (2013 or later), + # MacBook Pro (Late 2013 or later), Mac mini (2014 or later), iMac (2014 + # or later), iMac Pro (2017 or later), Mac Pro (2013 or later). + # + # These all have Haswell or newer CPUs. + # + # This ensures AVX2, AES-NI, PCLMUL, BMI1, and BMI2 instruction set support. 
+ switch("passC", "-march=haswell -mtune=generic") + switch("passL", "-march=haswell -mtune=generic") + else: + if defined(marchOptimized): + # https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#bmi2--adx + switch("passC", "-march=broadwell -mtune=generic") + switch("passL", "-march=broadwell -mtune=generic") + else: + switch("passC", "-mssse3") + switch("passL", "-mssse3") +elif defined(macosx) and defined(arm64): + # Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758 + switch("passC", "-mcpu=apple-m1") + switch("passL", "-mcpu=apple-m1") +else: + if not defined(android): + switch("passC", "-march=native") + switch("passL", "-march=native") + if defined(windows): + # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65782 + # ("-fno-asynchronous-unwind-tables" breaks Nim's exception raising, sometimes) + switch("passC", "-mno-avx512f") + switch("passL", "-mno-avx512f") + +--threads: + on +--opt: + speed +--excessiveStackTrace: + on +# enable metric collection +--define: + metrics +# for heap-usage-by-instance-type metrics and object base-type strings +--define: + nimTypeNames + +switch("define", "withoutPCRE") + +# the default open files limit is too low on macOS (512), breaking the +# "--debugger:native" build. It can be increased with `ulimit -n 1024`. 
+if not defined(macosx) and not defined(android): + # add debugging symbols and original files and line numbers + --debugger: + native + if not (defined(windows) and defined(i386)) and not defined(disable_libbacktrace): + # light-weight stack traces using libbacktrace and libunwind + --define: + nimStackTraceOverride + switch("import", "libbacktrace") + +--define: + nimOldCaseObjects + # https://github.com/status-im/nim-confutils/issues/9 + +# `switch("warning[CaseTransition]", "off")` fails with "Error: invalid command line option: '--warning[CaseTransition]'" +switch("warning", "CaseTransition:off") + +# The compiler doth protest too much, methinks, about all these cases where it can't +# do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230 +switch("warning", "ObservableStores:off") + +# Too many false positives for "Warning: method has lock level , but another method has 0 [LockLevel]" +switch("warning", "LockLevel:off") + +if defined(android): + var clang = getEnv("ANDROID_COMPILER") + var ndk_home = getEnv("ANDROID_TOOLCHAIN_DIR") + var sysroot = ndk_home & "/sysroot" + var cincludes = sysroot & "/usr/include/" & getEnv("ANDROID_ARCH") + + switch("clang.path", ndk_home & "/bin") + switch("clang.exe", clang) + switch("clang.linkerexe", clang) + switch("passC", "--sysroot=" & sysRoot) + switch("passL", "--sysroot=" & sysRoot) + switch("cincludes", sysRoot & "/usr/include/") diff --git a/third-party/nwaku/docker/binaries/Dockerfile.bn.amd64 b/third-party/nwaku/docker/binaries/Dockerfile.bn.amd64 new file mode 100644 index 0000000..7fba7ce --- /dev/null +++ b/third-party/nwaku/docker/binaries/Dockerfile.bn.amd64 @@ -0,0 +1,31 @@ +# Dockerfile to build a distributable container image from pre-existing binaries +FROM debian:bookworm-slim AS prod + +ARG MAKE_TARGET=wakunode2 + +LABEL maintainer="vaclav@status.im" +LABEL source="https://github.com/waku-org/nwaku" +LABEL description="Wakunode: Waku client" +LABEL commit="unknown" + +# DevP2P, LibP2P, and 
JSON RPC ports +EXPOSE 30303 60000 8545 + +# Referenced in the binary +RUN apt-get update &&\ + apt-get install -y libpq-dev curl iproute2 wget dnsutils &&\ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# Copy to separate location to accommodate different MAKE_TARGET values +ADD ./build/$MAKE_TARGET /usr/local/bin/ + +# Copy migration scripts for DB upgrades +ADD ./migrations/ /app/migrations/ + +# Symlink the correct wakunode binary +RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode + +ENTRYPOINT ["/usr/bin/wakunode"] + +# By default just show help if called without arguments +CMD ["--help"] diff --git a/third-party/nwaku/docker/binaries/Dockerfile.bn.local b/third-party/nwaku/docker/binaries/Dockerfile.bn.local new file mode 100644 index 0000000..79445d1 --- /dev/null +++ b/third-party/nwaku/docker/binaries/Dockerfile.bn.local @@ -0,0 +1,63 @@ +# Dockerfile to build a distributable container image from pre-existing binaries +# FROM debian:stable-slim AS prod +FROM ubuntu:24.04 AS prod + +ARG MAKE_TARGET=wakunode2 + +LABEL maintainer="vaclav@status.im" +LABEL source="https://github.com/waku-org/nwaku" +LABEL description="Wakunode: Waku client" +LABEL commit="unknown" + +# DevP2P, LibP2P, and JSON RPC ports +EXPOSE 30303 60000 8545 + +# Referenced in the binary +RUN apt-get update &&\ + apt-get install -y libpcre3 libpq-dev curl iproute2 wget jq dnsutils &&\ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# Fix for 'Error loading shared library libpcre.so.3: No such file or directory' +RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3 + +# Copy to separate location to accommodate different MAKE_TARGET values +ADD ./build/$MAKE_TARGET /usr/local/bin/ + +# Copy migration scripts for DB upgrades +ADD ./migrations/ /app/migrations/ + +# Symlink the correct wakunode binary +RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode + +ENTRYPOINT ["/usr/bin/wakunode"] + +# By default just show help if called without arguments +CMD ["--help"] + +# Build debug 
tools: heaptrack +FROM ubuntu:24.04 AS heaptrack-build + +RUN apt update +RUN apt install -y gdb git g++ make cmake zlib1g-dev libboost-all-dev libunwind-dev +RUN git clone https://github.com/KDE/heaptrack.git /heaptrack + +WORKDIR /heaptrack/build +# going to a commit that builds properly. We will revisit this for new releases +RUN git reset --hard f9cc35ebbdde92a292fe3870fe011ad2874da0ca +RUN cmake -DCMAKE_BUILD_TYPE=Release .. +RUN make -j$(nproc) + + +# Debug image +FROM prod AS debug-with-heaptrack + +RUN apt update +RUN apt install -y gdb libunwind8 + +# Add heaptrack +COPY --from=heaptrack-build /heaptrack/build/ /heaptrack/build/ + +ENV LD_LIBRARY_PATH=/heaptrack/build/lib/heaptrack/ +RUN ln -s /heaptrack/build/bin/heaptrack /usr/local/bin/heaptrack + +ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"] diff --git a/third-party/nwaku/docs/api/node.md b/third-party/nwaku/docs/api/node.md new file mode 100644 index 0000000..ab1580f --- /dev/null +++ b/third-party/nwaku/docs/api/node.md @@ -0,0 +1,94 @@ +# Waku APIs + +## Nim API + +The Nim Waku API consists of a set of methods operating on the Waku Node object. +Some of them have different arity depending on what privacy/bandwidth trade-off +the consumer wants to make. These methods are: + +1. **Init** - create a node. +2. **Start** - start a created node. +3. **Subscribe** - to a topic or a specific content filter. +4. **Unsubscribe** - to a topic or a specific content filter. +5. **Publish** - to a topic, or a topic and a specific content filter. +6. **Query** - for historical messages. +7. **Info** - to get information about the node. +8. **Resume** - to retrieve and persist the message history since the node's last online time. + +```Nim +proc init*(T: type WakuNode, nodeKey: crypto.PrivateKey, + bindIp: ValidIpAddress, bindPort: Port, + extIp = none[ValidIpAddress](), extPort = none[Port]()): T = + ## Creates a Waku Node. + ## + ## Status: Implemented. 
+ +proc start*(node: WakuNode) {.async.} = + ## Starts a created Waku Node. + ## + ## Status: Implemented. + +proc subscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) = + ## Subscribes to a PubSub topic. Triggers handler when receiving messages on + ## this topic. TopicHandler is a method that takes a topic and some data. + ## + ## NOTE The data field SHOULD be decoded as a WakuMessage. + ## Status: Implemented. + +proc subscribe*(node: WakuNode, request: FilterRequest, handler: ContentFilterHandler) {.async, gcsafe.} = + ## Registers for messages that match a specific filter. Triggers the handler whenever a message is received. + ## FilterHandler is a method that takes a MessagePush. + ## + ## Status: Implemented. + +proc unsubscribe*(node: WakuNode, topic: Topic, handler: TopicHandler) = + ## Unsubscribes a handler from a PubSub topic. + ## + ## Status: Implemented. + +proc unsubscribeAll*(node: WakuNode, topic: Topic) = + ## Unsubscribes all handlers registered on a specific PubSub topic. + ## + ## Status: Implemented. + +proc unsubscribe*(w: WakuNode, contentFilter: ContentFilter) = + ## Unsubscribe from a content filter. + ## + ## Status: Not yet implemented. + ## TODO Implement. + +proc publish*(node: WakuNode, topic: Topic, message: WakuMessage) = + ## Publish a `WakuMessage` to a PubSub topic. `WakuMessage` should contain a + ## `contentTopic` field for light node functionality. This field may be also + ## be omitted. + ## + ## Status: Implemented. + +proc query*(w: WakuNode, query: HistoryQuery, handler: QueryHandlerFunc) {.async, gcsafe.} = + ## Queries known nodes for historical messages. Triggers the handler whenever a response is received. + ## QueryHandlerFunc is a method that takes a HistoryResponse. + ## + ## Status: Implemented. + +proc info*(node: WakuNode): WakuInfo = + ## Returns information about the Node, such as what multiaddress it can be reached at. + ## + ## Status: Implemented. 
+ ## + +proc resume*(node: WakuNode, peerList: Option[seq[PeerInfo]]) = + ## Retrieves and persists the history of waku messages published on the default waku pubsub topic since the last time the waku node has been online. + ## It requires the waku node to have the store protocol mounted in the full mode (i.e., persisting messages). + ## `peerList` indicates the list of peers to query from. + ## The history is fetched from all available peers in this list and then consolidated into one deduplicated list. + ## If no peerList is passed, the history is fetched from one of the known peers. + ## It retrieves the history successfully given that the dialed peer has been online during the queried time window. + ## + ## Status: Implemented. + ## +``` + + +## REST API + +[Here](./rest-api.md) you can find more details on the Node HTTP REST API. diff --git a/third-party/nwaku/docs/api/rest-api.md b/third-party/nwaku/docs/api/rest-api.md new file mode 100644 index 0000000..eeb90ab --- /dev/null +++ b/third-party/nwaku/docs/api/rest-api.md @@ -0,0 +1,43 @@ +## HTTP REST API + +The HTTP REST API consists of a set of methods operating on the Waku Node remotely over HTTP. + +This API is divided in different _namespaces_ which group a set of resources: + +| Namespace | Description | +------------|-------------- +| `/debug` | Information about a Waku v2 node. | +| `/relay` | Control of the relaying of messages. See [11/WAKU2-RELAY](https://rfc.vac.dev/spec/11/) RFC | +| `/store` | Retrieve the message history. See [13/WAKU2-STORE](https://rfc.vac.dev/spec/13/) RFC | +| `/filter` | Control of the content filtering. See [12/WAKU2-FILTER](https://rfc.vac.dev/spec/12/) RFC | +| `/admin` | Privileged access to the internal operations of the node. | +| `/private` | Provides functionality to encrypt/decrypt `WakuMessage` payloads using either symmetric or asymmetric cryptography. This allows backwards compatibility with Waku v1 nodes. 
| + + +### API Specification + +The HTTP REST API has been designed following the OpenAPI 3.0.3 standard specification format. +The OpenAPI specification files can be found in the [Waku Node REST API Reference](https://waku-org.github.io/waku-rest-api/) repository. + +You can also use [hosted OpenAPI UI](https://waku-org.github.io/waku-rest-api/) to explore and execute the calls locally. + +Check the [OpenAPI Tools](https://openapi.tools/) site for the right tool for you (e.g. REST API client generator) + +A particular OpenAPI spec can be easily imported into [Postman](https://www.postman.com/downloads/) + 1. Open Postman. + 2. Click on File -> Import... + 2. Load the openapi.yaml of interest, stored in your computer. + 3. Then, requests can be made from within the 'Collections' section. + + +### Usage example + +#### [`get_waku_v2_debug_v1_info`](https://rfc.vac.dev/spec/16/#get_waku_v2_debug_v1_info) + +```bash +curl http://localhost:8645/debug/v1/info -s | jq +``` + + +### Node configuration +Find details [here](https://github.com/waku-org/nwaku/tree/master/docs/operators/how-to/configure-rest-api.md) diff --git a/third-party/nwaku/docs/benchmarks/cspell.json b/third-party/nwaku/docs/benchmarks/cspell.json new file mode 100644 index 0000000..8227630 --- /dev/null +++ b/third-party/nwaku/docs/benchmarks/cspell.json @@ -0,0 +1,20 @@ +{ "words": + [ + "pubsubtopic", + "jmeter", + "analyzed", + "queryc", + "wakudev", + "statusim", + "queryc", + "wakudev", + "statusim", + "chronos", + "libpqis", + "Conn", + "messageindex", + "storedat", + "pubsubtopic", + "wakudev" + ] +} diff --git a/third-party/nwaku/docs/benchmarks/imgs/digram_multiple_nodes_one_database.png b/third-party/nwaku/docs/benchmarks/imgs/digram_multiple_nodes_one_database.png new file mode 100644 index 0000000..e26f392 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/digram_multiple_nodes_one_database.png differ diff --git 
a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-2.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-2.png new file mode 100644 index 0000000..a5af6a3 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-2.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-3.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-3.png new file mode 100644 index 0000000..34980c7 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-3.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-4.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-4.png new file mode 100644 index 0000000..7287c82 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-4.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-5.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-5.png new file mode 100644 index 0000000..8e97630 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-5.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-6.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-6.png new file mode 100644 index 0000000..de86a96 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-6.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres-2.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres-2.png new file mode 100644 index 0000000..b515c5b Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres-2.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres-3.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres-3.png new file mode 100644 index 0000000..5531572 Binary files /dev/null and 
b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres-3.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres.png new file mode 100644 index 0000000..dad6707 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist-postgres.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist.png b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist.png new file mode 100644 index 0000000..9247b8e Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/insert-time-dist.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/jmeter-results.png b/third-party/nwaku/docs/benchmarks/imgs/jmeter-results.png new file mode 100644 index 0000000..451c662 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/jmeter-results.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/num-queries-per-minute.png b/third-party/nwaku/docs/benchmarks/imgs/num-queries-per-minute.png new file mode 100644 index 0000000..f63c197 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/num-queries-per-minute.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-2.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-2.png new file mode 100644 index 0000000..c101c81 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-2.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-3.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-3.png new file mode 100644 index 0000000..078e3ee Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-3.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-4.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-4.png new file mode 100644 index 0000000..a6ea99e Binary files /dev/null and 
b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-4.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-5.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-5.png new file mode 100644 index 0000000..484545e Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-5.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-6.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-6.png new file mode 100644 index 0000000..4bc5ea9 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-6.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres-2.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres-2.png new file mode 100644 index 0000000..3f7033f Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres-2.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres-3.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres-3.png new file mode 100644 index 0000000..2816a8d Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres-3.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres.png new file mode 100644 index 0000000..034ca24 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist-postgres.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/query-time-dist.png b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist.png new file mode 100644 index 0000000..ff7eca4 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/query-time-dist.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/topology-only-store-protocol.png b/third-party/nwaku/docs/benchmarks/imgs/topology-only-store-protocol.png new file mode 100644 index 
0000000..7ef28c1 Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/topology-only-store-protocol.png differ diff --git a/third-party/nwaku/docs/benchmarks/imgs/using-jmeter.png b/third-party/nwaku/docs/benchmarks/imgs/using-jmeter.png new file mode 100644 index 0000000..73918de Binary files /dev/null and b/third-party/nwaku/docs/benchmarks/imgs/using-jmeter.png differ diff --git a/third-party/nwaku/docs/benchmarks/postgres-adoption.md b/third-party/nwaku/docs/benchmarks/postgres-adoption.md new file mode 100644 index 0000000..89fba19 --- /dev/null +++ b/third-party/nwaku/docs/benchmarks/postgres-adoption.md @@ -0,0 +1,239 @@ +--- +title: PostgreSQL +description: Document that describes why Nim-Waku started to use Postgres and shows some benchmark and comparison results. +--- + +## Introduction + +The *Nim Waku Node*, *nwaku*, has the capability of archiving messages until a certain limit (e.g. 30 days) so that other nodes can synchronize their message history throughout the *Store* protocol. + +The *nwaku* originally used *SQLite* to archive messages but this has an impact on the node. *Nwaku* is single-threaded and therefore, any *SQLite* operation impacts the performance of other protocols, like *Relay.* + +Therefore, the *Postgres* adoption is needed to enhance that. 
+ +[https://github.com/waku-org/nwaku/issues/1888](https://github.com/waku-org/nwaku/issues/1888) + +## How to connect the *nwaku* to *Postgres* + +Simply pass the next parameter to *nwaku* + +```bash +--store-message-db-url="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${POSTGRES_HOST}:${POSTGRES_PORT}/postgres" +``` + +Notice that this only makes sense if the _nwaku_ has the _Store_ protocol mounted +```bash +--store=true +``` + +(start the _nwaku_ node with `--help` parameter for more _Store_ options) + +## Examples of *nwaku* using *Postgres* + +[https://github.com/waku-org/nwaku-compose](https://github.com/waku-org/nwaku-compose) + +[https://github.com/waku-org/test-waku-query](https://github.com/waku-org/test-waku-query) + +## Stress tests + +The following repository was created as a tool to stress and compare performance between *nwaku*+*Postgres* and *nwaku*+*SQLite*: + +[https://github.com/waku-org/test-waku-query](https://github.com/waku-org/test-waku-query) + +### Insert test results + +#### Maximum insert throughput + +**Scenario** + +- 1 node subscribed to pubsubtopic ‘x’ and the *Store* protocol mounted. +- ‘n’ nodes connected to the “store” node, and publishing messages simultaneously to pubsubtopic ‘x’. +- All nodes running locally in a *Dell Latitude 7640*. +- Each published message is fixed, 1.4 KB: [publish_one_client.sh](https://github.com/waku-org/test-waku-query/blob/master/sh/publish_one_client.sh) +- The next script is used to simulate multiple nodes publishing messages: [publish_multiple_clients.sh](https://github.com/waku-org/test-waku-query/blob/fe7061a21eb14395e723402face755c826077aec/sh/publish_multiple_clients.sh) + +**Sought goal** + +Find out the maximum number of concurrent inserts that both *SQLite* and *Postgres* could support, and check whether _Postgres_ behaves better than _SQLite_ or not. 
+ +**Conclusion** + +Messages are lost after a certain threshold, and this message loss is due to limitations in the *Relay* protocol (GossipSub - libp2p.) + +For example, if we set 30 nodes publishing 300 messages simultaneously, then 8997 rows were stored and not the expected 9000, in both *SQLite* and *Postgres* databases. + +The reason why few messages were lost is because the message rate was higher than the *relay* protocol can support, and therefore a few messages were not stored. In this example, the test took 38.8’’, and therefore, the node was receiving 232 msgs/sec, which is much more than the normal rate a node will work with, which is ~10 msgs/sec (rate extracted from Grafana’s stats for the *status.prod* fleet.) + +As a conclusion, the bottleneck is within the *Relay* protocol itself and not the underlying databases. Or, in other words, both *SQLite* and *Postgres* can support the maximum insert rate a Waku node will operate within normal conditions. + +### Query test results (jmeter) + +In this case, we are comparing *Store* performance by means of Rest service. + +**Scenario** + +- node_a: one _nwaku_ node with *Store* and connected to *Postgres.* +- node_b: one _nwaku_ node with *Store* and using *SQLite*. +- Both *Postgres* and *SQLite* contain +1 million rows. +- node_c: one _nwaku_ node with *REST* enabled and acting as a *Store client* for node_a. +- node_d: one _nwaku_ node with *REST* enabled and acting as a *Store client* for node_b. +- With _jmeter_, 10 users make *REST* *Store* requests concurrently to each of the “rest” nodes (node_c and node_d.) +- All _nwaku_ nodes running statusteam/nim-waku:v0.19.0 + +[This](https://github.com/waku-org/test-waku-query/blob/master/docker/jmeter/http_store_requests.jmx) is the _jmeter_ project used. + +![Using jmeter](imgs/using-jmeter.png) + +*Results* + +With this, the *node_b* brings a higher throughput than the *node_a* and that indicates that the node that uses SQLite performs better. 
The following shows the measures taken by _jmeter_ with regard to the REST requests. + +![jmeter results](imgs/jmeter-results.png) + +### Query test results (only Store protocol) + +In this test suite, only the Store protocol is being analyzed, i.e. without REST. For that, a go-waku node is used, which acts as *Store* client. On the other hand, we have another go-waku app that publishes random *Relay* messages periodically. Therefore, this can be considered a more realistic approach. + +The following diagram shows the topology used: + +![Topology](imgs/topology-only-store-protocol.png) + +For that, the next apps were used: + +1. [Waku-publisher.](https://github.com/alrevuelta/waku-publisher/tree/9fb206c14a17dd37d20a9120022e86475ce0503f) This app can publish Relay messages with different numbers of clients +2. [Waku-store-query-generator](https://github.com/Ivansete-status/waku-store-query-generator/tree/19e6455537b6d44199cf0c8558480af5c6788b0d). This app is based on the Waku-publisher but in this case, it can spawn concurrent go-waku Store clients. + +That topology is defined in [this](https://github.com/waku-org/test-waku-query/blob/7090cd125e739306357575730d0e54665c279670/docker/docker-compose-manual-binaries.yml) docker-compose file. + +Notice that the two `nwaku` nodes run the very same version, which is compiled locally. + +#### Comparing archive SQLite & Postgres performance in [nwaku-b6dd6899](https://github.com/waku-org/nwaku/tree/b6dd6899030ee628813dfd60ad1ad024345e7b41) + +The next results were obtained by running the docker-compose-manual-binaries.yml from [test-waku-query-c078075](https://github.com/waku-org/test-waku-query/tree/c07807597faa781ae6c8c32eefdf48ecac03a7ba) in the sandbox machine (metal-01.he-eu-hel1.misc.wakudev.status.im.) + +**Scenario 1** + +**Store rate:** 1 user generating 1 store-req/sec. + +**Relay rate:** 1 user generating 10msg/sec, 10KB each. 
+ +In this case, we can see that the SQLite performance is better regarding the store requests. + +![Insert time distribution](imgs/insert-time-dist.png) + +![Query time distribution](imgs/query-time-dist.png) + +The following graph shows how the *SQLite* node has blocking periods whereas the *Postgres* always gives a steady rate. + +![Num queries per minute](imgs/num-queries-per-minute.png) + +**Scenario 2** + +**Store rate:** 10 users generating 1 store-req/sec. + +**Relay rate:** 1 user generating 10msg/sec, 10KB each. + +In this case, it is more evident that *SQLite* performs better. + +![Insert time distribution](imgs/insert-time-dist-2.png) + +![Query time distribution](imgs/query-time-dist-2.png) + +**Scenario 3** + +**Store rate:** 25 users generating 1 store-req/sec. + +**Relay rate:** 1 user generating 10msg/sec, 10KB each. + +In this case, the performance is similar regarding the timings. The store rate is bigger in *SQLite* and *Postgres* keeps the same level as in scenario 2. + +![Insert time distribution](imgs/insert-time-dist-3.png) + +![Query time distribution](imgs/query-time-dist-3.png) + +#### Comparing archive SQLite & Postgres performance in [nwaku-b452ed8](https://github.com/waku-org/nwaku/tree/b452ed865466a33b7f5b87fa937a8471b28e466e) + +This nwaku commit is after a few **Postgres** optimizations were applied. + +The next results were obtained by running the docker-compose-manual-binaries.yml from [test-waku-query-c078075](https://github.com/waku-org/test-waku-query/tree/c07807597faa781ae6c8c32eefdf48ecac03a7ba) in the sandbox machine (metal-01.he-eu-hel1.misc.wakudev.status.im.) + +**Scenario 1** + +**Store rate:** 1 user generating 1 store-req/sec. Notice that the current Store query used generates pagination which provokes more subsequent queries than the 1 req/sec that would be expected without pagination. + +**Relay rate:** 1 user generating 10msg/sec, 10KB each. 
+ +![Insert time distribution](imgs/insert-time-dist-4.png) + +![Query time distribution](imgs/query-time-dist-4.png) + +It cannot be appreciated but the average **Store** time was 11ms. + +**Scenario 2** + +**Store rate:** 10 users generating 1 store-req/sec. Notice that the current Store query used generates pagination which provokes more subsequent queries than the 10 req/sec that would be expected without pagination. + +**Relay rate:** 1 user generating 10msg/sec, 10KB each. + +![Insert time distribution](imgs/insert-time-dist-5.png) + +![Query time distribution](imgs/query-time-dist-5.png) + +**Scenario 3** + +**Store rate:** 25 users generating 1 store-req/sec. Notice that the current Store query used generates pagination which provokes more subsequent queries than the 25 req/sec that would be expected without pagination. + +**Relay rate:** 1 user generating 10msg/sec, 10KB each. + +![Insert time distribution](imgs/insert-time-dist-6.png) + +![Query time distribution](imgs/query-time-dist-6.png) + +#### Conclusions + +After comparing both systems, *SQLite* performs much better than *Postgres*. However, a benefit of using *Postgres* is that it performs asynchronous operations, and therefore doesn’t consume CPU time that would be better invested in *Relay* for example. + +Remember that _nwaku_ is single-threaded and *chronos* performs orchestration among a bunch of async tasks, and therefore it is not a good practice to block the whole _nwaku_ process in a query, as happens with *SQLite*. + +After applying a few *Postgres* enhancements, it can be noticed that the use of concurrent *Store* queries doesn’t go below the 250ms barrier. The reason for that is that most of the time is being consumed in [this point](https://github.com/waku-org/nwaku/blob/6da1aeec5370bb1c116509e770178cca2662b69c/waku/common/databases/db_postgres/dbconn.nim#L124). The `libpqisBusy()` function indicates that the connection is still busy even though the queries finished. 
+ +Notice that we usually have a rate below 1100 req/minute in _status.prod_ fleet (checked November 7, 2023.) + +----------------------------- + +### Multiple nodes & one single database + +This study aims to look for possible issues when having only one single database while several Waku nodes insert or retrieve data from it. +The following diagram shows the scenery used for such analysis. + +![digram_multiple_nodes_one_database](imgs/digram_multiple_nodes_one_database.png) + +There are three nim-waku nodes that are connected to the same database and all of them are trying to write messages to the same _PostgreSQL_ instance. With that, it is very common to see errors like: +``` +ERR 2023-11-27 13:18:07.575+00:00 failed to insert message topics="waku archive" tid=2921 file=archive.nim:111 err="error in runStmt: error in dbConnQueryPrepared calling waitQueryToFinish: error in query: ERROR: duplicate key value violates unique constraint \"messageindex\"\nDETAIL: Key (storedat, id, pubsubtopic)=(1701091087417938405, 479c95bbf74222417abf76c7f9c480a6790e454374dc4f59bbb15ca183ce1abd, /waku/2/default-waku/proto) already exists.\n +``` + +The `db-postgres-hammer` is aimed to stress the database from the `select` point of view. It performs `N` concurrent `select` queries with a certain rate. + +#### Results + +The following results were obtained by using the sandbox machine (metal-01.he-eu-hel1.misc.wakudev) and running nim-waku nodes from https://github.com/waku-org/nwaku/tree/b452ed865466a33b7f5b87fa937a8471b28e466e and using the `test-waku-query` project from https://github.com/waku-org/test-waku-query/tree/fef29cea182cc744c7940abc6c96d38a68739356 + +The following shows the results + +1. Two `nwaku-postgres-additional` inserting messages plus 50 `db-postgres-hammer` making 10 `selects` per second. + +![Insert time distribution Postgres](imgs/insert-time-dist-postgres.png) + +![Query time distribution Postgres](imgs/query-time-dist-postgres.png) + +2. 
Five `nwaku-postgres-additional` inserting messages plus 50 `db-postgres-hammer` making 10 `selects` per second. +![Insert time distribution Postgres](imgs/insert-time-dist-postgres-2.png) +![Query time distribution Postgres](imgs/query-time-dist-postgres-2.png) + +In this case, the insert time gets more spread because the insert operations are shared amongst five more nodes. The _Store_ query time remains the same on average. + +3. Five `nwaku-postgres-additional` inserting messages plus 100 `db-postgres-hammer` making 10 `selects` per second. +This case is similar to 2. but stressing more the database. +![Insert time distribution Postgres](imgs/insert-time-dist-postgres-3.png) +![Query time distribution Postgres](imgs/query-time-dist-postgres-3.png) diff --git a/third-party/nwaku/docs/benchmarks/test-results-summary.md b/third-party/nwaku/docs/benchmarks/test-results-summary.md new file mode 100644 index 0000000..b5786bf --- /dev/null +++ b/third-party/nwaku/docs/benchmarks/test-results-summary.md @@ -0,0 +1,90 @@ +--- +title: Performance Benchmarks and Test Reports +--- + + +## Introduction +This page summarises key performance metrics for nwaku and provides links to detailed test reports. + +> ## TL;DR +> +> - Average Waku bandwidth usage: ~**10 KB/s** (minus discv5 Discovery) for 1KB message size and message injection rate of 1msg/s. +Confirmed for topologies of up to 2000 Relay nodes. +> - Average time for a message to propagate to 100% of nodes: **0.4s** for topologies of up to 2000 Relay nodes. +> - Average per-node bandwidth usage of the discv5 protocol: **8 KB/s** for incoming traffic and **7.4 KB/s** for outgoing traffic, + in a network with 100 continuously online nodes. +> - Future improvements: A messaging API is currently in development to streamline interactions with the Waku protocol suite. +Once completed, it will enable benchmarking at the messaging API level, allowing applications to more easily compare their +own performance results. 
+ + +## Insights + +### Relay Bandwidth Usage: nwaku v0.34.0 +The average per-node `libp2p` bandwidth usage in a 1000-node Relay network with 1KB messages at varying injection rates. + + +| Message Injection Rate | Average libp2p incoming bandwidth (KB/s) | Average libp2p outgoing bandwidth (KB/s) | +|------------------------|------------------------------------------|------------------------------------------| +| 1 msg/s | ~10.1 | ~10.3 | +| 1 msg/10s | ~1.8 | ~1.9 | + +### Message Propagation Latency: nwaku v0.34.0-rc1 +The message propagation latency is measured as the total time for a message to reach all nodes. +We compare the latency in different network configurations for the following simulation parameters: +- Total messages published: 600 +- Message size: 1KB +- Message injection rate: 1msg/s + +The different network configurations tested are: +- Relay Config: 1000 nodes with relay enabled +- Mixed Config: 210 nodes, consisting of bootstrap nodes, filter clients and servers, lightpush clients and servers, store nodes +- Non-persistent Relay Config: 500 persistent relay nodes, 10 store nodes and 100 non-persistent relay nodes + +Click on a specific config to see the detailed test report. 
+ +| Config | Average Message Propagation Latency (s) | Max Message Propagation Latency (s)| +|------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------|------------------------------------| +| [Relay](https://www.notion.so/Waku-regression-testing-v0-34-1618f96fb65c803bb7bad6ecd6bafff9) (1000 nodes) | 0.05 | 1.6 | +| [Mixed](https://www.notion.so/Mixed-environment-analysis-1688f96fb65c809eb235c59b97d6e15b) (210 nodes) | 0.0125 | 0.007 | +| [Non-persistent Relay](https://www.notion.so/High-Churn-Relay-Store-Reliability-16c8f96fb65c8008bacaf5e86881160c) (510 nodes)| 0.0125 | 0.25 | + +### Discv5 Bandwidth Usage: nwaku v0.34.0 +The average bandwidth usage of discv5 for a network of 100 nodes and message injection rate of 0 or 1msg/s. +The measurements are based on a stable network where all nodes have already connected to peers to form a healthy mesh. + +|Message size |Average discv5 incoming bandwidth (KB/s)|Average discv5 outgoing bandwidth (KB/s)| +|-------------------- |----------------------------------------|----------------------------------------| +| no message injection| 7.88 | 6.70 | +| 1KB | 8.04 | 7.40 | +| 10KB | 8.03 | 7.45 | + +## Testing +### DST +The VAC DST team performs regression testing on all new **nwaku** releases, comparing performance with previous versions. +They simulate large Waku networks with a variety of network and protocol configurations that are representative of real-world usage. + +**Test Reports**: [DST Reports](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f) + + +### QA +The VAC QA team performs interoperability tests for **nwaku** and **go-waku** using the latest main branch builds. +These tests run daily and verify protocol functionality by targeting specific features of each protocol. 
+ +**Test Reports**: [QA Reports](https://discord.com/channels/1110799176264056863/1196933819614363678) + +### nwaku +The **nwaku** team follows a structured release procedure for all release candidates. +This involves deploying RCs to `status.staging` fleet for validation and performing sanity checks. + +**Release Process**: [nwaku Release Procedure](https://github.com/waku-org/nwaku/blob/master/.github/ISSUE_TEMPLATE/prepare_release.md) + + +### Research +The Waku Research team conducts a variety of benchmarking, performance testing, proof-of-concept validations and debugging efforts. +They also maintain a Waku simulator designed for small-scale, single-purpose, on-demand testing. + + +**Test Reports**: [Waku Research Reports](https://www.notion.so/Miscellaneous-2c02516248db4a28ba8cb2797a40d1bb) + +**Waku Simulator**: [Waku Simulator Book](https://waku-org.github.io/waku-simulator/) diff --git a/third-party/nwaku/docs/contributors/README.md b/third-party/nwaku/docs/contributors/README.md new file mode 100644 index 0000000..9f76cd7 --- /dev/null +++ b/third-party/nwaku/docs/contributors/README.md @@ -0,0 +1,11 @@ +# Contributors + +This folder contains documentation that is primarily useful for contributors. Some links and +resources here might require privileged access. + +Example resources: + +- How to do releases +- Viewing and modifying metrics dashboard +- Continuous integration process +- How to view Status cluster logs diff --git a/third-party/nwaku/docs/contributors/cluster-logs.md b/third-party/nwaku/docs/contributors/cluster-logs.md new file mode 100644 index 0000000..3e21c83 --- /dev/null +++ b/third-party/nwaku/docs/contributors/cluster-logs.md @@ -0,0 +1,11 @@ +# Cluster node logs + +These can be found in [Kibana](https://kibana.infra.status.im/goto/a3793b50-489d-11ed-a791-f14ad382fa11). + +Login with Github. For access issues, contact devops. + +Modify search field and time window as appropriate. 
+
+Notice that there are two clusters, test and production. There is also a Waku v1 cluster.
+
+
diff --git a/third-party/nwaku/docs/contributors/continuous-integration.md b/third-party/nwaku/docs/contributors/continuous-integration.md
new file mode 100644
index 0000000..1c5fa56
--- /dev/null
+++ b/third-party/nwaku/docs/contributors/continuous-integration.md
+# Description
+
+This document describes the continuous integration setup for `nim-waku`.
+
+# Details
+
+The CI setup exists on the Status.im Jenkins instance:
+
+https://ci.infra.status.im/job/nim-waku/
+
+It currently consists of three jobs:
+
+* [manual](https://ci.infra.status.im/job/nim-waku/job/manual/) - For manually executing builds using parameters.
+* [deploy-waku-test](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/) - Builds every new commit in `master` and deploys to `waku.test` fleet.
+* [deploy-waku-sandbox](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/) - Currently has no automatic trigger, and deploys to `waku.sandbox` fleet.
+
+# Configuration
+
+The main configuration file is [`Jenkinsfile.release`](../../ci/Jenkinsfile.release) in the `ci` folder.
+
+Key part is the definition of five `parameters`:
+
+* `MAKE_TARGET` - Which `Makefile` target is built.
+* `IMAGE_TAG` - Tag of the Docker image to push.
+* `IMAGE_NAME` - Name of the Docker image to push.
+* `NIMFLAGS` - Nim compilation parameters.
+* `GIT_REF` - Git reference to build from (branch, tag, commit...)
+
+The use of `?:` [Elvis operator](http://groovy-lang.org/operators.html#_elvis_operator) plays a key role in allowing parameters to be changed for each defined job in Jenkins without it being overridden by the `Jenkinsfile` defaults after every job run. 
+```groovy +defaultValue: params.IMAGE_TAG ?: 'deploy-waku-test', +``` diff --git a/third-party/nwaku/docs/contributors/git-submodules.md b/third-party/nwaku/docs/contributors/git-submodules.md new file mode 100644 index 0000000..52336e2 --- /dev/null +++ b/third-party/nwaku/docs/contributors/git-submodules.md @@ -0,0 +1,10 @@ +# Submodules + +We use Git submodules in the `vendor` directory to track internal Nim +dependencies. We want to update submodules all at once to avoid issues. + +``` +git submodule foreach --recursive git submodule update --init +git submodule update --remote +``` + diff --git a/third-party/nwaku/docs/contributors/nph.md b/third-party/nwaku/docs/contributors/nph.md new file mode 100644 index 0000000..91f4149 --- /dev/null +++ b/third-party/nwaku/docs/contributors/nph.md @@ -0,0 +1,34 @@ + +## nph - An opinionated Nim formatter +This prettifier tool is used to format the nwaku code base. + +### VSCode Extension +https://marketplace.visualstudio.com/items?itemName=arnetheduck.vscode-nph + +### GitHub +https://github.com/arnetheduck/nph + +Make sure you use a binary from the following release: +https://github.com/arnetheduck/nph/releases/tag/v0.5.1 + +```bash +$ nph --version +v0.5.1-0-gde5cd48 +``` + +### Installation and configuration +1. Ask the [nwaku team](https://discord.com/channels/1110799176264056863/1111541184490393691) about the required `nph` version. +2. Download the desired release from _GitHub_ and place the binary in the PATH env var. +3. Add the following content into `~/.config/Code/User/settings.json`: + +``` +{ + "[nim]": { + "editor.formatOnSave": true, + "editor.defaultFormatter": "arnetheduck.vscode-nph" + }, +} +``` + +With that, every time a Nim file is saved, it will be formatted automatically. 
+ diff --git a/third-party/nwaku/docs/contributors/release-process.md b/third-party/nwaku/docs/contributors/release-process.md new file mode 100644 index 0000000..c0fb12d --- /dev/null +++ b/third-party/nwaku/docs/contributors/release-process.md @@ -0,0 +1,119 @@ +# Release Process + +How to do releases. + +For more context, see https://trunkbaseddevelopment.com/branch-for-release/ + +## How to do releases + +### Before release + +Ensure all items in this list are ticked: +- [ ] All issues under the corresponding release [milestone](https://github.com/waku-org/nwaku/milestones) has been closed or, after consultation, deferred to a next release. +- [ ] All submodules are up to date. + > **IMPORTANT:** Updating submodules requires a PR (and very often several "fixes" to maintain compatibility with the changes in submodules). That PR process must be done and merged a couple of days before the release. + > In case the submodules update has a low effort and/or risk for the release, follow the ["Update submodules"](./git-submodules.md) instructions. + > If the effort or risk is too high, consider postponing the submodules upgrade for the subsequent release or delaying the current release until the submodules updates are included in the release candidate. +- [ ] The [js-waku CI tests](https://github.com/waku-org/js-waku/actions/workflows/ci.yml) pass against the release candidate (i.e. nwaku latest `master`). + > **NOTE:** This serves as a basic regression test against typical clients of nwaku. + > The specific job that needs to pass is named `node_with_nwaku_master`. + +### Performing the release + +1. Checkout a release branch from master + + ``` + git checkout -b release/v0.1.0 + ``` + +1. Update `CHANGELOG.md` and ensure it is up to date. Use the helper Make target to get PR based release-notes/changelog update. + + ``` + make release-notes + ``` + +1. 
Create a release-candidate tag with the same name as release and `-rc.N` suffix a few days before the official release and push it + + ``` + git tag -as v0.1.0-rc.0 -m "Initial release." + git push origin v0.1.0-rc.0 + ``` + + This will trigger a [workflow](../../.github/workflows/pre-release.yml) which will build RC artifacts and create and publish a Github release + +1. Open a PR from the release branch for others to review the included changes and the release-notes + +1. In case additional changes are needed, create a new RC tag + + Make sure the new tag is associated + with CHANGELOG update. + + ``` + # Make changes, rebase and create new tag + # Squash to one commit and make a nice commit message + git rebase -i origin/master + git tag -as v0.1.0-rc.1 -m "Initial release." + git push origin v0.1.0-rc.1 + ``` + +1. Validate the release. For the release validation process, please refer to the following [guide](https://www.notion.so/Release-Process-61234f335b904cd0943a5033ed8f42b4#47af557e7f9744c68fdbe5240bf93ca9) + +1. Once the release-candidate has been validated, create a final release tag and push it. +We also need to merge release branch back to master as a final step. + + ``` + git checkout release/v0.1.0 + git tag -as v0.1.0 -m "Initial release." + git push origin v0.1.0 + git switch master + git pull + git merge release/v0.1.0 + ``` + +1. Create a [Github release](https://github.com/waku-org/nwaku/releases) from the release tag. + + * Add binaries produced by the ["Upload Release Asset"](https://github.com/waku-org/nwaku/actions/workflows/release-assets.yml) workflow. Where possible, test the binaries before uploading to the release. + +### After the release + +1. Announce the release on Twitter, Discord and other channels. +2. Deploy the release image to [Dockerhub](https://hub.docker.com/r/wakuorg/nwaku) by triggering [the manual Jenkins deployment job](https://ci.infra.status.im/job/nim-waku/job/docker-manual/). 
+   > Ensure the following build parameters are set:
+   > - `MAKE_TARGET`: `wakunode2`
+   > - `IMAGE_TAG`: the release tag (e.g. `v0.16.0`)
+   > - `IMAGE_NAME`: `wakuorg/nwaku`
+   > - `NIMFLAGS`: `--colors:off -d:disableMarchNative -d:chronicles_colors:none -d:postgres`
+   > - `GIT_REF`: the release tag (e.g. `v0.16.0`)
+3. Update the default nwaku image in [nwaku-compose](https://github.com/waku-org/nwaku-compose/blob/master/docker-compose.yml)
+4. Deploy the release to appropriate fleets:
+   - Inform clients
+     > **NOTE:** known clients are currently using some version of js-waku, go-waku, nwaku or waku-rs.
+     > Clients are reachable via the corresponding channels on the Vac Discord server.
+     > It should be enough to inform clients on the `#nwaku` and `#announce` channels on Discord.
+     > Informal conversations with specific repo maintainers are often part of this process.
+   - Check if nwaku configuration parameters changed. If so [update fleet configuration](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) in [infra-nim-waku](https://github.com/status-im/infra-nim-waku)
+   - Deploy release to the `waku.sandbox` fleet from [Jenkins](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
+   - Ensure that nodes successfully start up and monitor health using [Grafana](https://grafana.infra.status.im/d/qrp_ZCTGz/nim-waku-v2?orgId=1) and [Kibana](https://kibana.infra.status.im/goto/a7728e70-eb26-11ec-81d1-210eb3022c76).
+   - If necessary, revert by deploying the previous release. Download logs and open a bug report issue.
+5. Submit a PR to merge the release branch back to `master`. Make sure you use the option `Merge pull request (Create a merge commit)` to perform such merge.
+
+### Performing a patch release
+
+1. Cherry-pick the relevant commits from master to the release branch
+
+   ```
+   git cherry-pick <commit-hash>
+   ```
+
+2. Create a release-candidate tag with the same name as the release and an `-rc.N` suffix
+
+3. 
Update `CHANGELOG.md`. From the release branch, use the helper Make target after having cherry-picked the commits. + + ``` + make release-notes + ``` + Create a new branch and raise a PR with the changelog updates to master. + +4. Once the release-candidate has been validated and changelog PR got merged, cherry-pick the changelog update from master to the release branch. Create a final release tag and push it. + +5. Create a [Github release](https://github.com/waku-org/nwaku/releases) from the release tag and follow the same post-release process as usual. diff --git a/third-party/nwaku/docs/contributors/waku-fleets.md b/third-party/nwaku/docs/contributors/waku-fleets.md new file mode 100644 index 0000000..659d855 --- /dev/null +++ b/third-party/nwaku/docs/contributors/waku-fleets.md @@ -0,0 +1,107 @@ +# Waku fleet: management & monitoring + +## Background + +Status currently maintains two fleets for `nwaku` nodes, +the `waku.test` fleet and the `waku.sandbox` (sandbox) fleet. +They'll be referred to as `test` and `sandbox` in this document. +Status fleet nodes and addresses can be viewed [here](https://fleets.status.im/). + +### Fleet overview + +At the time of writing this, each fleet consists of three waku nodes, +with a [websockify](https://github.com/novnc/websockify) WebSocket-to-TCP bridge for each node. +Waku peers can choose to connect either directly to a node's TCP endpoint +or the bridged WebSocket depending on their own supported transports. +The `sandbox` fleet also has a deployed [`chat2bridge`](https://github.com/waku-org/nwaku/blob/master/docs/tutorial/chat2.md#bridge-messages-between-chat2-and-matterbridge), +which serves as a bridge between the [Waku toy-chat](https://rfc.vac.dev/spec/22/) and Matterbridge. +The `chat2bridge` is currently deployed to the `node-01.do-ams3` datacentre +and configured to bridge toy-chat messages to the `#waku channel` on the Vac Discord Server. 
+ +### Fleet deployment rationale + +The `test` fleet is automatically updated after every commit to the `nwaku` repository `master` branch and is therefore the most up to date representation of Waku development. +It is suitable for testing new features before they're rolled out to the (more) stable `sandbox` fleet. + +In general only the latest release of `nwaku` is deployed to the `sandbox` fleet. +It requires manual updating and should therefore be more stable than `test`. +See the [section on Jenkins](#jenkins-for-deployment) below for more on the deployment process. + +### Related repos + +The [`infra-docs` repo](https://github.com/status-im/infra-docs) contains the most comprehensive overview of Status infrastructure. +This is a private repository. +Feel free to contact someone in the team to request access. + +The [`infra-nim-waku` repo](https://github.com/status-im/infra-nim-waku) contains the infrastructure definitions for Waku nodes implemented in Nim. + +## Monitoring and management + +The rest of this document highlights some infra services of specific interest to Waku fleet monitoring and management: + +1. [Consul](https://consul.infra.status.im/ui/do-ams3/services?filter=nim-waku) to view the health status of Waku nodes. +2. [Kibana](https://kibana.infra.status.im/app/discover#/) to view and filter logs. +3. [Grafana](https://grafana.infra.status.im/d/qrp_ZCTGz/nim-waku-v2) to view and filter metrics. +4. [Jenkins](https://ci.infra.status.im/job/nim-waku/) to configure and deploy new builds to the fleets. + +### 1. [Consul](https://consul.infra.status.im/ui/do-ams3/services?filter=nim-waku) for health checks + +Consul provides a useful high-level view of the health of the `nwaku` fleets. +It aggregates the result of various monitoring checks +and shows the health status for the node itself, the RPC API, exposed WebSocket and metrics. +The datacentre can be changed in the upper left-hand corner. + +### 2. 
[Kibana](https://kibana.infra.status.im/app/discover#/) for logs + +Kibana is a powerful visualisation tool for Elasticsearch data. +For Waku fleets it can be used to retrieve, filter and view the logs for all deployed services. +For example, to view the latest logs for `sandbox`, +Kibana can be opened in "Discover" mode with an [active filter for `fleet: waku.sandbox`](https://kibana.infra.status.im/goto/c0434f60-ca82-11ee-aaa4-85391103106b). + +### 3. [Grafana](https://grafana.infra.status.im/d/qrp_ZCTGz/nim-waku-v2?orgId=1&refresh=5m) for metrics + +The `Nim-Waku` Grafana dashboard displays live and historical metrics for Waku nodes. +The default view includes metrics from both fleets, +though it's possible to filter by `Hostname`, `Fleet name` or `Data Center`. +The time range can also be configured - +by default the latest metrics will be shown. + +The dashboard itself includes an _"At a glance"_ summary +with an overview of the latest connected peers, total messages, CPU usage, reported errors, etc. +The _"General"_ collection contains a more in-depth look at node, libp2p and performance-related metrics. +This is followed by separate panel collections showing _per-protocol_ metrics. + +A copy of the `Nim-Waku` fleets dashboard is maintained in the [`nwaku` repo](https://github.com/waku-org/nwaku/blob/master/metrics/waku-fleet-dashboard.json). +From time to time certain Prometheus queries may fail, +often when the underlying metrics are renamed. +Please report any broken panels via our Discord channels or by [creating an issue in `nwaku`](https://github.com/waku-org/nwaku/issues/new). + +### 4. [Jenkins](https://ci.status.im/job/nim-waku/) for deployment + +The [`nim-waku` jobs](https://ci.infra.status.im/job/nim-waku/) on Jenkins are configured to deploy `nwaku` builds to the fleets. +1. [`deploy-waku-test`](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/) is triggered automatically after every commit to the `nwaku` `master` branch. +2. 
[`deploy-waku-sandbox`](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/) must be triggered manually. Usually this job is only built after a tagged release in `nwaku`. + +Each job can be manually triggered using the _"Build with Parameters"_ option. +Options under _"Configure"_ include the build triggers, build target and branches to build. +These should only be changed with care. + +See [Continuous Integration docs](https://github.com/waku-org/nwaku/blob/master/docs/contributors/continuous-integration.md) for more. + +## Quick links + + 1. [`chat2bridge`](https://github.com/waku-org/nwaku/blob/master/docs/tutorial/chat2.md#bridge-messages-between-chat2-and-matterbridge) + 2. [Consul for do-ams3](https://consul.infra.status.im/ui/do-ams3/services?filter=nim-waku) + 3. [Consul for ac-cn-hongkong-c](https://consul.infra.status.im/ui/ac-cn-hongkong-c/services?filter=nim-waku) + 4. [Consul for gc-us-central1-a](https://consul.infra.status.im/ui/gc-us-central1-a/services?filter=nim-waku) + 5. [Grafana Nim-Waku dashboard](https://grafana.infra.status.im/d/qrp_ZCTGz/nim-waku-v2?orgId=1&refresh=5m) + 6. [`infra-docs` repo](https://github.com/status-im/infra-docs) + 7. [`infra-waku` repo](https://github.com/status-im/infra-waku) + 8. [Jenkins jobs for `nim-waku`](https://ci.infra.status.im/job/nim-waku/) + 9. [Jenkins deploy-waku-sandbox manual trigger](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/build) + 10. [Jenkins deploy-waku-test manual trigger](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/build) + 11. [Kibana logs for `sandbox`](https://kibana.infra.status.im/goto/c0434f60-ca82-11ee-aaa4-85391103106b) + 12. [Kibana logs for `test`](https://kibana.infra.status.im/goto/7cd22f20-ca83-11ee-aaa4-85391103106b) + 13. [Status fleets](https://fleets.status.im/) + 14. [Status fleets - Table](https://fleets.waku.org) + 15. 
[Websockify](https://github.com/novnc/websockify) diff --git a/third-party/nwaku/docs/faq.md b/third-party/nwaku/docs/faq.md new file mode 100644 index 0000000..40a189c --- /dev/null +++ b/third-party/nwaku/docs/faq.md @@ -0,0 +1,34 @@ +# FAQ + +## How do I see what address a node is listening for? + +Grep for "Listening on". It should be printed at INFO level at the beginning. E.g. from Kibana: + +`Oct 7, 2020 @ 23:17:00.383INF 2020-10-07 23:17:00.375+00:00 Listening on topics="wakunode" tid=1 file=wakunode2.nim:140 full=/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` + +## How do I find out node addresses at the test cluster? + +The easiest way is to use `jq` and query the fleets registry that Status operates: + +``` +curl -s https://fleets.status.im | jq '.fleets["waku.test"]' + +# Output +{ + "tcp/p2p/waku": { + "node-01.do-ams3.waku.test": "/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W", + "node-01.gc-us-central1-a.waku.test": "/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/30303/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG", + "node-01.ac-cn-hongkong-c.waku.test": "/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp" + }, + "enr/p2p/waku": { + "node-01.do-ams3.waku.test": "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Ugl_r25UHQJ3f1rIRrpzxJXSMaJe4yk1XFSAYJpZIJ2NIJpcISygI2rim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJATXRSRSUyTw_QLB6H_U3oziVQgNRgrXpK7wp2AMyNxYN0Y3CCdl-DdWRwgiMohXdha3UyDw", + "node-01.gc-us-central1-a.waku.test": 
"enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9uAYJpZIJ2NIJpcIQiEAFDim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQMIJwesBVgUiBCi8yiXGx7RWylBQkYm1U9dvEy-neLG2YN0Y3CCdl-DdWRwgiMohXdha3UyDw", + "node-01.ac-cn-hongkong-c.waku.test": "enr:-QEkuEDzQyIAhs-CgBHIrJqtBv3EY1uP1Psrc-y8yJKsmxW7dh3DNcq2ergMUWSFVcJNlfcgBeVsFPkgd_QopRIiCV2pAYJpZIJ2NIJpcIQI2ttrim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS50ZXN0LnN0YXR1c2ltLm5ldAYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQJIN4qwz3v4r2Q8Bv8zZD0eqBcKw6bdLvdkV7-JLjqIj4N0Y3CCdl-DdWRwgiMohXdha3UyDw" + }, + "wss/p2p/waku": { + "node-01.do-ams3.waku.test": "/dns4/node-01.do-ams3.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W", + "node-01.gc-us-central1-a.waku.test": "/dns4/node-01.gc-us-central1-a.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG", + "node-01.ac-cn-hongkong-c.waku.test": "/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/8000/wss/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp" + } +} +``` diff --git a/third-party/nwaku/docs/operators/README.md b/third-party/nwaku/docs/operators/README.md new file mode 100644 index 0000000..5850977 --- /dev/null +++ b/third-party/nwaku/docs/operators/README.md @@ -0,0 +1,35 @@ +# The nwaku guide for operators + +*If you're eager to get started, check out our [quickstart guide](./quickstart.md) for typical configurations or [step-by-step overview](./overview.md) for newcomers.* + +Nwaku is a client implementation in Nim of the [Waku v2 family of protocols](https://rfc.vac.dev/spec/10/) for peer-to-peer communication. 
+The protocols are designed to be secure, privacy-preserving, censorship-resistant and able to run in resource restricted environments. +Moreover, we've taken a modular approach so that node operators can choose which protocols they want to support +based on their own motivations and availability of resources. +We call this concept ["adaptive nodes"](https://rfc.vac.dev/spec/30/), +implying that a Waku v2 network can consist of heterogeneous nodes contributing at different levels to the network. + +Nwaku (formerly `nim-waku`) aims to be a lightweight and robust Waku v2 client. +It serves as the reference implementation for researchers, +who extend the client in parallel to spec development. +As such, it is first in line to support innovative and new Waku v2 protocols, +but configurable enough to serve the adaptive needs of various operators. +We are also developing a set of operator-focused tools to monitor and maintain a running nwaku node. + +This guide provides step-by-step tutorials covering how to build and configure your own nwaku node, +connect to an existing Waku v2 network +and use existing tools for monitoring and maintaining a running node. + +## Helpful resources + + + +## Getting in touch or reporting an issue + +For an inquiry, or if you would like to propose new features, feel free to [open a general issue](https://github.com/waku-org/nwaku/issues/new/). + +For bug reports, please [tag your issue with the `bug` label](https://github.com/waku-org/nwaku/issues/new/). + +If you believe the reported issue requires critical attention, please [use the `critical` label](https://github.com/waku-org/nwaku/issues/new?labels=critical,bug) to assist with triaging. + +To get help, or participate in the conversation, join the [Vac Discord](https://discord.gg/KNj3ctuZvZ) server. 
diff --git a/third-party/nwaku/docs/operators/docker-quickstart.md b/third-party/nwaku/docs/operators/docker-quickstart.md new file mode 100644 index 0000000..0907772 --- /dev/null +++ b/third-party/nwaku/docs/operators/docker-quickstart.md @@ -0,0 +1,79 @@ +# Quickstart: running nwaku in a Docker container + +This guide explains how to run a nwaku node in a Docker container. + +## Prerequisites + +Make sure you have Docker installed. +Installation instructions for different platforms can be found in the [Docker docs](https://docs.docker.com/engine/install/). + +For example, to use Docker's convenience script for installation: + +```bash +curl -fsSL https://get.docker.com -o get-docker.sh +sudo sh get-docker.sh +``` + +## Step 1: Get Docker image + +Nwaku Docker images are published to the Docker Hub public registry under [`wakuorg/nwaku`](https://hub.docker.com/r/wakuorg/nwaku). +For specific releases the published images are tagged with the release version, e.g. [`wakuorg/nwaku:v0.20.0`](https://hub.docker.com/layers/wakuorg/nwaku/v0.20.0/images/sha256-9976ac2dc536fae49b21f7b77618aa6f0efb59c694e7b3181e54c08be0c4f089?context=explore). +Images are also published for each commit to the `master` branch in the [nwaku repo](https://github.com/status-im/nwaku/commits/master) +and tagged with the corresponding commit hash. +See [`wakuorg/nwaku`](https://hub.docker.com/r/wakuorg/nwaku/tags) on Docker Hub for a full list of available tags. + +To pull the image of your choice, use + +```bash +docker pull wakuorg/nwaku:v0.20.0 # or, whichever tag you prefer in the format wakuorg/nwaku:[tag] +``` + +You can also build the Docker image locally using + +```bash +git clone --recurse-submodules https://github.com/waku-org/nwaku +cd nwaku +docker build -t wakuorg/nwaku:latest . +``` + +## Step 2: Run + +To run nwaku in a new Docker container, +use the following command: + +```bash +docker run [OPTIONS] IMAGE [ARG...] 
+```
+
+where `OPTIONS` are your selected Docker options,
+`IMAGE` the image and tag you pulled from the registry or built in Step 1
+and `ARG...` the list of nwaku arguments for your [chosen nwaku configuration](./how-to/configure.md).
+
+For Docker options we recommend explicit port mappings (`-p`) at least
+for your exposed libp2p listening ports
+and any discovery ports (e.g. the Waku discv5 port) that must be reachable from outside the host.
+
+As an example, consider the following command to run nwaku in a Docker container with the most typical configuration:
+
+```bash
+docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.20.0 \
+  --dns-discovery:true \
+  --dns-discovery-url:enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im \
+  --discv5-discovery \
+  --nat:extip:[yourpublicip] # or, if you are behind a nat: --nat=any
+```
+
+This runs nwaku in a new container from the `wakuorg/nwaku:v0.20.0` image,
+connects to `waku.sandbox` as bootstrap fleet and
+enables [Waku Discovery v5](https://rfc.vac.dev/spec/33/) for ambient peer discovery,
+while mapping the default libp2p listening port (`60000`)
+and default discv5 UDP port (`9000`) to the host.
+
+> **Tip:** The `docker run` command will pull the specified image from Docker Hub if it's not yet available locally,
+so it's possible to skip Step 1 and pull the image from your configured registry automatically when running.
+
+If you've used the `-i` and `-t` Docker options when running the new container,
+the `run` command would have allocated an interactive terminal
+where you'll see the `stdout` logs from the running nwaku process.
+To detach gracefully from the running container,
+use `Ctrl-P` followed by `Ctrl-Q`. 
diff --git a/third-party/nwaku/docs/operators/droplet-quickstart.md b/third-party/nwaku/docs/operators/droplet-quickstart.md new file mode 100644 index 0000000..070cf6d --- /dev/null +++ b/third-party/nwaku/docs/operators/droplet-quickstart.md @@ -0,0 +1,305 @@ +# Quickstart for running nwaku on a DigitalOcean Droplet + +This guide explains how to run a nwaku node on a +DigitalOcean Droplet. We enable the following protocols - + +1. Relay +2. Store +3. DNS Discovery +4. Discv5 + +A Droplet is a simple virtual machine that runs in DigitalOcean's datacenters. + +Note that Droplets do cost money, the size described in the guide costs approximately $12 a month. + +The guide makes heavy use of the `doctl` cli to make it as UI agnostic as possible. +There are similar steps to accomplish the same through DigitalOcean's cloud console, accessible [here](https://cloud.digitalocean.com/) + +## Prerequisites + +1. A DigitalOcean account. Upon signing up, you have $100 worth of credits to use. + + + +## 1. Get the `doctl` binary + +Follow this [guide](https://docs.digitalocean.com/reference/doctl/how-to/install/) to install, +and configure the `doctl` cli, which will help with setting up the Droplet. + +> Note: It is not required to set up the droplet that is mentioned in the `doctl` cli guide + +## 2. Set up SSH credentials + +Run the following command - +```bash +export DROPLET_SSH_KEY_PATH=~/.ssh/id_nwaku_droplet +ssh-keygen -f $DROPLET_SSH_KEY_PATH +``` + +Press `enter` twice, i.e do NOT set a passphrase. + +Run the following command - +```bash +export DROPLET_SSH_PUBLIC_KEY=$(cat "$DROPLET_SSH_KEY_PATH".pub) +``` + +*Alternatively*, if you would like to supply your own credentials, make sure that the public key is in the `DROPLET_SSH_PUBLIC_KEY` env variable. + + +Lastly, add the ssh key to your DigitalOcean account - +```bash +doctl compute ssh-key create nwaku-key --public-key="$DROPLET_SSH_PUBLIC_KEY" +``` + +## 3. 
Select the region closest to you + +Run the following command to get the list of available +regions - + +```bash +doctl compute region list | grep true +``` + +You should get an output similar to this - + +```bash +nyc1 New York 1 true +sgp1 Singapore 1 true +lon1 London 1 true +nyc3 New York 3 true +ams3 Amsterdam 3 true +fra1 Frankfurt 1 true +tor1 Toronto 1 true +blr1 Bangalore 1 true +sfo3 San Francisco 3 true +``` +Choose the region closest to you, and run the following command - + +```bash +export DROPLET_REGION= +``` + +For example, if you live in NYC - +```bash +export DROPLET_REGION=nyc1 +``` + +Note that it is *optional* to choose the datacenter closest to you. This is merely done for operational efficiency. + +## 4. Select the OS distribution + +Run the following command to get the list of distributions - + +```bash +doctl compute image list-distribution +``` + +You should get an output similar to this - + +```bash +ID Name Type Distribution Slug Public Min Disk +78547182 1.5.8 x64 snapshot RancherOS rancheros true 15 +106433672 7 x64 snapshot CentOS centos-7-x64 true 9 +106434098 9 Stream x64 snapshot CentOS centos-stream-9-x64 true 10 +106434191 8 Stream x64 snapshot CentOS centos-stream-8-x64 true 10 +... +``` + +Choose the distribution you are most comfortable with, and then run the following command + +```bash +export DROPLET_IMAGE= +``` + +For example, if you chose Debian 11 x64 - + +```bash +export DROPLET_IMAGE=debian-11-x64 +``` + +## 5. 
Select the size of the Droplet + +Run the following command to get the list of Droplet sizes for the previously selected region - + +```bash +doctl compute size list +``` + +You should get an output similar to this - +```bash +Slug Description Memory VCPUs Disk Price Monthly Price Hourly +s-1vcpu-512mb-10gb Basic 512 1 10 4.00 0.005950 +s-1vcpu-1gb Basic 1024 1 25 6.00 0.008930 +s-1vcpu-1gb-amd Basic AMD 1024 1 25 7.00 0.010420 +s-1vcpu-1gb-intel Basic Intel 1024 1 25 7.00 0.010420 +s-1vcpu-2gb Basic 2048 1 50 12.00 0.017860 +s-1vcpu-2gb-amd Basic AMD 2048 1 50 14.00 0.020830 +s-1vcpu-2gb-intel Basic Intel 2048 1 50 14.00 0.020830 +s-2vcpu-2gb Legacy Basic 2048 2 60 18.00 0.026790 +... +``` + +> Note: To compile the nwaku binary, a minimum of 2GB of RAM is required. You may choose a smaller Droplet, however, you would have to supply the binary in an alternate manner, i.e via the official release on Github, or compiling it on another machine and copying it over. Currently, we only supply binaries for macOS and Ubuntu. + +Choose the Droplet size that you are most comfortable with, and then run the following command - + +```bash +export DROPLET_SIZE= +``` + +For example, `s-1vcpu-2gb` is more than capable to handle the protocols we mentioned above - + +```bash +export DROPLET_SIZE=s-1vcpu-2gb +``` + +## 6. 
Create the Droplet + +Run the following command to create the droplet - + +> Note: Droplet names must be valid hostnames, i.e they must only contain alphanumeric characters and hyphens (-) + +```bash +export DROPLET_NAME= +export DROPLET_ID=$(doctl compute droplet create --region=$DROPLET_REGION --image=$DROPLET_IMAGE --size=$DROPLET_SIZE --enable-monitoring --format=ID --wait $DROPLET_NAME | sed -n '2 p') +``` + +For example, to create a droplet named `nwaku` - + +```bash +export DROPLET_NAME=nwaku +export DROPLET_ID=$(doctl compute droplet create --region=$DROPLET_REGION --image=$DROPLET_IMAGE --size=$DROPLET_SIZE --enable-monitoring --format=ID --wait $DROPLET_NAME | sed -n '2 p') +``` + +## 7. Create a Domain and attach it to the droplet (OPTIONAL) + +Follow this [guide](https://docs.digitalocean.com/products/networking/dns/how-to/add-domains/) to create a domain, and add it to the droplet appropriately. + +## 8. SSH into the Droplet + +You can get the following details in the email that DigitalOcean sends upon successful creation of the Droplet - + +1. username +2. password +3. public ipv4 address + +Since the public key we previously generated was automatically added to the authorized_keys list, we can run the following command to ssh into the Droplet - + +```bash +export DROPLET_USERNAME= +export DROPLET_IP= +ssh -i $DROPLET_SSH_KEY_PATH $DROPLET_USERNAME@$DROPLET_IP +``` + +For example, if the username was `root`, and the ipv4 address was `0.0.0.0`, + +```bash +export USERNAME=root +export IP=0.0.0.0 +ssh -i $DROPLET_SSH_KEY_PATH $DROPLET_USERNAME@$DROPLET_IP +``` + +Enter the password received in the email. + +## 9. Build nwaku + +To build `nwaku`, follow this [guide](./how-to/build.md) + +OR + +To fetch the latest release from Github, navigate to https://github.com/status-im/nwaku/releases and download the latest tarball for your distribution. 
+ +This [guide](https://www.itprotoday.com/development-techniques-and-management/how-install-targz-file-ubuntu-linux) describes how to install a tarball for your distribution. + +OR + +Run the following script to copy over the wakunode2 binary (from the host machine) - + +```bash +scp -i $DROPLET_SSH_KEY_PATH ./build/wakunode2 $DROPLET_USERNAME@$DROPLET_IP:~/wakunode2 +``` + +## 10. Set up a terminal multiplexer of choice + +You may decide to use either `screen` or `tmux` to be able to reattach to the process +after closing the ssh connection. + +Installation instructions for - +1. [screen](https://linuxhint.com/screen-linux/) +2. [tmux](https://linuxhint.com/install-tmux-ubuntu/) + +## 10. Run nwaku + +First, start the `screen` or `tmux` session by following the instructions of the terminal multiplexer chosen previously - +1. [screen](https://linuxize.com/post/how-to-use-linux-screen/#starting-linux-screen) +2. [tmux](https://linuxize.com/post/getting-started-with-tmux/#starting-your-first-tmux-session) + +Run the following command to run `nwaku` - + +*Note the path to the wakunode2 binary* + +a. Add the parent directory of the wakunode2 binary to your environment: + + If you built it locally and copied it via scp - + + ```bash + export WAKUNODE_DIR="$pwd" + ``` + + OR + + If you compiled it on the Droplet - + + ```bash + export WAKUNODE_DIR="$pwd"/build + ``` + +b. Choose the fleet you wish to connect your node to: + - waku sandbox: enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im + - waku test: enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im + + ```bash + export WAKU_FLEET= + ``` + + +c. 
Run `nwaku`: + + If you set up a domain previously - + + ```bash + export DOMAIN_NAME= + $WAKUNODE_DIR/wakunode2 \ + --store:true \ + --persist-messages \ + --dns-discovery \ + --dns-discovery-url:"$WAKU_FLEET" \ + --dns4-domain-name:"$DOMAIN_NAME" \ + --discv5-discovery:true + ``` + + OR + + If you did not set up a domain - + + ```bash + $WAKUNODE_DIR/wakunode2 \ + --store:true \ + --persist-messages \ + --dns-discovery \ + --dns-discovery-url:"$WAKU_FLEET" \ + --discv5-discovery:true + ``` + +You now have nwaku running! You can verify this by observing the logs. The logs should show that the node completed 7 steps of setup, and is actively discovering other nodes. + +You may now detach from stdout, by following instructions according to the terminal multiplexer chosen previously - +1. [screen](https://linuxize.com/post/how-to-use-linux-screen/#detach-from-linux-screen-session) +2. [tmux](https://linuxize.com/post/getting-started-with-tmux/#starting-your-first-tmux-session) + +To re-attach and observe the logs at a later date, follow these instructions - +1. [screen](https://linuxize.com/post/how-to-use-linux-screen/#reattach-to-a-linux-screen) +2. [tmux](https://linuxize.com/post/getting-started-with-tmux/#re-attaching-to-tmux-session) + +For alternative configurations, refer to this [guide](./how-to/configure.md) + diff --git a/third-party/nwaku/docs/operators/how-to/build.md b/third-party/nwaku/docs/operators/how-to/build.md new file mode 100644 index 0000000..473547c --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/build.md @@ -0,0 +1,59 @@ +# Build nwaku + +Nwaku can be built on Linux and macOS. +Windows support is experimental. + +## Installing dependencies + +Cloning and building nwaku requires the usual developer tools, +such as a C compiler, Make, Bash and Git. 
+ +### Linux + +On common Linux distributions the dependencies can be installed with + +```sh +# Debian and Ubuntu +sudo apt-get install build-essential git + +# Fedora +dnf install @development-tools + +# Archlinux, using an AUR manager +yourAURmanager -S base-devel +``` + +### macOS + +Assuming you use [Homebrew](https://brew.sh/) to manage packages + +```sh +brew install cmake +``` + +## Building nwaku + +### 1. Clone the nwaku repository + +```sh +git clone https://github.com/status-im/nwaku +cd nwaku +``` + +### 2. Make the `wakunode2` target + +```sh +# The first `make` invocation will update all Git submodules. +# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date. +make wakunode2 +``` + +This will create a `wakunode2` binary in the `./build/` directory. + +> **Note:** Building `wakunode2` requires 2GB of RAM. +The build will fail on systems not fulfilling this requirement. + +> Setting up a `wakunode2` on the smallest [digital ocean](https://docs.digitalocean.com/products/droplets/how-to/) droplet, you can either +> * compile on a stronger droplet featuring the same CPU architecture and downgrade after compiling, or +> * activate swap on the smallest droplet, or +> * use Docker. diff --git a/third-party/nwaku/docs/operators/how-to/configure-dns-disc.md b/third-party/nwaku/docs/operators/how-to/configure-dns-disc.md new file mode 100644 index 0000000..f003c61 --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/configure-dns-disc.md @@ -0,0 +1,27 @@ +# Use DNS discovery to connect to existing nodes + +> **Note:** This page describes using DNS to discover other peers +and is unrelated to the [domain name configuration](./configure-domain.md) for your nwaku node. + +A node can discover other nodes to connect to using [DNS-based discovery](../../tutorial/dns-disc.md). 
+The following command line options are available: + +``` +--dns-discovery Enable DNS Discovery +--dns-discovery-url URL for DNS node list in format 'enrtree://@' +``` + +- `--dns-discovery` is used to enable DNS discovery on the node. +Waku DNS discovery is disabled by default. +- `--dns-discovery-url` is mandatory if DNS discovery is enabled. +It contains the URL for the node list. +The URL must be in the format `enrtree://@` where `` is the fully qualified domain name and `` is the base32 encoding of the compressed 32-byte public key that signed the list at that location. + +A node will attempt connection to all discovered nodes. + +This can be used, for example, to connect to one of the existing fleets. +Current URLs for the published fleet lists: +- production fleet: `enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im` +- test fleet: `enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im` + +See the [separate tutorial](../../tutorial/dns-disc.md) for a complete guide to DNS discovery. diff --git a/third-party/nwaku/docs/operators/how-to/configure-domain.md b/third-party/nwaku/docs/operators/how-to/configure-domain.md new file mode 100644 index 0000000..226da4b --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/configure-domain.md @@ -0,0 +1,16 @@ +# Configure a domain name + +> **Note:** This page describes configuring a domain name that resolves to your node's IP +and is unrelated to [DNS discovery](./configure-dns-disc.md), +by which a node may discover the listening addresses of other peers using DNS. + +It is possible to configure an IPv4 DNS domain name that resolves to the node's public IPv4 address. + +```shell +wakunode2 --dns4-domain-name=mynode.example.com +``` + +This allows for the node's publicly announced `multiaddrs` to use the `/dns4` scheme. 
+In addition, nodes with domain name and [secure websocket configured](./configure-websocket.md), +will generate a discoverable ENR containing the `/wss` multiaddr with `/dns4` domain name. +This is necessary to verify domain certificates when connecting to this node over secure websocket. \ No newline at end of file diff --git a/third-party/nwaku/docs/operators/how-to/configure-key.md b/third-party/nwaku/docs/operators/how-to/configure-key.md new file mode 100644 index 0000000..8da84f9 --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/configure-key.md @@ -0,0 +1,54 @@ +# Generate and configure a node key + +By default a node will generate a new, random key pair each time it boots, +resulting in a different public libp2p `multiaddrs` after each restart. + +To maintain consistent addressing across restarts, +it is possible to configure the node with a previously generated private key using the `--nodekey` option. + +```shell +wakunode2 --nodekey=<64_char_hex> +``` + +This option takes a [Secp256k1](https://en.bitcoin.it/wiki/Secp256k1) private key in 64 char hexstring format. + +To generate such a key on Linux systems, +use the openssl `rand` command to generate a pseudo-random 32 byte hexstring. + +```sh +openssl rand -hex 32 +``` + +Example output: + +```sh +$ openssl rand -hex 32 +6a29e767c96a2a380bb66b9a6ffcd6eb54049e14d796a1d866307b8beb7aee58 +``` + +where the key `6a29e767c96a2a380bb66b9a6ffcd6eb54049e14d796a1d866307b8beb7aee58` can be used as `nodekey`. + +To create a reusable keyfile on Linux using `openssl`, +use the `ecparam` command coupled with some standard utilities +whenever you want to extract the 32 byte private key in hex format. 
+ +```sh +# Generate keyfile +openssl ecparam -genkey -name secp256k1 -out my_private_key.pem +# Extract 32 byte private key +openssl ec -in my_private_key.pem -outform DER | tail -c +8 | head -c 32| xxd -p -c 32 +``` + +Example output: + +```sh +read EC key +writing EC key +0c687bb8a7984c770b566eae08520c67f53d302f24b8d4e5e47cc479a1e1ce23 +``` + +where the key `0c687bb8a7984c770b566eae08520c67f53d302f24b8d4e5e47cc479a1e1ce23` can be used as `nodekey`. + +```sh +wakunode2 --nodekey=0c687bb8a7984c770b566eae08520c67f53d302f24b8d4e5e47cc479a1e1ce23 +``` diff --git a/third-party/nwaku/docs/operators/how-to/configure-rest-api.md b/third-party/nwaku/docs/operators/how-to/configure-rest-api.md new file mode 100644 index 0000000..3fe070a --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/configure-rest-api.md @@ -0,0 +1,23 @@ + +# Configure a REST API node + +A subset of the node configuration can be used to modify the behaviour of the HTTP REST API. + +These are the relevant command line options: + +| CLI option | Description | Default value | +|------------|-------------|---------------| +|`--rest` | Enable Waku REST HTTP server. | `false` | +|`--rest-address` | Listening address of the REST HTTP server. | `127.0.0.1` | +|`--rest-port` | Listening port of the REST HTTP server. | `8645` | +|`--rest-relay-cache-capacity` | Capacity of the Relay REST API message cache. | `30` | +|`--rest-admin` | Enable access to REST HTTP Admin API. | `false` | +|`--rest-private` | Enable access to REST HTTP Private API. | `false` | + +Note that these command line options have their counterpart option in the node configuration file. 
+ +Example: + +```shell +wakunode2 --rest=true +``` diff --git a/third-party/nwaku/docs/operators/how-to/configure-store-v0.12.0.md b/third-party/nwaku/docs/operators/how-to/configure-store-v0.12.0.md new file mode 100644 index 0000000..237c7fc --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/configure-store-v0.12.0.md @@ -0,0 +1,56 @@ +# Configure store protocol (versions prior to v0.13.0) + +Store protocol is enabled by default on a nwaku node. +This is controlled by the `--store` CLI option. + +```sh +# Disable store protocol on startup +./build/wakunode2 --store:false +``` + +Note that this only mounts the `store` protocol, +meaning your node will indicate to other peers that it supports `store`. +It does not yet allow your node to either retrieve historical messages as a client +or store and serve historical messages itself. + +## Configuring a store client + +Ensure that `store` is enabled (this is `true` by default) and provide at least one store service node address with the `--storenode` CLI option. + +See the following example, using the peer at `/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` as store service node. + +```sh +./build/wakunode2 \ + --store:true \ + --storenode:/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp +``` + +Your node can now send queries to retrieve historical messages +from the configured store service node. +One way to trigger such queries is asking your node for historical messages using the [Waku v2 JSON RPC API](https://rfc.vac.dev/spec/16/). + +## Configuring a store service node + +To store historical messages on your node which can be served to store clients the `--persist-messages` CLI option must be enabled. +By default a node would store up to the latest `50 000` messages. +This is configurable using the `--store-capacity` option. 
+A node that has a `--db-path` set will backup historical messages to a local database at the DB path +and persist these messages even after a restart. + +```sh +./build/wakunode2 \ + --store:true \ + --persist-messages:true \ + --db-path:/mnt/nwaku/data/db1/ \ + --store-capacity:150000 +``` + +### How much resources should I allocate? + +Currently store service nodes use an in-memory key-value store as primary storage with the disk-based database only used for backups. +Most Waku messages average a size of 1KB - 2KB, +implying a minimum memory requirement of at least ~250MB +for a medium capacity store of 100k messages. +Note, however, that the allowable maximum size for Waku messages is up to 1MB. +We are working on a disk-only and hybrid store to lower the memory requirement. +It will soon also be possible to configure store capacity on maximum store size or number of days' history to keep. diff --git a/third-party/nwaku/docs/operators/how-to/configure-store.md b/third-party/nwaku/docs/operators/how-to/configure-store.md new file mode 100644 index 0000000..ffa39cf --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/configure-store.md @@ -0,0 +1,58 @@ +# Configure store protocol + +> :information_source: This instructions apply to nwaku version v0.13.0+. For versions prior to v0.13.0, check [this page](./configure-store-v0.12.0.md). + +The waku store protocol is disabled by default the nwaku node. +This is controlled by the `--store` option. To enable waku store protocol on startup, specify explicitly the `--store` option set to `true`: + +```shell +wakunode2 --store=true +``` + +This option controls the mounting of the Waku Store protocol, meaning that your node will indicate to other peers that it supports the Waku store protocol. + +## Configuring the node as a waku store client + +Provide at least one store service node address with the `--storenode` option. 
This option is independent of the `--store` option i.e., one node can act as a waku store client without mounting the Waku Store protocol. + +For example, to use the peer at `/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp` as the waku store service node: + +```shell +wakunode2 \ + --storenode=/dns4/node-01.ac-cn-hongkong-c.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp +``` + +Your node can now send queries to retrieve historical messages +from the configured store service node. One way to trigger such queries is asking your node for historical messages using the [Waku v2 JSON RPC API](https://rfc.vac.dev/spec/16/). + +## Configuring the node as a store service node + +If the waku store node is enabled (the `--store` option is set to `true`) the node will store historical messages and will be able to serve those messages to the waku store clients. + +There is a set of configuration options to customize the waku store protocol's message store. These are the most relevant: + +* `--store-message-retention-policy`: This option controls the retention policy i.e., how long certain messages will be persisted. Three different retention policies are supported: + + The time retention policy,`time:` (e.g., `time:14400`) + + The capacity retention policy,`capacity:` (e.g, `capacity:25000`) + + The size retention policy,`size:` (e.g, `size:25Gb`) + + To disable the retention policy, explicitly, set this option to `""`, an empty string. +* `--store-message-db-url`: The message store database url option controls the message storage engine. This option follows the [_SQLAlchemy_ database URL format](https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls). + + + SQLite engine: The only database engine supported by the nwaku node. The database URL has this shape: `sqlite://`. 
If the `<path>` is not an absolute path (preceded by a `/` character), the file will be created in the current working directory. The SQLite engine also supports selecting a non-persistent in-memory database by setting the `<path>` to `:memory:`.
A valid certificate is necessary to serve browser nodes;
you can use [`letsencrypt`](https://letsencrypt.org/):

```shell
sudo letsencrypt -d <your.domain.name>
```
Default value + +Note the precedence order, each configuration mechanism overrides the configuration set by one below (e.g., _command line options_ override the configuration set by the _environment variables_ and by the _configuration file_). + +### Command line options/flags + +The main mechanism to configure the node is via command line options. Any configuration option provided via the command line will override any other configuration mechanism. + +> :warning: nwaku is under heavy development. It is likely that configuration will change from one version to another. +> +> If after an upgrade, the node refuses to start, check if any of the command line configuration options provided to the node have been changed or removed. +> +> To overcome this issue, we recommend to configure the node via environment variables. + +The configuration options should be provided after the binary name as follows: + +```shell +wakunode2 --tcp-port=65000 +``` + +In the case of using docker to run you node you should provide the commandline options after the image name as follows: + +```shell +docker run wakuorg/nwaku --tcp-port=65000 +``` + +Run `wakunode2 --help` to get a comprehensive list of configuration options (and its default values): + +```shell +$ wakunode2 --help +Usage: + +wakunode2 [OPTIONS]... + +The following options are available: + + --config-file Loads configuration from a TOML file (cmd-line parameters take precedence). + --log-level Sets the log level. [=LogLevel.INFO]. + --version prints the version [=false]. + +<...> +``` + +Check the configuration tutorials for specific configuration use cases. + +### Environment variables + +The node can also be configured via environment variables. 
+ +> :information_source: Support for configuring the node via environment variables was added in v0.13.0 + +The environment variable name should be prefixed by the app's name, in this case `WAKUNODE2_` followed by the commandline option in [screaming snake case](https://en.wiktionary.org/wiki/screaming_snake_case). + +For example, to set the `--tcp-port` configuration we should call `wakunode2` binary as follows: + +```shell +WAKUNODE2_TCP_PORT=65000 wakunode2 +``` + +In the case of using docker to run you node you should start the node using the `-e` command options: + +```shell +docker run -e "WAKUNODE2_TCP_PORT=65000" wakuorg/nwaku +``` + +This is the second configuration method in order of precedence. Any command line configuration option will override the configuration +provided via environment variables. + +### Configuration file + +The third configuration mechanism in order of precedence is the configuration via a TOML file. The previous mechanisms take precedence over this mechanism as explained above. + +The configuration file follows the [TOML](https://toml.io/en/) format: + +```toml +log-level = "DEBUG" +tcp-port = 65000 +``` + +The path to the TOML file can be specified using one of the previous configuration mechanisms: + +* By passing the `--config-file` command line option: + ```shell + wakunode2 --config-file= + ``` +* By passing the path via environment variables: + ```shell + WAKUNODE2_CONFIG_FILE= wakunode2 + ``` + +### Configuration default values + +As usual, if no configuration option is specified by any of the previous mechanisms, the default configuration will be used. + +The default configuration value is listed in the `wakunode2 --help` output: + +```shell +$ wakunode2 --help +Usage: + +wakunode2 [OPTIONS]... + +The following options are available: + + --config-file Loads configuration from a TOML file (cmd-line parameters take precedence). + --log-level Sets the log level. [=LogLevel.INFO]. 
--version prints the version [=false].
  --tcp-port TCP listening port. [=60000].
+ +```sh +./build/wakunode2 \ + --staticnode: \ + --staticnode: +``` + +As an example, consider a nwaku node that connects to two known peers +on the same local host (with IP `0.0.0.0`) +with TCP ports `60002` and `60003`, +and peer IDs `16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H` and `16Uiu2HAmFBA7LGtwY5WVVikdmXVo3cKLqkmvVtuDu63fe8safeQJ` respectively. + +```sh +./build/wakunode2 \ + --staticnode:/ip4/0.0.0.0/tcp/60002/p2p/16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H \ + --staticnode:/ip4/0.0.0.0/tcp/60003/p2p/16Uiu2HAmFBA7LGtwY5WVVikdmXVo3cKLqkmvVtuDu63fe8safeQJ +``` + +## Option 2: Discover peers using DNS discovery + +A node can discover other nodes to connect to using DNS-based discovery. +For a quickstart guide on how to configure DNS discovery, +see [this tutorial](./configure-dns-disc.md). +There is also a [more comprehensive tutorial](../../tutorial/dns-disc.md) for advanced users. + +## Option 3: Discover peers using Waku Discovery v5 + + + +Enable Discovery v5 using the `--discv5-discovery` option. + +It is possible to configure bootstrap entries for the Discovery v5 routing table +using the `--discv5-bootstrap-node` option repeatedly. + +```sh +./build/wakunode2 \ + --discv5-discovery:true \ + --discv5-bootstrap-node: \ + --discv5-bootstrap-node: +``` + +Note that if Discovery v5 is enabled and used in conjunction with DNS-based discovery, +the nwaku node will attempt to bootstrap the Discovery v5 routing table with ENRs extracted from the peers discovered via DNS. diff --git a/third-party/nwaku/docs/operators/how-to/monitor.md b/third-party/nwaku/docs/operators/how-to/monitor.md new file mode 100644 index 0000000..8ef3f06 --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/monitor.md @@ -0,0 +1,111 @@ +# Monitor nwaku using Prometheus and Grafana + +## Prerequisites + +1. A running nwaku instance with HTTP metrics server enabled (i.e. with `--metrics-server:true`) +2. 
[Prometheus](https://prometheus.io/) and [Grafana](https://grafana.com/) installed + +### Installing Prometheus + +Prometheus can be installed by downloading and extracting +the latest release for your system distribution from the [Prometheus download page](https://prometheus.io/download/). + +For example, on a DebianOS distribution you could run + +```bash +wget https://github.com/prometheus/prometheus/releases/download/v2.38.0/prometheus-2.38.0.linux-amd64.tar.gz +tar xvfz prometheus-2.38.0.linux-amd64.tar.gz +``` + +For more advanced installations, +Prometheus has a handy [Getting Started](https://prometheus.io/docs/prometheus/latest/getting_started/) page to guide you through the process. +There are also many third party guides on installing Prometheus for specific distributions, +such as [this old but still relevant one](https://www.digitalocean.com/community/tutorials/how-to-install-prometheus-on-ubuntu-16-04) from DigitalOcean. +We also suggest running Prometheus as a service, +as explained by [this guide](https://www.devopsschool.com/blog/how-to-run-prometheus-server-as-a-service/). +Bear in mind that we'll be creating our own `prometheus.yml` configuration file later on when you encounter this in any of the guides. + +### Installing Grafana + +Follow the [installation instructions](https://grafana.com/docs/grafana/latest/setup-grafana/installation/) appropriate to your distribution to install Grafana. +The stable version of the Grafana Enterprise Edition is the free, recommended edition to install. + +## Configure Prometheus + +1. Create a file called `prometheus.yml` with the following content: + +```yml +global: + scrape_interval: 15s + +scrape_configs: + - job_name: 'prometheus' + scrape_interval: 5s + static_configs: + - targets: ['localhost:9090'] + - job_name: 'nwaku' + scrape_interval: 1s + static_configs: + - targets: ['localhost:'] +``` + +Replace `` with the metrics HTTP server port of your running nwaku instance. 
+For default configurations metrics are reported on port `8008` of the `localhost`. +If you've used `--ports-shift`, or explicitly set the metrics port using `--metrics-server-port`, this port will be different from the default. +It's possible to extract the metrics server port from the startup logs of the nwaku node. +Look for a log with the format below and substitute `nwaku_port` with the value reported after `serverPort=`: + +``` +INF 2022-09-16 12:14:12.739+01:00 Metrics HTTP server started topics="wakunode.setup.metrics" tid=6243 file=wakunode2_setup_metrics.nim:29 serverIp=127.0.0.1 serverPort=8009 +``` + +2. Start Prometheus using the config file you created above: + +```bash +./path/to/prometheus --config.file=/path/to/prometheus.yml & +``` + +3. Verify that Prometheus is running correctly. + +Once Prometheus is running, it exposes by default a management console on port `9090`. +If you are running Prometheus locally, for example, +you can visit http://localhost:9090/ in a browser to view basic info about the running instance. +http://localhost:9090/targets shows the state of the different metrics server endpoints that we configured in `prometheus.yml`. +In our case we'd expect Prometheus to successfully scrape metrics off two endpoints, +the running nwaku instance and Prometheus itself. + +## Configure Grafana + +1. Start the Grafana server, if it's not running already after installation. + +```bash +sudo systemctl start grafana-server +``` + +2. Open Grafana in your browser. + +Grafana exposes its interface by default on port `3000`. +For example, if you are running Grafana locally, +you can find it by navigating to http://localhost:3000/. +If you are prompted for a username and password, +the default is `admin` in both cases. + +3. Set Prometheus as your data source. + +[These instructions](https://grafana.com/docs/grafana/latest/datasources/add-a-data-source/) describe how to add a new data source. 
+The default values for setting up a Prometheus data source should be sufficient. + +4. Create a new dashboard or import an existing one. + +You can now visualize metrics off your running nwaku instance by [creating a new dashboard and adding panels](https://grafana.com/docs/grafana/latest/dashboards/add-organize-panels/) for the metric(s) of your choice. +To get you started, +we have published a [basic monitoring dashboard for a single nwaku node](https://github.com/status-im/nwaku/blob/d4e899fba77389d20ca19c73a9443501039cdef2/metrics/waku-single-node-dashboard.json) +which you can [import to your Grafana instance](https://grafana.com/docs/grafana/latest/dashboards/manage-dashboards/#import-a-dashboard). + +5. Happy monitoring! + +Some of the most important metrics to keep an eye on include: +- `libp2p_peers` as an indication of how many peers your node is connected to, +- `waku_node_messages_total` to view the total amount of network traffic relayed by your node and +- `waku_node_errors` as a rough indication of basic operating errors logged by the node. + diff --git a/third-party/nwaku/docs/operators/how-to/run-with-rln.md b/third-party/nwaku/docs/operators/how-to/run-with-rln.md new file mode 100644 index 0000000..ef1a6c2 --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/run-with-rln.md @@ -0,0 +1,89 @@ +# How to run spam prevention on your nwaku node (RLN) + +This guide explains how to run a nwaku node with RLN (Rate Limiting Nullifier) enabled. + +[RLN](https://rfc.vac.dev/spec/32/) is a protocol integrated into waku v2, +which prevents spam-based attacks on the network. + +For further background on the research for RLN tailored to waku, refer +to [this](https://rfc.vac.dev/spec/17/) RFC. + +Registering to the membership group has been left out for brevity. +If you would like to register to the membership group and send messages with RLN, +refer to the [on-chain chat2 tutorial](../../tutorial/onchain-rln-relay-chat2.md). 
+ +This guide specifically allows a node to participate in RLN testnet 2. +You may alter the rln-specific arguments as required. + +## Prerequisites + +1. Follow the [droplet quickstart](../droplet-quickstart.md) or the [build guide](./build.md) till the `make` command for the wakunode2 binary. + +> Note: If you would like to run a nwaku node with RLN enabled within a docker container, skip ahead to step 2. + +## 1. Build wakunode2 + +Run - +```bash +make wakunode2 +``` + +## 2. Update the runtime arguments + +Follow [Step 10](../droplet-quickstart.md#10-run-nwaku) of the [droplet quickstart](../droplet-quickstart.md) guide, while replacing the run command with - + +```bash +export LINEA_SEPOLIA_HTTP_NODE_ADDRESS= +export RLN_RELAY_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6" # Replace this with any compatible implementation +$WAKUNODE_DIR/wakunode2 \ +--store:true \ +--persist-messages \ +--dns-discovery \ +--dns-discovery-url:"$WAKU_FLEET" \ +--discv5-discovery:true \ +--rln-relay:true \ +--rln-relay-dynamic:true \ +--rln-relay-eth-contract-address:"$RLN_RELAY_CONTRACT_ADDRESS" \ +--rln-relay-eth-client-address:"$LINEA_SEPOLIA_HTTP_NODE_ADDRESS" +``` + +OR + +If you are running the nwaku node within docker, follow [Step 2](../docker-quickstart.md#step-2-run) while replacing the run command with - + +```bash +export WAKU_FLEET= +export LINEA_SEPOLIA_HTTP_NODE_ADDRESS= +export RLN_RELAY_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6" # Replace this with any compatible implementation +docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.36.0 \ + --dns-discovery:true \ + --dns-discovery-url:"$WAKU_FLEET" \ + --discv5-discovery \ + --nat:extip:[yourpublicip] \ # or, if you are behind a nat: --nat=any + --rln-relay:true \ + --rln-relay-dynamic:true \ + --rln-relay-eth-contract-address:"$RLN_RELAY_CONTRACT_ADDRESS" \ + --rln-relay-eth-client-address:"$LINEA_SEPOLIA_HTTP_NODE_ADDRESS" +``` + +> Note: You can choose to keep 
connections to other nodes alive by adding the `--keep-alive` flag. + +Following is the list of additional fields that have been added to the +runtime arguments - + +1. `--rln-relay`: Allows waku-rln-relay to be mounted into the setup of the nwaku node +2. `--rln-relay-dynamic`: Enables waku-rln-relay to connect to an ethereum node to fetch the membership group +3. `--rln-relay-eth-contract-address`: The contract address of an RLN membership group +4. `--rln-relay-eth-client-address`: The HTTP url to a Linea Sepolia ethereum node + +You should now have nwaku running, with RLN enabled! + +To see metrics related to the functioning of RLN, refer to this [guide](./todo). +You can also refer to the periodic logging, for a few metrics like - + +- number of spam messages +- number of valid messages +- number of invalid messages + + +> Note: This guide will be updated in the future to include features like slashing. diff --git a/third-party/nwaku/docs/operators/how-to/run.md b/third-party/nwaku/docs/operators/how-to/run.md new file mode 100644 index 0000000..bc90394 --- /dev/null +++ b/third-party/nwaku/docs/operators/how-to/run.md @@ -0,0 +1,196 @@ +# Running nwaku + +Nwaku binaries can be [built](./build.md) and run on Linux and macOS. +Windows support is experimental. + +```sh +# Run with default configuration +./build/wakunode2 + +# See available command line options +./build/wakunode2 --help +``` + +## Default configuration + +By default a nwaku node will: +- generate a new private key and libp2p identities after every restart. +See [this tutorial](./configure-key.md) if you want to generate and configure a persistent private key. +- listen for incoming libp2p connections on the default TCP port (`60000`) +- enable `relay` protocol +- subscribe to the default clusterId (0) and shard (0) +- enable `store` protocol, but only as a client. +This implies that the nwaku node will not persist any historical messages itself, +but can query `store` service peers who do so. 
+To configure `store` as a service node, +see [this tutorial](./configure-store.md). + +> **Note:** The `filter` and `lightpush` protocols are _not_ enabled by default. +Consult the [configuration guide](./configure.md) on how to configure your nwaku node to run these protocols. + +Some typical non-default configurations are explained below. +For more advanced configuration, see the [configuration guide](./configure.md). +Different ways to connect to other nodes are expanded upon in our [connection guide](./connect.md). + +## Finding your listening address(es) + +Find the log entry beginning with `Listening on`. +It should be printed at INFO level when you start your node +and contains a list of all publicly announced listening addresses for the nwaku node. + +For example + +``` +INF 2022-05-11 16:42:30.591+02:00 Listening on topics="wakunode" tid=6661 file=wakunode2.nim:941 full=[/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H][/ip4/0.0.0.0/tcp/8000/ws/p2p/16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H] +``` + +indicates that your node is listening on the TCP transport address + +``` +/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H +``` + +and websocket address + +``` +/ip4/0.0.0.0/tcp/8000/ws/p2p/16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H +``` + +You can also query a running node for its listening addresses +using the REST API. + +```bash +curl http://localhost:8645/debug/v1/info -s | jq +``` + +## Finding your discoverable ENR address(es) + +A nwaku node can encode its addressing information in an [Ethereum Node Record (ENR)](https://eips.ethereum.org/EIPS/eip-778) according to [`31/WAKU2-ENR`](https://rfc.vac.dev/spec/31/). +These ENR are most often used for discovery purposes. + +### ENR for DNS discovery + +Find the log entry beginning with `DNS: discoverable ENR`. 
+It should be printed at INFO level when you start your node with [DNS discovery enabled](./configure-dns-disc.md) +and contains an ENR that can be added to node lists discoverable via DNS. + +For example + +``` +INF 2022-05-20 11:52:48.772+02:00 DNS: discoverable ENR topics="wakunode" tid=5182 file=wakunode2.nim:941 enr=enr:-Iu4QBZs5huNuEAjI9WA0HOAjzpmp39vKJAtYRG3HXH86-i3HGcxMgupIkyDBmBq9qJ2wFfgMiW8AUzUxTFMAzfJM5MBgmlkgnY0gmlwhAAAAACJc2VjcDI1NmsxoQN0EcrUbHrL_O_kNXDlBvcO1I4yZUdNk7VZI5GsXaWgvYN0Y3CC6mCFd2FrdTID +``` + +indicates that your node addresses are encoded in the ENR + +``` +enr=enr:-Iu4QBZs5huNuEAjI9WA0HOAjzpmp39vKJAtYRG3HXH86-i3HGcxMgupIkyDBmBq9qJ2wFfgMiW8AUzUxTFMAzfJM5MBgmlkgnY0gmlwhAAAAACJc2VjcDI1NmsxoQN0EcrUbHrL_O_kNXDlBvcO1I4yZUdNk7VZI5GsXaWgvYN0Y3CC6mCFd2FrdTID +``` + +### ENR for Discovery v5 + +Find the log entry beginning with `Discv5: discoverable ENR`. +It should be printed at INFO level when you start your node with [Waku Discovery v5 enabled](https://rfc.vac.dev/spec/33/) +and contains the ENR that will be discoverable by other peers. + +For example + +``` +INF 2022-05-20 11:52:48.775+02:00 Discv5: discoverable ENR topics="wakunode" tid=5182 file=wakunode2.nim:905 enr=enr:-IO4QDxToTg86pPCK2KvMeVCXC2ADVZWrxXSvNZeaoa0JhShbM5qed69RQz1s1mWEEqJ3aoklo_7EU9iIBcPMVeKlCQBgmlkgnY0iXNlY3AyNTZrMaEDdBHK1Gx6y_zv5DVw5Qb3DtSOMmVHTZO1WSORrF2loL2DdWRwgiMohXdha3UyAw +``` + +indicates that your node addresses are encoded in the ENR + +``` +enr=enr:-IO4QDxToTg86pPCK2KvMeVCXC2ADVZWrxXSvNZeaoa0JhShbM5qed69RQz1s1mWEEqJ3aoklo_7EU9iIBcPMVeKlCQBgmlkgnY0iXNlY3AyNTZrMaEDdBHK1Gx6y_zv5DVw5Qb3DtSOMmVHTZO1WSORrF2loL2DdWRwgiMohXdha3UyAw +``` + +## Typical configuration (relay node) + +The typical configuration for a nwaku node is to run the `relay` protocol, +subscribed to the default pubsub topic `/waku/2/rs/0/0`, +and connecting to one or more existing peers. 
+We assume below that running nodes also participate in Discovery v5 +to continually discover and connect to random peers for a more robust mesh. + +### Connecting to known peer(s) + +A typical run configuration for a nwaku node is to connect to existing peers with known listening addresses using the `--staticnode` option. +The `--staticnode` option can be repeated for each peer you want to connect to on startup. +This is also useful if you want to run several nwaku instances locally +and therefore know the listening addresses of all peers. + +As an example, consider a nwaku node that connects to two known peers +on the same local host (with IP `0.0.0.0`) +with TCP ports `60002` and `60003`, +and peer IDs `16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H` and `16Uiu2HAmFBA7LGtwY5WVVikdmXVo3cKLqkmvVtuDu63fe8safeQJ` respectively. +The Discovery v5 routing table can similarly be bootstrapped using a static ENR. +We include an example below. + +```sh +./build/wakunode2 \ + --ports-shift:1 \ + --staticnode:/ip4/0.0.0.0/tcp/60002/p2p/16Uiu2HAkzjwwgEAXfeGNMKFPSpc6vGBRqCdTLG5q3Gmk2v4pQw7H \ + --staticnode:/ip4/0.0.0.0/tcp/60003/p2p/16Uiu2HAmFBA7LGtwY5WVVikdmXVo3cKLqkmvVtuDu63fe8safeQJ \ + --discv5-discovery:true \ + --discv5-bootstrap-node:enr:-JK4QM2ylZVUhVPqXrqhWWi38V46bF2XZXPSHh_D7f2PmUHbIw-4DidCBnBnm-IbxtjXOFbdMMgpHUv4dYVH6TgnkucBgmlkgnY0gmowhCJ6_HaJc2VjcDI1NmsxoQM06FsT6EJ57mzR_wiLu2Bz1dER2nUFSCpaFzCccQtnhYN0Y3CCdl-DdWRwgiMohXdha3UyDw +``` + +> **Tip:** `--ports-shift` shifts all configured ports forward by the configured amount. +This is another useful option when running several nwaku instances on a single machine +and would like to avoid port clashes without manually configuring each port. 
+
+### Connecting to the `waku.sandbox` network
+
+*See [this explainer](https://github.com/status-im/nwaku/blob/6ebe26ad0587d56a87a879d89b7328f67f048911/docs/contributors/waku-fleets.md) on the different networks and Waku v2 fleets.*
+
+You can use DNS discovery to bootstrap connection to the existing production network.
+Discovery v5 will attempt to extract the ENRs of the discovered nodes as bootstrap entries to the routing table.
+
+```sh
+./build/wakunode2 \
+  --ports-shift:1 \
+  --dns-discovery:true \
+  --dns-discovery-url:enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im \
+  --discv5-discovery:true
+```
+
+### Connecting to the `waku.test` network
+
+*See [this explainer](https://github.com/status-im/nwaku/blob/6ebe26ad0587d56a87a879d89b7328f67f048911/docs/contributors/waku-fleets.md) on the different networks and Waku v2 fleets.*
+
+You can use DNS discovery to bootstrap connection to the existing test network.
+Discovery v5 will attempt to extract the ENRs of the discovered nodes as bootstrap entries to the routing table.
+
+```sh
+./build/wakunode2 \
+  --ports-shift:1 \
+  --dns-discovery:true \
+  --dns-discovery-url:enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im \
+  --discv5-discovery:true
+```
+
+## Typical configuration (relay and store service node)
+
+Often nwaku nodes choose to also store historical messages,
+which can then be queried by other peers who may have been temporarily offline.
+For example, a typical configuration for such a store service node,
+[connecting to the `waku.test`](#connecting-to-the-wakutest-network) fleet on startup,
+appears below.
+
+```sh
+./build/wakunode2 \
+  --ports-shift:1 \
+  --store:true \
+  --persist-messages:true \
+  --db-path:/mnt/nwaku/data/db1/ \
+  --store-capacity:150000 \
+  --dns-discovery:true \
+  --dns-discovery-url:enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im \
+  --discv5-discovery:true
+```
+
+See our [store configuration tutorial](./configure-store.md) for more.
+
+## Interact with a running nwaku node
+
+A running nwaku node can be interacted with using the [REST API](https://github.com/waku-org/nwaku/blob/master/docs/api/rest-api.md).
diff --git a/third-party/nwaku/docs/operators/overview.md b/third-party/nwaku/docs/operators/overview.md
new file mode 100644
index 0000000..f43652a
--- /dev/null
+++ b/third-party/nwaku/docs/operators/overview.md
@@ -0,0 +1,39 @@
+# Overview: running a nwaku node
+
+This guide provides an overview for newcomers
+on how to build and run a nwaku node
+for the most common use cases.
+For a more advanced configuration see our [configuration guides](./how-to/configure.md).
+
+To set up a nwaku node on a DigitalOcean droplet,
+refer to our [quickstart guide for droplets](./droplet-quickstart.md).
+If you prefer running nwaku in a Docker container,
+see our [Docker guide](./docker-quickstart.md).
+
+## 1. Build
+
+[Build the nwaku node](./how-to/build.md)
+or download a precompiled binary from our [releases page](https://github.com/waku-org/nwaku/releases).
+
+If you'd like to test the latest changes without building the binaries yourself, you can refer to the [nightly release](https://github.com/waku-org/nwaku/releases/tag/nightly).
+
+Docker images are published to [wakuorg/nwaku](https://hub.docker.com/r/wakuorg/nwaku/tags) on Docker Hub.
+See our [Docker quickstart guide](./docker-quickstart.md) to run nwaku in a Docker container.
+
+## 2. Run
+
+[Run the nwaku node](./how-to/run.md) using a default or common configuration
+or [configure](./how-to/configure.md) the node for more advanced use cases.
+
+[Connect](./how-to/connect.md) the nwaku node to other peers to start communicating.
+
+## 3. Interact
+
+A running nwaku node can be interacted with using the [REST API](../api/v2/rest-api.md).
+
+> **Note:** REST API functionality is in ALPHA and therefore it is disabled by default. To configure a nwaku node with this enabled, use the `--rest:true` CLI option.
+
+
+```bash
+curl http://localhost:8645/debug/v1/info -s | jq
+```
diff --git a/third-party/nwaku/docs/operators/quickstart.md b/third-party/nwaku/docs/operators/quickstart.md
new file mode 100644
index 0000000..47c6195
--- /dev/null
+++ b/third-party/nwaku/docs/operators/quickstart.md
@@ -0,0 +1,61 @@
+# Quickstart: running a nwaku node
+
+This guide helps you run a nwaku node with a typical configuration.
+It connects your node to the `waku.sandbox` fleet for bootstrapping
+and enables discovery v5 for continuous peer discovery.
+Only the [`relay`](https://rfc.vac.dev/spec/11/) protocol is enabled.
+For a more comprehensive overview,
+see our [step-by-step guide](./overview.md).
+
+## Option 1: run nwaku binary
+
+*Prerequisites are the usual developer tools,
+such as a C compiler, Make, Bash and Git.*
+
+```bash
+git clone --recurse-submodules https://github.com/waku-org/nwaku
+cd nwaku
+make wakunode2
+./build/wakunode2 \
+  --dns-discovery:true \
+  --dns-discovery-url:enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im \
+  --discv5-discovery \
+  --nat=extip:[yourpublicip] # or, if you are behind a nat: --nat=any
+```
+
+## Option 2: run nwaku in a Docker container
+
+*Prerequisite is a [Docker installation](./docker-quickstart.md#prerequisites).*
+
+```bash
+docker run -i -t -p 60000:60000 -p 9000:9000/udp \
+  wakuorg/nwaku:v0.20.0 \ # or, the image:tag of your choice
+  --dns-discovery:true \
+  --dns-discovery-url:enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im \
+  --discv5-discovery \
+  --nat:extip:[yourpublicip] # or, if you are behind a nat: --nat=any
+```
+
+## Option 3: run nwaku with docker compose
+
+*Prerequisites: `docker` and `docker-compose`*.
+It allows you to run `nwaku` with `prometheus` and `grafana`, with an already provisioned dashboard, in a few simple steps.
+See [nwaku-compose](https://github.com/waku-org/nwaku-compose).
+
+```bash
+git clone https://github.com/waku-org/nwaku-compose
+cd nwaku-compose
+docker-compose up -d
+```
+
+Go to [http://localhost:3000/d/yns_4vFVk/nwaku-monitoring?orgId=1](http://localhost:3000/d/yns_4vFVk/nwaku-monitoring?orgId=1) and after a few seconds, your node metrics will be live there.
+As simple as that.
+ +## Tips and tricks + +To find the public IP of your host, +you can use + +```bash +dig TXT +short o-o.myaddr.l.google.com @ns1.google.com | awk -F'"' '{ print $2}' +``` diff --git a/third-party/nwaku/docs/tutorial/chat2.md b/third-party/nwaku/docs/tutorial/chat2.md new file mode 100644 index 0000000..f6489f9 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/chat2.md @@ -0,0 +1,212 @@ +# Using the `chat2` application + +## Background + +The `chat2` application is a basic command-line chat app using the [Waku v2 suite of protocols](https://rfc.vac.dev/). +It optionally connects to a [fleet of nodes](fleets.status.im) to provide end-to-end p2p chat capabilities. +Each fleet is a publicly accessible network of Waku v2 peers, providing a bootstrap connection point for new peers, historical message storage, etc. +The Waku team is currently using this application on the _sandbox_ fleet for internal testing. +For more information on the available fleets, see [`Connecting to a Waku v2 fleet`](#connecting-to-a-waku-v2-fleet). +If you want to try our protocols, or join the dogfooding fun, follow the instructions below. + +## Preparation + +Ensure you have cloned the `nim-waku` repository and installed all prerequisites as per [these instructions](https://github.com/status-im/nim-waku). + +Make the `chat2` target. + +``` +make chat2 +``` + +## Basic application usage + +To start the `chat2` application in its most basic form, run the following from the project directory + +``` +./build/chat2 +``` + +You should be prompted to provide a nickname for the chat session. + +``` +Choose a nickname >> +``` + +After entering a nickname, the app will randomly select and connect to a peer from the `sandbox` fleet. + +``` +No static peers configured. Choosing one at random from sandbox fleet... +``` + +It will then attempt to download historical messages from a random peer in the `sandbox` fleet. + +``` +Store enabled, but no store nodes configured. 
Choosing one at random from sandbox fleet... +``` + +Wait for the chat prompt (`>>`) and chat away! + +To gracefully exit the `chat2` application, use the `/exit` [in-chat option](#in-chat-options) + +``` +>> /exit +quitting... +``` + +## Retrieving historical messages + +The `chat2` application can retrieve historical chat messages from a node supporting and running the [Waku v2 store protocol](https://rfc.vac.dev/spec/13/), and will attempt to do so by default. +It's possible to query a *specific* store node by configuring its `multiaddr` as `storenode` when starting the app: + +``` +./build/chat2 --storenode:/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W +``` + +Alternatively, the `chat2` application will select a random `storenode` for you from the configured fleet (`sandbox` by default) if `storenode` is left unspecified. + +``` +./build/chat2 +``` + +To disable historical message retrieval, use the `--store:false` option: + +``` +./build/chat2 --store:false +``` + +## Specifying a static peer + +In order to connect to a *specific* node as [`relay`](https://rfc.vac.dev/spec/11/) peer, define that node's `multiaddr` as a `staticnode` when starting the app: + +``` +./build/chat2 --staticnode:/dns4/node-01.do-ams3.waku.test.status.im/tcp/30303/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W +``` + +This will bypass the random peer selection process and connect to the specified node. + +## Connecting to a Waku v2 fleet + +It is possible to specify a specific Waku v2 fleet to connect to when starting the app by using the `--fleet` option: + +``` +./build/chat2 --fleet:test +``` + +There are currently two fleets to select from, namely _sandbox_ (`waku.sandbox`) and _test_ (`waku.test`). +The `test` fleet is updated with each incremental change to the `nim-waku` codebase. +As a result it may have more advanced and experimental features, but will be less stable than `sandbox`. 
+The `sandbox` fleet is a deployed network of the latest released Waku v2 nodes. +If no `fleet` is specified, `chat2` will connect to the `sandbox` fleet by default. +To start `chat2` without connecting to a fleet, use the `--fleet:none` option _or_ [specify a static peer](#specifying-a-static-peer). + +## Specifying a content topic + +To publish chat messages on a specific [content topic](https://rfc.vac.dev/spec/14/#wakumessage), use the `--content-topic` option: + +``` +./build/chat2 --content-topic:/waku/2/my-content-topic/proto +``` + +> **NOTE:** Currently (2021/05/26) the content topic defaults to `/waku/2/huilong/proto` if left unspecified, where `huilong` is the name of our latest testnet. + +## In-chat options + +| Command | Effect | +| --- | --- | +| `/help` | displays available in-chat commands | +| `/connect` | interactively connect to a new peer | +| `/nick` | change nickname for current chat session | +| `/exit` | exits the current chat session | + +## `chat2` message protobuf format + +Each `chat2` message is encoded as follows + +```protobuf +message Chat2Message { + uint64 timestamp = 1; + string nick = 2; + bytes payload = 3; +} +``` + +where `timestamp` is the Unix timestamp of the message, `nick` is the relevant `chat2` user's selected nickname and `payload` is the actual chat message being sent. +The `payload` is the byte array representation of a UTF8 encoded string. + +# Bridge messages between `chat2` and matterbridge + +To facilitate `chat2` use in a variety of contexts, a `chat2bridge` can be deployed to bridge messages between `chat2` and any protocol supported by matterbridge. + +## Configure and run matterbridge + +1. Download and install [matterbridge](https://github.com/42wim/matterbridge) and configure an instance for the protocol(s) you want to bridge to. +Basic configuration instructions [here](https://github.com/42wim/matterbridge/wiki/How-to-create-your-config) +2. Configure the matterbridge API. 
+This is used by the `chat2bridge` to relay `chat2` messages to and from matterbridge. +Configuration instructions for the matterbridge API can be found [here](https://github.com/42wim/matterbridge/wiki/Api). +The full matterbridge API specification can be found [here](https://app.swaggerhub.com/apis-docs/matterbridge/matterbridge-api/0.1.0-oas3). +The template below shows an example of a `matterbridge.toml` configuration file for bridging `chat2` to Discord. +Follow the matterbridge [Discord instructions](https://github.com/42wim/matterbridge/wiki/Section-Discord-%28basic%29) to configure your own `Token` and `Server`. +```toml +[discord.mydiscord] + +# You can get your token by following the instructions on +# https://github.com/42wim/matterbridge/wiki/Discord-bot-setup. +# If you want roles/groups mentions to be shown with names instead of ID, +# you'll need to give your bot the "Manage Roles" permission. +Token="MTk4NjIyNDgzNDcdOTI1MjQ4.Cl2FMZ.ZnCjm1XVW7vRze4b7Cq4se7kKWs-abD" + +Server="myserver" # picked from guilds the bot is connected to + +RemoteNickFormat="{NICK}@chat2: " + +[api.myapi] +BindAddress="127.0.0.1:4242" +Buffer=1000 +RemoteNickFormat="{NICK}@{PROTOCOL}" + +[[gateway]] +name="gateway1" +enable=true + +[[gateway.inout]] +account="discord.mydiscord" +channel="general" + +[[gateway.inout]] +account="api.myapi" +channel="api" +``` +3. Run matterbridge using the configuration file created in the previous step. +Note the API listening address and port in the matterbridge logs (configured as the `BindAddress` in the previous step). +``` +./matterbridge -conf matterbridge.toml +``` +``` +[0000] INFO api: Listening on 127.0.0.1:4242 +``` +## Configure and run `chat2bridge` +1. From the `nim-waku` project directory, make the `chat2bridge` target +``` +make chat2bridge +``` +2. 
Run `chat2bridge` with the following configuration options: +``` +--mb-host-address Listening address of the Matterbridge host +--mb-host-port Listening port of the Matterbridge host +--mb-gateway Matterbridge gateway +``` +``` +./build/chat2bridge --mb-host-address=127.0.0.1 --mb-host-port=4242 --mb-gateway="gateway1" +``` +Note that `chat2bridge` encompasses a full `wakunode2` which can be configured with the normal configuration parameters. +For a full list of configuration options, run `--help`. +``` +./build/chat2bridge --help +``` +## Connect `chat2bridge` to a `chat2` network +1. To bridge messages on an existing `chat2` network, connect to any relay peer(s) in that network from `chat2bridge`. +This can be done by either specifying the peer(s) as a `--staticnode` when starting the `chat2bridge` or calling the [`post_waku_v2_admin_v1_peers`](https://rfc.vac.dev/spec/16/#post_waku_v2_admin_v1_peers) method on the JSON-RPC API. +Note that the latter requires the `chat2bridge` to be run with `--rpc=true` and `--rpc-admin=true`. +1. To bridge from a new `chat2` instance, simply specify the `chat2bridge` listening address as a `chat2` [static peer](#Specifying-a-static-peer). diff --git a/third-party/nwaku/docs/tutorial/db-migration.md b/third-party/nwaku/docs/tutorial/db-migration.md new file mode 100644 index 0000000..905d5d1 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/db-migration.md @@ -0,0 +1,61 @@ +# Database Migration +This tutorial explains the database migration process in nim-waku. + +# Contributors Guide +## Database Migration Flow +Nim-waku utilizes the built-in `user_version` variable that Sqlite provides for tracking the database versions. +The [user_version](https://github.com/waku-org/nwaku/blob/master/waku/waku_archive/driver/sqlite_driver/migrations.nim) MUST be bumped up for every update on the database e.g, table schema/title change. 
+Each update should be accompanied by a migration script to move the content of the old version of the database to the new version. +The script MUST be added to the respective folder as explained in [Migration Folder Structure](#migration-folder-structure) with the proper naming as given in [ Migration Script Naming](#migration-file-naming). + +The migration is invoked whenever the database `user_version` is behind the target [user_version](https://github.com/waku-org/nwaku/blob/master/waku/waku_archive/driver/sqlite_driver/migrations.nim) indicated in the nim-waku application. +The respective migration scripts located in the [migrations folder](https://github.com/waku-org/nwaku/tree/master/migrations) will be executed to upgrade the database from its old version to the target version. + +## Migration Folder Structure +The [migrations folder](https://github.com/waku-org/nwaku/tree/master/migrations) is structured as below. + +``` +migrations/ +├── message_store +│   ├── 00001_addMessageTable.up.sql +│   ├── 00002_addSenderTimeStamp.up.sql +│   ├── ... +└── peer_store + └── 00001_addPeerTable.up.sql +``` + + + +The migration scripts are managed in two separate folders `message_store` and `peer_store`. +The `message_store` folder contains the migration scripts related to the message store tables. Similarly, the `peer_store` folder contains the scripts relevant to the peer store tables. + + +## Migration File Naming +The files in [migrations folder](https://github.com/waku-org/nwaku/tree/master/migrations) MUST follow the following naming style in order to be properly included in the migration process. +Files with invalid naming will be eliminated from the migration process. + +`_..sql` + +- `version number`: This number should match the target value of `user_version`. +- `migration script description`: A short description of what the migration script does. +- `up|down`: One of the keywords of `up` or `down` should be selected. 
+ `up` stands for upgrade and `down` means downgrade. + +### Example +A migration file with the name `00002_addTableX.up.sql` should be interpreted as: +- `00002`: The targeted `user_version` number. +- `addTableX`: What the script does. +- `up`: This script `upgrade`s the database from `user_version = 00001` to the `user_version = 00002`. + +A downgrade migration file corresponding to the `00002_addTableX.up.sql` can be e.g., `00001_removeTableX.down.sql` and should be interpreted as: +- `00001`: The targeted `user_version` number. +- `removeTableX`: What the script does. +- `down`: This script `downgrade`s the database from `user_version = 00002` to the `user_version = 00001`. + +There can be more than one migration file for the same `user_version`. +The migration process will consider all such files while upgrading/downgrading the database. +Note that currently we **DO NOT** support **down migration**. + +# User Guide +Migrations work out of the box. +However, if you want to be extra sure, please take a backup of the SQLite database prior to upgrading your nim-waku version since we currently don't support downgrades of DB. diff --git a/third-party/nwaku/docs/tutorial/dingpu.md b/third-party/nwaku/docs/tutorial/dingpu.md new file mode 100644 index 0000000..0d21978 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/dingpu.md @@ -0,0 +1,44 @@ +# Dingpu testnet + +> TODO (2023-05-24): Deprecate or fix + +*NOTE: Some of these addresses might change. To get the latest, please see `curl -s https://fleets.status.im | jq '.fleets["waku.test"]'`* + +## Basic chat usage + +> If historical messaging is desired, the chat app requires that the remote peer specified in `storenode` option supports the WakuStore protocol. For the current cluster node deployed as part of Dingpu this is already the case. 
+ +Start two chat apps: + +``` +./build/chat2 --ports-shift:0 --storenode:/ip4/178.128.141.171/tcp/60000/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W --staticnode:/ip4/178.128.141.171/tcp/60000/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W +./build/chat2 --ports-shift:1 --storenode:/ip4/178.128.141.171/tcp/60000/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W --staticnode:/ip4/178.128.141.171/tcp/60000/p2p/16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W +``` + +By specifying `staticnode` it connects to that node subscribes to the `waku` topic. This ensures messages are relayed properly. + +Then type messages to publish. + +## Interactively add a node + +There is also an interactive mode. Type `/connect` then paste address of other node. However, this currently has some timing issues with mesh not being updated, so it is advised not to use this until this has been addressed. See https://github.com/waku-org/nwaku/issues/231 + +## Dingpu cluster node + +> TODO (2024-03-11): Fix node multiaddr + +## Run a node + +To just run a node and not interact on the chat it is enough to run `wakunode2`: +``` +./build/wakunode2 --staticnode: +``` + +You can also run the `wakubridge` process, which runs both a Waku v1 and Waku v2 +node. Currently, it has the same effect as running a `wakunode` and `wakunode2` +process separately, but bridging functionality will be added later to this +application. + +``` +./build/wakubridge --staticnodev2: --fleetv1:test +``` diff --git a/third-party/nwaku/docs/tutorial/dns-disc.md b/third-party/nwaku/docs/tutorial/dns-disc.md new file mode 100644 index 0000000..c9cc24e --- /dev/null +++ b/third-party/nwaku/docs/tutorial/dns-disc.md @@ -0,0 +1,69 @@ +# Waku v2 DNS-based Discovery Basic Tutorial + +## Background + +Waku v2 DNS discovery is a method by which a node may find other peers by retrieving an encoded node list via DNS. 
+To achieve this, Waku v2 uses a Nim implementation of [EIP-1459](https://eips.ethereum.org/EIPS/eip-1459). +According to EIP-1459, the peer list is encoded as a [Merkle tree](https://www.wikiwand.com/en/Merkle_tree) of TXT records. +Connectivity information for each peer, including [wire address](https://docs.libp2p.io/concepts/addressing/) and [peer ID](https://docs.libp2p.io/concepts/peer-id/), is encapsulated in signed [Ethereum Node Records (ENR)](https://eips.ethereum.org/EIPS/eip-778). + +## Mapping ENR to `multiaddr` + +EIP-1459 DNS discovery is a scheme for retrieving an ENR list via DNS. +Waku v2 addressing is based on [libp2p addressing](https://docs.libp2p.io/concepts/addressing/), which uses a `multiaddr` scheme. + +The ENR is constructed according to [EIP-778](https://eips.ethereum.org/EIPS/eip-778). +It maps to the equivalent `libp2p` `multiaddr` for the Waku v2 node as follows: + +| ENR Key | ENR Value | +|-------------|------------------------------------------------------------------------| +| `id` | name of identity scheme. For Waku v2 generally `v4` | +| `secp256k1` | the compressed `secp256k1` public key belonging to the libp2p peer ID as per [specification](https://github.com/libp2p/specs/blob/master/peer-ids/peer-ids.md#keys). This is used to construct the `/p2p/` portion of the node's `multiaddr` | +| ip | IPv4 address. Corresponds to `/ip4/` portion of the node's `multiaddr` | +| tcp | TCP port. Corresponds to `/tcp/` portion of the node's `multiaddr` | + +The `nim-waku` implementation with integrated DNS discovery already takes care of the ENR to `multiaddr` conversion. + +## Usage + +Ensure you have built [`wakunode2`](https://github.com/status-im/nim-waku) or [`chat2`](./chat2.md) as per the linked instructions. + +The following command line options are available for both `wakunode2` and `chat2`.
+ +``` +--dns-discovery Enable DNS Discovery +--dns-discovery-url URL for DNS node list in format 'enrtree://<key>@<fqdn>' +``` + +- `--dns-discovery` is used to enable DNS discovery on the node. Waku DNS discovery is disabled by default. +- `--dns-discovery-url` is mandatory if DNS discovery is enabled. It contains the URL for the node list. The URL must be in the format `enrtree://<key>@<fqdn>` where `<fqdn>` is the fully qualified domain name and `<key>` is the base32 encoding of the compressed 32-byte public key that signed the list at that location. See [EIP-1459](https://eips.ethereum.org/EIPS/eip-1459#specification) or the example below to illustrate. + +A node will attempt connection to all discovered nodes. + +## Example for `waku.test` fleet + +To illustrate the above and prove the concept, +a list of `waku.test` fleet nodes was encoded according to EIP-1459 and deployed to `test.waku.nodes.status.im`. +The list was signed by the public key `AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI`. +The complete URL for DNS discovery is therefore: `enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im`. + +To run a `wakunode2` with DNS-based discovery of `waku.test` nodes: + +``` +./build/wakunode2 --dns-discovery:true --dns-discovery-url:enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im +``` + +Similarly, for `chat2`: + +``` +./build/chat2 --dns-discovery:true --dns-discovery-url:enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im +``` + +The node will discover and attempt connection to all `waku.test` nodes during setup procedures.
+ +To use specific DNS name servers, one or more `--dns-addrs-name-server` arguments can be added: + +``` +./build/wakunode2 --dns-discovery:true --dns-discovery-url:enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im --dns-discovery-name-server:8.8.8.8 --dns-addrs-name-server:8.8.4.4 +``` diff --git a/third-party/nwaku/docs/tutorial/filter.md b/third-party/nwaku/docs/tutorial/filter.md new file mode 100644 index 0000000..2f1a423 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/filter.md @@ -0,0 +1,35 @@ +# Running Filter Protocol + +> TODO (2023-05-24): Deprecate or fix + +## How to + +Build: + +``` +# make wakunode2 is run as part of scripts2 target +make scripts2 +``` + +Run two nodes and connect them: + +``` +# Starts listening on 60000 with RPC server on 8545. +# Note the "listening on address" in logs. +./build/wakunode2 --ports-shift:0 + +# Run another node with staticnode argument +./build/wakunode2 --ports-shift:1 --staticnode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp --filternode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp +``` + +You should see your nodes connecting. + +Do basic RPC calls: + +``` +./build/rpc_subscribe 8545 +./build/rpc_subscribe_filter 8546 # enter your content topic; default is "1" +./build/rpc_publish 8545 # enter your message in STDIN +``` + +You should see the other node receive something. diff --git a/third-party/nwaku/docs/tutorial/heaptrack.md b/third-party/nwaku/docs/tutorial/heaptrack.md new file mode 100644 index 0000000..d651489 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/heaptrack.md @@ -0,0 +1,114 @@ +# Heaptrack in Nim Waku + +## Background +Given that RAM is a limited resource, it is crucial to have good insight into what is going on with the memory used by a particular process. + +## Heaptrack +Heaptrack is a tool that allows you to generate memory usage reports.
+It operates in two modes: +- preload: the tracking is made from the beginning. +- inject: the tracking starts at a certain time by attaching to a running process. + +### Building Heaptrack (tried on Ubuntu) +- `git clone git@github.com:KDE/heaptrack.git` +- `mkdir build; cd build` +- `cmake ..` + At this point, make sure the cmake doesn't complain about any missing dependency. + Among others, the most tricky deps are obtained by the next commands: + - `sudo apt install libkf5i18n-dev` + - `sudo apt install libkf5itemmodels-dev` + - `sudo apt install libkf5threadweaver-dev` + - `sudo apt install libkf5service-dev` + - `sudo apt install libkf5completion-dev` + - `sudo apt install libkf5itemviews-dev` + - `sudo apt install libkf5jobwidgets-dev` + - `sudo apt install libkf5solid-dev` + - `sudo apt install libkf5coreaddons-dev` + - `sudo apt install libkf5auth-dev` + - `sudo apt install libkf5codecs-dev` + - `sudo apt install libkf5configwidgets-dev` + - `sudo apt install libkf5xmlgui-dev` + - `sudo apt install libkf5widgetsaddons-dev` + - `sudo apt install libqt5gui5` + - `sudo apt install libkf5kio-dev` + - `sudo apt install libkf5iconthemes-dev` +- `make` +- On completion, the `bin/heaptrack_gui` and `bin/heaptrack` binaries will be generated. + - heaptrack: needed to generate the report. + - heaptrack_gui: needed to analyse the report. + +## Heaptrack & Nwaku +nwaku supports heaptrack, but it needs a special compilation setting. + +### Patch Nim compiler to register allocations on Heaptrack + +Currently, we rely on the official Nim repository. So we need to patch the Nim compiler to register allocations and deallocations on Heaptrack. +For Nim 2.2.4 version, we created a patch that can be applied as: +```bash +git apply --directory=vendor/nimbus-build-system/vendor/Nim docs/tutorial/nim.2.2.4_heaptracker_addon.patch +git add . 
+git commit -m "Add heaptrack support to Nim compiler - temporary patch" +``` + +> Until heaptrack support is available in official Nim, it is important to keep the patch in the `nimbus-build-system` repository. +> The commit ensures that `make update` will not override the patch unintentionally. + +> We are planning to make it available through an official PR for Nim. + +When the patch is applied, we can build wakunode2 with heaptrack support. + +### Build nwaku with heaptrack support + +`make -j HEAPTRACKER=1 wakunode2` + +### Create nwaku memory report with heaptrack + +nwaku only works correctly with heaptrack operating in 'preload' mode, i.e. the memory is tracked from the beginning. +To achieve this, the `heaptrack` binary should be prepended to the usual `wakunode`: + +e.g.: +`/build/bin/heaptrack /build/wakunode2 ...` + +While the above is running, a file with the following format is being populated with allocs/deallocs stats in the current folder: + + ``` + heaptrack.<binary name>.<pid>.gz +e.g.: + heaptrack.wakunode2.23125.gz + ``` + +### Build a Docker image with Heaptrack + Nim Waku +Having Docker properly installed on your machine, do the following: + +- cd to the `nwaku` root folder. +- ```sudo make docker-image DOCKER_IMAGE_NAME=docker_repo:docker_tag HEAPTRACKER=1``` +- alternatively you can use the `docker-quick-image` target, this is faster but creates an Ubuntu-based image, so your local build environment must match. + +That will create a Docker image with both nwaku and heaptrack. The container's entry point is `ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"]`, so the memory report starts being generated from the beginning. + +#### Notice for using heaptrack supporting image with `docker compose` + +Take care that wakunode2 should be started as +``` +exec /heaptrack/build/bin/heaptrack /usr/bin/wakunode\ +... all the arguments you want to pass to wakunode ...
+``` + +### Extract report file from a running Docker container +Bear in mind that if you restart the container, the previous report will get lost. Therefore, before restarting, it is important to extract it from the container once you consider it has enough information. + +While the Docker container is running, run the next command: +``` +sudo docker cp 768e7de52d3c:/heaptrack.wakunode.1.gz . +``` +(replace the 768.. with your docker container id.). + +### Analyse a heaptrack report +``` +/bin/heaptrack_gui heaptrack.wakunode.1.gz +``` + +You should be able to see memory allocations. It is important +to see a legend like shown below: + +![Example of a good heaptrack report](imgs/good_heaptrack_report_example.png) diff --git a/third-party/nwaku/docs/tutorial/imgs/good_heaptrack_report_example.png b/third-party/nwaku/docs/tutorial/imgs/good_heaptrack_report_example.png new file mode 100644 index 0000000..088ef51 Binary files /dev/null and b/third-party/nwaku/docs/tutorial/imgs/good_heaptrack_report_example.png differ diff --git a/third-party/nwaku/docs/tutorial/imgs/infura-dashboard-mainnet.png b/third-party/nwaku/docs/tutorial/imgs/infura-dashboard-mainnet.png new file mode 100644 index 0000000..c5cf2df Binary files /dev/null and b/third-party/nwaku/docs/tutorial/imgs/infura-dashboard-mainnet.png differ diff --git a/third-party/nwaku/docs/tutorial/imgs/infura-dashboard.png b/third-party/nwaku/docs/tutorial/imgs/infura-dashboard.png new file mode 100644 index 0000000..5b2a846 Binary files /dev/null and b/third-party/nwaku/docs/tutorial/imgs/infura-dashboard.png differ diff --git a/third-party/nwaku/docs/tutorial/imgs/infura-endpoints.png b/third-party/nwaku/docs/tutorial/imgs/infura-endpoints.png new file mode 100644 index 0000000..86ce254 Binary files /dev/null and b/third-party/nwaku/docs/tutorial/imgs/infura-endpoints.png differ diff --git a/third-party/nwaku/docs/tutorial/imgs/infura-key.png b/third-party/nwaku/docs/tutorial/imgs/infura-key.png new file mode 
100644 index 0000000..020ca0c Binary files /dev/null and b/third-party/nwaku/docs/tutorial/imgs/infura-key.png differ diff --git a/third-party/nwaku/docs/tutorial/imgs/rln-relay-chat2-overview.png b/third-party/nwaku/docs/tutorial/imgs/rln-relay-chat2-overview.png new file mode 100644 index 0000000..5208963 Binary files /dev/null and b/third-party/nwaku/docs/tutorial/imgs/rln-relay-chat2-overview.png differ diff --git a/third-party/nwaku/docs/tutorial/nangang.md b/third-party/nwaku/docs/tutorial/nangang.md new file mode 100644 index 0000000..1c40701 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/nangang.md @@ -0,0 +1,42 @@ +# Nangang Test + +> TODO (2023-05-24): Deprecate or fix + +Nangang is the first internal testnet. See +https://github.com/vacp2p/research/issues/43 for more. + +## How to + +Build: + +``` +# make wakunode2 is run as part of scripts2 target +make scripts2 +``` + +Run two nodes and connect them: + +``` +# Starts listening on 60000 with RPC server on 8545. +# Note the "listening on address" in logs. +./build/wakunode2 --ports-shift:0 + +# Run another node with staticnode argument +./build/wakunode2 --ports-shift:1 --staticnode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp +``` + +You should see your nodes connecting. + +Do basic RPC calls: + +``` +./build/rpc_subscribe 8545 +./build/rpc_subscribe 8546 +./build/rpc_publish 8545 # enter your message in STDIN +``` + +You should see other node receive something. 
+ +## Nangang cluster node + +> TODO (2024-03-11): Fix node multiaddr diff --git a/third-party/nwaku/docs/tutorial/nim.2.2.4_heaptracker_addon.patch b/third-party/nwaku/docs/tutorial/nim.2.2.4_heaptracker_addon.patch new file mode 100644 index 0000000..3cd3384 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/nim.2.2.4_heaptracker_addon.patch @@ -0,0 +1,44 @@ +diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim +index e2dd43075..7f8c8e04e 100644 +--- a/lib/system/alloc.nim ++++ b/lib/system/alloc.nim +@@ -1,4 +1,4 @@ +-# ++#!fmt: off + # + # Nim's Runtime Library + # (c) Copyright 2012 Andreas Rumpf +@@ -862,6 +862,15 @@ when defined(gcDestructors): + dec maxIters + if it == nil: break + ++when defined(heaptracker): ++ const heaptrackLib = ++ when defined(heaptracker_inject): ++ "libheaptrack_inject.so" ++ else: ++ "libheaptrack_preload.so" ++ proc heaptrack_malloc(a: pointer, size: int) {.cdecl, importc, dynlib: heaptrackLib.} ++ proc heaptrack_free(a: pointer) {.cdecl, importc, dynlib: heaptrackLib.} ++ + proc rawAlloc(a: var MemRegion, requestedSize: int): pointer = + when defined(nimTypeNames): + inc(a.allocCounter) +@@ -984,6 +993,8 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer = + sysAssert(isAccessible(a, result), "rawAlloc 14") + sysAssert(allocInv(a), "rawAlloc: end") + when logAlloc: cprintf("var pointer_%p = alloc(%ld) # %p\n", result, requestedSize, addr a) ++ when defined(heaptracker): ++ heaptrack_malloc(result, requestedSize) + + proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer = + result = rawAlloc(a, requestedSize) +@@ -992,6 +1003,8 @@ proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer = + proc rawDealloc(a: var MemRegion, p: pointer) = + when defined(nimTypeNames): + inc(a.deallocCounter) ++ when defined(heaptracker): ++ heaptrack_free(p) + #sysAssert(isAllocatedPtr(a, p), "rawDealloc: no allocated pointer") + sysAssert(allocInv(a), "rawDealloc: begin") + var c = pageAddr(p) diff --git 
a/third-party/nwaku/docs/tutorial/onchain-rln-relay-chat2.md b/third-party/nwaku/docs/tutorial/onchain-rln-relay-chat2.md new file mode 100644 index 0000000..ac2bdc3 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/onchain-rln-relay-chat2.md @@ -0,0 +1,232 @@ +# Spam-protected chat2 application with on-chain group management + +This document is a tutorial on how to run the chat2 application in the spam-protected mode using the Waku-RLN-Relay protocol and with dynamic/on-chain group management. +In the on-chain/dynamic group management, the state of the group members i.e., their identity commitment keys is moderated via a membership smart contract deployed on the Linea Sepolia network which is one of the test-nets. +Members can be dynamically added to the group and the group size can grow up to 2^20 members. +This differs from the prior test scenarios in which the RLN group was static and the set of members' keys was hardcoded and fixed. + + +## Prerequisites +To complete this tutorial, you will need + +1. An rln keystore file with credentials to the rln membership smart contract you wish to use. You may obtain this by registering to the smart contract and generating a keystore, or by using the [rln-keystore-generator](./rln-keystore-generator.md) which does that for you. + + +## Overview +Figure 1 provides an overview of the interaction of the chat2 clients with the test fleets and the membership contract. +At a high level, when a chat2 client is run with Waku-RLN-Relay mounted in on-chain mode. + +Under the hood, the chat2 client constantly listens to the membership contract and keeps itself updated with the latest state of the group. + +In the following test setting, the chat2 clients are to be connected to the Waku test fleets as their first hop. 
+The test fleets will act as routers and are also set to run Waku-RLN-Relay over the same pubsub topic and content topic as chat2 clients i.e., the default pubsub topic of `/waku/2/rs/0/0` and the content topic of `/toy-chat/3/mingde/proto`. +Spam messages published on the said combination of topics will be caught by the test fleet nodes and will not be routed. +Note that spam protection does not rely on the presence of the test fleets. +In fact, all the chat2 clients are also capable of catching and dropping spam messages if they receive any. +You can test it by connecting two chat2 clients (running Waku-RLN-Relay) directly to each other and see if they can spot each other's spam activities. + + ![](./imgs/rln-relay-chat2-overview.png) + Figure 1. + +# Set up +## Build chat2 +First, build chat2 + +```bash +make chat2 +``` + +## Set up a chat2 client + +Run the following command to set up your chat2 client. + +```bash +./build/chat2 --fleet:test \ +--content-topic:/toy-chat/3/mingde/proto \ +--rln-relay:true \ +--rln-relay-dynamic:true \ +--rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \ +--rln-relay-cred-path:xxx/xx/rlnKeystore.json \ +--rln-relay-cred-password:xxxx \ +--rln-relay-eth-client-address:xxxx \ +--ports-shift:1 +``` + +In this command +- the `--fleet:test` indicates that the chat2 app gets connected to the test fleets. +- the `toy-chat/3/mingde/proto` passed to the `content-topic` option indicates the content topic on which the chat2 application is going to run. +- the `rln-relay` flag is set to `true` to enable the Waku-RLN-Relay protocol for spam protection. +- the `--rln-relay-dynamic` flag is set to `true` to enable the on-chain mode of Waku-RLN-Relay protocol with dynamic group management. +- the `--rln-relay-eth-contract-address` option gets the address of the membership contract. + The current address of the contract is `0xB9cd878C90E49F797B4431fBF4fb333108CB90e6`. 
+ You may check the state of the contract on the [Linea Sepolia testnet](https://sepolia.lineascan.build/address/0xB9cd878C90E49F797B4431fBF4fb333108CB90e6). +- the `--rln-relay-cred-path` option denotes the path to the keystore file described above +- the `--rln-relay-cred-password` option denotes the password to the keystore +- the `rln-relay-eth-client-address` is the WebSocket address of the hosted node on the Linea Sepolia testnet. + You need to replace the `xxxx` with the actual node's address. + +For `rln-relay-eth-client-address`, if you do not know how to obtain it, you may use the following tutorial on the [prerequisites of running on-chain spam-protected chat2](./pre-requisites-of-running-on-chain-spam-protected-chat2.md). + +You may set up more than one chat client, +just make sure that you increment the `--ports-shift` value for each new client you set up e.g., `--ports-shift=2`. + +Once you run the command, you are asked to choose your nickname: +``` +Choose a nickname >> Alice +``` + +then you will see a couple of other messages related to setting up the connections of your chat app, +the content may differ on your screen though: +``` +Connecting to test fleet using DNS discovery... +Discovered and connecting to @[16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp, 16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W, 16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG] +Listening on + /ip4/75.157.120.249/tcp/60001/p2p/16Uiu2HAmQXuZmbjFWGagthwVsPFrc5ZrZ9c53qdUA45TWoZaokQn +Store enabled, but no store nodes configured. Choosing one at random from discovered peers +Connecting to storenode: 16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp +``` +You will also see some historical messages being fetched, again the content may be different on your end: + +``` + Bob: hi + Bob: hi + Alice: spam1 + Alice: hiiii + Alice: hello + Bob: hi + Bob: hi + Alice: hi + b: hi + h: hi +... 
+``` + +Next, you see the following message: +``` +rln-relay preparation is in progress ... +``` +Also, the registered RLN identity key, the RLN identity commitment key, and the index of the registered credential will be displayed as given below. +Note that in the figure, the RLN identity key is not shown for security reasons (replaced by a string of `x`s). +But, you will see your RLN identity key. + +``` +your membership index is: xx +your RLN identity key is: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +your RLN identity commitment key is: 6c6598126ba10d1b70100893b76d7f8d7343eeb8f5ecfd48371b421c5aa6f012 +``` + +Finally, the chat prompt `>>` will appear which means your chat2 client is ready. +Once you type a chat line and hit enter, you will see a message that indicates the epoch at which the message is sent e.g., + +``` +>> Hi +--rln epoch: 165886530 + Alice: Hi +``` +The numerical value `165886530` indicates the epoch of the message `Hi`. +You will see a different value than `165886530` on your screen. +If two messages sent by the same chat2 client happen to have the same RLN epoch value, then one of them will be detected as spam and won't be routed (by test fleets in this test setting). +At the time of this tutorial, the epoch duration is set to `10` seconds. +You can inspect the current epoch value by checking the following [constant variable](https://github.com/waku-org/nwaku/blob/44c543129ee4149255a00a05f1e7d21f8fa28626/waku/v2/waku_rln_relay/constants.nim#L51) in the nim-waku codebase. +Thus, if you send two messages less than `10` seconds apart, they are likely to get the same `rln epoch` values. + +After sending a chat message, you may experience some delay before the next chat prompt appears. +The reason is that under the hood a zero-knowledge proof is being generated and attached to your message. + + +Try to spam the network by violating the message rate limit i.e., +sending more than one message per epoch. 
+Your messages will be routed via test fleets that are running in spam-protected mode over the same content topic i.e., `/toy-chat/3/mingde/proto` as your chat client. +Your spam activity will be detected by them and your message will not reach the rest of the chat clients. +You can check this by running a second chat user and verifying that spam messages are not displayed as they are filtered by the test fleets. +Furthermore, the chat client will prompt you with the following warning message indicating that the message rate is being violated: +``` +⚠️ message rate violation! you are spamming the network! +``` +A sample test scenario is illustrated in the [Sample test output section](#sample-test-output). + +Once you are done with the test, make sure you close all the chat2 clients by typing the `/exit` command. +``` +>> /exit +quitting... +``` + + +# Sample test output +In this section, a sample test of running two chat clients is provided. +Note that the value used for `rln-relay-eth-client-address` in the following code snippets is junk and not valid. + +The two chat clients namely `Alice` and `Bob` are connected to the test fleets. +`Alice` sends 4 messages i.e., `message1`, `message2`, `message3`, and `message4`. +However, only three of them reach `Bob`. +This is because the two messages `message2` and `message3` have identical RLN epoch values, so, one of them gets discarded by the test fleets as a spam message. +The test fleets do not relay `message3` further, hence `Bob` never receives it. +You can check this fact by looking at `Bob`'s console, where `message3` is missing. 
+ + +**Alice** +```bash +./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=1 +``` + +``` +Choose a nickname >> Alice +Welcome, Alice! +Connecting to test fleet using DNS discovery... +Discovered and connecting to @[16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp, 16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W, 16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG] +Listening on + /ip4/75.157.120.249/tcp/60001/p2p/16Uiu2HAmH7XbkcdbA1CCs91r93HuwZHSdXppCNvJTDVvgGhuxyuG +Store enabled, but no store nodes configured. Choosing one at random from discovered peers +Connecting to storenode: 16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp + Bob: hi + Bob: hi + Alice: spam1 + Alice: hiiii + Alice: hello + Bob: hi + Bob: hi + Alice: hi + b: hi + h: hi +rln-relay preparation is in progress ... +your membership index is: xx +your rln identity key is: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +your rln identity commitment key is: bd093cbf14fb933d53f596c33f98b3df83b7e9f7a1906cf4355fac712077cb28 +>> message1 +--rln epoch: 165886591 + Alice: message1 +>> message2 +--rln epoch: 165886592 + Alice: message2 +>> message3 +--rln epoch: 165886592 ⚠️ message rate violation! you are spamming the network! 
+ Alice: message3 +>> message4 +--rln epoch: 165886593 + Alice: message4 +>> +``` + +**Bob** +```bash +./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-index:1 --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=2 +``` + +``` +Choose a nickname >> Bob +Welcome, Bob! +Connecting to test fleet using DNS discovery... +Discovered and connecting to @[16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp, 16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W, 16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG] +Listening on + /ip4/75.157.120.249/tcp/60002/p2p/16Uiu2HAmE7fPUWGJ7UFJ3p2a3RNiEtEvAWhpfUStcCDmVGhm4h4Z +Store enabled, but no store nodes configured. Choosing one at random from discovered peers +Connecting to storenode: 16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp +rln-relay preparation is in progress ... +your membership index is: xx +your rln identity key is: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +your rln identity commitment key is: d4961a7681521730bc7f9ade185c632b94b70624b2e87e21a97c07b83353f306 +>> Alice: message1 +>> Alice: message2 +>> Alice: message4 +>> +``` diff --git a/third-party/nwaku/docs/tutorial/pre-requisites-of-running-on-chain-spam-protected-chat2.md b/third-party/nwaku/docs/tutorial/pre-requisites-of-running-on-chain-spam-protected-chat2.md new file mode 100644 index 0000000..f1757ad --- /dev/null +++ b/third-party/nwaku/docs/tutorial/pre-requisites-of-running-on-chain-spam-protected-chat2.md @@ -0,0 +1,104 @@ +In this tutorial you will learn how to: +1. Create a Sepolia Ethereum account and obtain its private key. +2. Obtain Sepolia ETH from faucet. +3. Access a node on the Sepolia testnet using Infura. + +## 1. 
Create a Sepolia Ethereum account and obtain its private key + +> _**WARNING:**_ The private key is used elsewhere by Waku RLN registration tools to assist with membership registration in the Sepolia test network. +> We strongly recommend that you create an account only for this purpose. +> NEVER expose a private key that controls any valuable assets or funds. + +1. Download and install Metamask. [https://metamask.io/download/](https://metamask.io/download/) + If you already have Metamask installed, go to step 3. + If you encounter any issues during the Metamask setup process, please refer to the [official Metamask support page](https://support.metamask.io/hc/en-us). +2. Create a new wallet and save your secret recovery phrase. + + ![](https://i.imgur.com/HEOI0kp.jpg) + +3. Login to Metamask. + + ![](https://i.imgur.com/zFduIV8.jpg) + +4. By default, Metamask connects to the Ethereum Mainnet (dropdown menu in the top right corner). + + ![](https://i.imgur.com/gk3TWUd.jpg) + + To publish messages to the Waku Network, you need to connect to the Sepolia test network. +5. Switch to the Sepolia test network by selecting it from the dropdown menu. Ensure "Show test networks" is enabled. + + ![image](https://github.com/waku-org/nwaku/assets/68783915/670778eb-8bf0-42a6-8dd7-1dedfabeeb37) + + The same account can be used with different networks. Note that the ETH balance is different for each network (each has its own native token). + + ![image](https://github.com/waku-org/nwaku/assets/68783915/0a5aa3a7-359c-4f4b-bd12-bad7c4844b34) + +6. To view the private key for this account, click on the three dots next to the account name and select "Account Details". + + ![image](https://github.com/waku-org/nwaku/assets/68783915/83fffa23-4a3b-46f9-a492-9748bfd47cff) + + Select "Show Private Key". 
+ + ![image](https://github.com/waku-org/nwaku/assets/68783915/3a513389-2df1-4e32-86da-a1794126cdac) + + Enter your Metamask password and click "Confirm" + + ![image](https://github.com/waku-org/nwaku/assets/68783915/ffbac631-b933-4292-a2c6-dc445bff153c) + + You will be shown the private key. + +## 2. Obtain Sepolia ETH from faucet + +Sepolia ETH can be obtained from different faucets. +Three popular examples include: + + 1. [sepoliafaucet.com](https://sepoliafaucet.com/) (requires an Alchemy account) + 2. [Infura Sepolia faucet](https://www.infura.io/faucet/sepolia) (requires an Infura account) + 3. [Sepolia POW faucet](https://sepolia-faucet.pk910.de/) + +> _**NOTE:**_ This list is provided for convenience. We do not necessarily recommend or guarantee the security of any of these options. + +Many faucets limit the amount of Sepolia ETH you can obtain per day. +We include instructions for [sepolia-faucet.pk910.de](https://sepolia-faucet.pk910.de/) as an example: + +1. Enter your Sepolia Ethereum account public address, solve the Captcha and start mining. + + ![image](https://github.com/waku-org/nwaku/assets/68783915/8bf2eece-956c-4449-ac4c-a7b9f4641c99) + +2. Keep the browser tab open for a while. You can see the estimated Sepolia ETH mined per hour. + + ![image](https://github.com/waku-org/nwaku/assets/68783915/fac1c6cb-b72f-47b1-a358-4ce41224a688) + + Each session is limited to a few hours. +3. When you've mined enough Sepolia ETH (minimum of 0.05 Sepolia ETH), click on "Stop Mining" and claim your reward. + + ![image](https://github.com/waku-org/nwaku/assets/68783915/9ace2824-9030-4507-9b5f-50354bb99127) + +## 3. Access a node on the Sepolia testnet using Infura + +> _**NOTE:**_ Infura provides a simple way of setting up endpoints for interaction with the Ethereum chain and the Waku RLN smart contract without having to run a dedicated Ethereum node. +> Setting up Infura is not mandatory. 
Operators concerned with the centralized aspect introduced by Infura should use their own node. + +1. Sign up for Infura if you do not have an account already. [https://infura.io/register](https://infura.io/register) + + ![](https://i.imgur.com/SyLaG6s.jpg) + + Follow the instructions to register and verify the account. + +2. An API Key named "My First Key" should be auto-generated. Click on it, otherwise click on the "Create New API Key" button. + + ![image](imgs/infura-key.png) + + +3. You will be presented with a dashboard for your new key. Make sure to have Ethereum Sepolia's checkbox selected in the Networks section. + + ![image](imgs/infura-dashboard.png) + + +4. Select the "Sepolia" endpoint in the Ethereum menu. + + ![image](imgs/infura-endpoints.png) + + Both Https and WebSockets endpoints are available. Waku requires the Https endpoint. + +5. Copy the address (starting with `https://sepolia.infura`) as needed when setting up your Waku node. diff --git a/third-party/nwaku/docs/tutorial/rln-chat-cross-client.md b/third-party/nwaku/docs/tutorial/rln-chat-cross-client.md new file mode 100644 index 0000000..837bf89 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/rln-chat-cross-client.md @@ -0,0 +1,33 @@ +# Waku-RLN-Relay Testnet2: Cross-Client + +In this tutorial, the aim is to test the interoperability of the 3 available Waku v2 clients namely, Nim, Go, and JS over the Waku network in the spam-protected mode. +Spam protection is done by rate-limiting each message publisher. +At the time of this tutorial, the messaging rate is set to `1` per Epoch where Epoch duration is set to `10` seconds. +You will find more about the details of spam protection in the chat clients tutorial provided below. +Messaging rate/spam protection is enabled through [Waku-RLN-Relay protocol](https://rfc.vac.dev/spec/17/) that is mounted on the routing hops. 
+For ease of demonstration, we make use of Nim-chat, Go-chat, and JS-chat applications that are developed on top of their respective Waku v2 clients. + +You need to set up a chat application in spam-protected mode and then start messaging with it. +As for the setup, please follow the tutorials below: +- [Nim-chat](./onchain-rln-relay-chat2.md) +- [Go-chat](https://github.com/waku-org/go-waku/blob/master/docs/tutorials/rln.md) +- [JS-chat](https://examples.waku.org/rln-js/) + +Once you set up your chat client, it will be connected to the Waku v2 test fleets as its first hop. +Messages generated by the chat client are set to be published on a specific combination of pubsub and content topic i.e., the default pubsub topic of `/waku/2/rs/0/0` and the content topic of `/toy-chat/3/mingde/proto`. +The test fleets also run Waku-RLN-Relay over the same pubsub topic and content topic. +Test fleets act as routers and enforce the message rate limit. +As such, any spam messages published by a chat client on the said combination of topics will be caught by the Waku v2 test fleet nodes and will not be routed. +You may also run multiple chat instances from the same or different client implementations to better observe the spam protection done by the Waku v2 test fleets. +Note that spam protection does not rely on the presence of the test fleets. +In fact, all the chat clients (except js-chat as it is in progress) are also capable of catching and dropping spam messages if they receive any. +You can test it by connecting two chat clients (running Waku-RLN-Relay) directly to each other and see if they can spot each other's spam activities. + +Note: JS-chat will use the [WAKU2-LIGHTPUSH protocol](https://rfc.vac.dev/spec/19/) to push its messages to the Waku v2 test fleets. +Waku v2 test fleets will act according to the WAKU2-LIGHTPUSH specifications and push that message to the network without any further verification. 
+That is, they do not enforce spam protection in that specific protocol but rather act merely as a message publisher (this behavior may change in the future though). +As such, you can expect to receive spam messages published by the JS-chat clients from other connecting chat clients i.e., Go-chat and Nim-chat. +However, you will see that such messages will be immediately identified as spam on those clients and a proper message will be displayed on the console. + + +You can also find a recorded demo of this testnet in the following [video](https://drive.proton.me/urls/EC4G8SY2J8#ie92Wtje1f4O). \ No newline at end of file diff --git a/third-party/nwaku/docs/tutorial/rln-chat2-live-testnet.md b/third-party/nwaku/docs/tutorial/rln-chat2-live-testnet.md new file mode 100644 index 0000000..c874294 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/rln-chat2-live-testnet.md @@ -0,0 +1,125 @@ +# Communicating with waku2 test fleets using chat2 application in spam-protected mode + +This document is a tutorial on how to run chat2 in spam-protected/rate-limited mode using the waku-RLN-Relay protocol on a designated content topic `/toy-chat/3/mingde/proto`. +You will connect your chat2 client to waku2 test fleets. +Note that test fleets will not filter spam messages, they merely route messages. +Spam detection takes place at the chat2 users end. +In this setting, you should try to spam the network by violating the message rate limit i.e., +sending more than one message per epoch. +At the time of this tutorial, the epoch duration is set to `10` seconds. +You can inspect the current epoch value by checking the following [constant variable](https://github.com/status-im/nim-waku/blob/21cac6d491a6d995a7a8ba84c85fecc7817b3d8b/waku/v2/protocol/waku_rln_relay/constants.nim#L245) in the nim-waku codebase. 
+Your messages will be routed via test fleets and will arrive at other live chat2 clients that are running in rate-limited mode over the same content topic i.e., `/toy-chat/3/mingde/proto`. +Your spam activity will be detected by them and a proper message will be shown on their console. + +# Set up +## Build chat2 +First, build chat2 + +``` +make chat2 +``` + +## Setup a chat2 node in rate-limited mode +Run the following command to set up your chat2 client. + +``` +./build/chat2 --content-topic:/toy-chat/3/mingde/proto --ports-shift=1 --fleet:test --rln-relay:true --rln-relay-membership-index:your_index + +``` +In this command +- the `rln-relay` flag is set to true to enable RLN-Relay protocol for spam protection. +- the `rln-relay-membership-index` is used to pick one RLN key out of the 100 available hardcoded RLN keys. +You can pass your index using this command `--rln-relay-membership-index: your_index` e.g., `--rln-relay-membership-index:19` . +Please use the index assigned to you in the dogfooding coordination phase. +If you pick an index at random you may end up using the same key-pair as someone else, hence your messaging rate will be shared with that person(s). + + +Next, choose your nickname: +``` +Choose a nickname >> your_nick_name +``` +Wait for the chat prompt `>>` to appear. +Now your chat2 client is ready. + +You may set up more than one chat client, +just make sure that you increment the `--ports-shift` value for each new client you set up e.g., `--ports-shift=2`. + +# Run the test +Now that you have set up your client, start chatting. +Once you type a chat line and hit enter, you will see a message that indicates the epoch at which the message is sent e.g., +``` +>> Hi! +--rln epoch: 164495684 + Bob: Hi! +``` +The numerical value `164495684` indicates the epoch of the message `Hi!`. +You will see a different value than `164495684` on your screen. 
+If two messages sent by the same chat2 client happen to have the same RLN epoch value, then one of them will be detected as spam by the receiving chat2 clients. +At the time of this tutorial, the epoch duration is set to `10` seconds. +Thus, if you send two messages less than `10` seconds apart, they are likely to get the same `rln epoch` values. + +After sending a chat message, you may experience some delay before the next chat prompt appears. +The reason is that under the hood a zero-knowledge proof is being generated and attached to your message. + +Once you are done with the test, make sure you close all the chat2 clients by typing `/exit` command. +``` +>> /exit +quitting... +``` + +# Sample test output + +In the following sample test, two chat2 clients are set up, namely `Alice` and `Bob`. +`Bob` sends three messages i.e., `message1`, `message2`, and `message3` to the test fleets. +Test fleets will route the messages to their connections including `Alice`. +The two messages `message2` and `message3` have an identical RLN epoch value of `164504930`, so, one of them will be detected as a spam message by `Alice`. +You can check this fact by looking at the `Alice` console, where `A spam message is found and discarded : Bob: message3` is presented. + + +Bob +``` +./build/chat2 --content-topic:/toy-chat/3/mingde/proto --ports-shift=2 --fleet:test --rln-relay:true --rln-relay-membership-index:2 +Choose a nickname >> Bob +Welcome, Bob! +Connecting to test fleet using DNS discovery... +Discovered and connecting to @[16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W, 16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp, 16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG] +Listening on + /ip4/75.157.120.249/tcp/60002/p2p/16Uiu2HAmKdCdP89q6CwLc6PeFDJnVR1EmM7fTgtphHiacSNBnuAz +Store enabled, but no store nodes configured. 
Choosing one at random from discovered peers +Connecting to storenode: 16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W +>> message1 +--rln epoch: 164504929 + Bob: message1 +>> message2 +--rln epoch: 164504930 + Bob: message2 +>> message3 +--rln epoch: 164504930 + Bob: message3 +>> message4 +--rln epoch: 164504973 + Bob: message4 +>> /exit +quitting... +``` + + +Alice +``` +./build/chat2 --content-topic:/toy-chat/3/mingde/proto --ports-shift=1 --fleet:test --rln-relay:true --rln-relay-membership-index:1 + +Choose a nickname >> Alice +Welcome, Alice! +Connecting to test fleet using DNS discovery... +Discovered and connecting to @[16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W, 16Uiu2HAkzHaTP5JsUwfR9NR8Rj9HC24puS6ocaU8wze4QrXr9iXp, 16Uiu2HAmDCp8XJ9z1ev18zuv8NHekAsjNyezAvmMfFEJkiharitG] +Listening on + /ip4/75.157.120.249/tcp/60001/p2p/16Uiu2HAkyTos6LeGrj1YJyA3WYzp9qKQGCsxbtvyoBRHSu9PCrQZ +Store enabled, but no store nodes configured. Choosing one at random from discovered peers +Connecting to storenode: 16Uiu2HAkykgaECHswi3YKJ5dMLbq2kPVCo89fcyTd38UcQD6ej5W +>> Bob: message1 +>> Bob: message2 +>> A spam message is found and discarded : Bob: message3 + Bob: message4 +>> /exit +quitting... +``` diff --git a/third-party/nwaku/docs/tutorial/rln-chat2-local-test.md b/third-party/nwaku/docs/tutorial/rln-chat2-local-test.md new file mode 100644 index 0000000..f5d4bdb --- /dev/null +++ b/third-party/nwaku/docs/tutorial/rln-chat2-local-test.md @@ -0,0 +1,147 @@ +# Building a local network of spam-protected chat2 clients + +This document is a tutorial on how to locally set up a small network of chat2 clients in a spam-protected mode using the waku-RLN-Relay protocol. +In the provided test scenario, you will set up three chat2 clients. +For ease of explanation, we will refer to them as `Alice`, `Bob`, and `Carol`. +`Bob` and `Carol` are directly connected to `Alice` so that their message will be routed via `Alice`. 
+In this setting, if `Bob` or `Carol` attempts to spam the network by violating the message rate limit then `Alice` will detect their spamming activity, and does not relay the spam messages. +The message rate is one per epoch. +At the time of this tutorial, the epoch duration is set to `10` seconds. +You can inspect its current value by checking the following [constant variable](https://github.com/waku-org/nwaku/blob/44c543129ee4149255a00a05f1e7d21f8fa28626/waku/v2/waku_rln_relay/constants.nim#L51) in the nim-waku codebase. + + +# Set up +## Build chat2 +First, build chat2 + +``` +make chat2 +``` + +## Create a local network of chat2 clients +Next, set up the following three chat2 clients in order. +As `Alice` is going to be the only connection point between `Bob` and `Carol`, you need to set it up first before `Bob` and `Carol`. + +**Alice setup**: +Run the following command to set up the first chat2 client. In this command, the `rln-relay` flag is set to true to enable RLN-Relay protocol for the spam protection. +The `rln-relay-membership-index` is used to pick one RLN key out of the 100 available hardcoded RLN keys. +We use the first RLN key of the list for `Alice` i.e., `--rln-relay-membership-index:1`. + +``` +./build/chat2 --staticnode:/ip4/127.0.0.1/tcp/60010/p2p/16Uiu2HAmKdCdP89q6CwLc6PeFDJnVR1EmM7fTgtphHiacSNBnuAz --content-topic:/toy-chat/3/mingde/proto --ports-shift=1 --fleet:none --nodekey=f157b19b13e9ee818acfc9d3d7eec6b81f70c0a978dec19def261172acbe26e6 --rln-relay:true --rln-relay-membership-index:1 + +``` + +Next, you will be prompted with a message to choose a nickname, set it to `Alice`: +``` +Choose a nickname >> Alice +``` +Wait for the chat prompt `>>` to appear. +Now your first chat2 client is ready. + + + + +**Bob setup**: +Set up the second chat2 client using the command below. Choose `Bob` as the nickname. 
+``` +./build/chat2 --staticnode:/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAkyTos6LeGrj1YJyA3WYzp9qKQGCsxbtvyoBRHSu9PCrQZ --content-topic:/toy-chat/3/mingde/proto --ports-shift=2 --fleet:none --nodekey=9ab635854ffe8fed32b17d7ef38e0b2f354ca1f3283b7f78fb77227004d2cbe6 --rln-relay:true --rln-relay-membership-index:2 + +Choose a nickname >> Bob +``` + +**Carol setup**: +Run the following command to set up the third chat2 client, and choose `Carol` as the nickname. +``` +./build/chat2 --staticnode:/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAkyTos6LeGrj1YJyA3WYzp9qKQGCsxbtvyoBRHSu9PCrQZ --content-topic:/toy-chat/3/mingde/proto --ports-shift=3 --fleet:none --nodekey=0aa89d7f27300c9fb4e119acc225c8873a3bf96bbb4c82045c94934bcc6a6af8 --rln-relay:true --rln-relay-membership-index:3 + +Choose a nickname >> Carol + +``` + +# Run the test +Now that the network is formed, you can start chatting. +For a better illustration of spam protection, use `Bob` and `Carol` clients for chatting and let `Alice` act only as a router. +Once you type a chat line and hit enter, you will see a message that indicates the epoch at which the message is sent e.g., +``` +>> Hi! +--rln epoch: 164495684 + Bob: Hi! +``` +The numerical value `164495684` indicates the epoch of the message `Hi!`. +You will see a different value than `164495684` on your screen. +If two messages sent by the same chat2 client happen to have the same RLN epoch value, then one of them will be detected as spam and won't be routed (by Alice in this test setting). +At the time of this tutorial, the epoch duration is set to `10` seconds. +Thus, if you send two messages less than `10` seconds apart, they are likely to get the same `rln epoch` values. + +After sending a chat message, you may experience some delay before the next chat prompt appears. +The reason is that under the hood a zero-knowledge proof is being generated and attached to your message. 
+ +Once you are done with the test, make sure you close all the chat2 clients by typing `/exit` command. +``` +>> /exit +quitting... +``` + +# Sample test output + +In the following sample test, `Bob` sends three messages namely, `message1`, `message2`, and `message3`. +The two messages `message2` and `message3` have identical RLN epoch value of `164504930`, so, one of them will be discarded by `Alice` as a spam message. +You can check this fact by looking at the `Alice` console, where `A spam message is found and discarded : Bob: message3` is presented. +`Alice` does not relay `message3` further, hence `Carol` never receives it. + +Bob +``` +./build/chat2 --staticnode:/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAkyTos6LeGrj1YJyA3WYzp9qKQGCsxbtvyoBRHSu9PCrQZ --content-topic:/toy-chat/3/mingde/proto --ports-shift=2 --fleet:none --nodekey=9ab635854ffe8fed32b17d7ef38e0b2f354ca1f3283b7f78fb77227004d2cbe6 --rln-relay:true --rln-relay-membership-index:2 +Choose a nickname >> Bob +Welcome, Bob! +Connecting to nodes +Listening on + /ip4/75.157.120.249/tcp/60002/p2p/16Uiu2HAmKdCdP89q6CwLc6PeFDJnVR1EmM7fTgtphHiacSNBnuAz +>> message1 +--rln epoch: 164504929 + Bob: message1 +>> message2 +--rln epoch: 164504930 + Bob: message2 +>> message3 +--rln epoch: 164504930 + Bob: message3 +>> message4 +--rln epoch: 164504973 + Bob: message4 +>> /exit +quitting... +``` + +Alice +``` +./build/chat2 --staticnode:/ip4/127.0.0.1/tcp/60010/p2p/16Uiu2HAmKdCdP89q6CwLc6PeFDJnVR1EmM7fTgtphHiacSNBnuAz --content-topic:/toy-chat/3/mingde/proto --ports-shift=1 --fleet:none --nodekey=f157b19b13e9ee818acfc9d3d7eec6b81f70c0a978dec19def261172acbe26e6 --rln-relay:true --rln-relay-membership-index:1 + +Choose a nickname >> Alice +Welcome, Alice! +Connecting to nodes +Listening on + /ip4/75.157.120.249/tcp/60001/p2p/16Uiu2HAkyTos6LeGrj1YJyA3WYzp9qKQGCsxbtvyoBRHSu9PCrQZ +>> Bob: message1 +>> Bob: message2 +>> A spam message is found and discarded : Bob: message3 + Bob: message4 +>> /exit +quitting... 
+``` + +Carol +``` +./build/chat2 --staticnode:/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAkyTos6LeGrj1YJyA3WYzp9qKQGCsxbtvyoBRHSu9PCrQZ --content-topic:/toy-chat/3/mingde/proto --ports-shift=3 --fleet:none --nodekey=0aa89d7f27300c9fb4e119acc225c8873a3bf96bbb4c82045c94934bcc6a6af8 --rln-relay:true --rln-relay-membership-index:3 +Choose a nickname >> Carol +Welcome, Carol! +Connecting to nodes +Listening on + /ip4/75.157.120.249/tcp/60003/p2p/16Uiu2HAm1bEDWZqjxfYRvGo1UpjaejkenJVmMFMPMDmgWWGkREJu +>> Bob: message1 +>> Bob: message2 +>> Bob: message4 +>> /exit +quitting... +``` \ No newline at end of file diff --git a/third-party/nwaku/docs/tutorial/rln-keystore-generator.md b/third-party/nwaku/docs/tutorial/rln-keystore-generator.md new file mode 100644 index 0000000..4ce7008 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/rln-keystore-generator.md @@ -0,0 +1,73 @@ +# rln-keystore-generator + +This document describes how to run and use the `rln-keystore-generator` tool. +It is meant to be used to generate and persist a set of valid RLN credentials to be used with rln-relay. + +## Pre-requisites + +1. An EOA with some ETH to pay for the registration transaction ($PRIVATE_KEY) +2. An RPC endpoint to connect to an Ethereum node ($RPC_URL) + +## Usage + +1. First, we compile the binary + + ```bash + make -j16 wakunode2 + ``` + This command will fetch the rln static library and link it automatically. + + +2. Define the arguments you wish to use + + ```bash + export RPC_URL="https://linea-sepolia.infura.io/v3/..." + export PRIVATE_KEY="0x..." + export RLN_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6" + export RLN_CREDENTIAL_PATH="rlnKeystore.json" + export RLN_CREDENTIAL_PASSWORD="xxx" + ``` + +3. 
Dry run the command to verify that it is configured correctly before executing the registration transaction

   ```bash
   ./build/wakunode2 generateRlnKeystore \
   --rln-relay-eth-client-address:$RPC_URL \
   --rln-relay-eth-private-key:$PRIVATE_KEY \
   --rln-relay-eth-contract-address:$RLN_CONTRACT_ADDRESS \
   --rln-relay-cred-path:$RLN_CREDENTIAL_PATH \
   --rln-relay-cred-password:$RLN_CREDENTIAL_PASSWORD
   ```
   By default, the tool will not execute a transaction. It will execute only if `--execute` is passed in.

4. Run the keystore generator with the onchain registration

   ```bash
   ./build/wakunode2 generateRlnKeystore \
   --rln-relay-eth-client-address:$RPC_URL \
   --rln-relay-eth-private-key:$PRIVATE_KEY \
   --rln-relay-eth-contract-address:$RLN_CONTRACT_ADDRESS \
   --rln-relay-cred-path:$RLN_CREDENTIAL_PATH \
   --rln-relay-cred-password:$RLN_CREDENTIAL_PASSWORD \
   --execute
   ```

   What this does is -
   a. generates a set of valid rln credentials
   b. registers it to the contract address provided
   c. persists the credentials to the path provided

5. You may now use this keystore with wakunode2 or chat2.

## Troubleshooting

1. `KeystoreCredentialNotFoundError`

   ```
   KeystoreCredentialNotFoundError: Credential not found in keystore
   ```
   This is most likely due to multiple credentials present in the same keystore.
   To navigate around this, both chat2 and wakunode2 have provided an option to specify the credential index to use (`--rln-relay-membership-index`).
   Please use this option with the appropriate tree index of the credential you wish to use.
+ + diff --git a/third-party/nwaku/docs/tutorial/store.md b/third-party/nwaku/docs/tutorial/store.md new file mode 100644 index 0000000..0d3aee7 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/store.md @@ -0,0 +1,42 @@ +# Running Store Protocol + +> TODO (2023-05-24): Deprecate or fix + +## How to + +Build: + +``` +# make wakunode2 is run as part of scripts2 target +make scripts2 +``` + +Run two nodes and connect them: + +``` +# Starts listening on 60000 with RPC server on 8545. +# Note the "listening on address" in logs. +./build/wakunode2 --ports-shift:0 + +# Run another node with staticnode argument +./build/wakunode2 --ports-shift:1 --staticnode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp --storenode:/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmF4tuht6fmna6uDqoSMgFqhUrdaVR6VQRyGr6sCpfS2jp +``` + +When flag `persist-messages` is passed messages are going to be persisted in-memory. +If additionally flag `dbpath` is passed with a path, messages are persisted and stored in a database called `store` under the specified path. +If flag `persist-messages` is not passed, messages are not persisted and stored at all. + + + +You should see your nodes connecting. + +Do basic RPC calls: + +``` +./build/rpc_subscribe 8545 +./build/rpc_subscribe 8546 +./build/rpc_publish 8545 # enter your message in STDIN +./build/rpc_query 8546 # enter your content topic; default is "1" +``` + +You should see other node receive something. diff --git a/third-party/nwaku/docs/tutorial/websocket.md b/third-party/nwaku/docs/tutorial/websocket.md new file mode 100644 index 0000000..bc07744 --- /dev/null +++ b/third-party/nwaku/docs/tutorial/websocket.md @@ -0,0 +1,69 @@ +# Listening on Websocket to Enable Connections With Waku v2 Browser Peers + +> TODO (2023-05-24): Deprecate or fix + +Currently, nim-waku only supports TCP transport. 
+This means it is not possible to directly connect from a browser using [js-waku](https://github.com/waku-org/js-waku/)
+to a nim-waku based node such as wakunode2.
+
+To remedy this, utilities such as [websockify](https://github.com/novnc/websockify) can be used.
+This tutorial explains how one can set up websockify alongside wakunode2 to accept connections from peer browsers.
+
+Note that popular browsers only accept secure websocket connections (`wss`) in a secure page (`https`),
+hence we will also cover the creation of SSL certificates.
+
+## Creating a certificate using certbot
+
+Feel free to skip this step if you already own an SSL certificate for your domain.
+
+To do so, simply follow the instructions at https://certbot.eff.org/.
+
+Note that you do not need to have a web server (e.g. apache, nginx) running to set up wakunode2 with websockify.
+
+## Setting up websockify
+
+You can install [Websockify](https://github.com/novnc/websockify) via your preferred package manager
+or [using Python](https://github.com/novnc/websockify#installing-websockify).
+
+To start websockify, use the following command:
+
+```shell
+sudo websockify \
+--cert /etc/letsencrypt/live/<your.domain>/fullchain.pem \
+--key /etc/letsencrypt/live/<your.domain>/privkey.pem 0.0.0.0:443 \
+127.0.0.1:<tcp_port>
+```
+
+With:
+- `your.domain` being your domain name (e.g. `www.example.org`).
+- `tcp_port` being the port on which wakunode2 is listening, by default `60000`.
+
+Notes:
+- This assumes you used `certbot` to generate the certificate; `/etc/letsencrypt/live` is where `certbot` stores certificates.
+  If you have your own certificates, change the path and be sure to pass the full certificate chain including
+  the CA certificate to the `--cert` argument.
+- `sudo` is needed because websockify listens on port `443`;
+  you can avoid using `sudo` by using a custom port, just be sure it is open to the internet by checking your firewall.
+
+## Getting your wakunode2's multiaddr
+
+Start `wakunode2` as you usually do,
+be sure to take into account the listening port to reflect it in the websockify command line.
+
+`wakunode2` prints the multiaddr it is listening to at the start of the logs:
+
+```
+INF 2021-06-23 10:37:25.274+10:00 Listening on topics="wakunode" tid=2271871 file=wakunode2.nim:170 full=/ip4/1.2.3.4/tcp/60000/p2p/16Uiu2HAmPRmVHjZSP3U1T9ez4EQBBUsji5RyvAyDGVNgTQajtEQJ
+```
+
+To get the websocket multiaddr, simply change the port and insert `wss` after said port:
+
+```
+/ip4/1.2.3.4/tcp/443/wss/p2p/16Uiu2HAmPRmVHjZSP3U1T9ez4EQBBUsji5RyvAyDGVNgTQajtEQJ
+```
+
+You can also use your domain name instead of the IP address:
+
+```
+/dns4/your.domain/tcp/443/wss/p2p/16Uiu2HAmPRmVHjZSP3U1T9ez4EQBBUsji5RyvAyDGVNgTQajtEQJ
+```
diff --git a/third-party/nwaku/env.sh b/third-party/nwaku/env.sh
new file mode 100755
index 0000000..f90ba9a
--- /dev/null
+++ b/third-party/nwaku/env.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file
+# and we fall back to a Zsh-specific special var to also support Zsh.
+REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
+ABS_PATH="$(cd ${REL_PATH}; pwd)"
+source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh
+
diff --git a/third-party/nwaku/examples/README.md b/third-party/nwaku/examples/README.md
new file mode 100644
index 0000000..7055cf6
--- /dev/null
+++ b/third-party/nwaku/examples/README.md
@@ -0,0 +1,75 @@
+# Examples
+
+## Compile
+
+Make all examples.
+```console +make example2 +``` + +## Waku API + +Uses the simplified Waku API to create and start a node, +you need an RPC endpoint for Linea Sepolia for RLN: + +```console +./build/waku_api --ethRpcEndpoint=https://linea-sepolia.infura.io/v3/ +``` + +If you can't be bothered but still want to see some action, +just run the binary and it will use a non-RLN network: + +```console +./build/waku_api +``` + +## publisher/subscriber + +Within `examples/` you can find a `publisher` and a `subscriber`. The first one publishes messages to the default pubsub topic on a given content topic, and the second one runs forever listening to that pubsub topic and printing the content it receives. + +**Some notes:** +* These examples are meant to work even if you are behind a firewall and you can't be discovered by discv5. +* You only need to provide a reachable bootstrap peer (see our [fleets](https://fleets.status.im/)) +* The examples are meant to work out of the box. +* Note that both services wait for some time until a given minimum amount of connections are reached. This is to ensure messages are gossiped. + +**Run:** + +Wait until the subscriber is ready. +```console +./build/subscriber +``` + +And run a publisher +```console +./build/publisher +``` + +See how the subscriber received the messages published by the publisher. Feel free to experiment from different machines in different locations. + +## resource-restricted publisher/subscriber (lightpush/filter) + +To illustrate publishing and receiving messages on a resource-restricted client, +`examples/v2` also provides a `lightpush_publisher` and a `filter_subscriber`. +The `lightpush_publisher` continually publishes messages via a lightpush service node +to the default pubsub topic on a given content topic. +The `filter_subscriber` subscribes via a filter service node +to the same pubsub and content topic. +It runs forever, maintaining this subscription +and printing the content it receives. 
+ +**Run** +Start the filter subscriber. +```console +./build/filter_subscriber +``` + +And run a lightpush publisher +```console +./build/lightpush_publisher +``` + +See how the filter subscriber receives messages published by the lightpush publisher. +Neither the publisher nor the subscriber participates in `relay`, +but instead make use of service nodes to save resources. +Feel free to experiment from different machines in different locations. diff --git a/third-party/nwaku/examples/cbindings/README.md b/third-party/nwaku/examples/cbindings/README.md new file mode 100644 index 0000000..5465cf5 --- /dev/null +++ b/third-party/nwaku/examples/cbindings/README.md @@ -0,0 +1,18 @@ +## App description +This is a very simple example that shows how to invoke libwaku functions from a C program. + +## Build +1. Open terminal +2. cd to nwaku root folder +3. make cwaku_example -j8 + +This will create libwaku.so and cwaku_example binary within the build folder. + +## Run +1. Open terminal +2. cd to nwaku root folder +3. export LD_LIBRARY_PATH=build +4. `./build/cwaku_example --host=0.0.0.0 --port=60001` + +Use `./build/cwaku_example --help` to see some other options. 
+ diff --git a/third-party/nwaku/examples/cbindings/base64.c b/third-party/nwaku/examples/cbindings/base64.c new file mode 100644 index 0000000..0f9acdf --- /dev/null +++ b/third-party/nwaku/examples/cbindings/base64.c @@ -0,0 +1,58 @@ + +#include "base64.h" + +// Base64 encoding +// source: https://nachtimwald.com/2017/11/18/base64-encode-and-decode-in-c/ +size_t b64_encoded_size(size_t inlen) +{ + size_t ret; + + ret = inlen; + if (inlen % 3 != 0) + ret += 3 - (inlen % 3); + ret /= 3; + ret *= 4; + + return ret; +} + +const char b64chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +char *b64_encode(const unsigned char *in, size_t len) +{ + char *out; + size_t elen; + size_t i; + size_t j; + size_t v; + + if (in == NULL || len == 0) + return NULL; + + elen = b64_encoded_size(len); + out = malloc(elen+1); + out[elen] = '\0'; + + for (i=0, j=0; i> 18) & 0x3F]; + out[j+1] = b64chars[(v >> 12) & 0x3F]; + if (i+1 < len) { + out[j+2] = b64chars[(v >> 6) & 0x3F]; + } else { + out[j+2] = '='; + } + if (i+2 < len) { + out[j+3] = b64chars[v & 0x3F]; + } else { + out[j+3] = '='; + } + } + + return out; +} + +// End of Base64 encoding diff --git a/third-party/nwaku/examples/cbindings/base64.h b/third-party/nwaku/examples/cbindings/base64.h new file mode 100644 index 0000000..37ca50f --- /dev/null +++ b/third-party/nwaku/examples/cbindings/base64.h @@ -0,0 +1,11 @@ + +#ifndef _BASE64_H_ +#define _BASE64_H_ + +#include + +size_t b64_encoded_size(size_t inlen); + +char *b64_encode(const unsigned char *in, size_t len); + +#endif diff --git a/third-party/nwaku/examples/cbindings/waku_example.c b/third-party/nwaku/examples/cbindings/waku_example.c new file mode 100644 index 0000000..35ac8a2 --- /dev/null +++ b/third-party/nwaku/examples/cbindings/waku_example.c @@ -0,0 +1,366 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "base64.h" +#include "../../library/libwaku.h" + +// 
Shared synchronization variables +pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t cond = PTHREAD_COND_INITIALIZER; +int callback_executed = 0; + +void waitForCallback() { + pthread_mutex_lock(&mutex); + while (!callback_executed) { + pthread_cond_wait(&cond, &mutex); + } + callback_executed = 0; + pthread_mutex_unlock(&mutex); +} + +#define WAKU_CALL(call) \ +do { \ + int ret = call; \ + if (ret != 0) { \ + printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \ + exit(1); \ + } \ + waitForCallback(); \ +} while (0) + +struct ConfigNode { + char host[128]; + int port; + char key[128]; + int relay; + char peers[2048]; + int store; + char storeNode[2048]; + char storeRetentionPolicy[64]; + char storeDbUrl[256]; + int storeVacuum; + int storeDbMigration; + int storeMaxNumDbConnections; +}; + +// libwaku Context +void* ctx; + +// For the case of C language we don't need to store a particular userData +void* userData = NULL; + +// Arguments parsing +static char doc[] = "\nC example that shows how to use the waku library."; +static char args_doc[] = ""; + +static struct argp_option options[] = { + { "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"}, + { "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"}, + { "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."}, + { "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"}, + { "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\ + to. (default: \"\") e.g. 
\"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""}, + { 0 } +}; + +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + + struct ConfigNode *cfgNode = state->input; + switch (key) { + case 'h': + snprintf(cfgNode->host, 128, "%s", arg); + break; + case 'p': + cfgNode->port = atoi(arg); + break; + case 'k': + snprintf(cfgNode->key, 128, "%s", arg); + break; + case 'r': + cfgNode->relay = atoi(arg); + break; + case 'a': + snprintf(cfgNode->peers, 2048, "%s", arg); + break; + case ARGP_KEY_ARG: + if (state->arg_num >= 1) /* Too many arguments. */ + argp_usage(state); + break; + case ARGP_KEY_END: + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +void signal_cond() { + pthread_mutex_lock(&mutex); + callback_executed = 1; + pthread_cond_signal(&cond); + pthread_mutex_unlock(&mutex); +} + +static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 }; + +void event_handler(int callerRet, const char* msg, size_t len, void* userData) { + if (callerRet == RET_ERR) { + printf("Error: %s\n", msg); + exit(1); + } + else if (callerRet == RET_OK) { + printf("Receiving event: %s\n", msg); + } + + signal_cond(); +} + +void on_event_received(int callerRet, const char* msg, size_t len, void* userData) { + if (callerRet == RET_ERR) { + printf("Error: %s\n", msg); + exit(1); + } + else if (callerRet == RET_OK) { + printf("Receiving event: %s\n", msg); + } +} + +char* contentTopic = NULL; +void handle_content_topic(int callerRet, const char* msg, size_t len, void* userData) { + if (contentTopic != NULL) { + free(contentTopic); + } + + contentTopic = malloc(len * sizeof(char) + 1); + strcpy(contentTopic, msg); + signal_cond(); +} + +char* publishResponse = NULL; +void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userData) { + printf("Publish Ok: %s %lu\n", msg, len); + + if (publishResponse != NULL) { + free(publishResponse); + } + + publishResponse = malloc(len * 
sizeof(char) + 1); + strcpy(publishResponse, msg); +} + +#define MAX_MSG_SIZE 65535 + +void publish_message(const char* msg) { + char jsonWakuMsg[MAX_MSG_SIZE]; + char *msgPayload = b64_encode(msg, strlen(msg)); + + WAKU_CALL( waku_content_topic(ctx, + "appName", + 1, + "contentTopicName", + "encoding", + handle_content_topic, + userData) ); + snprintf(jsonWakuMsg, + MAX_MSG_SIZE, + "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}", + msgPayload, contentTopic); + + free(msgPayload); + + WAKU_CALL( waku_relay_publish(ctx, + "/waku/2/rs/16/32", + jsonWakuMsg, + 10000 /*timeout ms*/, + event_handler, + userData) ); +} + +void show_help_and_exit() { + printf("Wrong parameters\n"); + exit(1); +} + +void print_default_pubsub_topic(int callerRet, const char* msg, size_t len, void* userData) { + printf("Default pubsub topic: %s\n", msg); + signal_cond(); +} + +void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) { + printf("Git Version: %s\n", msg); + signal_cond(); +} + +// Beginning of UI program logic + +enum PROGRAM_STATE { + MAIN_MENU, + SUBSCRIBE_TOPIC_MENU, + CONNECT_TO_OTHER_NODE_MENU, + PUBLISH_MESSAGE_MENU +}; + +enum PROGRAM_STATE current_state = MAIN_MENU; + +void show_main_menu() { + printf("\nPlease, select an option:\n"); + printf("\t1.) Subscribe to topic\n"); + printf("\t2.) Connect to other node\n"); + printf("\t3.) Publish a message\n"); +} + +void handle_user_input() { + char cmd[1024]; + memset(cmd, 0, 1024); + int numRead = read(0, cmd, 1024); + if (numRead <= 0) { + return; + } + + switch (atoi(cmd)) + { + case SUBSCRIBE_TOPIC_MENU: + { + printf("Indicate the Pubsubtopic to subscribe:\n"); + char pubsubTopic[128]; + scanf("%127s", pubsubTopic); + + WAKU_CALL( waku_relay_subscribe(ctx, + pubsubTopic, + event_handler, + userData) ); + printf("The subscription went well\n"); + + show_main_menu(); + } + break; + + case CONNECT_TO_OTHER_NODE_MENU: + printf("Connecting to a node. 
Please indicate the peer Multiaddress:\n"); + printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n"); + char peerAddr[512]; + scanf("%511s", peerAddr); + WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData)); + show_main_menu(); + break; + + case PUBLISH_MESSAGE_MENU: + { + printf("Type the message to publish:\n"); + char msg[1024]; + scanf("%1023s", msg); + + publish_message(msg); + + show_main_menu(); + } + break; + + case MAIN_MENU: + break; + } +} + +// End of UI program logic + +int main(int argc, char** argv) { + struct ConfigNode cfgNode; + // default values + snprintf(cfgNode.host, 128, "0.0.0.0"); + cfgNode.port = 60000; + cfgNode.relay = 1; + + cfgNode.store = 0; + snprintf(cfgNode.storeNode, 2048, ""); + snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000"); + snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres"); + cfgNode.storeVacuum = 0; + cfgNode.storeDbMigration = 0; + cfgNode.storeMaxNumDbConnections = 30; + + if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) + == ARGP_ERR_UNKNOWN) { + show_help_and_exit(); + } + + char jsonConfig[5000]; + snprintf(jsonConfig, 5000, "{ \ + \"clusterId\": 16, \ + \"shards\": [ 1, 32, 64, 128, 256 ], \ + \"numShardsInNetwork\": 257, \ + \"listenAddress\": \"%s\", \ + \"tcpPort\": %d, \ + \"relay\": %s, \ + \"store\": %s, \ + \"storeMessageDbUrl\": \"%s\", \ + \"storeMessageRetentionPolicy\": \"%s\", \ + \"storeMaxNumDbConnections\": %d , \ + \"logLevel\": \"DEBUG\", \ + \"discv5Discovery\": true, \ + \"discv5BootstrapNodes\": \ + [\"enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", 
\"enr:-QEcuED7ww5vo2rKc1pyBp7fubBUH-8STHEZHo7InjVjLblEVyDGkjdTI9VdqmYQOn95vuQH-Htku17WSTzEufx-Wg4mAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", \"enr:-QEcuEAoShWGyN66wwusE3Ri8hXBaIkoHZHybUB8cCPv5v3ypEf9OCg4cfslJxZFANl90s-jmMOugLUyBx4EfOBNJ6_VAYJpZIJ2NIJpcIQI2hdMim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDP7CbRk-YKJwOFFM4Z9ney0GPc7WPJaCwGkpNRyla7mCDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\"], \ + \"discv5UdpPort\": 9999, \ + \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \ + \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ + }", cfgNode.host, + cfgNode.port, + cfgNode.relay ? "true":"false", + cfgNode.store ? "true":"false", + cfgNode.storeDbUrl, + cfgNode.storeRetentionPolicy, + cfgNode.storeMaxNumDbConnections); + + ctx = waku_new(jsonConfig, event_handler, userData); + waitForCallback(); + + WAKU_CALL( waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData) ); + WAKU_CALL( waku_version(ctx, print_waku_version, userData) ); + + printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port); + printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? 
"YES": "NO"); + + waku_set_event_callback(ctx, on_event_received, userData); + + waku_start(ctx, event_handler, userData); + waitForCallback(); + + WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) ); + + WAKU_CALL( waku_relay_subscribe(ctx, + "/waku/2/rs/0/0", + event_handler, + userData) ); + + WAKU_CALL( waku_discv5_update_bootnodes(ctx, + "[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]", + event_handler, + userData) ); + + WAKU_CALL( waku_get_peerids_from_peerstore(ctx, + event_handler, + userData) ); + + show_main_menu(); + while(1) { + handle_user_input(); + + // Uncomment the following if need to test the metrics retrieval + // WAKU_CALL( waku_get_metrics(ctx, + // event_handler, + // userData) ); + } + + pthread_mutex_destroy(&mutex); + pthread_cond_destroy(&cond); +} diff --git a/third-party/nwaku/examples/cpp/README.md b/third-party/nwaku/examples/cpp/README.md new file mode 100644 index 0000000..fa8d246 --- /dev/null +++ b/third-party/nwaku/examples/cpp/README.md @@ -0,0 +1,18 @@ +## App description +This is a very simple example that shows how to invoke libwaku functions from a C++ program. + +## Build +1. Open terminal +2. cd to nwaku root folder +3. 
make cppwaku_example -j8 + +This will create libwaku.so and cppwaku_example binary within the build folder. + +## Run +1. Open terminal +2. cd to nwaku root folder +3. export LD_LIBRARY_PATH=build +4. `./build/cppwaku_example --host=0.0.0.0 --port=60001` + +Use `./build/cppwaku_example --help` to see some other options. + diff --git a/third-party/nwaku/examples/cpp/base64.cpp b/third-party/nwaku/examples/cpp/base64.cpp new file mode 100644 index 0000000..517e4b2 --- /dev/null +++ b/third-party/nwaku/examples/cpp/base64.cpp @@ -0,0 +1,53 @@ + +#include +#include "base64.h" + +// Base64 encoding +// source: https://nachtimwald.com/2017/11/18/base64-encode-and-decode-in-c/ +size_t b64_encoded_size(size_t inlen) +{ + size_t ret; + + ret = inlen; + if (inlen % 3 != 0) + ret += 3 - (inlen % 3); + ret /= 3; + ret *= 4; + + return ret; +} + +const char b64chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +void b64_encode(char* in, size_t len, std::vector& out) +{ + size_t elen; + size_t i; + size_t j; + size_t v; + + if (in == NULL || len == 0) + return; + + elen = b64_encoded_size(len); + out.reserve(elen+1); + + for (i=0, j=0; i> 18) & 0x3F]; + out[j+1] = b64chars[(v >> 12) & 0x3F]; + if (i+1 < len) { + out[j+2] = b64chars[(v >> 6) & 0x3F]; + } else { + out[j+2] = '='; + } + if (i+2 < len) { + out[j+3] = b64chars[v & 0x3F]; + } else { + out[j+3] = '='; + } + } +} diff --git a/third-party/nwaku/examples/cpp/base64.h b/third-party/nwaku/examples/cpp/base64.h new file mode 100644 index 0000000..0994a0b --- /dev/null +++ b/third-party/nwaku/examples/cpp/base64.h @@ -0,0 +1,11 @@ + +#ifndef _BASE64_H_ +#define _BASE64_H_ + +#include + +size_t b64_encoded_size(size_t inlen); + +void b64_encode(char* in, size_t len, std::vector& out); + +#endif diff --git a/third-party/nwaku/examples/cpp/waku.cpp b/third-party/nwaku/examples/cpp/waku.cpp new file mode 100644 index 0000000..c47877d --- /dev/null +++ b/third-party/nwaku/examples/cpp/waku.cpp @@ -0,0 
+1,331 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "base64.h" +#include "../../library/libwaku.h" + +// Shared synchronization variables +pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t cond = PTHREAD_COND_INITIALIZER; +int callback_executed = 0; + +void waitForCallback() { + pthread_mutex_lock(&mutex); + while (!callback_executed) { + pthread_cond_wait(&cond, &mutex); + } + callback_executed = 0; + pthread_mutex_unlock(&mutex); +} + +void signal_cond() { + pthread_mutex_lock(&mutex); + callback_executed = 1; + pthread_cond_signal(&cond); + pthread_mutex_unlock(&mutex); +} + +#define WAKU_CALL(call) \ +do { \ + int ret = call; \ + if (ret != 0) { \ + std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \ + } \ + waitForCallback(); \ +} while (0) + +struct ConfigNode { + char host[128]; + int port; + char key[128]; + int relay; + char peers[2048]; +}; + +// Arguments parsing +static char doc[] = "\nC example that shows how to use the waku library."; +static char args_doc[] = ""; + +static struct argp_option options[] = { + { "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"}, + { "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"}, + { "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."}, + { "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"}, + { "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\ + to. (default: \"\") e.g. 
\"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""}, + { 0 } +}; + +static error_t parse_opt(int key, char *arg, struct argp_state *state) { + + struct ConfigNode *cfgNode = (ConfigNode *) state->input; + switch (key) { + case 'h': + snprintf(cfgNode->host, 128, "%s", arg); + break; + case 'p': + cfgNode->port = atoi(arg); + break; + case 'k': + snprintf(cfgNode->key, 128, "%s", arg); + break; + case 'r': + cfgNode->relay = atoi(arg); + break; + case 'a': + snprintf(cfgNode->peers, 2048, "%s", arg); + break; + case ARGP_KEY_ARG: + if (state->arg_num >= 1) /* Too many arguments. */ + argp_usage(state); + break; + case ARGP_KEY_END: + break; + default: + return ARGP_ERR_UNKNOWN; + } + + return 0; +} + +void event_handler(const char* msg, size_t len) { + printf("Receiving event: %s\n", msg); +} + +void handle_error(const char* msg, size_t len) { + printf("handle_error: %s\n", msg); + exit(1); +} + +template +auto cify(F&& f) { + static F fn = std::forward(f); + return [](int callerRet, const char* msg, size_t len, void* userData) { + signal_cond(); + return fn(msg, len); + }; +} + +static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 }; + +// Beginning of UI program logic + +enum PROGRAM_STATE { + MAIN_MENU, + SUBSCRIBE_TOPIC_MENU, + CONNECT_TO_OTHER_NODE_MENU, + PUBLISH_MESSAGE_MENU +}; + +enum PROGRAM_STATE current_state = MAIN_MENU; + +void show_main_menu() { + printf("\nPlease, select an option:\n"); + printf("\t1.) Subscribe to topic\n"); + printf("\t2.) Connect to other node\n"); + printf("\t3.) 
Publish a message\n"); +} + +void handle_user_input(void* ctx) { + char cmd[1024]; + memset(cmd, 0, 1024); + int numRead = read(0, cmd, 1024); + if (numRead <= 0) { + return; + } + + switch (atoi(cmd)) + { + case SUBSCRIBE_TOPIC_MENU: + { + printf("Indicate the Pubsubtopic to subscribe:\n"); + char pubsubTopic[128]; + scanf("%127s", pubsubTopic); + + WAKU_CALL( waku_relay_subscribe(ctx, + pubsubTopic, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr) ); + printf("The subscription went well\n"); + + show_main_menu(); + } + break; + + case CONNECT_TO_OTHER_NODE_MENU: + printf("Connecting to a node. Please indicate the peer Multiaddress:\n"); + printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n"); + char peerAddr[512]; + scanf("%511s", peerAddr); + WAKU_CALL( waku_connect(ctx, + peerAddr, + 10000 /* timeoutMs */, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr)); + show_main_menu(); + break; + + case PUBLISH_MESSAGE_MENU: + { + printf("Type the message to publish:\n"); + char msg[1024]; + scanf("%1023s", msg); + + char jsonWakuMsg[2048]; + std::vector msgPayload; + b64_encode(msg, strlen(msg), msgPayload); + + std::string contentTopic; + waku_content_topic(ctx, + "appName", + 1, + "contentTopicName", + "encoding", + cify([&contentTopic](const char* msg, size_t len) { + contentTopic = msg; + }), + nullptr); + + snprintf(jsonWakuMsg, + 2048, + "{\"payload\":\"%s\",\"contentTopic\":\"%s\"}", + msgPayload.data(), contentTopic.c_str()); + + WAKU_CALL( waku_relay_publish(ctx, + "/waku/2/rs/16/32", + jsonWakuMsg, + 10000 /*timeout ms*/, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr) ); + + show_main_menu(); + } + break; + + case MAIN_MENU: + break; + } +} + +// End of UI program logic + +void show_help_and_exit() { + printf("Wrong parameters\n"); + exit(1); +} + +int main(int argc, char** argv) { + struct ConfigNode 
cfgNode; + // default values + snprintf(cfgNode.host, 128, "0.0.0.0"); + snprintf(cfgNode.key, 128, + "364d111d729a6eb6d2e6113e163f017b5ef03a6f94c9b5b7bb1bb36fa5cb07a9"); + cfgNode.port = 60000; + cfgNode.relay = 1; + + if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) + == ARGP_ERR_UNKNOWN) { + show_help_and_exit(); + } + + char jsonConfig[2048]; + snprintf(jsonConfig, 2048, "{ \ + \"host\": \"%s\", \ + \"port\": %d, \ + \"relay\": true, \ + \"clusterId\": 16, \ + \"shards\": [ 1, 32, 64, 128, 256 ], \ + \"logLevel\": \"FATAL\", \ + \"discv5Discovery\": true, \ + \"discv5BootstrapNodes\": \ + [\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \ + \"discv5UdpPort\": 9999, \ + \"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \ + \"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \ + }", cfgNode.host, + cfgNode.port); + + void* ctx = + waku_new(jsonConfig, + cify([](const char* msg, size_t len) { + std::cout << "waku_new feedback: " << msg << std::endl; + } + ), + nullptr + ); + waitForCallback(); + + // example on how to retrieve a value from the `libwaku` callback. 
+ std::string defaultPubsubTopic; + WAKU_CALL( + waku_default_pubsub_topic( + ctx, + cify([&defaultPubsubTopic](const char* msg, size_t len) { + defaultPubsubTopic = msg; + } + ), + nullptr)); + + std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl; + + WAKU_CALL(waku_version(ctx, + cify([&](const char* msg, size_t len) { + std::cout << "Git Version: " << msg << std::endl; + }), + nullptr)); + + printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port); + printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO"); + + std::string pubsubTopic; + WAKU_CALL(waku_pubsub_topic(ctx, + "example", + cify([&](const char* msg, size_t len) { + pubsubTopic = msg; + }), + nullptr)); + + std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl; + + waku_set_event_callback(ctx, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr); + + WAKU_CALL( waku_start(ctx, + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr)); + + WAKU_CALL( waku_relay_subscribe(ctx, + defaultPubsubTopic.c_str(), + cify([&](const char* msg, size_t len) { + event_handler(msg, len); + }), + nullptr) ); + + show_main_menu(); + while(1) { + handle_user_input(ctx); + } +} diff --git a/third-party/nwaku/examples/filter_subscriber.nim b/third-party/nwaku/examples/filter_subscriber.nim new file mode 100644 index 0000000..e4e26bd --- /dev/null +++ b/third-party/nwaku/examples/filter_subscriber.nim @@ -0,0 +1,117 @@ +import + std/[tables, sequtils], + stew/byteutils, + chronicles, + chronos, + confutils, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr + +import + waku/[ + common/logging, + node/peer_manager, + waku_core, + waku_node, + waku_enr, + discovery/waku_discv5, + factory/builder, + waku_relay, + waku_filter_v2/client, + ] + +# careful if running pub and sub in the same machine +const wakuPort = 50000 + +const clusterId = 1 +const shardId = @[0'u16] + +const + FilterPeer = + 
"/ip4/64.225.80.192/tcp/30303/p2p/16Uiu2HAmNaeL4p3WEYzC9mgXBmBWSgWjPHRvatZTXnp8Jgv3iKsb" + FilterPubsubTopic = PubsubTopic("/waku/2/rs/1/0") + FilterContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto") + +proc messagePushHandler( + pubsubTopic: PubsubTopic, message: WakuMessage +) {.async, gcsafe.} = + let payloadStr = string.fromBytes(message.payload) + notice "message received", + payload = payloadStr, + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + timestamp = message.timestamp + +proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = + # use notice to filter all waku messaging + setupLog(logging.LogLevel.NOTICE, logging.LogFormat.TEXT) + + notice "starting subscriber", wakuPort = wakuPort + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[] + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init(relay = true) + + let relayShards = RelayShards.init(clusterId, shardId).valueOr: + error "Relay shards initialization failed", error = error + quit(QuitFailure) + + var enrBuilder = EnrBuilder.init(nodeKey) + enrBuilder.withWakuRelaySharding(relayShards).expect( + "Building ENR with relay sharding failed" + ) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() + + node.mountMetadata(clusterId, shardId).expect( + "failed to mount waku metadata protocol" + ) + await node.mountFilterClient() + + await node.start() + + node.peerManager.start() + + node.wakuFilterClient.registerPushHandler(messagePushHandler) + + let filterPeer = parsePeerInfo(FilterPeer).get() + + while true: + notice "maintaining subscription" + # First use filter-ping to check if we have an active 
subscription + let pingRes = await node.wakuFilterClient.ping(filterPeer) + if pingRes.isErr(): + # No subscription found. Let's subscribe. + notice "no subscription found. Sending subscribe request" + + let subscribeRes = await node.wakuFilterClient.subscribe( + filterPeer, FilterPubsubTopic, @[FilterContentTopic] + ) + + if subscribeRes.isErr(): + notice "subscribe request failed. Quitting.", err = subscribeRes.error + break + else: + notice "subscribe request successful." + else: + notice "subscription found." + + await sleepAsync(60.seconds) # Subscription maintenance interval + +when isMainModule: + let rng = crypto.newRng() + asyncSpawn setupAndSubscribe(rng) + runForever() diff --git a/third-party/nwaku/examples/golang/README.md b/third-party/nwaku/examples/golang/README.md new file mode 100644 index 0000000..39d4882 --- /dev/null +++ b/third-party/nwaku/examples/golang/README.md @@ -0,0 +1,44 @@ + +## Pre-requisite +libwaku.so is needed to be compiled and present in build folder. 
To create it: + +- Run only the first time and after changing the current commit +```code +make update +``` +- Run the next every time you want to compile libwaku +```code +make POSTGRES=1 libwaku -j4 +``` + +Also needed: + +- Install libpq (needed by Postgres client) + - On Linux: +```code +sudo apt install libpq5 +``` + - On MacOS (not tested) +```code +brew install libpq +``` + +## Compilation + +From the nwaku root folder: + +```code +go build -o waku-go examples/golang/waku.go +``` + +## Run +From the nwaku root folder: + + +```code +export LD_LIBRARY_PATH=build +``` + +```code +./waku-go +``` diff --git a/third-party/nwaku/examples/golang/waku.go b/third-party/nwaku/examples/golang/waku.go new file mode 100644 index 0000000..846362d --- /dev/null +++ b/third-party/nwaku/examples/golang/waku.go @@ -0,0 +1,652 @@ +package main + +/* + #cgo LDFLAGS: -L../../build/ -lwaku + #cgo LDFLAGS: -L../../ -Wl,-rpath,../../ + + #include "../../library/libwaku.h" + #include + #include + + extern void globalEventCallback(int ret, char* msg, size_t len, void* userData); + + typedef struct { + int ret; + char* msg; + size_t len; + } Resp; + + static void* allocResp() { + return calloc(1, sizeof(Resp)); + } + + static void freeResp(void* resp) { + if (resp != NULL) { + free(resp); + } + } + + static char* getMyCharPtr(void* resp) { + if (resp == NULL) { + return NULL; + } + Resp* m = (Resp*) resp; + return m->msg; + } + + static size_t getMyCharLen(void* resp) { + if (resp == NULL) { + return 0; + } + Resp* m = (Resp*) resp; + return m->len; + } + + static int getRet(void* resp) { + if (resp == NULL) { + return 0; + } + Resp* m = (Resp*) resp; + return m->ret; + } + + // resp must be set != NULL in case interest on retrieving data from the callback + static void callback(int ret, char* msg, size_t len, void* resp) { + if (resp != NULL) { + Resp* m = (Resp*) resp; + m->ret = ret; + m->msg = msg; + m->len = len; + } + } + + #define WAKU_CALL(call) \ + do { \ + int ret = call; \ + 
if (ret != 0) { \ + printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \ + exit(1); \ + } \ + } while (0) + + static void* cGoWakuNew(const char* configJson, void* resp) { + // We pass NULL because we are not interested in retrieving data from this callback + void* ret = waku_new(configJson, (WakuCallBack) callback, resp); + return ret; + } + + static void cGoWakuStart(void* wakuCtx, void* resp) { + WAKU_CALL(waku_start(wakuCtx, (WakuCallBack) callback, resp)); + } + + static void cGoWakuStop(void* wakuCtx, void* resp) { + WAKU_CALL(waku_stop(wakuCtx, (WakuCallBack) callback, resp)); + } + + static void cGoWakuDestroy(void* wakuCtx, void* resp) { + WAKU_CALL(waku_destroy(wakuCtx, (WakuCallBack) callback, resp)); + } + + static void cGoWakuStartDiscV5(void* wakuCtx, void* resp) { + WAKU_CALL(waku_start_discv5(wakuCtx, (WakuCallBack) callback, resp)); + } + + static void cGoWakuStopDiscV5(void* wakuCtx, void* resp) { + WAKU_CALL(waku_stop_discv5(wakuCtx, (WakuCallBack) callback, resp)); + } + + static void cGoWakuVersion(void* wakuCtx, void* resp) { + WAKU_CALL(waku_version(wakuCtx, (WakuCallBack) callback, resp)); + } + + static void cGoWakuSetEventCallback(void* wakuCtx) { + // The 'globalEventCallback' Go function is shared amongst all possible Waku instances. + + // Given that the 'globalEventCallback' is shared, we pass again the + // wakuCtx instance but in this case is needed to pick up the correct method + // that will handle the event. + + // In other words, for every call the libwaku makes to globalEventCallback, + // the 'userData' parameter will bring the context of the node that registered + // that globalEventCallback. + + // This technique is needed because cgo only allows to export Go functions and not methods. 
+ + waku_set_event_callback(wakuCtx, (WakuCallBack) globalEventCallback, wakuCtx); + } + + static void cGoWakuContentTopic(void* wakuCtx, + char* appName, + int appVersion, + char* contentTopicName, + char* encoding, + void* resp) { + + WAKU_CALL( waku_content_topic(wakuCtx, + appName, + appVersion, + contentTopicName, + encoding, + (WakuCallBack) callback, + resp) ); + } + + static void cGoWakuPubsubTopic(void* wakuCtx, char* topicName, void* resp) { + WAKU_CALL( waku_pubsub_topic(wakuCtx, topicName, (WakuCallBack) callback, resp) ); + } + + static void cGoWakuDefaultPubsubTopic(void* wakuCtx, void* resp) { + WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (WakuCallBack) callback, resp)); + } + + static void cGoWakuRelayPublish(void* wakuCtx, + const char* pubSubTopic, + const char* jsonWakuMessage, + int timeoutMs, + void* resp) { + + WAKU_CALL (waku_relay_publish(wakuCtx, + pubSubTopic, + jsonWakuMessage, + timeoutMs, + (WakuCallBack) callback, + resp)); + } + + static void cGoWakuRelaySubscribe(void* wakuCtx, char* pubSubTopic, void* resp) { + WAKU_CALL ( waku_relay_subscribe(wakuCtx, + pubSubTopic, + (WakuCallBack) callback, + resp) ); + } + + static void cGoWakuRelayUnsubscribe(void* wakuCtx, char* pubSubTopic, void* resp) { + + WAKU_CALL ( waku_relay_unsubscribe(wakuCtx, + pubSubTopic, + (WakuCallBack) callback, + resp) ); + } + + static void cGoWakuConnect(void* wakuCtx, char* peerMultiAddr, int timeoutMs, void* resp) { + WAKU_CALL( waku_connect(wakuCtx, + peerMultiAddr, + timeoutMs, + (WakuCallBack) callback, + resp) ); + } + + static void cGoWakuDialPeerById(void* wakuCtx, + char* peerId, + char* protocol, + int timeoutMs, + void* resp) { + + WAKU_CALL( waku_dial_peer_by_id(wakuCtx, + peerId, + protocol, + timeoutMs, + (WakuCallBack) callback, + resp) ); + } + + static void cGoWakuDisconnectPeerById(void* wakuCtx, char* peerId, void* resp) { + WAKU_CALL( waku_disconnect_peer_by_id(wakuCtx, + peerId, + (WakuCallBack) callback, + resp) ); + } + + static void 
cGoWakuListenAddresses(void* wakuCtx, void* resp) { + WAKU_CALL (waku_listen_addresses(wakuCtx, (WakuCallBack) callback, resp) ); + } + + static void cGoWakuGetMyENR(void* ctx, void* resp) { + WAKU_CALL (waku_get_my_enr(ctx, (WakuCallBack) callback, resp) ); + } + + static void cGoWakuGetMyPeerId(void* ctx, void* resp) { + WAKU_CALL (waku_get_my_peerid(ctx, (WakuCallBack) callback, resp) ); + } + + static void cGoWakuListPeersInMesh(void* ctx, char* pubSubTopic, void* resp) { + WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, pubSubTopic, (WakuCallBack) callback, resp) ); + } + + static void cGoWakuGetNumConnectedPeers(void* ctx, char* pubSubTopic, void* resp) { + WAKU_CALL (waku_relay_get_num_connected_peers(ctx, pubSubTopic, (WakuCallBack) callback, resp) ); + } + + static void cGoWakuGetPeerIdsFromPeerStore(void* wakuCtx, void* resp) { + WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (WakuCallBack) callback, resp) ); + } + + static void cGoWakuLightpushPublish(void* wakuCtx, + const char* pubSubTopic, + const char* jsonWakuMessage, + void* resp) { + + WAKU_CALL (waku_lightpush_publish(wakuCtx, + pubSubTopic, + jsonWakuMessage, + (WakuCallBack) callback, + resp)); + } + + static void cGoWakuStoreQuery(void* wakuCtx, + const char* jsonQuery, + const char* peerAddr, + int timeoutMs, + void* resp) { + + WAKU_CALL (waku_store_query(wakuCtx, + jsonQuery, + peerAddr, + timeoutMs, + (WakuCallBack) callback, + resp)); + } + + static void cGoWakuPeerExchangeQuery(void* wakuCtx, + uint64_t numPeers, + void* resp) { + + WAKU_CALL (waku_peer_exchange_request(wakuCtx, + numPeers, + (WakuCallBack) callback, + resp)); + } + + static void cGoWakuGetPeerIdsByProtocol(void* wakuCtx, + const char* protocol, + void* resp) { + + WAKU_CALL (waku_get_peerids_by_protocol(wakuCtx, + protocol, + (WakuCallBack) callback, + resp)); + } + +*/ +import "C" + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "os/signal" + "syscall" + "unsafe" +) + +type WakuMessageHash = string +type 
WakuPubsubTopic = string +type WakuContentTopic = string + +type WakuConfig struct { + Host string `json:"host,omitempty"` + Port int `json:"port,omitempty"` + NodeKey string `json:"key,omitempty"` + EnableRelay bool `json:"relay"` + LogLevel string `json:"logLevel"` +} + +type WakuNode struct { + ctx unsafe.Pointer +} + +func WakuNew(config WakuConfig) (*WakuNode, error) { + jsonConfig, err := json.Marshal(config) + if err != nil { + return nil, err + } + + var cJsonConfig = C.CString(string(jsonConfig)) + var resp = C.allocResp() + + defer C.free(unsafe.Pointer(cJsonConfig)) + defer C.freeResp(resp) + + ctx := C.cGoWakuNew(cJsonConfig, resp) + if C.getRet(resp) == C.RET_OK { + return &WakuNode{ctx: ctx}, nil + } + + errMsg := "error WakuNew: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return nil, errors.New(errMsg) +} + +func (self *WakuNode) WakuStart() error { + var resp = C.allocResp() + defer C.freeResp(resp) + C.cGoWakuStart(self.ctx, resp) + + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuStart: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (self *WakuNode) WakuStop() error { + var resp = C.allocResp() + defer C.freeResp(resp) + C.cGoWakuStop(self.ctx, resp) + + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuStop: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (self *WakuNode) WakuDestroy() error { + var resp = C.allocResp() + defer C.freeResp(resp) + C.cGoWakuDestroy(self.ctx, resp) + + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuDestroy: " + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (self *WakuNode) WakuVersion() (string, error) { + var resp = C.allocResp() + defer C.freeResp(resp) + + C.cGoWakuVersion(self.ctx, resp) + + if C.getRet(resp) == C.RET_OK { + var version = 
C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return version, nil + } + + errMsg := "error WakuVersion: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return "", errors.New(errMsg) +} + +//export globalEventCallback +func globalEventCallback(callerRet C.int, msg *C.char, len C.size_t, userData unsafe.Pointer) { + // This is shared among all Golang instances + + self := WakuNode{ctx: userData} + self.MyEventCallback(callerRet, msg, len) +} + +func (self *WakuNode) MyEventCallback(callerRet C.int, msg *C.char, len C.size_t) { + fmt.Println("Event received:", C.GoStringN(msg, C.int(len))) +} + +func (self *WakuNode) WakuSetEventCallback() { + // Notice that the events for self node are handled by the 'MyEventCallback' method + C.cGoWakuSetEventCallback(self.ctx) +} + +func (self *WakuNode) FormatContentTopic( + appName string, + appVersion int, + contentTopicName string, + encoding string) (WakuContentTopic, error) { + + var cAppName = C.CString(appName) + var cContentTopicName = C.CString(contentTopicName) + var cEncoding = C.CString(encoding) + var resp = C.allocResp() + + defer C.free(unsafe.Pointer(cAppName)) + defer C.free(unsafe.Pointer(cContentTopicName)) + defer C.free(unsafe.Pointer(cEncoding)) + defer C.freeResp(resp) + + C.cGoWakuContentTopic(self.ctx, + cAppName, + C.int(appVersion), + cContentTopicName, + cEncoding, + resp) + + if C.getRet(resp) == C.RET_OK { + var contentTopic = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return contentTopic, nil + } + + errMsg := "error FormatContentTopic: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + + return "", errors.New(errMsg) +} + +func (self *WakuNode) FormatPubsubTopic(topicName string) (WakuPubsubTopic, error) { + var cTopicName = C.CString(topicName) + var resp = C.allocResp() + + defer C.free(unsafe.Pointer(cTopicName)) + defer C.freeResp(resp) + + C.cGoWakuPubsubTopic(self.ctx, cTopicName, resp) + if C.getRet(resp) == 
C.RET_OK { + var pubsubTopic = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return pubsubTopic, nil + } + + errMsg := "error FormatPubsubTopic: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + + return "", errors.New(errMsg) +} + +func (self *WakuNode) WakuDefaultPubsubTopic() (WakuPubsubTopic, error) { + var resp = C.allocResp() + defer C.freeResp(resp) + C.cGoWakuDefaultPubsubTopic(self.ctx, resp) + if C.getRet(resp) == C.RET_OK { + var defaultPubsubTopic = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return defaultPubsubTopic, nil + } + + errMsg := "error WakuDefaultPubsubTopic: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + + return "", errors.New(errMsg) +} + +func (self *WakuNode) WakuRelayPublish( + pubsubTopic string, + message string, + timeoutMs int) (WakuMessageHash, error) { + + var cPubsubTopic = C.CString(pubsubTopic) + var msg = C.CString(message) + var resp = C.allocResp() + + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPubsubTopic)) + defer C.free(unsafe.Pointer(msg)) + + C.cGoWakuRelayPublish(self.ctx, cPubsubTopic, msg, C.int(timeoutMs), resp) + if C.getRet(resp) == C.RET_OK { + msgHash := C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return msgHash, nil + } + errMsg := "error WakuRelayPublish: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return "", errors.New(errMsg) +} + +func (self *WakuNode) WakuRelaySubscribe(pubsubTopic string) error { + var resp = C.allocResp() + var cPubsubTopic = C.CString(pubsubTopic) + + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPubsubTopic)) + C.cGoWakuRelaySubscribe(self.ctx, cPubsubTopic, resp) + + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuRelaySubscribe: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (self *WakuNode) WakuRelayUnsubscribe(pubsubTopic string) error { + var resp 
= C.allocResp() + var cPubsubTopic = C.CString(pubsubTopic) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPubsubTopic)) + C.cGoWakuRelayUnsubscribe(self.ctx, cPubsubTopic, resp) + + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuRelayUnsubscribe: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (self *WakuNode) WakuConnect(peerMultiAddr string, timeoutMs int) error { + var resp = C.allocResp() + var cPeerMultiAddr = C.CString(peerMultiAddr) + defer C.freeResp(resp) + defer C.free(unsafe.Pointer(cPeerMultiAddr)) + + C.cGoWakuConnect(self.ctx, cPeerMultiAddr, C.int(timeoutMs), resp) + + if C.getRet(resp) == C.RET_OK { + return nil + } + errMsg := "error WakuConnect: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return errors.New(errMsg) +} + +func (self *WakuNode) WakuListenAddresses() (string, error) { + var resp = C.allocResp() + defer C.freeResp(resp) + C.cGoWakuListenAddresses(self.ctx, resp) + + if C.getRet(resp) == C.RET_OK { + var listenAddresses = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return listenAddresses, nil + } + errMsg := "error WakuListenAddresses: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return "", errors.New(errMsg) +} + +func (self *WakuNode) WakuGetMyENR() (string, error) { + var resp = C.allocResp() + defer C.freeResp(resp) + C.cGoWakuGetMyENR(self.ctx, resp) + + if C.getRet(resp) == C.RET_OK { + var myENR = C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return myENR, nil + } + errMsg := "error WakuGetMyENR: " + + C.GoStringN(C.getMyCharPtr(resp), C.int(C.getMyCharLen(resp))) + return "", errors.New(errMsg) +} + +func main() { + config := WakuConfig{ + Host: "0.0.0.0", + Port: 30304, + NodeKey: "11d0dcea28e86f81937a3bd1163473c7fbc0a0db54fd72914849bc47bdf78710", + EnableRelay: true, + LogLevel: "DEBUG", + } + + node, err := WakuNew(config) + if err != nil { 
+ fmt.Println("Error happened:", err.Error()) + return + } + + node.WakuSetEventCallback() + + defaultPubsubTopic, err := node.WakuDefaultPubsubTopic() + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + err = node.WakuRelaySubscribe(defaultPubsubTopic) + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + err = node.WakuConnect( + // tries to connect to a localhost node with key: 0d714a1fada214dead6dc9c7274585eca0ff292451866e7d6d677dc818e8ccd2 + "/ip4/0.0.0.0/tcp/60000/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN", + 10000) + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + err = node.WakuStart() + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + version, err := node.WakuVersion() + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + formattedContentTopic, err := node.FormatContentTopic("appName", 1, "cTopicName", "enc") + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + formattedPubsubTopic, err := node.FormatPubsubTopic("my-ctopic") + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + listenAddresses, err := node.WakuListenAddresses() + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + myENR, err := node.WakuGetMyENR() + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } + + fmt.Println("Version:", version) + fmt.Println("Custom content topic:", formattedContentTopic) + fmt.Println("Custom pubsub topic:", formattedPubsubTopic) + fmt.Println("Default pubsub topic:", defaultPubsubTopic) + fmt.Println("Listen addresses:", listenAddresses) + fmt.Println("My ENR:", myENR) + + // Wait for a SIGINT or SIGTERM signal + ch := make(chan os.Signal, 1) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + <-ch + + err = node.WakuStop() + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } 
+ + err = node.WakuDestroy() + if err != nil { + fmt.Println("Error happened:", err.Error()) + return + } +} diff --git a/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix.nim b/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix.nim new file mode 100644 index 0000000..26c0a06 --- /dev/null +++ b/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix.nim @@ -0,0 +1,196 @@ +import + std/[tables, times, sequtils, strutils], + stew/byteutils, + chronicles, + results, + chronos, + confutils, + libp2p/crypto/crypto, + libp2p/crypto/curve25519, + libp2p/multiaddress, + eth/keys, + eth/p2p/discoveryv5/enr, + metrics, + metrics/chronos_httpserver + +import mix, mix/mix_protocol, mix/curve25519 + +import + waku/[ + common/logging, + node/peer_manager, + waku_core, + waku_core/codecs, + waku_node, + waku_enr, + discovery/waku_discv5, + factory/builder, + waku_lightpush/client, + ], + ./lightpush_publisher_mix_config, + ./lightpush_publisher_mix_metrics + +const clusterId = 66 +const shardId = @[0'u16] + +const + LightpushPubsubTopic = PubsubTopic("/waku/2/rs/66/0") + LightpushContentTopic = ContentTopic("/examples/1/light-pubsub-mix-example/proto") + +proc splitPeerIdAndAddr(maddr: string): (string, string) = + let parts = maddr.split("/p2p/") + if parts.len != 2: + error "Invalid multiaddress format", parts = parts + return + + let + address = parts[0] + peerId = parts[1] + return (address, peerId) + +proc setupAndPublish(rng: ref HmacDrbgContext, conf: LightPushMixConf) {.async.} = + # use notice to filter all waku messaging + setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT) + + notice "starting publisher", wakuPort = conf.port + + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get() + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init(relay = true) + + let relayShards = RelayShards.init(clusterId, shardId).valueOr: + error "Relay shards initialization failed", error = error + quit(QuitFailure) + + 
var enrBuilder = EnrBuilder.init(nodeKey) + enrBuilder.withWakuRelaySharding(relayShards).expect( + "Building ENR with relay sharding failed" + ) + + let record = enrBuilder.build().valueOr: + error "failed to create enr record", error = error + quit(QuitFailure) + + setLogLevel(logging.LogLevel.TRACE) + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(conf.port)).tryGet() + + let node = builder.build().tryGet() + + node.mountMetadata(clusterId, shardId).expect( + "failed to mount waku metadata protocol" + ) + node.mountLightPushClient() + try: + await node.mountPeerExchange(some(uint16(clusterId))) + except CatchableError: + error "failed to mount waku peer-exchange protocol", + error = getCurrentExceptionMsg() + return + + let (destPeerAddr, destPeerId) = splitPeerIdAndAddr(conf.destPeerAddr) + let (pxPeerAddr, pxPeerId) = splitPeerIdAndAddr(conf.pxAddr) + info "dest peer address", destPeerAddr = destPeerAddr, destPeerId = destPeerId + info "peer exchange address", pxPeerAddr = pxPeerAddr, pxPeerId = pxPeerId + let pxPeerInfo = + RemotePeerInfo.init(destPeerId, @[MultiAddress.init(destPeerAddr).get()]) + node.peerManager.addServicePeer(pxPeerInfo, WakuPeerExchangeCodec) + + let pxPeerInfo1 = + RemotePeerInfo.init(pxPeerId, @[MultiAddress.init(pxPeerAddr).get()]) + node.peerManager.addServicePeer(pxPeerInfo1, WakuPeerExchangeCodec) + + if not conf.mixDisabled: + let (mixPrivKey, mixPubKey) = generateKeyPair().valueOr: + error "failed to generate mix key pair", error = error + return + (await node.mountMix(clusterId, mixPrivKey)).isOkOr: + error "failed to mount waku mix protocol: ", error = $error + return + + let dPeerId = PeerId.init(destPeerId).valueOr: + error "Failed to initialize PeerId", error = error + return + var conn: Connection + if not conf.mixDisabled: + conn = node.wakuMix.toConnection( + MixDestination.init(dPeerId, pxPeerInfo.addrs[0]), # destination 
lightpush peer + WakuLightPushCodec, # protocol codec which will be used over the mix connection + Opt.some(MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1)))), + # mix parameters indicating we expect a single reply + ).valueOr: + error "failed to create mix connection", error = error + return + + await node.start() + node.peerManager.start() + node.startPeerExchangeLoop() + try: + startMetricsHttpServer("0.0.0.0", Port(8008)) + except Exception: + error "failed to start metrics server: ", error = getCurrentExceptionMsg() + (await node.fetchPeerExchangePeers()).isOkOr: + warn "Cannot fetch peers from peer exchange", cause = error + + if not conf.mixDisabled: + while node.getMixNodePoolSize() < conf.minMixPoolSize: + info "waiting for mix nodes to be discovered", + currentpoolSize = node.getMixNodePoolSize() + await sleepAsync(1000) + notice "publisher service started with mix node pool size ", + currentpoolSize = node.getMixNodePoolSize() + + var i = 0 + while i < conf.numMsgs: + if conf.mixDisabled: + let connOpt = await node.peerManager.dialPeer(dPeerId, WakuLightPushCodec) + if connOpt.isNone(): + error "failed to dial peer with WakuLightPushCodec", target_peer_id = dPeerId + return + conn = connOpt.get() + i = i + 1 + let text = + """Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam venenatis magna ut tortor faucibus, in vestibulum nibh commodo. Aenean eget vestibulum augue. Nullam suscipit urna non nunc efficitur, at iaculis nisl consequat. Mauris quis ultrices elit. Suspendisse lobortis odio vitae laoreet facilisis. Cras ornare sem felis, at vulputate magna aliquam ac. Duis quis est ultricies, euismod nulla ac, interdum dui. Maecenas sit amet est vitae enim commodo gravida. Proin vitae elit nulla. Donec tempor dolor lectus, in faucibus velit elementum quis. Donec non mauris eu nibh faucibus cursus ut egestas dolor. Aliquam venenatis ligula id velit pulvinar malesuada. 
Vestibulum scelerisque, justo non porta gravida, nulla justo tempor purus, at sollicitudin erat erat vel libero. + Fusce nec eros eu metus tristique aliquet. Sed ut magna sagittis, vulputate diam sit amet, aliquam magna. Aenean sollicitudin velit lacus, eu ultrices magna semper at. Integer vitae felis ligula. In a eros nec risus condimentum tincidunt fermentum sit amet ex. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Nullam vitae justo maximus, fringilla tellus nec, rutrum purus. Etiam efficitur nisi dapibus euismod vestibulum. Phasellus at felis elementum, tristique nulla ac, consectetur neque. + Maecenas hendrerit nibh eget velit rutrum, in ornare mauris molestie. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia curae; Praesent dignissim efficitur eros, sit amet rutrum justo mattis a. Fusce mollis neque at erat placerat bibendum. Ut fringilla fringilla orci, ut fringilla metus fermentum vel. In hac habitasse platea dictumst. Donec hendrerit porttitor odio. Suspendisse ornare sollicitudin mauris, sodales pulvinar velit finibus vel. Fusce id pulvinar neque. Suspendisse eget tincidunt sapien, ac accumsan turpis. + Curabitur cursus tincidunt leo at aliquet. Nunc dapibus quam id venenatis varius. Aenean eget augue vel velit dapibus aliquam. Nulla facilisi. Curabitur cursus, turpis vel congue volutpat, tellus eros cursus lacus, eu fringilla turpis orci non ipsum. In hac habitasse platea dictumst. Nulla aliquam nisl a nunc placerat, eget dignissim felis pulvinar. Fusce sed porta mauris. Donec sodales arcu in nisl sodales, quis posuere massa ultricies. Nam feugiat massa eget felis ultricies finibus. Nunc magna nulla, interdum a elit vel, egestas efficitur urna. Ut posuere tincidunt odio in maximus. Sed at dignissim est. + Morbi accumsan elementum ligula ut fringilla. Praesent in ex metus. 
Phasellus urna est, tempus sit amet elementum vitae, sollicitudin vel ipsum. Fusce hendrerit eleifend dignissim. Maecenas tempor dapibus dui quis laoreet. Cras tincidunt sed ipsum sed pellentesque. Proin ut tellus nec ipsum varius interdum. Curabitur id velit ligula. Etiam sapien nulla, cursus sodales orci eu, porta lobortis nunc. Nunc at dapibus velit. Nulla et nunc vehicula, condimentum erat quis, elementum dolor. Quisque eu metus fermentum, vestibulum tellus at, sollicitudin odio. Ut vel neque justo. + Praesent porta porta velit, vel porttitor sem. Donec sagittis at nulla venenatis iaculis. Nullam vel eleifend felis. Nullam a pellentesque lectus. Aliquam tincidunt semper dui sed bibendum. Donec hendrerit, urna et cursus dictum, neque neque convallis magna, id condimentum sem urna quis massa. Fusce non quam vulputate, fermentum mauris at, malesuada ipsum. Mauris id pellentesque libero. Donec vel erat ullamcorper, dapibus quam id, imperdiet urna. Praesent sed ligula ut est pellentesque pharetra quis et diam. Ut placerat lorem eget mi fermentum aliquet. + This is message #""" & + $i & """ sent from a publisher using mix. 
End of transmission.""" + let message = WakuMessage( + payload: toBytes(text), # content of the message + contentTopic: LightpushContentTopic, # content topic to publish to + ephemeral: true, # tell store nodes to not store it + timestamp: getNowInNanosecondTime(), + ) # current timestamp + + let res = await node.wakuLightpushClient.publishWithConn( + LightpushPubsubTopic, message, conn, dPeerId + ) + + if res.isOk(): + lp_mix_success.inc() + notice "published message", + text = text, + timestamp = message.timestamp, + psTopic = LightpushPubsubTopic, + contentTopic = LightpushContentTopic + else: + error "failed to publish message", error = $res.error + lp_mix_failed.inc(labelValues = ["publish_error"]) + + if conf.mixDisabled: + await conn.close() + await sleepAsync(conf.msgIntervalMilliseconds) + info "###########Sent all messages via mix" + quit(0) + +when isMainModule: + let conf = LightPushMixConf.load() + let rng = crypto.newRng() + asyncSpawn setupAndPublish(rng, conf) + runForever() diff --git a/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix_config.nim b/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix_config.nim new file mode 100644 index 0000000..7a135e3 --- /dev/null +++ b/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix_config.nim @@ -0,0 +1,28 @@ +import confutils/defs + +type LightPushMixConf* = object + destPeerAddr* {.desc: "Destination peer address with peerId.", name: "dp-addr".}: + string + + pxAddr* {.desc: "Peer exchange address with peerId.", name: "px-addr".}: string + + port* {.desc: "Port to listen on.", defaultValue: 50000, name: "port".}: int + + numMsgs* {.desc: "Number of messages to send.", defaultValue: 1, name: "num-msgs".}: + int + + msgIntervalMilliseconds* {. + desc: "Interval between messages in milliseconds.", + defaultValue: 1000, + name: "msg-interval" + .}: int + + minMixPoolSize* {. 
+ desc: "Number of mix nodes to be discovered before sending lightpush messages.", + defaultValue: 3, + name: "min-mix-pool-size" + .}: int + + mixDisabled* {. + desc: "Do not use mix for publishing.", defaultValue: false, name: "without-mix" + .}: bool diff --git a/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix_metrics.nim b/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix_metrics.nim new file mode 100644 index 0000000..cd06b3e --- /dev/null +++ b/third-party/nwaku/examples/lightpush_mix/lightpush_publisher_mix_metrics.nim @@ -0,0 +1,8 @@ +{.push raises: [].} + +import metrics + +declarePublicCounter lp_mix_success, "number of lightpush messages sent via mix" + +declarePublicCounter lp_mix_failed, + "number of lightpush messages failed via mix", labels = ["error"] diff --git a/third-party/nwaku/examples/lightpush_publisher.nim b/third-party/nwaku/examples/lightpush_publisher.nim new file mode 100644 index 0000000..e9fa217 --- /dev/null +++ b/third-party/nwaku/examples/lightpush_publisher.nim @@ -0,0 +1,109 @@ +import + std/[tables, times, sequtils], + stew/byteutils, + chronicles, + results, + chronos, + confutils, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr + +import + waku/[ + common/logging, + node/peer_manager, + waku_core, + waku_node, + waku_enr, + discovery/waku_discv5, + factory/builder, + ] + +proc now*(): Timestamp = + getNanosecondTime(getTime().toUnixFloat()) + +# careful if running pub and sub in the same machine +const wakuPort = 60000 + +const clusterId = 1 +const shardId = @[0'u16] + +const + LightpushPeer = + "/ip4/64.225.80.192/tcp/30303/p2p/16Uiu2HAmNaeL4p3WEYzC9mgXBmBWSgWjPHRvatZTXnp8Jgv3iKsb" + LightpushPubsubTopic = PubsubTopic("/waku/2/rs/1/0") + LightpushContentTopic = ContentTopic("/examples/1/light-pubsub-example/proto") + +proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = + # use notice to filter all waku messaging + setupLog(logging.LogLevel.NOTICE, logging.LogFormat.TEXT) + + 
notice "starting publisher", wakuPort = wakuPort + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get() + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init(relay = true) + + let relayShards = RelayShards.init(clusterId, shardId).valueOr: + error "Relay shards initialization failed", error = error + quit(QuitFailure) + + var enrBuilder = EnrBuilder.init(nodeKey) + enrBuilder.withWakuRelaySharding(relayShards).expect( + "Building ENR with relay sharding failed" + ) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() + + node.mountMetadata(clusterId, shardId).expect( + "failed to mount waku metadata protocol" + ) + node.mountLegacyLightPushClient() + + await node.start() + node.peerManager.start() + + notice "publisher service started" + while true: + let text = "hi there i'm a publisher" + let message = WakuMessage( + payload: toBytes(text), # content of the message + contentTopic: LightpushContentTopic, # content topic to publish to + ephemeral: true, # tell store nodes to not store it + timestamp: now(), + ) # current timestamp + + let lightpushPeer = parsePeerInfo(LightpushPeer).get() + + let res = await node.legacyLightpushPublish( + some(LightpushPubsubTopic), message, lightpushPeer + ) + + if res.isOk: + notice "published message", + text = text, + timestamp = message.timestamp, + psTopic = LightpushPubsubTopic, + contentTopic = LightpushContentTopic + else: + error "failed to publish message", error = res.error + + await sleepAsync(5000) + +when isMainModule: + let rng = crypto.newRng() + asyncSpawn setupAndPublish(rng) + runForever() diff --git 
a/third-party/nwaku/examples/mobile/.bundle/config b/third-party/nwaku/examples/mobile/.bundle/config new file mode 100644 index 0000000..848943b --- /dev/null +++ b/third-party/nwaku/examples/mobile/.bundle/config @@ -0,0 +1,2 @@ +BUNDLE_PATH: "vendor/bundle" +BUNDLE_FORCE_RUBY_PLATFORM: 1 diff --git a/third-party/nwaku/examples/mobile/.eslintrc.js b/third-party/nwaku/examples/mobile/.eslintrc.js new file mode 100644 index 0000000..187894b --- /dev/null +++ b/third-party/nwaku/examples/mobile/.eslintrc.js @@ -0,0 +1,4 @@ +module.exports = { + root: true, + extends: '@react-native', +}; diff --git a/third-party/nwaku/examples/mobile/.gitignore b/third-party/nwaku/examples/mobile/.gitignore new file mode 100644 index 0000000..d5ae456 --- /dev/null +++ b/third-party/nwaku/examples/mobile/.gitignore @@ -0,0 +1,74 @@ +# OSX +# +.DS_Store + +# Xcode +# +build/ +*.pbxuser +!default.pbxuser +*.mode1v3 +!default.mode1v3 +*.mode2v3 +!default.mode2v3 +*.perspectivev3 +!default.perspectivev3 +xcuserdata +*.xccheckout +*.moved-aside +DerivedData +*.hmap +*.ipa +*.xcuserstate +**/.xcode.env.local + +# Android/IntelliJ +# +build/ +.idea +.gradle +local.properties +*.iml +*.hprof +.cxx/ +*.keystore +!debug.keystore + +# node.js +# +node_modules/ +npm-debug.log +yarn-error.log + +# fastlane +# +# It is recommended to not store the screenshots in the git repo. Instead, use fastlane to re-generate the +# screenshots whenever they are needed. 
+# For more information about the recommended setup visit: +# https://docs.fastlane.tools/best-practices/source-control/ + +**/fastlane/report.xml +**/fastlane/Preview.html +**/fastlane/screenshots +**/fastlane/test_output + +# Bundle artifact +*.jsbundle + +# Ruby / CocoaPods +**/Pods/ +/vendor/bundle/ + +# Temporary files created by Metro to check the health of the file watcher +.metro-health-check* + +# testing +/coverage + +# Yarn +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/sdks +!.yarn/versions diff --git a/third-party/nwaku/examples/mobile/.prettierrc.js b/third-party/nwaku/examples/mobile/.prettierrc.js new file mode 100644 index 0000000..2b54074 --- /dev/null +++ b/third-party/nwaku/examples/mobile/.prettierrc.js @@ -0,0 +1,7 @@ +module.exports = { + arrowParens: 'avoid', + bracketSameLine: true, + bracketSpacing: false, + singleQuote: true, + trailingComma: 'all', +}; diff --git a/third-party/nwaku/examples/mobile/.watchmanconfig b/third-party/nwaku/examples/mobile/.watchmanconfig new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/third-party/nwaku/examples/mobile/.watchmanconfig @@ -0,0 +1 @@ +{} diff --git a/third-party/nwaku/examples/mobile/.yarnrc b/third-party/nwaku/examples/mobile/.yarnrc new file mode 100644 index 0000000..85b738b --- /dev/null +++ b/third-party/nwaku/examples/mobile/.yarnrc @@ -0,0 +1,5 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +yarn-path ".yarn/releases/yarn-1.22.22.cjs" diff --git a/third-party/nwaku/examples/mobile/App.tsx b/third-party/nwaku/examples/mobile/App.tsx new file mode 100644 index 0000000..bf01de3 --- /dev/null +++ b/third-party/nwaku/examples/mobile/App.tsx @@ -0,0 +1,327 @@ +/* eslint-disable no-alert */ +/** + * Sample React Native App + * https://github.com/facebook/react-native + * + * @format + */ + +import React from 'react'; +import type {PropsWithChildren} from 'react'; +import { + SafeAreaView, + ScrollView, + StatusBar, + StyleSheet, + Text, + useColorScheme, + View, +} from 'react-native'; + +import {Colors} from 'react-native/Libraries/NewAppScreen'; +import { + NativeEventEmitter, + NativeModules, + EmitterSubscription, + Button, +} from 'react-native'; + +type SectionProps = PropsWithChildren<{ + title: string; +}>; + +function Section({children, title}: SectionProps): React.JSX.Element { + const isDarkMode = useColorScheme() === 'dark'; + return ( + + + {title} + + + {children} + + + ); +} + +const WakuFactory = (() => { + let isSetup = false; + + const eventEmitter = new NativeEventEmitter(NativeModules.WakuModule); + class Waku { + wakuPtr: Number; + + constructor(wakuPtr: Number) { + this.wakuPtr = wakuPtr; + } + + async destroy(): Promise { + await NativeModules.WakuModule.destroy(this.wakuPtr); + } + + async start(): Promise { + return NativeModules.WakuModule.start(this.wakuPtr); + } + + async stop(): Promise { + return NativeModules.WakuModule.stop(this.wakuPtr); + } + + async version(): Promise { + return NativeModules.WakuModule.version(this.wakuPtr); + } + + async listenAddresses(): Promise> { + let addresses = await NativeModules.WakuModule.listenAddresses( + this.wakuPtr, + ); + return addresses; + } + + async connect(peerMultiaddr: String, timeoutMs: Number): Promise { + return NativeModules.WakuModule.connect( + this.wakuPtr, + peerMultiaddr, + timeoutMs, + ); + } + + async relaySubscribe(pubsubTopic: String): Promise { 
+ return NativeModules.WakuModule.relaySubscribe(this.wakuPtr, pubsubTopic); + } + + async relayUnsubscribe(pubsubTopic: String): Promise { + return NativeModules.WakuModule.relayUnsubscribe( + this.wakuPtr, + pubsubTopic, + ); + } + + // TODO: Use a type instead of `any` + async relayPublish( + pubsubTopic: string, + msg: any, + timeoutMs: Number, + ): Promise { + return NativeModules.WakuModule.relayPublish( + this.wakuPtr, + pubsubTopic, + msg, + timeoutMs, + ); + } + + onEvent(cb: (event: any) => void): EmitterSubscription { + return eventEmitter.addListener('wakuEvent', evt => { + if (evt.wakuPtr === this.wakuPtr) { + cb(JSON.parse(evt.event)); + } + }); + } + } + + async function createInstance(config: any) { + if (!isSetup) { + console.debug('initializing waku library'); + await NativeModules.WakuModule.setup(); + isSetup = true; + alert('waku instance created!'); + } + + let wakuPtr = await NativeModules.WakuModule.new(config); + return new Waku(wakuPtr); + } + + // Expose the factory method + return { + createInstance, + Waku, + }; +})(); + +function App(): React.JSX.Element { + const isDarkMode = useColorScheme() === 'dark'; + + const backgroundStyle = { + backgroundColor: isDarkMode ? 
Colors.darker : Colors.lighter, + }; + + var waku: Waku; + + const onClickNew = async () => { + const config = { + host: '0.0.0.0', + port: 42342, + key: '1122334455667788990011223344556677889900112233445566778899000022', + relay: true, + }; + waku = await WakuFactory.createInstance(config); + }; + + const onClickStart = async () => { + await waku.start(); + alert('start executed succesfully'); + }; + + const onClickVersion = async () => { + let version = await waku.version(); + alert(version); + }; + + const onClickListenAddresses = async () => { + let addresses = await waku.listenAddresses(); + alert(addresses[0]); + }; + + const onClickStop = async () => { + await waku.stop(); + alert('stopped!'); + }; + + const onClickDestroy = async () => { + await waku.destroy(); + alert('destroyed!'); + }; + + const onClickConnect = async () => { + let result = await waku.connect( + '/ip4/127.0.0.1/tcp/48117/p2p/16Uiu2HAmVrsyU3y3pQYuSEyaqrBgevQeshp7YZsL8rY3nWb2yWD5', + 0, + ); + alert( + 'connected? (TODO: bindings function do not return connection attempt status)', + ); + }; + + const onClickSubscribe = async () => { + await waku.relaySubscribe('test'); + alert('subscribed to test'); + }; + + const onClickUnsubscribe = async () => { + await waku.relayUnsubscribe('test'); + alert('unsubscribed from test'); + }; + + const onClickSetEventCallback = async () => { + const eventSubs = waku.onEvent((event: any) => { + console.log(event); + alert('received a message'); + }); + // TODO: eventSubs.remove() should be used to avoid a mem leak. + + alert("event callback set"); + }; + + const onClickPublish = async () => { + const pubsubTopic = 'test'; + const msg = { + payload: 'aGVsbG8', + contentTopic: 'test', + timestamp: 0, + version: 0, + }; + let hash = await waku.relayPublish(pubsubTopic, msg, 0); + alert('published - msgHash: ' + hash); + }; + + return ( + + + + +
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ); +} + +const styles = StyleSheet.create({ + sectionContainer: { + marginTop: 32, + paddingHorizontal: 24, + }, + sectionTitle: { + fontSize: 24, + fontWeight: '600', + }, + sectionDescription: { + marginTop: 8, + fontSize: 18, + fontWeight: '400', + }, + highlight: { + fontWeight: '700', + }, +}); + +export default App; diff --git a/third-party/nwaku/examples/mobile/Gemfile b/third-party/nwaku/examples/mobile/Gemfile new file mode 100644 index 0000000..8d72c37 --- /dev/null +++ b/third-party/nwaku/examples/mobile/Gemfile @@ -0,0 +1,9 @@ +source 'https://rubygems.org' + +# You may use http://rbenv.org/ or https://rvm.io/ to install and use this version +ruby ">= 2.6.10" + +# Cocoapods 1.15 introduced a bug which break the build. We will remove the upper +# bound in the template on Cocoapods with next React Native release. +gem 'cocoapods', '>= 1.13', '< 1.15' +gem 'activesupport', '>= 6.1.7.5', '< 7.1.0' diff --git a/third-party/nwaku/examples/mobile/README.md b/third-party/nwaku/examples/mobile/README.md new file mode 100644 index 0000000..12470c3 --- /dev/null +++ b/third-party/nwaku/examples/mobile/README.md @@ -0,0 +1,79 @@ +This is a new [**React Native**](https://reactnative.dev) project, bootstrapped using [`@react-native-community/cli`](https://github.com/react-native-community/cli). + +# Getting Started + +>**Note**: Make sure you have completed the [React Native - Environment Setup](https://reactnative.dev/docs/environment-setup) instructions till "Creating a new application" step, before proceeding. + +## Step 1: Start the Metro Server + +First, you will need to start **Metro**, the JavaScript _bundler_ that ships _with_ React Native. + +To start Metro, run the following command from the _root_ of your React Native project: + +```bash +# using npm +npm start + +# OR using Yarn +yarn start +``` + +## Step 2: Start your Application + +Let Metro Bundler run in its _own_ terminal. Open a _new_ terminal from the _root_ of your React Native project. 
Run the following command to start your _Android_ or _iOS_ app: + +### For Android + +```bash +# using npm +npm run android + +# OR using Yarn +yarn android +``` + +### For iOS + +```bash +# using npm +npm run ios + +# OR using Yarn +yarn ios +``` + +If everything is set up _correctly_, you should see your new app running in your _Android Emulator_ or _iOS Simulator_ shortly provided you have set up your emulator/simulator correctly. + +This is one way to run your app — you can also run it directly from within Android Studio and Xcode respectively. + +## Step 3: Modifying your App + +Now that you have successfully run the app, let's modify it. + +1. Open `App.tsx` in your text editor of choice and edit some lines. +2. For **Android**: Press the R key twice or select **"Reload"** from the **Developer Menu** (Ctrl + M (on Window and Linux) or Cmd ⌘ + M (on macOS)) to see your changes! + + For **iOS**: Hit Cmd ⌘ + R in your iOS Simulator to reload the app and see your changes! + +## Congratulations! :tada: + +You've successfully run and modified your React Native App. :partying_face: + +### Now what? + +- If you want to add this new React Native code to an existing application, check out the [Integration guide](https://reactnative.dev/docs/integration-with-existing-apps). +- If you're curious to learn more about React Native, check out the [Introduction to React Native](https://reactnative.dev/docs/getting-started). + +# Troubleshooting + +If you can't get this to work, see the [Troubleshooting](https://reactnative.dev/docs/troubleshooting) page. + +# Learn More + +To learn more about React Native, take a look at the following resources: + +- [React Native Website](https://reactnative.dev) - learn more about React Native. +- [Getting Started](https://reactnative.dev/docs/environment-setup) - an **overview** of React Native and how setup your environment. 
+- [Learn the Basics](https://reactnative.dev/docs/getting-started) - a **guided tour** of the React Native **basics**. +- [Blog](https://reactnative.dev/blog) - read the latest official React Native **Blog** posts. +- [`@facebook/react-native`](https://github.com/facebook/react-native) - the Open Source; GitHub **repository** for React Native. diff --git a/third-party/nwaku/examples/mobile/__tests__/App.test.tsx b/third-party/nwaku/examples/mobile/__tests__/App.test.tsx new file mode 100644 index 0000000..9eac6fb --- /dev/null +++ b/third-party/nwaku/examples/mobile/__tests__/App.test.tsx @@ -0,0 +1,17 @@ +/** + * @format + */ + +import 'react-native'; +import React from 'react'; +import App from '../App'; + +// Note: import explicitly to use the types shipped with jest. +import {it} from '@jest/globals'; + +// Note: test renderer must be required after react-native. +import renderer from 'react-test-renderer'; + +it('renders correctly', () => { + renderer.create(); +}); diff --git a/third-party/nwaku/examples/mobile/android/app/build.gradle b/third-party/nwaku/examples/mobile/android/app/build.gradle new file mode 100644 index 0000000..46cf8da --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/build.gradle @@ -0,0 +1,141 @@ +apply plugin: "com.android.application" +apply plugin: "org.jetbrains.kotlin.android" +apply plugin: "com.facebook.react" + +/** + * This is the configuration block to customize your React Native Android app. + * By default you don't need to apply any configuration, just uncomment the lines you need. + */ +react { + /* Folders */ + // The root of your project, i.e. where "package.json" lives. Default is '..' + // root = file("../") + // The folder where the react-native NPM package is. Default is ../node_modules/react-native + // reactNativeDir = file("../node_modules/react-native") + // The folder where the react-native Codegen package is. 
Default is ../node_modules/@react-native/codegen + // codegenDir = file("../node_modules/@react-native/codegen") + // The cli.js file which is the React Native CLI entrypoint. Default is ../node_modules/react-native/cli.js + // cliFile = file("../node_modules/react-native/cli.js") + + /* Variants */ + // The list of variants to that are debuggable. For those we're going to + // skip the bundling of the JS bundle and the assets. By default is just 'debug'. + // If you add flavors like lite, prod, etc. you'll have to list your debuggableVariants. + // debuggableVariants = ["liteDebug", "prodDebug"] + + /* Bundling */ + // A list containing the node command and its flags. Default is just 'node'. + // nodeExecutableAndArgs = ["node"] + // + // The command to run when bundling. By default is 'bundle' + // bundleCommand = "ram-bundle" + // + // The path to the CLI configuration file. Default is empty. + // bundleConfig = file(../rn-cli.config.js) + // + // The name of the generated asset file containing your JS bundle + // bundleAssetName = "MyApplication.android.bundle" + // + // The entry file for bundle generation. Default is 'index.android.js' or 'index.js' + // entryFile = file("../js/MyApplication.android.js") + // + // A list of extra flags to pass to the 'bundle' commands. + // See https://github.com/react-native-community/cli/blob/main/docs/commands.md#bundle + // extraPackagerArgs = [] + + /* Hermes Commands */ + // The hermes compiler command to run. By default it is 'hermesc' + // hermesCommand = "$rootDir/my-custom-hermesc/bin/hermesc" + // + // The list of flags to pass to the Hermes compiler. By default is "-O", "-output-source-map" + // hermesFlags = ["-O", "-output-source-map"] +} + +/** + * Set this to true to Run Proguard on Release builds to minify the Java bytecode. 
+ */ +def enableProguardInReleaseBuilds = false + +/** + * The preferred build flavor of JavaScriptCore (JSC) + * + * For example, to use the international variant, you can use: + * `def jscFlavor = 'org.webkit:android-jsc-intl:+'` + * + * The international variant includes ICU i18n library and necessary data + * allowing to use e.g. `Date.toLocaleString` and `String.localeCompare` that + * give correct results when using with locales other than en-US. Note that + * this variant is about 6MiB larger per architecture than default. + */ +def jscFlavor = 'org.webkit:android-jsc:+' + +android { + ndkVersion rootProject.ext.ndkVersion + buildToolsVersion rootProject.ext.buildToolsVersion + compileSdk rootProject.ext.compileSdkVersion + + namespace "com.mobile" + defaultConfig { + applicationId "com.mobile" + minSdkVersion rootProject.ext.minSdkVersion + targetSdkVersion rootProject.ext.targetSdkVersion + versionCode 1 + versionName "1.0" + ndk { + abiFilters "x86_64", "arm64-v8a", "x86" /* "armeabi-v7a", */ + moduleName "waku_jni" + ldLibs "log" + cFlags "-std=c99" + } + } + externalNativeBuild { + ndkBuild { + path file('src/main/jni/Android.mk') + } + } + signingConfigs { + debug { + storeFile file('debug.keystore') + storePassword 'android' + keyAlias 'androiddebugkey' + keyPassword 'android' + } + } + buildTypes { + debug { + signingConfig signingConfigs.debug + } + release { + // Caution! In production, you need to generate your own keystore file. + // see https://reactnative.dev/docs/signed-apk-android. 
+ signingConfig signingConfigs.debug + minifyEnabled enableProguardInReleaseBuilds + proguardFiles getDefaultProguardFile("proguard-android.txt"), "proguard-rules.pro" + } + } + + sourceSets { + main { + jniLibs { + srcDirs 'src/main/jniLibs' + } + } + } +} + +dependencies { + implementation fileTree(dir: "libs", include: ["*.aar"]) + + // The version of react-native is set by the React Native Gradle Plugin + implementation("com.facebook.react:react-android") + + implementation 'com.google.code.gson:gson:2.8.8' + + if (hermesEnabled.toBoolean()) { + implementation("com.facebook.react:hermes-android") + } else { + implementation jscFlavor + } +} + +apply from: file("../../node_modules/@react-native-community/cli-platform-android/native_modules.gradle"); applyNativeModulesAppBuildGradle(project) diff --git a/third-party/nwaku/examples/mobile/android/app/debug.keystore b/third-party/nwaku/examples/mobile/android/app/debug.keystore new file mode 100644 index 0000000..364e105 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/debug.keystore differ diff --git a/third-party/nwaku/examples/mobile/android/app/proguard-rules.pro b/third-party/nwaku/examples/mobile/android/app/proguard-rules.pro new file mode 100644 index 0000000..11b0257 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/proguard-rules.pro @@ -0,0 +1,10 @@ +# Add project specific ProGuard rules here. +# By default, the flags in this file are appended to flags specified +# in /usr/local/Cellar/android-sdk/24.3.3/tools/proguard/proguard-android.txt +# You can edit the include path and order by changing the proguardFiles +# directive in build.gradle. 
+# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# Add any project specific keep options here: diff --git a/third-party/nwaku/examples/mobile/android/app/src/debug/AndroidManifest.xml b/third-party/nwaku/examples/mobile/android/app/src/debug/AndroidManifest.xml new file mode 100644 index 0000000..fbf78ef --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/debug/AndroidManifest.xml @@ -0,0 +1,9 @@ + + + + + diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/AndroidManifest.xml b/third-party/nwaku/examples/mobile/android/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000..4122f36 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/AndroidManifest.xml @@ -0,0 +1,25 @@ + + + + + + + + + + + + + diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/MainActivity.kt b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/MainActivity.kt new file mode 100644 index 0000000..8b315da --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/MainActivity.kt @@ -0,0 +1,29 @@ +package com.mobile + +import com.facebook.react.ReactActivity +import com.facebook.react.ReactActivityDelegate +import com.facebook.react.defaults.DefaultNewArchitectureEntryPoint.fabricEnabled +import com.facebook.react.defaults.DefaultReactActivityDelegate + +class MainActivity : ReactActivity() { + + companion object { + init { + System.loadLibrary("rln") + System.loadLibrary("waku") + System.loadLibrary("waku_jni"); + } + } + /** + * Returns the name of the main component registered from JavaScript. This is used to schedule + * rendering of the component. + */ + override fun getMainComponentName(): String = "mobile" + + /** + * Returns the instance of the [ReactActivityDelegate]. 
We use [DefaultReactActivityDelegate] + * which allows you to enable New Architecture with a single boolean flags [fabricEnabled] + */ + override fun createReactActivityDelegate(): ReactActivityDelegate = + DefaultReactActivityDelegate(this, mainComponentName, fabricEnabled) +} diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/MainApplication.kt b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/MainApplication.kt new file mode 100644 index 0000000..79121f1 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/MainApplication.kt @@ -0,0 +1,43 @@ +package com.mobile + +import android.app.Application +import com.facebook.react.PackageList +import com.facebook.react.ReactApplication +import com.facebook.react.ReactHost +import com.facebook.react.ReactNativeHost +import com.facebook.react.ReactPackage +import com.facebook.react.defaults.DefaultNewArchitectureEntryPoint.load +import com.facebook.react.defaults.DefaultReactHost.getDefaultReactHost +import com.facebook.react.defaults.DefaultReactNativeHost +import com.facebook.soloader.SoLoader + +class MainApplication : Application(), ReactApplication { + + override val reactNativeHost: ReactNativeHost = + object : DefaultReactNativeHost(this) { + override fun getPackages(): List = + PackageList(this).packages.apply { + // Packages that cannot be autolinked yet can be added manually here, for example: + add(MyAppPackage()) + } + + override fun getJSMainModuleName(): String = "index" + + override fun getUseDeveloperSupport(): Boolean = BuildConfig.DEBUG + + override val isNewArchEnabled: Boolean = BuildConfig.IS_NEW_ARCHITECTURE_ENABLED + override val isHermesEnabled: Boolean = BuildConfig.IS_HERMES_ENABLED + } + + override val reactHost: ReactHost + get() = getDefaultReactHost(applicationContext, reactNativeHost) + + override fun onCreate() { + super.onCreate() + SoLoader.init(this, false) + if 
(BuildConfig.IS_NEW_ARCHITECTURE_ENABLED) { + // If you opted-in for the New Architecture, we load the native entry point for this app. + load() + } + } +} diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/ReactNativePackage.kt b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/ReactNativePackage.kt new file mode 100644 index 0000000..5e03685 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/ReactNativePackage.kt @@ -0,0 +1,19 @@ +package com.mobile + +import android.view.View +import com.facebook.react.ReactPackage +import com.facebook.react.bridge.NativeModule +import com.facebook.react.bridge.ReactApplicationContext +import com.facebook.react.uimanager.ReactShadowNode +import com.facebook.react.uimanager.ViewManager + +class MyAppPackage : ReactPackage { + + override fun createViewManagers( + reactContext: ReactApplicationContext + ): MutableList>> = mutableListOf() + + override fun createNativeModules( + reactContext: ReactApplicationContext + ): MutableList = listOf(WakuModule(reactContext)).toMutableList() +} diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/WakuModule.kt b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/WakuModule.kt new file mode 100644 index 0000000..4c1c020 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/java/com/mobile/WakuModule.kt @@ -0,0 +1,247 @@ +package com.mobile + +import com.facebook.react.bridge.Arguments +import com.facebook.react.bridge.Promise +import com.facebook.react.bridge.ReactApplicationContext +import com.facebook.react.bridge.ReactContext +import com.facebook.react.bridge.ReactContextBaseJavaModule +import com.facebook.react.bridge.ReactMethod +import com.facebook.react.bridge.ReadableArray +import com.facebook.react.bridge.ReadableMap +import com.facebook.react.bridge.ReadableType +import com.facebook.react.bridge.WritableNativeArray 
+import com.facebook.react.modules.core.DeviceEventManagerModule +import com.google.gson.Gson +import java.math.BigInteger +import org.json.JSONArray + +class EventCallbackManager { + companion object { + + lateinit var reactContext: ReactContext + + @JvmStatic + fun execEventCallback(wakuPtr: Long, evt: String) { + val params = + Arguments.createMap().apply { + putString("wakuPtr", wakuPtr.toString()) + putString("event", evt) + } + + reactContext + .getJSModule(DeviceEventManagerModule.RCTDeviceEventEmitter::class.java) + .emit("wakuEvent", params) + } + } +} + +fun convertStringToArray(stringArray: String): WritableNativeArray { + val writableArray = WritableNativeArray() + val jsonArray = JSONArray(stringArray) + for (i in 0 until jsonArray.length()) { + writableArray.pushString(jsonArray.getString(i)) + } + return writableArray +} + +fun stringifyReadableMap(map: ReadableMap): String { + val gson = Gson() + return gson.toJson(readableMapToMap(map)) +} + +fun readableMapToMap(readableMap: ReadableMap): Map { + val map = mutableMapOf() + val iterator = readableMap.keySetIterator() + while (iterator.hasNextKey()) { + val key = iterator.nextKey() + when (readableMap.getType(key)) { + ReadableType.Null -> map[key] = null + ReadableType.Boolean -> map[key] = readableMap.getBoolean(key) + ReadableType.Number -> map[key] = readableMap.getInt(key) + ReadableType.String -> map[key] = readableMap.getString(key) + ReadableType.Map -> map[key] = readableMapToMap(readableMap.getMap(key)!!) + ReadableType.Array -> map[key] = readableArrayToList(readableMap.getArray(key)!!) 
+ } + } + return map +} + +fun readableArrayToList(readableArray: ReadableArray): List { + val list = mutableListOf() + for (i in 0 until readableArray.size()) { + when (readableArray.getType(i)) { + ReadableType.Null -> list.add(null) + ReadableType.Boolean -> list.add(readableArray.getBoolean(i)) + ReadableType.Number -> list.add(readableArray.getInt(i)) + ReadableType.String -> list.add(readableArray.getString(i)) + ReadableType.Map -> list.add(readableMapToMap(readableArray.getMap(i))) + ReadableType.Array -> list.add(readableArrayToList(readableArray.getArray(i))) + } + } + return list +} + +class WakuPtr(val error: Boolean, val errorMessage: String, val ptr: Long) + +class WakuResult(val error: Boolean, val message: String) + +class WakuModule(reactContext: ReactApplicationContext) : ReactContextBaseJavaModule(reactContext) { + var reactContext = reactContext + + override fun getName() = "WakuModule" + + external fun wakuSetup() + external fun wakuNew(configJson: String): WakuPtr + external fun wakuStart(ctx: Long): WakuResult + external fun wakuVersion(ctx: Long): WakuResult + external fun wakuStop(ctx: Long): WakuResult + external fun wakuDestroy(ctx: Long): WakuResult + external fun wakuConnect(ctx: Long, peerMultiAddr: String, timeoutMs: Int): WakuResult + external fun wakuListenAddresses(ctx: Long): WakuResult + external fun wakuRelayPublish( + ctx: Long, + topic: String, + wakuMsg: String, + timeoutMs: Int + ): WakuResult + external fun wakuRelaySubscribe(ctx: Long, topic: String): WakuResult + external fun wakuRelayUnsubscribe(ctx: Long, topic: String): WakuResult + external fun wakuSetEventCallback(ctx: Long) + + init { + EventCallbackManager.reactContext = reactContext + } + + @ReactMethod + fun setup(promise: Promise) { + wakuSetup() + promise.resolve(null) + } + + @ReactMethod + fun new(config: ReadableMap, promise: Promise) { + val configStr = stringifyReadableMap(config) + val response = wakuNew(configStr) + if (response.error) { + 
promise.reject("waku_new", response.errorMessage) + } else { + // With this we just indicate to waku_ffi that we have registered a + // closure, for this wakuPtr. Later once a message is received the + // callback manager will receive both the wakuPtr and the message, + // and it will use these values to emit a JS event + wakuSetEventCallback(response.ptr) + + promise.resolve(BigInteger.valueOf(response.ptr).toString()) + } + } + + @ReactMethod + fun start(ctx: String, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuStart(wakuPtr) + if (response.error) { + promise.reject("waku_start", response.message) + } else { + promise.resolve(null) + } + } + + @ReactMethod + fun version(ctx: String, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuVersion(wakuPtr) + if (response.error) { + promise.reject("waku_version", response.message) + } else { + promise.resolve(response.message) + } + } + + @ReactMethod + fun stop(ctx: String, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuStop(wakuPtr) + if (response.error) { + promise.reject("waku_stop", response.message) + } else { + promise.resolve(null) + } + } + + @ReactMethod + fun destroy(ctx: String, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuDestroy(wakuPtr) + if (response.error) { + promise.reject("waku_destroy", response.message) + } else { + promise.resolve(null) + } + } + + @ReactMethod + fun listenAddresses(ctx: String, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuListenAddresses(wakuPtr) + if (response.error) { + promise.reject("waku_listen_addresses", response.message) + } else { + promise.resolve(convertStringToArray(response.message)) + } + } + + @ReactMethod + fun connect(ctx: String, peerMultiAddr: String, timeoutMs: Int, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuConnect(wakuPtr, peerMultiAddr, 
timeoutMs) + if (response.error) { + promise.reject("waku_connect", response.message) + } else { + promise.resolve(null) + } + } + + @ReactMethod + fun relaySubscribe(ctx: String, topic: String, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuRelaySubscribe(wakuPtr, topic) + if (response.error) { + promise.reject("waku_relay_subscribe", response.message) + } else { + promise.resolve(null) + } + } + + @ReactMethod + fun relayUnsubscribe(ctx: String, topic: String, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val response = wakuRelayUnsubscribe(wakuPtr, topic) + if (response.error) { + promise.reject("waku_relay_unsubscribe", response.message) + } else { + promise.resolve(null) + } + } + + @ReactMethod + fun relayPublish(ctx: String, topic: String, msg: ReadableMap, timeoutMs: Int, promise: Promise) { + val wakuPtr = BigInteger(ctx).toLong() + val msgStr = stringifyReadableMap(msg) + val response = wakuRelayPublish(wakuPtr, topic, msgStr, timeoutMs) + if (response.error) { + promise.reject("waku_relay_publish", response.message) + } else { + promise.resolve(null) + } + } + + @ReactMethod + fun addListener(eventName: String) { + // No impl required + } + + @ReactMethod + fun removeListeners(count: Int) { + // No impl required + } +} diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jni/.gitignore b/third-party/nwaku/examples/mobile/android/app/src/main/jni/.gitignore new file mode 100644 index 0000000..dcb1665 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/jni/.gitignore @@ -0,0 +1 @@ +libwaku.h diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jni/Android.mk b/third-party/nwaku/examples/mobile/android/app/src/main/jni/Android.mk new file mode 100644 index 0000000..f7ec7ff --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/jni/Android.mk @@ -0,0 +1,17 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_MODULE := 
waku +LOCAL_SRC_FILES := ../jniLibs/$(TARGET_ARCH_ABI)/libwaku.so + +include $(PREBUILT_SHARED_LIBRARY) + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := waku_ffi.c +LOCAL_MODULE := waku_jni +LOCAL_LDLIBS := -llog +LOCAL_SHARED_LIBRARIES := waku + +include $(BUILD_SHARED_LIBRARY) \ No newline at end of file diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jni/Application.mk b/third-party/nwaku/examples/mobile/android/app/src/main/jni/Application.mk new file mode 100644 index 0000000..e619d92 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/jni/Application.mk @@ -0,0 +1 @@ +APP_ABI := all \ No newline at end of file diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jni/waku_ffi.c b/third-party/nwaku/examples/mobile/android/app/src/main/jni/waku_ffi.c new file mode 100644 index 0000000..477e2da --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/jni/waku_ffi.c @@ -0,0 +1,325 @@ +#include "libwaku.h" +#include +#include +#include +#include +#include +#include +#include + +#define APPNAME "waku-jni" +#define LOGD(TAG) __android_log_print(ANDROID_LOG_DEBUG , APPNAME,TAG); + +// cb_result represents a response received when executing a callback. +// If `error` is true, `message` will contain the error message description +// otherwise, it will contain the result of the callback execution +typedef struct { + bool error; + char *message; +} cb_result; + +// cb_env is a struct passed as userdata when setting up the event callback. 
+// This is so we can pass the pointer back to kotlin to indicate which instance +// of waku received the message, and also so we can have access to `env` from +// within the event callback +typedef struct { + jlong wakuPtr; + JNIEnv *env; +} cb_env; + +// frees the results associated to the allocation of a cb_result +void free_cb_result(cb_result *result) { + if (result != NULL) { + if (result->message != NULL) { + free(result->message); + result->message = NULL; + } + free(result); + result = NULL; + } +} + +// callback executed by libwaku functions. It expects user_data to be a +// cb_result*. +void on_response(int ret, const char *msg, size_t len, void *user_data) { + if (ret != RET_OK) { + char errMsg[300]; + snprintf(errMsg, 300, "function execution failed. Returned code: %d, %s\n", ret, msg); + if (user_data != NULL) { + cb_result **data_ref = (cb_result **)user_data; + (*data_ref) = malloc(sizeof(cb_result)); + (*data_ref)->error = true; + (*data_ref)->message = malloc(len * sizeof(char) + 1); + (*data_ref)->message[0] = '\0'; + strncat((*data_ref)->message, msg, len); + } + return; + } + + if (user_data == NULL) + return; + + if (len == 0) { + len = 14; + msg = "on_response-ok"; + } + + cb_result **data_ref = (cb_result **)user_data; + (*data_ref) = malloc(sizeof(cb_result)); + (*data_ref)->error = false; + (*data_ref)->message = malloc(len * sizeof(char) + 1); + (*data_ref)->message[0] = '\0'; + strncat((*data_ref)->message, msg, len); +} + +// converts a cb_result into an instance of the kotlin WakuResult class +jobject to_jni_result(JNIEnv *env, cb_result *result) { + jclass myStructClass = (*env)->FindClass(env, "com/mobile/WakuResult"); + jmethodID constructor = (*env)->GetMethodID(env, myStructClass, "", + "(ZLjava/lang/String;)V"); + + jboolean error; + jstring message; + if (result != NULL) { + error = result->error; + message = (*env)->NewStringUTF(env, result->message); + } else { + error = false; + message = (*env)->NewStringUTF(env, "ok"); + } 
+ + jobject response = + (*env)->NewObject(env, myStructClass, constructor, error, message); + + // Free the intermediate message var + (*env)->DeleteLocalRef(env, message); + + return response; +} + +// converts a cb_result into an instance of the kotlin WakuPtr class +jobject to_jni_ptr(JNIEnv *env, cb_result *result, void *ptr) { + jclass myStructClass = (*env)->FindClass(env, "com/mobile/WakuPtr"); + jmethodID constructor = (*env)->GetMethodID(env, myStructClass, "", + "(ZLjava/lang/String;J)V"); + + jboolean error; + jstring message; + jlong wakuPtr; + if (result != NULL) { + error = result->error; + message = (*env)->NewStringUTF(env, result->message); + wakuPtr = -1; + } else { + error = false; + message = (*env)->NewStringUTF(env, "ok"); + wakuPtr = (jlong)ptr; + } + + jobject response = (*env)->NewObject(env, myStructClass, constructor, error, + message, wakuPtr); + + // Free the intermediate message var + (*env)->DeleteLocalRef(env, message); + + return response; +} + +// libwaku functions +// ============================================================================ + +// JVM, required for executing JNI functions in a third party thread. 
+JavaVM *jvm; +static jobject jClassLoader; +static jmethodID jLoadClass; + +JNIEnv *getEnv() { + JNIEnv *env; + int status = (*jvm)->GetEnv(jvm, (void **)&env, JNI_VERSION_1_6); + if (status < 0) { + status = (*jvm)->AttachCurrentThread(jvm, &env, NULL); + assert(status == JNI_OK && "could not obtain env"); + } + return env; +} + +JNIEXPORT jint JNICALL JNI_OnLoad(JavaVM *pjvm, void *reserved) { + jvm = pjvm; // cache the JavaVM pointer + JNIEnv *env = getEnv(); + + jclass jLibraryClass = + (*env)->FindClass(env, "com/mobile/EventCallbackManager"); + jclass jClassRef = (*env)->GetObjectClass(env, jLibraryClass); + jclass jClassLoaderClass = (*env)->FindClass(env, "java/lang/ClassLoader"); + jmethodID getClassLoader = (*env)->GetMethodID( + env, jClassRef, "getClassLoader", "()Ljava/lang/ClassLoader;"); + + jobject jClassLoaderLocal = + (*env)->CallObjectMethod(env, jLibraryClass, getClassLoader); + jLoadClass = (*env)->GetMethodID(env, jClassLoaderClass, "loadClass", + "(Ljava/lang/String;)Ljava/lang/Class;"); + jClassLoader = (*env)->NewGlobalRef(env, jClassLoaderLocal); + + (*env)->DeleteLocalRef(env, jClassLoaderLocal); + (*env)->DeleteLocalRef(env, jClassLoaderClass); + (*env)->DeleteLocalRef(env, jClassRef); + (*env)->DeleteLocalRef(env, jLibraryClass); + + return JNI_VERSION_1_6; +} + +jclass loadClass(JNIEnv *env, const char *className) { + jstring jName = (*env)->NewStringUTF(env, className); + jclass jClass = (*env)->CallObjectMethod(env, jClassLoader, jLoadClass, jName); + assert((*env)->ExceptionCheck(env) == JNI_FALSE && "class could not be loaded"); + (*env)->DeleteLocalRef(env, jName); + return jClass; +} + +void Java_com_mobile_WakuModule_wakuSetup(JNIEnv *env, jobject thiz) { + LOGD("log example for debugging purposes...") +} + +jobject Java_com_mobile_WakuModule_wakuNew(JNIEnv *env, jobject thiz, + jstring configJson) { + const char *config = (*env)->GetStringUTFChars(env, configJson, 0); + cb_result *result = NULL; + void *wakuPtr = 
waku_new(config, on_response, (void *)&result); + jobject response = to_jni_ptr(env, result, wakuPtr); + (*env)->ReleaseStringUTFChars(env, configJson, config); + free_cb_result(result); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuStart(JNIEnv *env, jobject thiz, + jlong wakuPtr) { + cb_result *result = NULL; + waku_start((void *)wakuPtr, on_response, &result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuVersion(JNIEnv *env, jobject thiz, + jlong wakuPtr) { + cb_result *result = NULL; + waku_version((void *)wakuPtr, on_response, (void *)&result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuStop(JNIEnv *env, jobject thiz, + jlong wakuPtr) { + cb_result *result = NULL; + waku_stop((void *)wakuPtr, on_response, &result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuDestroy(JNIEnv *env, jobject thiz, + jlong wakuPtr) { + cb_result *result = NULL; + waku_destroy((void *)wakuPtr, on_response, &result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuConnect(JNIEnv *env, jobject thiz, + jlong wakuPtr, + jstring peerMultiAddr, + jint timeoutMs) { + cb_result *result = NULL; + const char *peer = (*env)->GetStringUTFChars(env, peerMultiAddr, 0); + waku_connect((void *)wakuPtr, peer, timeoutMs, on_response, &result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + (*env)->ReleaseStringUTFChars(env, peerMultiAddr, peer); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuListenAddresses(JNIEnv *env, + jobject thiz, + jlong wakuPtr) { + cb_result *result = NULL; + waku_listen_addresses((void *)wakuPtr, on_response, (void *)&result); + jobject 
response = to_jni_result(env, result); + free_cb_result(result); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuRelayPublish(JNIEnv *env, jobject thiz, + jlong wakuPtr, + jstring pubsubTopic, + jstring jsonWakuMessage, + jint timeoutMs) { + cb_result *result = NULL; + const char *topic = (*env)->GetStringUTFChars(env, pubsubTopic, 0); + const char *msg = (*env)->GetStringUTFChars(env, jsonWakuMessage, 0); + waku_relay_publish((void *)wakuPtr, topic, msg, timeoutMs, on_response, + (void *)&result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + (*env)->ReleaseStringUTFChars(env, pubsubTopic, topic); + (*env)->ReleaseStringUTFChars(env, jsonWakuMessage, msg); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuRelaySubscribe(JNIEnv *env, jobject thiz, + jlong wakuPtr, + jstring pubsubTopic) { + cb_result *result = NULL; + const char *topic = (*env)->GetStringUTFChars(env, pubsubTopic, 0); + waku_relay_subscribe((void *)wakuPtr, topic, on_response, (void *)&result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + (*env)->ReleaseStringUTFChars(env, pubsubTopic, topic); + return response; +} + +jobject Java_com_mobile_WakuModule_wakuRelayUnsubscribe(JNIEnv *env, + jobject thiz, + jlong wakuPtr, + jstring pubsubTopic) { + cb_result *result = NULL; + const char *topic = (*env)->GetStringUTFChars(env, pubsubTopic, 0); + waku_relay_unsubscribe((void *)wakuPtr, topic, on_response, (void *)&result); + jobject response = to_jni_result(env, result); + free_cb_result(result); + (*env)->ReleaseStringUTFChars(env, pubsubTopic, topic); + return response; +} + +void wk_callback(int callerRet, const char *msg, size_t len, void *userData) { + cb_env *c = (cb_env *)userData; + + // TODO: might be too much overhead to attach/detach per call? 
+ JNIEnv *env = c->env; + JNIEnv *attachedEnv = NULL; + assert((*jvm)->AttachCurrentThread(jvm, &attachedEnv, NULL) == JNI_OK && "could not attach to current thread"); + + jclass clazz = loadClass(attachedEnv, "com/mobile/EventCallbackManager"); + + jmethodID methodID = + (*attachedEnv) + ->GetStaticMethodID(attachedEnv, clazz, "execEventCallback", "(JLjava/lang/String;)V"); + + jstring message = (*attachedEnv)->NewStringUTF(attachedEnv, msg); + (*attachedEnv)->CallStaticVoidMethod(attachedEnv, clazz, methodID, c->wakuPtr, message); + + (*attachedEnv)->DeleteLocalRef(attachedEnv, clazz); + + (*attachedEnv)->DeleteLocalRef(attachedEnv, message); + + (*jvm)->DetachCurrentThread(jvm); +} + +void Java_com_mobile_WakuModule_wakuSetEventCallback(JNIEnv *env, jobject thiz, + jlong wakuPtr) { + cb_env *c = (cb_env *)malloc(sizeof(cb_env)); + c->wakuPtr = wakuPtr; + c->env = env; + waku_set_event_callback((void *)wakuPtr, wk_callback, (void *)c); +} diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/arm64-v8a/.gitkeep b/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/arm64-v8a/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/armeabi-v7a/.gitkeep b/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/armeabi-v7a/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/x86/.gitkeep b/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/x86/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/x86_64/.gitkeep b/third-party/nwaku/examples/mobile/android/app/src/main/jniLibs/x86_64/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/drawable/rn_edit_text_material.xml 
b/third-party/nwaku/examples/mobile/android/app/src/main/res/drawable/rn_edit_text_material.xml new file mode 100644 index 0000000..5c25e72 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/res/drawable/rn_edit_text_material.xml @@ -0,0 +1,37 @@ + + + + + + + + + + + diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-hdpi/ic_launcher.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-hdpi/ic_launcher.png new file mode 100644 index 0000000..a2f5908 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-hdpi/ic_launcher.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png new file mode 100644 index 0000000..1b52399 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-hdpi/ic_launcher_round.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-mdpi/ic_launcher.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-mdpi/ic_launcher.png new file mode 100644 index 0000000..ff10afd Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-mdpi/ic_launcher.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png new file mode 100644 index 0000000..115a4c7 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-mdpi/ic_launcher_round.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png new file mode 100644 index 0000000..dcd3cd8 Binary 
files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xhdpi/ic_launcher.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png new file mode 100644 index 0000000..459ca60 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xhdpi/ic_launcher_round.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png new file mode 100644 index 0000000..8ca12fe Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxhdpi/ic_launcher.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png new file mode 100644 index 0000000..8e19b41 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxhdpi/ic_launcher_round.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png new file mode 100644 index 0000000..b824ebd Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher.png differ diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png new file mode 100644 index 0000000..4c19a13 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/app/src/main/res/mipmap-xxxhdpi/ic_launcher_round.png differ diff --git 
a/third-party/nwaku/examples/mobile/android/app/src/main/res/values/strings.xml b/third-party/nwaku/examples/mobile/android/app/src/main/res/values/strings.xml new file mode 100644 index 0000000..377157c --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/res/values/strings.xml @@ -0,0 +1,3 @@ + + mobile + diff --git a/third-party/nwaku/examples/mobile/android/app/src/main/res/values/styles.xml b/third-party/nwaku/examples/mobile/android/app/src/main/res/values/styles.xml new file mode 100644 index 0000000..7ba83a2 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/app/src/main/res/values/styles.xml @@ -0,0 +1,9 @@ + + + + + + diff --git a/third-party/nwaku/examples/mobile/android/build.gradle b/third-party/nwaku/examples/mobile/android/build.gradle new file mode 100644 index 0000000..03443cd --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/build.gradle @@ -0,0 +1,21 @@ +buildscript { + ext { + buildToolsVersion = "34.0.0" + minSdkVersion = 30 + compileSdkVersion = 34 + targetSdkVersion = 34 + ndkVersion = "26.1.10909125" + kotlinVersion = "1.9.22" + } + repositories { + google() + mavenCentral() + } + dependencies { + classpath("com.android.tools.build:gradle") + classpath("com.facebook.react:react-native-gradle-plugin") + classpath("org.jetbrains.kotlin:kotlin-gradle-plugin") + } +} + +apply plugin: "com.facebook.react.rootproject" diff --git a/third-party/nwaku/examples/mobile/android/gradle.properties b/third-party/nwaku/examples/mobile/android/gradle.properties new file mode 100644 index 0000000..a46a5b9 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/gradle.properties @@ -0,0 +1,41 @@ +# Project-wide Gradle settings. + +# IDE (e.g. Android Studio) users: +# Gradle settings configured through the IDE *will override* +# any settings specified in this file. 
+ +# For more details on how to configure your build environment visit +# http://www.gradle.org/docs/current/userguide/build_environment.html + +# Specifies the JVM arguments used for the daemon process. +# The setting is particularly useful for tweaking memory settings. +# Default value: -Xmx512m -XX:MaxMetaspaceSize=256m +org.gradle.jvmargs=-Xmx2048m -XX:MaxMetaspaceSize=512m + +# When configured, Gradle will run in incubating parallel mode. +# This option should only be used with decoupled projects. More details, visit +# http://www.gradle.org/docs/current/userguide/multi_project_builds.html#sec:decoupled_projects +# org.gradle.parallel=true + +# AndroidX package structure to make it clearer which packages are bundled with the +# Android operating system, and which are packaged with your app's APK +# https://developer.android.com/topic/libraries/support-library/androidx-rn +android.useAndroidX=true +# Automatically convert third-party libraries to use AndroidX +android.enableJetifier=true + +# Use this property to specify which architecture you want to build. +# You can also override it from the CLI using +# ./gradlew -PreactNativeArchitectures=x86_64 +reactNativeArchitectures=armeabi-v7a,arm64-v8a,x86,x86_64 + +# Use this property to enable support to the new architecture. +# This will allow you to use TurboModules and the Fabric render in +# your application. You should enable this flag either if you want +# to write custom TurboModules/Fabric components OR use libraries that +# are providing them. +newArchEnabled=false + +# Use this property to enable or disable the Hermes JS engine. +# If set to false, you will be using JSC instead. 
+hermesEnabled=true diff --git a/third-party/nwaku/examples/mobile/android/gradle/wrapper/gradle-wrapper.jar b/third-party/nwaku/examples/mobile/android/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000..7f93135 Binary files /dev/null and b/third-party/nwaku/examples/mobile/android/gradle/wrapper/gradle-wrapper.jar differ diff --git a/third-party/nwaku/examples/mobile/android/gradle/wrapper/gradle-wrapper.properties b/third-party/nwaku/examples/mobile/android/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000..2ea3535 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.6-all.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/third-party/nwaku/examples/mobile/android/gradlew b/third-party/nwaku/examples/mobile/android/gradlew new file mode 100755 index 0000000..1aa94a4 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/gradlew @@ -0,0 +1,249 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. 
+# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. 
+while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. 
+ # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 
+DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. +# * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be +# treated as '${Hostname}' itself on the command line. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/third-party/nwaku/examples/mobile/android/gradlew.bat b/third-party/nwaku/examples/mobile/android/gradlew.bat new file mode 100644 index 0000000..7101f8e --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. 
+@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 1>&2 +echo. 1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. 1>&2 +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 1>&2 +echo. 
1>&2 +echo Please set the JAVA_HOME variable in your environment to match the 1>&2 +echo location of your Java installation. 1>&2 + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/third-party/nwaku/examples/mobile/android/settings.gradle b/third-party/nwaku/examples/mobile/android/settings.gradle new file mode 100644 index 0000000..fa70819 --- /dev/null +++ b/third-party/nwaku/examples/mobile/android/settings.gradle @@ -0,0 +1,4 @@ +rootProject.name = 'mobile' +apply from: file("../node_modules/@react-native-community/cli-platform-android/native_modules.gradle"); applyNativeModulesSettingsGradle(settings) +include ':app' +includeBuild('../node_modules/@react-native/gradle-plugin') diff --git a/third-party/nwaku/examples/mobile/app.json b/third-party/nwaku/examples/mobile/app.json new file mode 100644 index 0000000..1cd68dc --- /dev/null +++ b/third-party/nwaku/examples/mobile/app.json @@ -0,0 +1,4 @@ +{ + "name": "mobile", + "displayName": "mobile" +} diff --git a/third-party/nwaku/examples/mobile/babel.config.js b/third-party/nwaku/examples/mobile/babel.config.js new file mode 100644 index 0000000..f7b3da3 --- /dev/null +++ b/third-party/nwaku/examples/mobile/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: ['module:@react-native/babel-preset'], +}; diff --git 
a/third-party/nwaku/examples/mobile/build-nwaku.js b/third-party/nwaku/examples/mobile/build-nwaku.js new file mode 100644 index 0000000..53f3dba --- /dev/null +++ b/third-party/nwaku/examples/mobile/build-nwaku.js @@ -0,0 +1,64 @@ +const fs = require('fs-extra'); +const {spawn} = require('child_process'); + +// Parse command line arguments +const args = process.argv.slice(2); +const forceFlagIndex = args.indexOf('--force'); + +const nwakuRootFolder = '../../'; +const libwakuHeaderSrc = 'library/libwaku.h'; + +// Android -------------------------------------------------------------------------------------- + +const androidArchitectures = ['arm64-v8a', 'x86', 'x86_64']; // 'armeabi-v7a' +const androidSrcFolder = 'build/android'; +const androidDstFolder = 'android/app/src/main/jniLibs'; +const androidFilesToCheck = ['libwaku.so', 'librln.so']; +const androidLibDst = 'android/app/src/main/jni/libwaku.h'; + +const androidDstFiles = [androidLibDst]; +androidArchitectures.forEach(architecture => { + androidFilesToCheck.forEach(file => { + androidDstFiles.push(`${androidDstFolder}/${architecture}/${file}`); + }); +}); + +// Check if all files exist +const filesExist = androidDstFiles.every(file => fs.existsSync(file)); +if (!filesExist || forceFlagIndex !== -1) { + console.log('Running make to generate all architecture libraries...'); + const makeCommand = 'make'; + const makeProcess = spawn(makeCommand, ['libwaku-android'], {cwd: '../../'}); + + makeProcess.stdout.on('data', data => process.stdout.write(data)); + makeProcess.stderr.on('data', data => process.stdout.write(data)); + makeProcess.on('close', code => { + if (code == 0) { + console.log('Copying generated libraries...'); + androidArchitectures.forEach(architecture => { + androidFilesToCheck.forEach(file => { + androidDstFiles.push(`${androidDstFolder}/${architecture}/${file}`); + fs.copyFile( + `${nwakuRootFolder}/${androidSrcFolder}/${architecture}/${file}`, + `${androidDstFolder}/${architecture}/${file}`, + 
err => { + if (err) throw err; + }, + ); + }); + }); + console.log('Copying header...'); + fs.copyFile( + `${nwakuRootFolder}/${libwakuHeaderSrc}`, + androidLibDst, + err => { + if (err) throw err; + }, + ); + } else { + console.error(`make exited with ${code}`); + } + }); +} else { + console.log('All files exist. Skipping make.'); +} diff --git a/third-party/nwaku/examples/mobile/index.js b/third-party/nwaku/examples/mobile/index.js new file mode 100644 index 0000000..a850d03 --- /dev/null +++ b/third-party/nwaku/examples/mobile/index.js @@ -0,0 +1,9 @@ +/** + * @format + */ + +import {AppRegistry} from 'react-native'; +import App from './App'; +import {name as appName} from './app.json'; + +AppRegistry.registerComponent(appName, () => App); diff --git a/third-party/nwaku/examples/mobile/ios/.xcode.env b/third-party/nwaku/examples/mobile/ios/.xcode.env new file mode 100644 index 0000000..3d5782c --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/.xcode.env @@ -0,0 +1,11 @@ +# This `.xcode.env` file is versioned and is used to source the environment +# used when running script phases inside Xcode. +# To customize your local environment, you can create an `.xcode.env.local` +# file that is not versioned. + +# NODE_BINARY variable contains the PATH to the node executable. +# +# Customize the NODE_BINARY variable here. +# For example, to use nvm with brew, add the following line +# . 
"$(brew --prefix nvm)/nvm.sh" --no-use +export NODE_BINARY=$(command -v node) diff --git a/third-party/nwaku/examples/mobile/ios/Podfile b/third-party/nwaku/examples/mobile/ios/Podfile new file mode 100644 index 0000000..3a46190 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/Podfile @@ -0,0 +1,40 @@ +# Resolve react_native_pods.rb with node to allow for hoisting +require Pod::Executable.execute_command('node', ['-p', + 'require.resolve( + "react-native/scripts/react_native_pods.rb", + {paths: [process.argv[1]]}, + )', __dir__]).strip + +platform :ios, min_ios_version_supported +prepare_react_native_project! + +linkage = ENV['USE_FRAMEWORKS'] +if linkage != nil + Pod::UI.puts "Configuring Pod with #{linkage}ally linked Frameworks".green + use_frameworks! :linkage => linkage.to_sym +end + +target 'mobile' do + config = use_native_modules! + + use_react_native!( + :path => config[:reactNativePath], + # An absolute path to your application root. + :app_path => "#{Pod::Config.instance.installation_root}/.." + ) + + target 'mobileTests' do + inherit! :complete + # Pods for testing + end + + post_install do |installer| + # https://github.com/facebook/react-native/blob/main/packages/react-native/scripts/react_native_pods.rb#L197-L202 + react_native_post_install( + installer, + config[:reactNativePath], + :mac_catalyst_enabled => false, + # :ccache_enabled => true + ) + end +end diff --git a/third-party/nwaku/examples/mobile/ios/mobile.xcodeproj/project.pbxproj b/third-party/nwaku/examples/mobile/ios/mobile.xcodeproj/project.pbxproj new file mode 100644 index 0000000..1c67104 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile.xcodeproj/project.pbxproj @@ -0,0 +1,688 @@ +// !$*UTF8*$! 
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 54; + objects = { + +/* Begin PBXBuildFile section */ + 00E356F31AD99517003FC87E /* mobileTests.m in Sources */ = {isa = PBXBuildFile; fileRef = 00E356F21AD99517003FC87E /* mobileTests.m */; }; + 0C80B921A6F3F58F76C31292 /* libPods-mobile.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 5DCACB8F33CDC322A6C60F78 /* libPods-mobile.a */; }; + 13B07FBC1A68108700A75B9A /* AppDelegate.mm in Sources */ = {isa = PBXBuildFile; fileRef = 13B07FB01A68108700A75B9A /* AppDelegate.mm */; }; + 13B07FBF1A68108700A75B9A /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 13B07FB51A68108700A75B9A /* Images.xcassets */; }; + 13B07FC11A68108700A75B9A /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 13B07FB71A68108700A75B9A /* main.m */; }; + 7699B88040F8A987B510C191 /* libPods-mobile-mobileTests.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 19F6CBCC0A4E27FBF8BF4A61 /* libPods-mobile-mobileTests.a */; }; + 81AB9BB82411601600AC10FF /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 81AB9BB72411601600AC10FF /* LaunchScreen.storyboard */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 00E356F41AD99517003FC87E /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 83CBB9F71A601CBA00E9B192 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 13B07F861A680F5B00A75B9A; + remoteInfo = mobile; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 00E356EE1AD99517003FC87E /* mobileTests.xctest */ = {isa = PBXFileReference; explicitFileType = wrapper.cfbundle; includeInIndex = 0; path = mobileTests.xctest; sourceTree = BUILT_PRODUCTS_DIR; }; + 00E356F11AD99517003FC87E /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; + 00E356F21AD99517003FC87E /* mobileTests.m */ = {isa = PBXFileReference; 
lastKnownFileType = sourcecode.c.objc; path = mobileTests.m; sourceTree = ""; }; + 13B07F961A680F5B00A75B9A /* mobile.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = mobile.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 13B07FAF1A68108700A75B9A /* AppDelegate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AppDelegate.h; path = mobile/AppDelegate.h; sourceTree = ""; }; + 13B07FB01A68108700A75B9A /* AppDelegate.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = AppDelegate.mm; path = mobile/AppDelegate.mm; sourceTree = ""; }; + 13B07FB51A68108700A75B9A /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; name = Images.xcassets; path = mobile/Images.xcassets; sourceTree = ""; }; + 13B07FB61A68108700A75B9A /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; name = Info.plist; path = mobile/Info.plist; sourceTree = ""; }; + 13B07FB71A68108700A75B9A /* main.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; name = main.m; path = mobile/main.m; sourceTree = ""; }; + 13B07FB81A68108700A75B9A /* PrivacyInfo.xcprivacy */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; name = PrivacyInfo.xcprivacy; path = mobile/PrivacyInfo.xcprivacy; sourceTree = ""; }; + 19F6CBCC0A4E27FBF8BF4A61 /* libPods-mobile-mobileTests.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-mobile-mobileTests.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + 3B4392A12AC88292D35C810B /* Pods-mobile.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-mobile.debug.xcconfig"; path = "Target Support Files/Pods-mobile/Pods-mobile.debug.xcconfig"; sourceTree = ""; }; + 5709B34CF0A7D63546082F79 /* 
Pods-mobile.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-mobile.release.xcconfig"; path = "Target Support Files/Pods-mobile/Pods-mobile.release.xcconfig"; sourceTree = ""; }; + 5B7EB9410499542E8C5724F5 /* Pods-mobile-mobileTests.debug.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-mobile-mobileTests.debug.xcconfig"; path = "Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests.debug.xcconfig"; sourceTree = ""; }; + 5DCACB8F33CDC322A6C60F78 /* libPods-mobile.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libPods-mobile.a"; sourceTree = BUILT_PRODUCTS_DIR; }; + 81AB9BB72411601600AC10FF /* LaunchScreen.storyboard */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = file.storyboard; name = LaunchScreen.storyboard; path = mobile/LaunchScreen.storyboard; sourceTree = ""; }; + 89C6BE57DB24E9ADA2F236DE /* Pods-mobile-mobileTests.release.xcconfig */ = {isa = PBXFileReference; includeInIndex = 1; lastKnownFileType = text.xcconfig; name = "Pods-mobile-mobileTests.release.xcconfig"; path = "Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests.release.xcconfig"; sourceTree = ""; }; + ED297162215061F000B7C4FE /* JavaScriptCore.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = JavaScriptCore.framework; path = System/Library/Frameworks/JavaScriptCore.framework; sourceTree = SDKROOT; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 00E356EB1AD99517003FC87E /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 7699B88040F8A987B510C191 /* libPods-mobile-mobileTests.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 13B07F8C1A680F5B00A75B9A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + 
files = ( + 0C80B921A6F3F58F76C31292 /* libPods-mobile.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 00E356EF1AD99517003FC87E /* mobileTests */ = { + isa = PBXGroup; + children = ( + 00E356F21AD99517003FC87E /* mobileTests.m */, + 00E356F01AD99517003FC87E /* Supporting Files */, + ); + path = mobileTests; + sourceTree = ""; + }; + 00E356F01AD99517003FC87E /* Supporting Files */ = { + isa = PBXGroup; + children = ( + 00E356F11AD99517003FC87E /* Info.plist */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; + 13B07FAE1A68108700A75B9A /* mobile */ = { + isa = PBXGroup; + children = ( + 13B07FAF1A68108700A75B9A /* AppDelegate.h */, + 13B07FB01A68108700A75B9A /* AppDelegate.mm */, + 13B07FB51A68108700A75B9A /* Images.xcassets */, + 13B07FB61A68108700A75B9A /* Info.plist */, + 81AB9BB72411601600AC10FF /* LaunchScreen.storyboard */, + 13B07FB71A68108700A75B9A /* main.m */, + 13B07FB81A68108700A75B9A /* PrivacyInfo.xcprivacy */, + ); + name = mobile; + sourceTree = ""; + }; + 2D16E6871FA4F8E400B85C8A /* Frameworks */ = { + isa = PBXGroup; + children = ( + ED297162215061F000B7C4FE /* JavaScriptCore.framework */, + 5DCACB8F33CDC322A6C60F78 /* libPods-mobile.a */, + 19F6CBCC0A4E27FBF8BF4A61 /* libPods-mobile-mobileTests.a */, + ); + name = Frameworks; + sourceTree = ""; + }; + 832341AE1AAA6A7D00B99B32 /* Libraries */ = { + isa = PBXGroup; + children = ( + ); + name = Libraries; + sourceTree = ""; + }; + 83CBB9F61A601CBA00E9B192 = { + isa = PBXGroup; + children = ( + 13B07FAE1A68108700A75B9A /* mobile */, + 832341AE1AAA6A7D00B99B32 /* Libraries */, + 00E356EF1AD99517003FC87E /* mobileTests */, + 83CBBA001A601CBA00E9B192 /* Products */, + 2D16E6871FA4F8E400B85C8A /* Frameworks */, + BBD78D7AC51CEA395F1C20DB /* Pods */, + ); + indentWidth = 2; + sourceTree = ""; + tabWidth = 2; + usesTabs = 0; + }; + 83CBBA001A601CBA00E9B192 /* Products */ = { + isa = PBXGroup; + 
children = ( + 13B07F961A680F5B00A75B9A /* mobile.app */, + 00E356EE1AD99517003FC87E /* mobileTests.xctest */, + ); + name = Products; + sourceTree = ""; + }; + BBD78D7AC51CEA395F1C20DB /* Pods */ = { + isa = PBXGroup; + children = ( + 3B4392A12AC88292D35C810B /* Pods-mobile.debug.xcconfig */, + 5709B34CF0A7D63546082F79 /* Pods-mobile.release.xcconfig */, + 5B7EB9410499542E8C5724F5 /* Pods-mobile-mobileTests.debug.xcconfig */, + 89C6BE57DB24E9ADA2F236DE /* Pods-mobile-mobileTests.release.xcconfig */, + ); + path = Pods; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 00E356ED1AD99517003FC87E /* mobileTests */ = { + isa = PBXNativeTarget; + buildConfigurationList = 00E357021AD99517003FC87E /* Build configuration list for PBXNativeTarget "mobileTests" */; + buildPhases = ( + A55EABD7B0C7F3A422A6CC61 /* [CP] Check Pods Manifest.lock */, + 00E356EA1AD99517003FC87E /* Sources */, + 00E356EB1AD99517003FC87E /* Frameworks */, + 00E356EC1AD99517003FC87E /* Resources */, + C59DA0FBD6956966B86A3779 /* [CP] Embed Pods Frameworks */, + F6A41C54EA430FDDC6A6ED99 /* [CP] Copy Pods Resources */, + ); + buildRules = ( + ); + dependencies = ( + 00E356F51AD99517003FC87E /* PBXTargetDependency */, + ); + name = mobileTests; + productName = mobileTests; + productReference = 00E356EE1AD99517003FC87E /* mobileTests.xctest */; + productType = "com.apple.product-type.bundle.unit-test"; + }; + 13B07F861A680F5B00A75B9A /* mobile */ = { + isa = PBXNativeTarget; + buildConfigurationList = 13B07F931A680F5B00A75B9A /* Build configuration list for PBXNativeTarget "mobile" */; + buildPhases = ( + C38B50BA6285516D6DCD4F65 /* [CP] Check Pods Manifest.lock */, + 13B07F871A680F5B00A75B9A /* Sources */, + 13B07F8C1A680F5B00A75B9A /* Frameworks */, + 13B07F8E1A680F5B00A75B9A /* Resources */, + 00DD1BFF1BD5951E006B06BC /* Bundle React Native code and images */, + 00EEFC60759A1932668264C0 /* [CP] Embed Pods Frameworks */, + E235C05ADACE081382539298 /* [CP] Copy 
Pods Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = mobile; + productName = mobile; + productReference = 13B07F961A680F5B00A75B9A /* mobile.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 83CBB9F71A601CBA00E9B192 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 1210; + TargetAttributes = { + 00E356ED1AD99517003FC87E = { + CreatedOnToolsVersion = 6.2; + TestTargetID = 13B07F861A680F5B00A75B9A; + }; + 13B07F861A680F5B00A75B9A = { + LastSwiftMigration = 1120; + }; + }; + }; + buildConfigurationList = 83CBB9FA1A601CBA00E9B192 /* Build configuration list for PBXProject "mobile" */; + compatibilityVersion = "Xcode 12.0"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 83CBB9F61A601CBA00E9B192; + productRefGroup = 83CBBA001A601CBA00E9B192 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 13B07F861A680F5B00A75B9A /* mobile */, + 00E356ED1AD99517003FC87E /* mobileTests */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 00E356EC1AD99517003FC87E /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 13B07F8E1A680F5B00A75B9A /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 81AB9BB82411601600AC10FF /* LaunchScreen.storyboard in Resources */, + 13B07FBF1A68108700A75B9A /* Images.xcassets in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 00DD1BFF1BD5951E006B06BC /* Bundle React Native code and images */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/.xcode.env.local", + 
"$(SRCROOT)/.xcode.env", + ); + name = "Bundle React Native code and images"; + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "set -e\n\nWITH_ENVIRONMENT=\"$REACT_NATIVE_PATH/scripts/xcode/with-environment.sh\"\nREACT_NATIVE_XCODE=\"$REACT_NATIVE_PATH/scripts/react-native-xcode.sh\"\n\n/bin/sh -c \"$WITH_ENVIRONMENT $REACT_NATIVE_XCODE\"\n"; + }; + 00EEFC60759A1932668264C0 /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile/Pods-mobile-frameworks-${CONFIGURATION}-input-files.xcfilelist", + ); + name = "[CP] Embed Pods Frameworks"; + outputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile/Pods-mobile-frameworks-${CONFIGURATION}-output-files.xcfilelist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-mobile/Pods-mobile-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; + A55EABD7B0C7F3A422A6CC61 /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + "${PODS_PODFILE_DIR_PATH}/Podfile.lock", + "${PODS_ROOT}/Manifest.lock", + ); + name = "[CP] Check Pods Manifest.lock"; + outputFileListPaths = ( + ); + outputPaths = ( + "$(DERIVED_FILE_DIR)/Pods-mobile-mobileTests-checkManifestLockResult.txt", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. 
Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n"; + showEnvVarsInLog = 0; + }; + C38B50BA6285516D6DCD4F65 /* [CP] Check Pods Manifest.lock */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + ); + inputPaths = ( + "${PODS_PODFILE_DIR_PATH}/Podfile.lock", + "${PODS_ROOT}/Manifest.lock", + ); + name = "[CP] Check Pods Manifest.lock"; + outputFileListPaths = ( + ); + outputPaths = ( + "$(DERIVED_FILE_DIR)/Pods-mobile-checkManifestLockResult.txt", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "diff \"${PODS_PODFILE_DIR_PATH}/Podfile.lock\" \"${PODS_ROOT}/Manifest.lock\" > /dev/null\nif [ $? != 0 ] ; then\n # print error to STDERR\n echo \"error: The sandbox is not in sync with the Podfile.lock. Run 'pod install' or update your CocoaPods installation.\" >&2\n exit 1\nfi\n# This output is used by Xcode 'outputs' to avoid re-running this script phase.\necho \"SUCCESS\" > \"${SCRIPT_OUTPUT_FILE_0}\"\n"; + showEnvVarsInLog = 0; + }; + C59DA0FBD6956966B86A3779 /* [CP] Embed Pods Frameworks */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests-frameworks-${CONFIGURATION}-input-files.xcfilelist", + ); + name = "[CP] Embed Pods Frameworks"; + outputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests-frameworks-${CONFIGURATION}-output-files.xcfilelist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests-frameworks.sh\"\n"; + showEnvVarsInLog = 0; + }; + E235C05ADACE081382539298 /* [CP] Copy Pods Resources */ = 
{ + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile/Pods-mobile-resources-${CONFIGURATION}-input-files.xcfilelist", + ); + name = "[CP] Copy Pods Resources"; + outputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile/Pods-mobile-resources-${CONFIGURATION}-output-files.xcfilelist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-mobile/Pods-mobile-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; + F6A41C54EA430FDDC6A6ED99 /* [CP] Copy Pods Resources */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests-resources-${CONFIGURATION}-input-files.xcfilelist", + ); + name = "[CP] Copy Pods Resources"; + outputFileListPaths = ( + "${PODS_ROOT}/Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests-resources-${CONFIGURATION}-output-files.xcfilelist", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "\"${PODS_ROOT}/Target Support Files/Pods-mobile-mobileTests/Pods-mobile-mobileTests-resources.sh\"\n"; + showEnvVarsInLog = 0; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 00E356EA1AD99517003FC87E /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 00E356F31AD99517003FC87E /* mobileTests.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 13B07F871A680F5B00A75B9A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 13B07FBC1A68108700A75B9A /* AppDelegate.mm in Sources */, + 13B07FC11A68108700A75B9A /* main.m in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin 
PBXTargetDependency section */ + 00E356F51AD99517003FC87E /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 13B07F861A680F5B00A75B9A /* mobile */; + targetProxy = 00E356F41AD99517003FC87E /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 00E356F61AD99517003FC87E /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 5B7EB9410499542E8C5724F5 /* Pods-mobile-mobileTests.debug.xcconfig */; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + INFOPLIST_FILE = mobileTests/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 13.4; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + OTHER_LDFLAGS = ( + "-ObjC", + "-lc++", + "$(inherited)", + ); + PRODUCT_BUNDLE_IDENTIFIER = "org.reactjs.native.example.$(PRODUCT_NAME:rfc1034identifier)"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/mobile.app/mobile"; + }; + name = Debug; + }; + 00E356F71AD99517003FC87E /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 89C6BE57DB24E9ADA2F236DE /* Pods-mobile-mobileTests.release.xcconfig */; + buildSettings = { + BUNDLE_LOADER = "$(TEST_HOST)"; + COPY_PHASE_STRIP = NO; + INFOPLIST_FILE = mobileTests/Info.plist; + IPHONEOS_DEPLOYMENT_TARGET = 13.4; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + "@loader_path/Frameworks", + ); + OTHER_LDFLAGS = ( + "-ObjC", + "-lc++", + "$(inherited)", + ); + PRODUCT_BUNDLE_IDENTIFIER = "org.reactjs.native.example.$(PRODUCT_NAME:rfc1034identifier)"; + PRODUCT_NAME = "$(TARGET_NAME)"; + TEST_HOST = "$(BUILT_PRODUCTS_DIR)/mobile.app/mobile"; + }; + name = Release; + }; + 13B07F941A680F5B00A75B9A /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 3B4392A12AC88292D35C810B /* Pods-mobile.debug.xcconfig */; + 
buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CLANG_ENABLE_MODULES = YES; + CURRENT_PROJECT_VERSION = 1; + ENABLE_BITCODE = NO; + INFOPLIST_FILE = mobile/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + OTHER_LDFLAGS = ( + "$(inherited)", + "-ObjC", + "-lc++", + ); + PRODUCT_BUNDLE_IDENTIFIER = "org.reactjs.native.example.$(PRODUCT_NAME:rfc1034identifier)"; + PRODUCT_NAME = mobile; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; + SWIFT_VERSION = 5.0; + VERSIONING_SYSTEM = "apple-generic"; + }; + name = Debug; + }; + 13B07F951A680F5B00A75B9A /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 5709B34CF0A7D63546082F79 /* Pods-mobile.release.xcconfig */; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + CLANG_ENABLE_MODULES = YES; + CURRENT_PROJECT_VERSION = 1; + INFOPLIST_FILE = mobile/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + MARKETING_VERSION = 1.0; + OTHER_LDFLAGS = ( + "$(inherited)", + "-ObjC", + "-lc++", + ); + PRODUCT_BUNDLE_IDENTIFIER = "org.reactjs.native.example.$(PRODUCT_NAME:rfc1034identifier)"; + PRODUCT_NAME = mobile; + SWIFT_VERSION = 5.0; + VERSIONING_SYSTEM = "apple-generic"; + }; + name = Release; + }; + 83CBBA201A601CBA00E9B192 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; + CLANG_CXX_LANGUAGE_STANDARD = "c++20"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = 
YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + "EXCLUDED_ARCHS[sdk=iphonesimulator*]" = ""; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_SYMBOLS_PRIVATE_EXTERN = NO; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 13.4; + LD_RUNPATH_SEARCH_PATHS = ( + /usr/lib/swift, + "$(inherited)", + ); + LIBRARY_SEARCH_PATHS = ( + "\"$(SDKROOT)/usr/lib/swift\"", + "\"$(TOOLCHAIN_DIR)/usr/lib/swift/$(PLATFORM_NAME)\"", + "\"$(inherited)\"", + ); + MTL_ENABLE_DEBUG_INFO = YES; + ONLY_ACTIVE_ARCH = YES; + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-DFOLLY_NO_CONFIG", + "-DFOLLY_MOBILE=1", + "-DFOLLY_USE_LIBCPP=1", + "-DFOLLY_CFG_NO_COROUTINES=1", + "-DFOLLY_HAVE_CLOCK_GETTIME=1", + ); + SDKROOT = iphoneos; + }; + name = Debug; + }; + 83CBBA211A601CBA00E9B192 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_LOCALIZABILITY_NONLOCALIZED = YES; + CLANG_CXX_LANGUAGE_STANDARD = "c++20"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + 
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; + COPY_PHASE_STRIP = YES; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + "EXCLUDED_ARCHS[sdk=iphonesimulator*]" = ""; + GCC_C_LANGUAGE_STANDARD = gnu99; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + IPHONEOS_DEPLOYMENT_TARGET = 13.4; + LD_RUNPATH_SEARCH_PATHS = ( + /usr/lib/swift, + "$(inherited)", + ); + LIBRARY_SEARCH_PATHS = ( + "\"$(SDKROOT)/usr/lib/swift\"", + "\"$(TOOLCHAIN_DIR)/usr/lib/swift/$(PLATFORM_NAME)\"", + "\"$(inherited)\"", + ); + MTL_ENABLE_DEBUG_INFO = NO; + OTHER_CPLUSPLUSFLAGS = ( + "$(OTHER_CFLAGS)", + "-DFOLLY_NO_CONFIG", + "-DFOLLY_MOBILE=1", + "-DFOLLY_USE_LIBCPP=1", + "-DFOLLY_CFG_NO_COROUTINES=1", + "-DFOLLY_HAVE_CLOCK_GETTIME=1", + ); + SDKROOT = iphoneos; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 00E357021AD99517003FC87E /* Build 
configuration list for PBXNativeTarget "mobileTests" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 00E356F61AD99517003FC87E /* Debug */, + 00E356F71AD99517003FC87E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 13B07F931A680F5B00A75B9A /* Build configuration list for PBXNativeTarget "mobile" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 13B07F941A680F5B00A75B9A /* Debug */, + 13B07F951A680F5B00A75B9A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 83CBB9FA1A601CBA00E9B192 /* Build configuration list for PBXProject "mobile" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 83CBBA201A601CBA00E9B192 /* Debug */, + 83CBBA211A601CBA00E9B192 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 83CBB9F71A601CBA00E9B192 /* Project object */; +} diff --git a/third-party/nwaku/examples/mobile/ios/mobile.xcodeproj/xcshareddata/xcschemes/mobile.xcscheme b/third-party/nwaku/examples/mobile/ios/mobile.xcodeproj/xcshareddata/xcschemes/mobile.xcscheme new file mode 100644 index 0000000..85dd574 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile.xcodeproj/xcshareddata/xcschemes/mobile.xcscheme @@ -0,0 +1,88 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/third-party/nwaku/examples/mobile/ios/mobile/AppDelegate.h b/third-party/nwaku/examples/mobile/ios/mobile/AppDelegate.h new file mode 100644 index 0000000..5d28082 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile/AppDelegate.h @@ -0,0 +1,6 @@ +#import +#import + +@interface AppDelegate : RCTAppDelegate + +@end diff --git a/third-party/nwaku/examples/mobile/ios/mobile/AppDelegate.mm b/third-party/nwaku/examples/mobile/ios/mobile/AppDelegate.mm new file mode 100644 index 0000000..9555b55 --- /dev/null +++ 
b/third-party/nwaku/examples/mobile/ios/mobile/AppDelegate.mm @@ -0,0 +1,31 @@ +#import "AppDelegate.h" + +#import + +@implementation AppDelegate + +- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions +{ + self.moduleName = @"mobile"; + // You can add your custom initial props in the dictionary below. + // They will be passed down to the ViewController used by React Native. + self.initialProps = @{}; + + return [super application:application didFinishLaunchingWithOptions:launchOptions]; +} + +- (NSURL *)sourceURLForBridge:(RCTBridge *)bridge +{ + return [self bundleURL]; +} + +- (NSURL *)bundleURL +{ +#if DEBUG + return [[RCTBundleURLProvider sharedSettings] jsBundleURLForBundleRoot:@"index"]; +#else + return [[NSBundle mainBundle] URLForResource:@"main" withExtension:@"jsbundle"]; +#endif +} + +@end diff --git a/third-party/nwaku/examples/mobile/ios/mobile/Images.xcassets/AppIcon.appiconset/Contents.json b/third-party/nwaku/examples/mobile/ios/mobile/Images.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000..8121323 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile/Images.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,53 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "20x20" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "20x20" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "29x29" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "29x29" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "40x40" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "40x40" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "60x60" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "60x60" + }, + { + "idiom" : "ios-marketing", + "scale" : "1x", + "size" : "1024x1024" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git 
a/third-party/nwaku/examples/mobile/ios/mobile/Images.xcassets/Contents.json b/third-party/nwaku/examples/mobile/ios/mobile/Images.xcassets/Contents.json new file mode 100644 index 0000000..2d92bd5 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile/Images.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + } +} diff --git a/third-party/nwaku/examples/mobile/ios/mobile/Info.plist b/third-party/nwaku/examples/mobile/ios/mobile/Info.plist new file mode 100644 index 0000000..9ef592f --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile/Info.plist @@ -0,0 +1,52 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleDisplayName + mobile + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + APPL + CFBundleShortVersionString + $(MARKETING_VERSION) + CFBundleSignature + ???? + CFBundleVersion + $(CURRENT_PROJECT_VERSION) + LSRequiresIPhoneOS + + NSAppTransportSecurity + + + NSAllowsArbitraryLoads + + NSAllowsLocalNetworking + + + NSLocationWhenInUseUsageDescription + + UILaunchStoryboardName + LaunchScreen + UIRequiredDeviceCapabilities + + arm64 + + UISupportedInterfaceOrientations + + UIInterfaceOrientationPortrait + UIInterfaceOrientationLandscapeLeft + UIInterfaceOrientationLandscapeRight + + UIViewControllerBasedStatusBarAppearance + + + diff --git a/third-party/nwaku/examples/mobile/ios/mobile/LaunchScreen.storyboard b/third-party/nwaku/examples/mobile/ios/mobile/LaunchScreen.storyboard new file mode 100644 index 0000000..61d1bfd --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile/LaunchScreen.storyboard @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/third-party/nwaku/examples/mobile/ios/mobile/PrivacyInfo.xcprivacy b/third-party/nwaku/examples/mobile/ios/mobile/PrivacyInfo.xcprivacy new file mode 
100644 index 0000000..ef1896e --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile/PrivacyInfo.xcprivacy @@ -0,0 +1,38 @@ + + + + + NSPrivacyCollectedDataTypes + + + NSPrivacyAccessedAPITypes + + + NSPrivacyAccessedAPIType + NSPrivacyAccessedAPICategoryFileTimestamp + NSPrivacyAccessedAPITypeReasons + + C617.1 + + + + NSPrivacyAccessedAPIType + NSPrivacyAccessedAPICategoryUserDefaults + NSPrivacyAccessedAPITypeReasons + + CA92.1 + + + + NSPrivacyAccessedAPIType + NSPrivacyAccessedAPICategorySystemBootTime + NSPrivacyAccessedAPITypeReasons + + 35F9.1 + + + + NSPrivacyTracking + + + diff --git a/third-party/nwaku/examples/mobile/ios/mobile/main.m b/third-party/nwaku/examples/mobile/ios/mobile/main.m new file mode 100644 index 0000000..d645c72 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobile/main.m @@ -0,0 +1,10 @@ +#import + +#import "AppDelegate.h" + +int main(int argc, char *argv[]) +{ + @autoreleasepool { + return UIApplicationMain(argc, argv, nil, NSStringFromClass([AppDelegate class])); + } +} diff --git a/third-party/nwaku/examples/mobile/ios/mobileTests/Info.plist b/third-party/nwaku/examples/mobile/ios/mobileTests/Info.plist new file mode 100644 index 0000000..ba72822 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobileTests/Info.plist @@ -0,0 +1,24 @@ + + + + + CFBundleDevelopmentRegion + en + CFBundleExecutable + $(EXECUTABLE_NAME) + CFBundleIdentifier + $(PRODUCT_BUNDLE_IDENTIFIER) + CFBundleInfoDictionaryVersion + 6.0 + CFBundleName + $(PRODUCT_NAME) + CFBundlePackageType + BNDL + CFBundleShortVersionString + 1.0 + CFBundleSignature + ???? 
+ CFBundleVersion + 1 + + diff --git a/third-party/nwaku/examples/mobile/ios/mobileTests/mobileTests.m b/third-party/nwaku/examples/mobile/ios/mobileTests/mobileTests.m new file mode 100644 index 0000000..be4fe75 --- /dev/null +++ b/third-party/nwaku/examples/mobile/ios/mobileTests/mobileTests.m @@ -0,0 +1,66 @@ +#import +#import + +#import +#import + +#define TIMEOUT_SECONDS 600 +#define TEXT_TO_LOOK_FOR @"Welcome to React" + +@interface mobileTests : XCTestCase + +@end + +@implementation mobileTests + +- (BOOL)findSubviewInView:(UIView *)view matching:(BOOL (^)(UIView *view))test +{ + if (test(view)) { + return YES; + } + for (UIView *subview in [view subviews]) { + if ([self findSubviewInView:subview matching:test]) { + return YES; + } + } + return NO; +} + +- (void)testRendersWelcomeScreen +{ + UIViewController *vc = [[[RCTSharedApplication() delegate] window] rootViewController]; + NSDate *date = [NSDate dateWithTimeIntervalSinceNow:TIMEOUT_SECONDS]; + BOOL foundElement = NO; + + __block NSString *redboxError = nil; +#ifdef DEBUG + RCTSetLogFunction( + ^(RCTLogLevel level, RCTLogSource source, NSString *fileName, NSNumber *lineNumber, NSString *message) { + if (level >= RCTLogLevelError) { + redboxError = message; + } + }); +#endif + + while ([date timeIntervalSinceNow] > 0 && !foundElement && !redboxError) { + [[NSRunLoop mainRunLoop] runMode:NSDefaultRunLoopMode beforeDate:[NSDate dateWithTimeIntervalSinceNow:0.1]]; + [[NSRunLoop mainRunLoop] runMode:NSRunLoopCommonModes beforeDate:[NSDate dateWithTimeIntervalSinceNow:0.1]]; + + foundElement = [self findSubviewInView:vc.view + matching:^BOOL(UIView *view) { + if ([view.accessibilityLabel isEqualToString:TEXT_TO_LOOK_FOR]) { + return YES; + } + return NO; + }]; + } + +#ifdef DEBUG + RCTSetLogFunction(RCTDefaultLogFunction); +#endif + + XCTAssertNil(redboxError, @"RedBox error: %@", redboxError); + XCTAssertTrue(foundElement, @"Couldn't find element with text '%@' in %d seconds", TEXT_TO_LOOK_FOR, 
TIMEOUT_SECONDS); +} + +@end diff --git a/third-party/nwaku/examples/mobile/jest.config.js b/third-party/nwaku/examples/mobile/jest.config.js new file mode 100644 index 0000000..8eb675e --- /dev/null +++ b/third-party/nwaku/examples/mobile/jest.config.js @@ -0,0 +1,3 @@ +module.exports = { + preset: 'react-native', +}; diff --git a/third-party/nwaku/examples/mobile/metro.config.js b/third-party/nwaku/examples/mobile/metro.config.js new file mode 100644 index 0000000..9d41685 --- /dev/null +++ b/third-party/nwaku/examples/mobile/metro.config.js @@ -0,0 +1,11 @@ +const {getDefaultConfig, mergeConfig} = require('@react-native/metro-config'); + +/** + * Metro configuration + * https://reactnative.dev/docs/metro + * + * @type {import('metro-config').MetroConfig} + */ +const config = {}; + +module.exports = mergeConfig(getDefaultConfig(__dirname), config); diff --git a/third-party/nwaku/examples/mobile/package.json b/third-party/nwaku/examples/mobile/package.json new file mode 100644 index 0000000..c4e5f0c --- /dev/null +++ b/third-party/nwaku/examples/mobile/package.json @@ -0,0 +1,38 @@ +{ + "name": "mobile", + "version": "0.0.1", + "private": true, + "scripts": { + "prestart": "node build-nwaku.js", + "android": "react-native run-android", + "ios": "react-native run-ios", + "lint": "eslint .", + "start": "react-native start", + "test": "jest", + "force-generate-mobile-libs": "node build-nwaku.js --force" + }, + "dependencies": { + "react": "18.2.0", + "react-native": "0.74.0" + }, + "devDependencies": { + "@babel/core": "^7.20.0", + "@babel/preset-env": "^7.20.0", + "@babel/runtime": "^7.20.0", + "@react-native/babel-preset": "0.74.81", + "@react-native/eslint-config": "0.74.81", + "@react-native/metro-config": "0.74.81", + "@react-native/typescript-config": "0.74.81", + "@types/react": "^18.2.6", + "@types/react-test-renderer": "^18.0.0", + "babel-jest": "^29.6.3", + "eslint": "^8.19.0", + "jest": "^29.6.3", + "prettier": "2.8.8", + "react-test-renderer": "18.2.0", + 
"typescript": "5.0.4" + }, + "engines": { + "node": ">=18" + } +} diff --git a/third-party/nwaku/examples/mobile/tsconfig.json b/third-party/nwaku/examples/mobile/tsconfig.json new file mode 100644 index 0000000..304ab4e --- /dev/null +++ b/third-party/nwaku/examples/mobile/tsconfig.json @@ -0,0 +1,3 @@ +{ + "extends": "@react-native/typescript-config/tsconfig.json" +} diff --git a/third-party/nwaku/examples/mobile/yarn.lock b/third-party/nwaku/examples/mobile/yarn.lock new file mode 100644 index 0000000..1902232 --- /dev/null +++ b/third-party/nwaku/examples/mobile/yarn.lock @@ -0,0 +1,6747 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + + +"@aashutoshrathi/word-wrap@^1.2.3": + version "1.2.6" + resolved "https://registry.yarnpkg.com/@aashutoshrathi/word-wrap/-/word-wrap-1.2.6.tgz#bd9154aec9983f77b3a034ecaa015c2e4201f6cf" + integrity sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA== + +"@ampproject/remapping@^2.2.0": + version "2.3.0" + resolved "https://registry.yarnpkg.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4" + integrity sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw== + dependencies: + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.24" + +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.12.13", "@babel/code-frame@^7.23.5": + version "7.23.5" + resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.23.5.tgz#9009b69a8c602293476ad598ff53e4562e15c244" + integrity sha512-CgH3s1a96LipHCmSUmYFPwY7MNx8C3avkq7i4Wl3cfa662ldtUe4VM1TPXX70pfmrlWTb6jLqTYrZyT2ZTJBgA== + dependencies: + "@babel/highlight" "^7.23.4" + chalk "^2.4.2" + +"@babel/compat-data@^7.20.5", "@babel/compat-data@^7.22.6", "@babel/compat-data@^7.23.5": + version "7.23.5" + resolved 
"https://registry.yarnpkg.com/@babel/compat-data/-/compat-data-7.23.5.tgz#ffb878728bb6bdcb6f4510aa51b1be9afb8cfd98" + integrity sha512-uU27kfDRlhfKl+w1U6vp16IuvSLtjAxdArVXPa9BvLkrr7CYIsxH5adpHObeAGY/41+syctUWOZ140a2Rvkgjw== + +"@babel/core@^7.11.6", "@babel/core@^7.12.3", "@babel/core@^7.13.16", "@babel/core@^7.20.0", "@babel/core@^7.23.9": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/core/-/core-7.24.0.tgz#56cbda6b185ae9d9bed369816a8f4423c5f2ff1b" + integrity sha512-fQfkg0Gjkza3nf0c7/w6Xf34BW4YvzNfACRLmmb7XRLa6XHdR+K9AlJlxneFfWYf6uhOzuzZVTjF/8KfndZANw== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.23.5" + "@babel/generator" "^7.23.6" + "@babel/helper-compilation-targets" "^7.23.6" + "@babel/helper-module-transforms" "^7.23.3" + "@babel/helpers" "^7.24.0" + "@babel/parser" "^7.24.0" + "@babel/template" "^7.24.0" + "@babel/traverse" "^7.24.0" + "@babel/types" "^7.24.0" + convert-source-map "^2.0.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + +"@babel/eslint-parser@^7.20.0": + version "7.23.10" + resolved "https://registry.yarnpkg.com/@babel/eslint-parser/-/eslint-parser-7.23.10.tgz#2d4164842d6db798873b40e0c4238827084667a2" + integrity sha512-3wSYDPZVnhseRnxRJH6ZVTNknBz76AEnyC+AYYhasjP3Yy23qz0ERR7Fcd2SHmYuSFJ2kY9gaaDd3vyqU09eSw== + dependencies: + "@nicolo-ribaudo/eslint-scope-5-internals" "5.1.1-v1" + eslint-visitor-keys "^2.1.0" + semver "^6.3.1" + +"@babel/generator@^7.20.0", "@babel/generator@^7.23.6", "@babel/generator@^7.7.2": + version "7.23.6" + resolved "https://registry.yarnpkg.com/@babel/generator/-/generator-7.23.6.tgz#9e1fca4811c77a10580d17d26b57b036133f3c2e" + integrity sha512-qrSfCYxYQB5owCmGLbl8XRpX1ytXlpueOb0N0UmQwA073KZxejgQTzAmJezxvpwQD9uGtK2shHdi55QT+MbjIw== + dependencies: + "@babel/types" "^7.23.6" + "@jridgewell/gen-mapping" "^0.3.2" + "@jridgewell/trace-mapping" "^0.3.17" + jsesc "^2.5.1" + +"@babel/helper-annotate-as-pure@^7.22.5": + version "7.22.5" + 
resolved "https://registry.yarnpkg.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz#e7f06737b197d580a01edf75d97e2c8be99d3882" + integrity sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-builder-binary-assignment-operator-visitor@^7.22.15": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.15.tgz#5426b109cf3ad47b91120f8328d8ab1be8b0b956" + integrity sha512-QkBXwGgaoC2GtGZRoma6kv7Szfv06khvhFav67ZExau2RaXzy8MpHSMO2PNoP2XtmQphJQRHFfg77Bq731Yizw== + dependencies: + "@babel/types" "^7.22.15" + +"@babel/helper-compilation-targets@^7.20.7", "@babel/helper-compilation-targets@^7.22.15", "@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.23.6": + version "7.23.6" + resolved "https://registry.yarnpkg.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.23.6.tgz#4d79069b16cbcf1461289eccfbbd81501ae39991" + integrity sha512-9JB548GZoQVmzrFgp8o7KxdgkTGm6xs9DW0o/Pim72UDjzr5ObUQ6ZzYPqA+g9OTS2bBQoctLJrky0RDCAWRgQ== + dependencies: + "@babel/compat-data" "^7.23.5" + "@babel/helper-validator-option" "^7.23.5" + browserslist "^4.22.2" + lru-cache "^5.1.1" + semver "^6.3.1" + +"@babel/helper-create-class-features-plugin@^7.18.6", "@babel/helper-create-class-features-plugin@^7.22.15", "@babel/helper-create-class-features-plugin@^7.23.6": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.24.0.tgz#fc7554141bdbfa2d17f7b4b80153b9b090e5d158" + integrity sha512-QAH+vfvts51BCsNZ2PhY6HAggnlS6omLLFTsIpeqZk/MmJ6cW7tgz5yRv0fMJThcr6FmbMrENh1RgrWPTYA76g== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-function-name" "^7.23.0" + 
"@babel/helper-member-expression-to-functions" "^7.23.0" + "@babel/helper-optimise-call-expression" "^7.22.5" + "@babel/helper-replace-supers" "^7.22.20" + "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + semver "^6.3.1" + +"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.22.15", "@babel/helper-create-regexp-features-plugin@^7.22.5": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.15.tgz#5ee90093914ea09639b01c711db0d6775e558be1" + integrity sha512-29FkPLFjn4TPEa3RE7GpW+qbE8tlsu3jntNYNfcGsc49LphF1PQIiD+vMZ1z1xVOKt+93khA9tc2JBs3kBjA7w== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + regexpu-core "^5.3.1" + semver "^6.3.1" + +"@babel/helper-define-polyfill-provider@^0.5.0": + version "0.5.0" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.5.0.tgz#465805b7361f461e86c680f1de21eaf88c25901b" + integrity sha512-NovQquuQLAQ5HuyjCz7WQP9MjRj7dx++yspwiyUiGl9ZyadHRSql1HZh5ogRd8W8w6YM6EQ/NTB8rgjLt5W65Q== + dependencies: + "@babel/helper-compilation-targets" "^7.22.6" + "@babel/helper-plugin-utils" "^7.22.5" + debug "^4.1.1" + lodash.debounce "^4.0.8" + resolve "^1.14.2" + +"@babel/helper-define-polyfill-provider@^0.6.1": + version "0.6.1" + resolved "https://registry.yarnpkg.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.1.tgz#fadc63f0c2ff3c8d02ed905dcea747c5b0fb74fd" + integrity sha512-o7SDgTJuvx5vLKD6SFvkydkSMBvahDKGiNJzG22IZYXhiqoe9efY7zocICBgzHV4IRg5wdgl2nEL/tulKIEIbA== + dependencies: + "@babel/helper-compilation-targets" "^7.22.6" + "@babel/helper-plugin-utils" "^7.22.5" + debug "^4.1.1" + lodash.debounce "^4.0.8" + resolve "^1.14.2" + +"@babel/helper-environment-visitor@^7.18.9", "@babel/helper-environment-visitor@^7.22.20": + version 
"7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.20.tgz#96159db61d34a29dba454c959f5ae4a649ba9167" + integrity sha512-zfedSIzFhat/gFhWfHtgWvlec0nqB9YEIVrpuwjruLlXfUSnA8cJB0miHKwqDnQ7d32aKo2xt88/xZptwxbfhA== + +"@babel/helper-function-name@^7.22.5", "@babel/helper-function-name@^7.23.0": + version "7.23.0" + resolved "https://registry.yarnpkg.com/@babel/helper-function-name/-/helper-function-name-7.23.0.tgz#1f9a3cdbd5b2698a670c30d2735f9af95ed52759" + integrity sha512-OErEqsrxjZTJciZ4Oo+eoZqeW9UIiOcuYKRJA4ZAgV9myA+pOXhhmpfNCKjEH/auVfEYVFJ6y1Tc4r0eIApqiw== + dependencies: + "@babel/template" "^7.22.15" + "@babel/types" "^7.23.0" + +"@babel/helper-hoist-variables@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz#c01a007dac05c085914e8fb652b339db50d823bb" + integrity sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-member-expression-to-functions@^7.22.15", "@babel/helper-member-expression-to-functions@^7.23.0": + version "7.23.0" + resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.23.0.tgz#9263e88cc5e41d39ec18c9a3e0eced59a3e7d366" + integrity sha512-6gfrPwh7OuT6gZyJZvd6WbTfrqAo7vm4xCzAXOusKqq/vWdKXphTpj5klHKNmRUU6/QRGlBsyU9mAIPaWHlqJA== + dependencies: + "@babel/types" "^7.23.0" + +"@babel/helper-module-imports@^7.22.15": + version "7.22.15" + resolved "https://registry.yarnpkg.com/@babel/helper-module-imports/-/helper-module-imports-7.22.15.tgz#16146307acdc40cc00c3b2c647713076464bdbf0" + integrity sha512-0pYVBnDKZO2fnSPCrgM/6WMc7eS20Fbok+0r88fp+YtWVLZrp4CkafFGIp+W0VKw4a22sgebPT99y+FDNMdP4w== + dependencies: + "@babel/types" "^7.22.15" + +"@babel/helper-module-transforms@^7.23.3": + version "7.23.3" + resolved 
"https://registry.yarnpkg.com/@babel/helper-module-transforms/-/helper-module-transforms-7.23.3.tgz#d7d12c3c5d30af5b3c0fcab2a6d5217773e2d0f1" + integrity sha512-7bBs4ED9OmswdfDzpz4MpWgSrV7FXlc3zIagvLFjS5H+Mk7Snr21vQ6QwrsoCGMfNC4e4LQPdoULEt4ykz0SRQ== + dependencies: + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-module-imports" "^7.22.15" + "@babel/helper-simple-access" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + "@babel/helper-validator-identifier" "^7.22.20" + +"@babel/helper-optimise-call-expression@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz#f21531a9ccbff644fdd156b4077c16ff0c3f609e" + integrity sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.10.4", "@babel/helper-plugin-utils@^7.12.13", "@babel/helper-plugin-utils@^7.14.5", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.20.2", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.24.0", "@babel/helper-plugin-utils@^7.8.0", "@babel/helper-plugin-utils@^7.8.3": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.24.0.tgz#945681931a52f15ce879fd5b86ce2dae6d3d7f2a" + integrity sha512-9cUznXMG0+FxRuJfvL82QlTqIzhVW9sL0KjMPHhAOOvpQGL8QtdxnBKILjBqxlHyliz0yCa1G903ZXI/FuHy2w== + +"@babel/helper-remap-async-to-generator@^7.18.9", "@babel/helper-remap-async-to-generator@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.20.tgz#7b68e1cb4fa964d2996fd063723fb48eca8498e0" + integrity sha512-pBGyV4uBqOns+0UvhsTO8qgl8hO89PmiDYv+/COyp1aeMcmfrfruz+/nCMFiYyFF/Knn0yfrC85ZzNFjembFTw== + dependencies: + "@babel/helper-annotate-as-pure" 
"^7.22.5" + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-wrap-function" "^7.22.20" + +"@babel/helper-replace-supers@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-replace-supers/-/helper-replace-supers-7.22.20.tgz#e37d367123ca98fe455a9887734ed2e16eb7a793" + integrity sha512-qsW0In3dbwQUbK8kejJ4R7IHVGwHJlV6lpG6UA7a9hSa2YEiAib+N1T2kr6PEeUT+Fl7najmSOS6SmAwCHK6Tw== + dependencies: + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-member-expression-to-functions" "^7.22.15" + "@babel/helper-optimise-call-expression" "^7.22.5" + +"@babel/helper-simple-access@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz#4938357dc7d782b80ed6dbb03a0fba3d22b1d5de" + integrity sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-skip-transparent-expression-wrappers@^7.20.0", "@babel/helper-skip-transparent-expression-wrappers@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz#007f15240b5751c537c40e77abb4e89eeaaa8847" + integrity sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-split-export-declaration@^7.22.6": + version "7.22.6" + resolved "https://registry.yarnpkg.com/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.6.tgz#322c61b7310c0997fe4c323955667f18fcefb91c" + integrity sha512-AsUnxuLhRYsisFiaJwvp1QF+I3KjD5FOxut14q/GzovUe6orHLesW2C7d754kRm53h5gqrz6sFl6sxc4BVtE/g== + dependencies: + "@babel/types" "^7.22.5" + +"@babel/helper-string-parser@^7.23.4": + version "7.23.4" + resolved 
"https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.23.4.tgz#9478c707febcbbe1ddb38a3d91a2e054ae622d83" + integrity sha512-803gmbQdqwdf4olxrX4AJyFBV/RTr3rSmOj0rKwesmzlfhYNDEs+/iOcznzpNWlJlIlTJC2QfPFcHB6DlzdVLQ== + +"@babel/helper-validator-identifier@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.22.20.tgz#c4ae002c61d2879e724581d96665583dbc1dc0e0" + integrity sha512-Y4OZ+ytlatR8AI+8KZfKuL5urKp7qey08ha31L8b3BwewJAoJamTzyvxPR/5D+KkdJCGPq/+8TukHBlY10FX9A== + +"@babel/helper-validator-option@^7.22.15", "@babel/helper-validator-option@^7.23.5": + version "7.23.5" + resolved "https://registry.yarnpkg.com/@babel/helper-validator-option/-/helper-validator-option-7.23.5.tgz#907a3fbd4523426285365d1206c423c4c5520307" + integrity sha512-85ttAOMLsr53VgXkTbkx8oA6YTfT4q7/HzXSLEYmjcSTJPMPQtvq1BD79Byep5xMUYbGRzEpDsjUf3dyp54IKw== + +"@babel/helper-wrap-function@^7.22.20": + version "7.22.20" + resolved "https://registry.yarnpkg.com/@babel/helper-wrap-function/-/helper-wrap-function-7.22.20.tgz#15352b0b9bfb10fc9c76f79f6342c00e3411a569" + integrity sha512-pms/UwkOpnQe/PDAEdV/d7dVCoBbB+R4FvYoHGZz+4VPcg7RtYy2KP7S2lbuWM6FCSgob5wshfGESbC/hzNXZw== + dependencies: + "@babel/helper-function-name" "^7.22.5" + "@babel/template" "^7.22.15" + "@babel/types" "^7.22.19" + +"@babel/helpers@^7.24.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/helpers/-/helpers-7.24.0.tgz#a3dd462b41769c95db8091e49cfe019389a9409b" + integrity sha512-ulDZdc0Aj5uLc5nETsa7EPx2L7rM0YJM8r7ck7U73AXi7qOV44IHHRAYZHY6iU1rr3C5N4NtTmMRUJP6kwCWeA== + dependencies: + "@babel/template" "^7.24.0" + "@babel/traverse" "^7.24.0" + "@babel/types" "^7.24.0" + +"@babel/highlight@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.23.4.tgz#edaadf4d8232e1a961432db785091207ead0621b" + integrity 
sha512-acGdbYSfp2WheJoJm/EBBBLh/ID8KDc64ISZ9DYtBmC8/Q204PZJLHyzeB5qMzJ5trcOkybd78M4x2KWsUq++A== + dependencies: + "@babel/helper-validator-identifier" "^7.22.20" + chalk "^2.4.2" + js-tokens "^4.0.0" + +"@babel/parser@^7.1.0", "@babel/parser@^7.13.16", "@babel/parser@^7.14.7", "@babel/parser@^7.20.0", "@babel/parser@^7.20.7", "@babel/parser@^7.23.9", "@babel/parser@^7.24.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/parser/-/parser-7.24.0.tgz#26a3d1ff49031c53a97d03b604375f028746a9ac" + integrity sha512-QuP/FxEAzMSjXygs8v4N9dvdXzEHN4W1oF3PxuWAtPo08UdM17u89RDMgjLn/mlc56iM0HlLmVkO/wgR+rDgHg== + +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.23.3.tgz#5cd1c87ba9380d0afb78469292c954fee5d2411a" + integrity sha512-iRkKcCqb7iGnq9+3G6rZ+Ciz5VywC4XNRHe57lKM+jOeYAoR0lVqdeeDRfh0tQcTfw/+vBhHn926FmQhLtlFLQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.23.3.tgz#f6652bb16b94f8f9c20c50941e16e9756898dc5d" + integrity sha512-WwlxbfMNdVEpQjZmK5mhm7oSwD3dS6eU+Iwsi4Knl9wAletWem7kaRsGOG+8UEbRyqxY4SS5zvtfXwX+jMxUwQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5" + "@babel/plugin-transform-optional-chaining" "^7.23.3" + +"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.23.7": + version "7.23.7" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.23.7.tgz#516462a95d10a9618f197d39ad291a9b47ae1d7b" + integrity sha512-LlRT7HgaifEpQA1ZgLVOIJZZFVPWN5iReq/7/JixwBtwcoeVGDBD53ZV28rrsLYOZs1Y/EHhA8N/Z6aazHR8cw== + dependencies: + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-proposal-async-generator-functions@^7.0.0": + version "7.20.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-async-generator-functions/-/plugin-proposal-async-generator-functions-7.20.7.tgz#bfb7276d2d573cb67ba379984a2334e262ba5326" + integrity sha512-xMbiLsn/8RK7Wq7VeVytytS2L6qE69bXPB10YCmMdDZbKF4okCqY74pI/jJQ/8U0b/F6NrT2+14b8/P9/3AMGA== + dependencies: + "@babel/helper-environment-visitor" "^7.18.9" + "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-remap-async-to-generator" "^7.18.9" + "@babel/plugin-syntax-async-generators" "^7.8.4" + +"@babel/plugin-proposal-class-properties@^7.13.0", "@babel/plugin-proposal-class-properties@^7.18.0": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-class-properties/-/plugin-proposal-class-properties-7.18.6.tgz#b110f59741895f7ec21a6fff696ec46265c446a3" + integrity sha512-cumfXOF0+nzZrrN8Rf0t7M+tF6sZc7vhQwYQck9q1/5w2OExlD+b4v4RpMJFaV1Z7WcDRgO6FqvxqxGlwo+RHQ== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" + +"@babel/plugin-proposal-export-default-from@^7.0.0": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-export-default-from/-/plugin-proposal-export-default-from-7.23.3.tgz#6f511a676c540ccc8d17a8553dbba9230b0ddac0" + integrity sha512-Q23MpLZfSGZL1kU7fWqV262q65svLSCIP5kZ/JCW/rKTCm/FrLjpvEd2kfUYMVeHh4QhV/xzyoRAHWrAZJrE3Q== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-export-default-from" "^7.23.3" + 
+"@babel/plugin-proposal-logical-assignment-operators@^7.18.0": + version "7.20.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-logical-assignment-operators/-/plugin-proposal-logical-assignment-operators-7.20.7.tgz#dfbcaa8f7b4d37b51e8bfb46d94a5aea2bb89d83" + integrity sha512-y7C7cZgpMIjWlKE5T7eJwp+tnRYM89HmRvWM5EQuB5BoHEONjmQ8lSNmBUwOyy/GFRsohJED51YBF79hE1djug== + dependencies: + "@babel/helper-plugin-utils" "^7.20.2" + "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + +"@babel/plugin-proposal-nullish-coalescing-operator@^7.13.8", "@babel/plugin-proposal-nullish-coalescing-operator@^7.18.0": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.18.6.tgz#fdd940a99a740e577d6c753ab6fbb43fdb9467e1" + integrity sha512-wQxQzxYeJqHcfppzBDnm1yAY0jSRkUXR2z8RePZYrKwMKgMlE8+Z6LUno+bd6LvbGh8Gltvy74+9pIYkr+XkKA== + dependencies: + "@babel/helper-plugin-utils" "^7.18.6" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" + +"@babel/plugin-proposal-numeric-separator@^7.0.0": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.18.6.tgz#899b14fbafe87f053d2c5ff05b36029c62e13c75" + integrity sha512-ozlZFogPqoLm8WBr5Z8UckIoE4YQ5KESVcNudyXOR8uqIkliTEgJ3RoketfG6pmzLdeZF0H/wjE9/cCEitBl7Q== + dependencies: + "@babel/helper-plugin-utils" "^7.18.6" + "@babel/plugin-syntax-numeric-separator" "^7.10.4" + +"@babel/plugin-proposal-object-rest-spread@^7.20.0": + version "7.20.7" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.20.7.tgz#aa662940ef425779c75534a5c41e9d936edc390a" + integrity sha512-d2S98yCiLxDVmBmE8UjGcfPvNEUbA1U5q5WxaWFUGRzJSVAZqm5W6MbPct0jxnegUZ0niLeNX+IOzEs7wYg9Dg== + dependencies: + "@babel/compat-data" "^7.20.5" + "@babel/helper-compilation-targets" "^7.20.7" + 
"@babel/helper-plugin-utils" "^7.20.2" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-transform-parameters" "^7.20.7" + +"@babel/plugin-proposal-optional-catch-binding@^7.0.0": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-catch-binding/-/plugin-proposal-optional-catch-binding-7.18.6.tgz#f9400d0e6a3ea93ba9ef70b09e72dd6da638a2cb" + integrity sha512-Q40HEhs9DJQyaZfUjjn6vE8Cv4GmMHCYuMGIWUnlxH6400VGxOuwWsPt4FxXxJkC/5eOzgn0z21M9gMT4MOhbw== + dependencies: + "@babel/helper-plugin-utils" "^7.18.6" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + +"@babel/plugin-proposal-optional-chaining@^7.13.12", "@babel/plugin-proposal-optional-chaining@^7.20.0": + version "7.21.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.21.0.tgz#886f5c8978deb7d30f678b2e24346b287234d3ea" + integrity sha512-p4zeefM72gpmEe2fkUr/OnOXpWEf8nAgk7ZYVqqfFiyIG7oFfVZcCrU64hWn5xp4tQ9LkV4bTIa5rD0KANpKNA== + dependencies: + "@babel/helper-plugin-utils" "^7.20.2" + "@babel/helper-skip-transparent-expression-wrappers" "^7.20.0" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" + +"@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2": + version "7.21.0-placeholder-for-preset-env.2" + resolved "https://registry.yarnpkg.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz#7844f9289546efa9febac2de4cfe358a050bd703" + integrity sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w== + +"@babel/plugin-syntax-async-generators@^7.8.4": + version "7.8.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz#a983fb1aeb2ec3f6ed042a210f640e90e786fe0d" + integrity 
sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-bigint@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-bigint/-/plugin-syntax-bigint-7.8.3.tgz#4c9a6f669f5d0cdf1b90a1671e9a146be5300cea" + integrity sha512-wnTnFlG+YxQm3vDxpGE57Pj0srRU4sHE/mDkt1qv2YJJSeUAec2ma4WLUnUPeKjyrfntVwe/N6dCXpU+zL3Npg== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-class-properties@^7.12.13", "@babel/plugin-syntax-class-properties@^7.8.3": + version "7.12.13" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz#b5c987274c4a3a82b89714796931a6b53544ae10" + integrity sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA== + dependencies: + "@babel/helper-plugin-utils" "^7.12.13" + +"@babel/plugin-syntax-class-static-block@^7.14.5": + version "7.14.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz#195df89b146b4b78b3bf897fd7a257c84659d406" + integrity sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw== + dependencies: + "@babel/helper-plugin-utils" "^7.14.5" + +"@babel/plugin-syntax-dynamic-import@^7.8.0", "@babel/plugin-syntax-dynamic-import@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz#62bf98b2da3cd21d626154fc96ee5b3cb68eacb3" + integrity sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-export-default-from@^7.0.0", "@babel/plugin-syntax-export-default-from@^7.23.3": + version "7.23.3" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-export-default-from/-/plugin-syntax-export-default-from-7.23.3.tgz#7e6d4bf595d5724230200fb2b7401d4734b15335" + integrity sha512-KeENO5ck1IeZ/l2lFZNy+mpobV3D2Zy5C1YFnWm+YuY5mQiAWc4yAp13dqgguwsBsFVLh4LPCEqCa5qW13N+hw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-syntax-export-namespace-from@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz#028964a9ba80dbc094c915c487ad7c4e7a66465a" + integrity sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q== + dependencies: + "@babel/helper-plugin-utils" "^7.8.3" + +"@babel/plugin-syntax-flow@^7.12.1", "@babel/plugin-syntax-flow@^7.18.0", "@babel/plugin-syntax-flow@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-flow/-/plugin-syntax-flow-7.23.3.tgz#084564e0f3cc21ea6c70c44cff984a1c0509729a" + integrity sha512-YZiAIpkJAwQXBJLIQbRFayR5c+gJ35Vcz3bg954k7cd73zqjvhacJuL9RbrzPz8qPmZdgqP6EUKwy0PCNhaaPA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-syntax-import-assertions@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.23.3.tgz#9c05a7f592982aff1a2768260ad84bcd3f0c77fc" + integrity sha512-lPgDSU+SJLK3xmFDTV2ZRQAiM7UuUjGidwBywFavObCiZc1BeAAcMtHJKUya92hPHO+at63JJPLygilZard8jw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-syntax-import-attributes@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.23.3.tgz#992aee922cf04512461d7dae3ff6951b90a2dc06" + integrity sha512-pawnE0P9g10xgoP7yKr6CK63K2FMsTE+FZidZO/1PwRdzmAPVs+HS1mAURUsgaoxammTJvULUdIkEK0gOcU2tA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + 
+"@babel/plugin-syntax-import-meta@^7.10.4", "@babel/plugin-syntax-import-meta@^7.8.3": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz#ee601348c370fa334d2207be158777496521fd51" + integrity sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g== + dependencies: + "@babel/helper-plugin-utils" "^7.10.4" + +"@babel/plugin-syntax-json-strings@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz#01ca21b668cd8218c9e640cb6dd88c5412b2c96a" + integrity sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-jsx@^7.23.3", "@babel/plugin-syntax-jsx@^7.7.2": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.23.3.tgz#8f2e4f8a9b5f9aa16067e142c1ac9cd9f810f473" + integrity sha512-EB2MELswq55OHUoRZLGg/zC7QWUKfNLpE57m/S2yr1uEneIgsTgrSzXP3NXEsMkVn76OlaVVnzN+ugObuYGwhg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-syntax-logical-assignment-operators@^7.10.4", "@babel/plugin-syntax-logical-assignment-operators@^7.8.3": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz#ca91ef46303530448b906652bac2e9fe9941f699" + integrity sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig== + dependencies: + "@babel/helper-plugin-utils" "^7.10.4" + +"@babel/plugin-syntax-nullish-coalescing-operator@^7.0.0", "@babel/plugin-syntax-nullish-coalescing-operator@^7.8.3": + version "7.8.3" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz#167ed70368886081f74b5c36c65a88c03b66d1a9" + integrity sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-numeric-separator@^7.10.4", "@babel/plugin-syntax-numeric-separator@^7.8.3": + version "7.10.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz#b9b070b3e33570cd9fd07ba7fa91c0dd37b9af97" + integrity sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug== + dependencies: + "@babel/helper-plugin-utils" "^7.10.4" + +"@babel/plugin-syntax-object-rest-spread@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz#60e225edcbd98a640332a2e72dd3e66f1af55871" + integrity sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-optional-catch-binding@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz#6111a265bcfb020eb9efd0fdfd7d26402b9ed6c1" + integrity sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-optional-chaining@^7.0.0", "@babel/plugin-syntax-optional-chaining@^7.8.3": + version "7.8.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz#4f69c2ab95167e0180cd5336613f8c5788f7d48a" + integrity 
sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg== + dependencies: + "@babel/helper-plugin-utils" "^7.8.0" + +"@babel/plugin-syntax-private-property-in-object@^7.14.5": + version "7.14.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz#0dc6671ec0ea22b6e94a1114f857970cd39de1ad" + integrity sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg== + dependencies: + "@babel/helper-plugin-utils" "^7.14.5" + +"@babel/plugin-syntax-top-level-await@^7.14.5", "@babel/plugin-syntax-top-level-await@^7.8.3": + version "7.14.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz#c1cfdadc35a646240001f06138247b741c34d94c" + integrity sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw== + dependencies: + "@babel/helper-plugin-utils" "^7.14.5" + +"@babel/plugin-syntax-typescript@^7.23.3", "@babel/plugin-syntax-typescript@^7.7.2": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.23.3.tgz#24f460c85dbbc983cd2b9c4994178bcc01df958f" + integrity sha512-9EiNjVJOMwCO+43TqoTrgQ8jMwcAd0sWyXi9RPfIsLTj4R2MADDDQXELhffaUx/uJv2AYcxBgPwH6j4TIA4ytQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-syntax-unicode-sets-regex@^7.18.6": + version "7.18.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz#d49a3b3e6b52e5be6740022317580234a6a47357" + integrity sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" + +"@babel/plugin-transform-arrow-functions@^7.0.0", 
"@babel/plugin-transform-arrow-functions@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.23.3.tgz#94c6dcfd731af90f27a79509f9ab7fb2120fc38b" + integrity sha512-NzQcQrzaQPkaEwoTm4Mhyl8jI1huEL/WWIEvudjTCMJ9aBZNpsJbMASx7EQECtQQPS/DcnFpo0FIh3LvEO9cxQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-async-generator-functions@^7.23.9": + version "7.23.9" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.23.9.tgz#9adaeb66fc9634a586c5df139c6240d41ed801ce" + integrity sha512-8Q3veQEDGe14dTYuwagbRtwxQDnytyg1JFu4/HwEMETeofocrB0U0ejBJIXoeG/t2oXZ8kzCyI0ZZfbT80VFNQ== + dependencies: + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-remap-async-to-generator" "^7.22.20" + "@babel/plugin-syntax-async-generators" "^7.8.4" + +"@babel/plugin-transform-async-to-generator@^7.20.0", "@babel/plugin-transform-async-to-generator@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.23.3.tgz#d1f513c7a8a506d43f47df2bf25f9254b0b051fa" + integrity sha512-A7LFsKi4U4fomjqXJlZg/u0ft/n8/7n7lpffUP/ZULx/DtV9SGlNKZolHH6PE8Xl1ngCc0M11OaeZptXVkfKSw== + dependencies: + "@babel/helper-module-imports" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-remap-async-to-generator" "^7.22.20" + +"@babel/plugin-transform-block-scoped-functions@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.23.3.tgz#fe1177d715fb569663095e04f3598525d98e8c77" + integrity sha512-vI+0sIaPIO6CNuM9Kk5VmXcMVRiOpDh7w2zZt9GXzmE/9KD70CUEVhvPR/etAeNK/FAEkhxQtXOzVF3EuRL41A== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + 
+"@babel/plugin-transform-block-scoping@^7.0.0", "@babel/plugin-transform-block-scoping@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.23.4.tgz#b2d38589531c6c80fbe25e6b58e763622d2d3cf5" + integrity sha512-0QqbP6B6HOh7/8iNR4CQU2Th/bbRtBp4KS9vcaZd1fZ0wSh5Fyssg0UCIHwxh+ka+pNDREbVLQnHCMHKZfPwfw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-class-properties@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.23.3.tgz#35c377db11ca92a785a718b6aa4e3ed1eb65dc48" + integrity sha512-uM+AN8yCIjDPccsKGlw271xjJtGii+xQIF/uMPS8H15L12jZTsLfF4o5vNO7d/oUguOyfdikHGc/yi9ge4SGIg== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-class-static-block@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.23.4.tgz#2a202c8787a8964dd11dfcedf994d36bfc844ab5" + integrity sha512-nsWu/1M+ggti1SOALj3hfx5FXzAY06fwPJsUZD4/A5e1bWi46VUIWtD+kOX6/IdhXGsXBWllLFDSnqSCdUNydQ== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-class-static-block" "^7.14.5" + +"@babel/plugin-transform-classes@^7.0.0", "@babel/plugin-transform-classes@^7.23.8": + version "7.23.8" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.23.8.tgz#d08ae096c240347badd68cdf1b6d1624a6435d92" + integrity sha512-yAYslGsY1bX6Knmg46RjiCiNSwJKv2IUC8qOdYKqMMr0491SXFhcHqOdRDeCRohOOIzwN/90C6mQ9qAKgrP7dg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-compilation-targets" "^7.23.6" + "@babel/helper-environment-visitor" "^7.22.20" + 
"@babel/helper-function-name" "^7.23.0" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-replace-supers" "^7.22.20" + "@babel/helper-split-export-declaration" "^7.22.6" + globals "^11.1.0" + +"@babel/plugin-transform-computed-properties@^7.0.0", "@babel/plugin-transform-computed-properties@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.23.3.tgz#652e69561fcc9d2b50ba4f7ac7f60dcf65e86474" + integrity sha512-dTj83UVTLw/+nbiHqQSFdwO9CbTtwq1DsDqm3CUEtDrZNET5rT5E6bIdTlOftDTDLMYxvxHNEYO4B9SLl8SLZw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/template" "^7.22.15" + +"@babel/plugin-transform-destructuring@^7.20.0", "@babel/plugin-transform-destructuring@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.23.3.tgz#8c9ee68228b12ae3dff986e56ed1ba4f3c446311" + integrity sha512-n225npDqjDIr967cMScVKHXJs7rout1q+tt50inyBCPkyZ8KxeI6d+GIbSBTT/w/9WdlWDOej3V9HE5Lgk57gw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-dotall-regex@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.23.3.tgz#3f7af6054882ede89c378d0cf889b854a993da50" + integrity sha512-vgnFYDHAKzFaTVp+mneDsIEbnJ2Np/9ng9iviHw3P/KVcgONxpNULEW/51Z/BaFojG2GI2GwwXck5uV1+1NOYQ== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-duplicate-keys@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.23.3.tgz#664706ca0a5dfe8d066537f99032fc1dc8b720ce" + integrity sha512-RrqQ+BQmU3Oyav3J+7/myfvRCq7Tbz+kKLLshUmMwNlDHExbGL7ARhajvoBJEvc+fCguPPu887N+3RRXBVKZUA== + dependencies: + 
"@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-dynamic-import@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.23.4.tgz#c7629e7254011ac3630d47d7f34ddd40ca535143" + integrity sha512-V6jIbLhdJK86MaLh4Jpghi8ho5fGzt3imHOBu/x0jlBaPYqDoWz4RDXjmMOfnh+JWNaQleEAByZLV0QzBT4YQQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-dynamic-import" "^7.8.3" + +"@babel/plugin-transform-exponentiation-operator@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.23.3.tgz#ea0d978f6b9232ba4722f3dbecdd18f450babd18" + integrity sha512-5fhCsl1odX96u7ILKHBj4/Y8vipoqwsJMh4csSA8qFfxrZDEA4Ssku2DyNvMJSmZNOEBT750LfFPbtrnTP90BQ== + dependencies: + "@babel/helper-builder-binary-assignment-operator-visitor" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-export-namespace-from@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.23.4.tgz#084c7b25e9a5c8271e987a08cf85807b80283191" + integrity sha512-GzuSBcKkx62dGzZI1WVgTWvkkz84FZO5TC5T8dl/Tht/rAla6Dg/Mz9Yhypg+ezVACf/rgDuQt3kbWEv7LdUDQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-export-namespace-from" "^7.8.3" + +"@babel/plugin-transform-flow-strip-types@^7.20.0", "@babel/plugin-transform-flow-strip-types@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-flow-strip-types/-/plugin-transform-flow-strip-types-7.23.3.tgz#cfa7ca159cc3306fab526fc67091556b51af26ff" + integrity sha512-26/pQTf9nQSNVJCrLB1IkHUKyPxR+lMrH2QDPG89+Znu9rAMbtrybdbWeE9bb7gzjmE5iXHEY+e0HUwM6Co93Q== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-flow" "^7.23.3" + 
+"@babel/plugin-transform-for-of@^7.23.6": + version "7.23.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.23.6.tgz#81c37e24171b37b370ba6aaffa7ac86bcb46f94e" + integrity sha512-aYH4ytZ0qSuBbpfhuofbg/e96oQ7U2w1Aw/UQmKT+1l39uEhUPoFS3fHevDc1G0OvewyDudfMKY1OulczHzWIw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5" + +"@babel/plugin-transform-function-name@^7.0.0", "@babel/plugin-transform-function-name@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.23.3.tgz#8f424fcd862bf84cb9a1a6b42bc2f47ed630f8dc" + integrity sha512-I1QXp1LxIvt8yLaib49dRW5Okt7Q4oaxao6tFVKS/anCdEOMtYwWVKoiOA1p34GOWIZjUK0E+zCp7+l1pfQyiw== + dependencies: + "@babel/helper-compilation-targets" "^7.22.15" + "@babel/helper-function-name" "^7.23.0" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-json-strings@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.23.4.tgz#a871d9b6bd171976efad2e43e694c961ffa3714d" + integrity sha512-81nTOqM1dMwZ/aRXQ59zVubN9wHGqk6UtqRK+/q+ciXmRy8fSolhGVvG09HHRGo4l6fr/c4ZhXUQH0uFW7PZbg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-json-strings" "^7.8.3" + +"@babel/plugin-transform-literals@^7.0.0", "@babel/plugin-transform-literals@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.23.3.tgz#8214665f00506ead73de157eba233e7381f3beb4" + integrity sha512-wZ0PIXRxnwZvl9AYpqNUxpZ5BiTGrYt7kueGQ+N5FiQ7RCOD4cm8iShd6S6ggfVIWaJf2EMk8eRzAh52RfP4rQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-logical-assignment-operators@^7.23.4": + version "7.23.4" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.23.4.tgz#e599f82c51d55fac725f62ce55d3a0886279ecb5" + integrity sha512-Mc/ALf1rmZTP4JKKEhUwiORU+vcfarFVLfcFiolKUo6sewoxSEgl36ak5t+4WamRsNr6nzjZXQjM35WsU+9vbg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + +"@babel/plugin-transform-member-expression-literals@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.23.3.tgz#e37b3f0502289f477ac0e776b05a833d853cabcc" + integrity sha512-sC3LdDBDi5x96LA+Ytekz2ZPk8i/Ck+DEuDbRAll5rknJ5XRTSaPKEYwomLcs1AA8wg9b3KjIQRsnApj+q51Ag== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-modules-amd@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.23.3.tgz#e19b55436a1416829df0a1afc495deedfae17f7d" + integrity sha512-vJYQGxeKM4t8hYCKVBlZX/gtIY2I7mRGFNcm85sgXGMTBcoV3QdVtdpbcWEbzbfUIUZKwvgFT82mRvaQIebZzw== + dependencies: + "@babel/helper-module-transforms" "^7.23.3" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-modules-commonjs@^7.0.0", "@babel/plugin-transform-modules-commonjs@^7.13.8", "@babel/plugin-transform-modules-commonjs@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.23.3.tgz#661ae831b9577e52be57dd8356b734f9700b53b4" + integrity sha512-aVS0F65LKsdNOtcz6FRCpE4OgsP2OFnW46qNxNIX9h3wuzaNcSQsJysuMwqSibC98HPrf2vCgtxKNwS0DAlgcA== + dependencies: + "@babel/helper-module-transforms" "^7.23.3" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-simple-access" "^7.22.5" + +"@babel/plugin-transform-modules-systemjs@^7.23.9": + version "7.23.9" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.23.9.tgz#105d3ed46e4a21d257f83a2f9e2ee4203ceda6be" + integrity sha512-KDlPRM6sLo4o1FkiSlXoAa8edLXFsKKIda779fbLrvmeuc3itnjCtaO6RrtoaANsIJANj+Vk1zqbZIMhkCAHVw== + dependencies: + "@babel/helper-hoist-variables" "^7.22.5" + "@babel/helper-module-transforms" "^7.23.3" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-validator-identifier" "^7.22.20" + +"@babel/plugin-transform-modules-umd@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.23.3.tgz#5d4395fccd071dfefe6585a4411aa7d6b7d769e9" + integrity sha512-zHsy9iXX2nIsCBFPud3jKn1IRPWg3Ing1qOZgeKV39m1ZgIdpJqvlWVeiHBZC6ITRG0MfskhYe9cLgntfSFPIg== + dependencies: + "@babel/helper-module-transforms" "^7.23.3" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-named-capturing-groups-regex@^7.0.0", "@babel/plugin-transform-named-capturing-groups-regex@^7.22.5": + version "7.22.5" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz#67fe18ee8ce02d57c855185e27e3dc959b2e991f" + integrity sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.22.5" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-new-target@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.23.3.tgz#5491bb78ed6ac87e990957cea367eab781c4d980" + integrity sha512-YJ3xKqtJMAT5/TIZnpAR3I+K+WaDowYbN3xyxI8zxx/Gsypwf9B9h0VB+1Nh6ACAAPRS5NSRje0uVv5i79HYGQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-nullish-coalescing-operator@^7.23.4": + version "7.23.4" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.23.4.tgz#45556aad123fc6e52189ea749e33ce090637346e" + integrity sha512-jHE9EVVqHKAQx+VePv5LLGHjmHSJR76vawFPTdlxR/LVJPfOEGxREQwQfjuZEOPTwG92X3LINSh3M40Rv4zpVA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" + +"@babel/plugin-transform-numeric-separator@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.23.4.tgz#03d08e3691e405804ecdd19dd278a40cca531f29" + integrity sha512-mps6auzgwjRrwKEZA05cOwuDc9FAzoyFS4ZsG/8F43bTLf/TgkJg7QXOrPO1JO599iA3qgK9MXdMGOEC8O1h6Q== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-numeric-separator" "^7.10.4" + +"@babel/plugin-transform-object-rest-spread@^7.24.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.24.0.tgz#7b836ad0088fdded2420ce96d4e1d3ed78b71df1" + integrity sha512-y/yKMm7buHpFFXfxVFS4Vk1ToRJDilIa6fKRioB9Vjichv58TDGXTvqV0dN7plobAmTW5eSEGXDngE+Mm+uO+w== + dependencies: + "@babel/compat-data" "^7.23.5" + "@babel/helper-compilation-targets" "^7.23.6" + "@babel/helper-plugin-utils" "^7.24.0" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-transform-parameters" "^7.23.3" + +"@babel/plugin-transform-object-super@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.23.3.tgz#81fdb636dcb306dd2e4e8fd80db5b2362ed2ebcd" + integrity sha512-BwQ8q0x2JG+3lxCVFohg+KbQM7plfpBwThdW9A6TMtWwLsbDA01Ek2Zb/AgDN39BiZsExm4qrXxjk+P1/fzGrA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-replace-supers" "^7.22.20" + +"@babel/plugin-transform-optional-catch-binding@^7.23.4": + version "7.23.4" + 
resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.23.4.tgz#318066de6dacce7d92fa244ae475aa8d91778017" + integrity sha512-XIq8t0rJPHf6Wvmbn9nFxU6ao4c7WhghTR5WyV8SrJfUFzyxhCm4nhC+iAp3HFhbAKLfYpgzhJ6t4XCtVwqO5A== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + +"@babel/plugin-transform-optional-chaining@^7.23.3", "@babel/plugin-transform-optional-chaining@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.23.4.tgz#6acf61203bdfc4de9d4e52e64490aeb3e52bd017" + integrity sha512-ZU8y5zWOfjM5vZ+asjgAPwDaBjJzgufjES89Rs4Lpq63O300R/kOz30WCLo6BxxX6QVEilwSlpClnG5cZaikTA== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" + +"@babel/plugin-transform-parameters@^7.0.0", "@babel/plugin-transform-parameters@^7.20.7", "@babel/plugin-transform-parameters@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.23.3.tgz#83ef5d1baf4b1072fa6e54b2b0999a7b2527e2af" + integrity sha512-09lMt6UsUb3/34BbECKVbVwrT9bO6lILWln237z7sLaWnMsTi7Yc9fhX5DLpkJzAGfaReXI22wP41SZmnAA3Vw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-private-methods@^7.22.5", "@babel/plugin-transform-private-methods@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.23.3.tgz#b2d7a3c97e278bfe59137a978d53b2c2e038c0e4" + integrity sha512-UzqRcRtWsDMTLrRWFvUBDwmw06tCQH9Rl1uAjfh6ijMSmGYQ+fpdB+cnqRC8EMh5tuuxSv0/TejGL+7vyj+50g== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + 
+"@babel/plugin-transform-private-property-in-object@^7.22.11", "@babel/plugin-transform-private-property-in-object@^7.23.4": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.23.4.tgz#3ec711d05d6608fd173d9b8de39872d8dbf68bf5" + integrity sha512-9G3K1YqTq3F4Vt88Djx1UZ79PDyj+yKRnUy7cZGSMe+a7jkwD259uKKuUzQlPkGam7R+8RJwh5z4xO27fA1o2A== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-create-class-features-plugin" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-private-property-in-object" "^7.14.5" + +"@babel/plugin-transform-property-literals@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.23.3.tgz#54518f14ac4755d22b92162e4a852d308a560875" + integrity sha512-jR3Jn3y7cZp4oEWPFAlRsSWjxKe4PZILGBSd4nis1TsC5qeSpb+nrtihJuDhNI7QHiVbUaiXa0X2RZY3/TI6Nw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-react-display-name@^7.0.0": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.23.3.tgz#70529f034dd1e561045ad3c8152a267f0d7b6200" + integrity sha512-GnvhtVfA2OAtzdX58FJxU19rhoGeQzyVndw3GgtdECQvQFXPEZIOVULHVZGAYmOgmqjXpVpfocAbSjh99V/Fqw== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-react-jsx-self@^7.0.0": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-self/-/plugin-transform-react-jsx-self-7.23.3.tgz#ed3e7dadde046cce761a8e3cf003a13d1a7972d9" + integrity sha512-qXRvbeKDSfwnlJnanVRp0SfuWE5DQhwQr5xtLBzp56Wabyo+4CMosF6Kfp+eOD/4FYpql64XVJ2W0pVLlJZxOQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-react-jsx-source@^7.0.0": + version "7.23.3" + resolved 
"https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx-source/-/plugin-transform-react-jsx-source-7.23.3.tgz#03527006bdc8775247a78643c51d4e715fe39a3e" + integrity sha512-91RS0MDnAWDNvGC6Wio5XYkyWI39FMFO+JK9+4AlgaTH+yWwVTsw7/sn6LK0lH7c5F+TFkpv/3LfCJ1Ydwof/g== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-react-jsx@^7.0.0": + version "7.23.4" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.23.4.tgz#393f99185110cea87184ea47bcb4a7b0c2e39312" + integrity sha512-5xOpoPguCZCRbo/JeHlloSkTA8Bld1J/E1/kLfD1nsuiW1m8tduTA1ERCgIZokDflX/IBzKcqR3l7VlRgiIfHA== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-module-imports" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-jsx" "^7.23.3" + "@babel/types" "^7.23.4" + +"@babel/plugin-transform-regenerator@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.23.3.tgz#141afd4a2057298602069fce7f2dc5173e6c561c" + integrity sha512-KP+75h0KghBMcVpuKisx3XTu9Ncut8Q8TuvGO4IhY+9D5DFEckQefOuIsB/gQ2tG71lCke4NMrtIPS8pOj18BQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + regenerator-transform "^0.15.2" + +"@babel/plugin-transform-reserved-words@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.23.3.tgz#4130dcee12bd3dd5705c587947eb715da12efac8" + integrity sha512-QnNTazY54YqgGxwIexMZva9gqbPa15t/x9VS+0fsEFWplwVpXYZivtgl43Z1vMpc1bdPP2PP8siFeVcnFvA3Cg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-runtime@^7.0.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.24.0.tgz#e308fe27d08b74027d42547081eefaf4f2ffbcc9" + integrity 
sha512-zc0GA5IitLKJrSfXlXmp8KDqLrnGECK7YRfQBmEKg1NmBOQ7e+KuclBEKJgzifQeUYLdNiAw4B4bjyvzWVLiSA== + dependencies: + "@babel/helper-module-imports" "^7.22.15" + "@babel/helper-plugin-utils" "^7.24.0" + babel-plugin-polyfill-corejs2 "^0.4.8" + babel-plugin-polyfill-corejs3 "^0.9.0" + babel-plugin-polyfill-regenerator "^0.5.5" + semver "^6.3.1" + +"@babel/plugin-transform-shorthand-properties@^7.0.0", "@babel/plugin-transform-shorthand-properties@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.23.3.tgz#97d82a39b0e0c24f8a981568a8ed851745f59210" + integrity sha512-ED2fgqZLmexWiN+YNFX26fx4gh5qHDhn1O2gvEhreLW2iI63Sqm4llRLCXALKrCnbN4Jy0VcMQZl/SAzqug/jg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-spread@^7.0.0", "@babel/plugin-transform-spread@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.23.3.tgz#41d17aacb12bde55168403c6f2d6bdca563d362c" + integrity sha512-VvfVYlrlBVu+77xVTOAoxQ6mZbnIq5FM0aGBSFEcIh03qHf+zNqA4DC/3XMUozTg7bZV3e3mZQ0i13VB6v5yUg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.22.5" + +"@babel/plugin-transform-sticky-regex@^7.0.0", "@babel/plugin-transform-sticky-regex@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.23.3.tgz#dec45588ab4a723cb579c609b294a3d1bd22ff04" + integrity sha512-HZOyN9g+rtvnOU3Yh7kSxXrKbzgrm5X4GncPY1QOquu7epga5MxKHVpYu2hvQnry/H+JjckSYRb93iNfsioAGg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-template-literals@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.23.3.tgz#5f0f028eb14e50b5d0f76be57f90045757539d07" + 
integrity sha512-Flok06AYNp7GV2oJPZZcP9vZdszev6vPBkHLwxwSpaIqx75wn6mUd3UFWsSsA0l8nXAKkyCmL/sR02m8RYGeHg== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-typeof-symbol@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.23.3.tgz#9dfab97acc87495c0c449014eb9c547d8966bca4" + integrity sha512-4t15ViVnaFdrPC74be1gXBSMzXk3B4Us9lP7uLRQHTFpV5Dvt33pn+2MyyNxmN3VTTm3oTrZVMUmuw3oBnQ2oQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-typescript@^7.23.3", "@babel/plugin-transform-typescript@^7.5.0": + version "7.23.6" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.23.6.tgz#aa36a94e5da8d94339ae3a4e22d40ed287feb34c" + integrity sha512-6cBG5mBvUu4VUD04OHKnYzbuHNP8huDsD3EDqqpIpsswTDoqHCjLoHb6+QgsV1WsT2nipRqCPgxD3LXnEO7XfA== + dependencies: + "@babel/helper-annotate-as-pure" "^7.22.5" + "@babel/helper-create-class-features-plugin" "^7.23.6" + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/plugin-syntax-typescript" "^7.23.3" + +"@babel/plugin-transform-unicode-escapes@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.23.3.tgz#1f66d16cab01fab98d784867d24f70c1ca65b925" + integrity sha512-OMCUx/bU6ChE3r4+ZdylEqAjaQgHAgipgW8nsCfu5pGqDcFytVd91AwRvUJSBZDz0exPGgnjoqhgRYLRjFZc9Q== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-unicode-property-regex@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.23.3.tgz#19e234129e5ffa7205010feec0d94c251083d7ad" + integrity sha512-KcLIm+pDZkWZQAFJ9pdfmh89EwVfmNovFBcXko8szpBeF8z68kWIPeKlmSOkT9BXJxs2C0uk+5LxoxIv62MROA== + dependencies: + "@babel/helper-create-regexp-features-plugin" 
"^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-unicode-regex@^7.0.0", "@babel/plugin-transform-unicode-regex@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.23.3.tgz#26897708d8f42654ca4ce1b73e96140fbad879dc" + integrity sha512-wMHpNA4x2cIA32b/ci3AfwNgheiva2W0WUKWTK7vBHBhDKfPsc5cFGNWm69WBqpwd86u1qwZ9PWevKqm1A3yAw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/plugin-transform-unicode-sets-regex@^7.23.3": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.23.3.tgz#4fb6f0a719c2c5859d11f6b55a050cc987f3799e" + integrity sha512-W7lliA/v9bNR83Qc3q1ip9CQMZ09CcHDbHfbLRDNuAhn1Mvkr1ZNF7hPmztMQvtTGVLJ9m8IZqWsTkXOml8dbw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.22.15" + "@babel/helper-plugin-utils" "^7.22.5" + +"@babel/preset-env@^7.20.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/preset-env/-/preset-env-7.24.0.tgz#11536a7f4b977294f0bdfad780f01a8ac8e183fc" + integrity sha512-ZxPEzV9IgvGn73iK0E6VB9/95Nd7aMFpbE0l8KQFDG70cOV9IxRP7Y2FUPmlK0v6ImlLqYX50iuZ3ZTVhOF2lA== + dependencies: + "@babel/compat-data" "^7.23.5" + "@babel/helper-compilation-targets" "^7.23.6" + "@babel/helper-plugin-utils" "^7.24.0" + "@babel/helper-validator-option" "^7.23.5" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.23.3" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.23.3" + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.23.7" + "@babel/plugin-proposal-private-property-in-object" "7.21.0-placeholder-for-preset-env.2" + "@babel/plugin-syntax-async-generators" "^7.8.4" + "@babel/plugin-syntax-class-properties" "^7.12.13" + "@babel/plugin-syntax-class-static-block" "^7.14.5" + 
"@babel/plugin-syntax-dynamic-import" "^7.8.3" + "@babel/plugin-syntax-export-namespace-from" "^7.8.3" + "@babel/plugin-syntax-import-assertions" "^7.23.3" + "@babel/plugin-syntax-import-attributes" "^7.23.3" + "@babel/plugin-syntax-import-meta" "^7.10.4" + "@babel/plugin-syntax-json-strings" "^7.8.3" + "@babel/plugin-syntax-logical-assignment-operators" "^7.10.4" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" + "@babel/plugin-syntax-numeric-separator" "^7.10.4" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" + "@babel/plugin-syntax-private-property-in-object" "^7.14.5" + "@babel/plugin-syntax-top-level-await" "^7.14.5" + "@babel/plugin-syntax-unicode-sets-regex" "^7.18.6" + "@babel/plugin-transform-arrow-functions" "^7.23.3" + "@babel/plugin-transform-async-generator-functions" "^7.23.9" + "@babel/plugin-transform-async-to-generator" "^7.23.3" + "@babel/plugin-transform-block-scoped-functions" "^7.23.3" + "@babel/plugin-transform-block-scoping" "^7.23.4" + "@babel/plugin-transform-class-properties" "^7.23.3" + "@babel/plugin-transform-class-static-block" "^7.23.4" + "@babel/plugin-transform-classes" "^7.23.8" + "@babel/plugin-transform-computed-properties" "^7.23.3" + "@babel/plugin-transform-destructuring" "^7.23.3" + "@babel/plugin-transform-dotall-regex" "^7.23.3" + "@babel/plugin-transform-duplicate-keys" "^7.23.3" + "@babel/plugin-transform-dynamic-import" "^7.23.4" + "@babel/plugin-transform-exponentiation-operator" "^7.23.3" + "@babel/plugin-transform-export-namespace-from" "^7.23.4" + "@babel/plugin-transform-for-of" "^7.23.6" + "@babel/plugin-transform-function-name" "^7.23.3" + "@babel/plugin-transform-json-strings" "^7.23.4" + "@babel/plugin-transform-literals" "^7.23.3" + "@babel/plugin-transform-logical-assignment-operators" "^7.23.4" + "@babel/plugin-transform-member-expression-literals" "^7.23.3" + 
"@babel/plugin-transform-modules-amd" "^7.23.3" + "@babel/plugin-transform-modules-commonjs" "^7.23.3" + "@babel/plugin-transform-modules-systemjs" "^7.23.9" + "@babel/plugin-transform-modules-umd" "^7.23.3" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.22.5" + "@babel/plugin-transform-new-target" "^7.23.3" + "@babel/plugin-transform-nullish-coalescing-operator" "^7.23.4" + "@babel/plugin-transform-numeric-separator" "^7.23.4" + "@babel/plugin-transform-object-rest-spread" "^7.24.0" + "@babel/plugin-transform-object-super" "^7.23.3" + "@babel/plugin-transform-optional-catch-binding" "^7.23.4" + "@babel/plugin-transform-optional-chaining" "^7.23.4" + "@babel/plugin-transform-parameters" "^7.23.3" + "@babel/plugin-transform-private-methods" "^7.23.3" + "@babel/plugin-transform-private-property-in-object" "^7.23.4" + "@babel/plugin-transform-property-literals" "^7.23.3" + "@babel/plugin-transform-regenerator" "^7.23.3" + "@babel/plugin-transform-reserved-words" "^7.23.3" + "@babel/plugin-transform-shorthand-properties" "^7.23.3" + "@babel/plugin-transform-spread" "^7.23.3" + "@babel/plugin-transform-sticky-regex" "^7.23.3" + "@babel/plugin-transform-template-literals" "^7.23.3" + "@babel/plugin-transform-typeof-symbol" "^7.23.3" + "@babel/plugin-transform-unicode-escapes" "^7.23.3" + "@babel/plugin-transform-unicode-property-regex" "^7.23.3" + "@babel/plugin-transform-unicode-regex" "^7.23.3" + "@babel/plugin-transform-unicode-sets-regex" "^7.23.3" + "@babel/preset-modules" "0.1.6-no-external-plugins" + babel-plugin-polyfill-corejs2 "^0.4.8" + babel-plugin-polyfill-corejs3 "^0.9.0" + babel-plugin-polyfill-regenerator "^0.5.5" + core-js-compat "^3.31.0" + semver "^6.3.1" + +"@babel/preset-flow@^7.13.13": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/preset-flow/-/preset-flow-7.24.0.tgz#0de60271b0a439b415501c5b28f685fbcb080e1c" + integrity sha512-cum/nSi82cDaSJ21I4PgLTVlj0OXovFk6GRguJYe/IKg6y6JHLTbJhybtX4k35WT9wdeJfEVjycTixMhBHd0Dg== 
+ dependencies: + "@babel/helper-plugin-utils" "^7.24.0" + "@babel/helper-validator-option" "^7.23.5" + "@babel/plugin-transform-flow-strip-types" "^7.23.3" + +"@babel/preset-modules@0.1.6-no-external-plugins": + version "0.1.6-no-external-plugins" + resolved "https://registry.yarnpkg.com/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz#ccb88a2c49c817236861fee7826080573b8a923a" + integrity sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA== + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/types" "^7.4.4" + esutils "^2.0.2" + +"@babel/preset-typescript@^7.13.0": + version "7.23.3" + resolved "https://registry.yarnpkg.com/@babel/preset-typescript/-/preset-typescript-7.23.3.tgz#14534b34ed5b6d435aa05f1ae1c5e7adcc01d913" + integrity sha512-17oIGVlqz6CchO9RFYn5U6ZpWRZIngayYCtrPRSgANSwC2V1Jb+iP74nVxzzXJte8b8BYxrL1yY96xfhTBrNNQ== + dependencies: + "@babel/helper-plugin-utils" "^7.22.5" + "@babel/helper-validator-option" "^7.22.15" + "@babel/plugin-syntax-jsx" "^7.23.3" + "@babel/plugin-transform-modules-commonjs" "^7.23.3" + "@babel/plugin-transform-typescript" "^7.23.3" + +"@babel/register@^7.13.16": + version "7.23.7" + resolved "https://registry.yarnpkg.com/@babel/register/-/register-7.23.7.tgz#485a5e7951939d21304cae4af1719fdb887bc038" + integrity sha512-EjJeB6+kvpk+Y5DAkEAmbOBEFkh9OASx0huoEkqYTFxAZHzOAX2Oh5uwAUuL2rUddqfM0SA+KPXV2TbzoZ2kvQ== + dependencies: + clone-deep "^4.0.1" + find-cache-dir "^2.0.0" + make-dir "^2.1.0" + pirates "^4.0.6" + source-map-support "^0.5.16" + +"@babel/regjsgen@^0.8.0": + version "0.8.0" + resolved "https://registry.yarnpkg.com/@babel/regjsgen/-/regjsgen-0.8.0.tgz#f0ba69b075e1f05fb2825b7fad991e7adbb18310" + integrity sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA== + +"@babel/runtime@^7.0.0", "@babel/runtime@^7.20.0", "@babel/runtime@^7.8.4": + version "7.24.0" + resolved 
"https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.24.0.tgz#584c450063ffda59697021430cb47101b085951e" + integrity sha512-Chk32uHMg6TnQdvw2e9IlqPpFX/6NLuK0Ys2PqLb7/gL5uFn9mXvK715FGLlOLQrcO4qIkNHkvPGktzzXexsFw== + dependencies: + regenerator-runtime "^0.14.0" + +"@babel/template@^7.0.0", "@babel/template@^7.22.15", "@babel/template@^7.24.0", "@babel/template@^7.3.3": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/template/-/template-7.24.0.tgz#c6a524aa93a4a05d66aaf31654258fae69d87d50" + integrity sha512-Bkf2q8lMB0AFpX0NFEqSbx1OkTHf0f+0j82mkw+ZpzBnkk7e9Ql0891vlfgi+kHwOk8tQjiQHpqh4LaSa0fKEA== + dependencies: + "@babel/code-frame" "^7.23.5" + "@babel/parser" "^7.24.0" + "@babel/types" "^7.24.0" + +"@babel/traverse@^7.20.0", "@babel/traverse@^7.24.0": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/traverse/-/traverse-7.24.0.tgz#4a408fbf364ff73135c714a2ab46a5eab2831b1e" + integrity sha512-HfuJlI8qq3dEDmNU5ChzzpZRWq+oxCZQyMzIMEqLho+AQnhMnKQUzH6ydo3RBl/YjPCuk68Y6s0Gx0AeyULiWw== + dependencies: + "@babel/code-frame" "^7.23.5" + "@babel/generator" "^7.23.6" + "@babel/helper-environment-visitor" "^7.22.20" + "@babel/helper-function-name" "^7.23.0" + "@babel/helper-hoist-variables" "^7.22.5" + "@babel/helper-split-export-declaration" "^7.22.6" + "@babel/parser" "^7.24.0" + "@babel/types" "^7.24.0" + debug "^4.3.1" + globals "^11.1.0" + +"@babel/types@^7.0.0", "@babel/types@^7.20.0", "@babel/types@^7.20.7", "@babel/types@^7.22.15", "@babel/types@^7.22.19", "@babel/types@^7.22.5", "@babel/types@^7.23.0", "@babel/types@^7.23.4", "@babel/types@^7.23.6", "@babel/types@^7.24.0", "@babel/types@^7.3.3", "@babel/types@^7.4.4": + version "7.24.0" + resolved "https://registry.yarnpkg.com/@babel/types/-/types-7.24.0.tgz#3b951f435a92e7333eba05b7566fd297960ea1bf" + integrity sha512-+j7a5c253RfKh8iABBhywc8NSfP5LURe7Uh4qpsh6jc+aLJguvmIUBdjSdEMQv2bENrCR5MfRdjGo7vzS/ob7w== + dependencies: + "@babel/helper-string-parser" "^7.23.4" + 
"@babel/helper-validator-identifier" "^7.22.20" + to-fast-properties "^2.0.0" + +"@bcoe/v8-coverage@^0.2.3": + version "0.2.3" + resolved "https://registry.yarnpkg.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" + integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw== + +"@eslint-community/eslint-utils@^4.2.0", "@eslint-community/eslint-utils@^4.4.0": + version "4.4.0" + resolved "https://registry.yarnpkg.com/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz#a23514e8fb9af1269d5f7788aa556798d61c6b59" + integrity sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA== + dependencies: + eslint-visitor-keys "^3.3.0" + +"@eslint-community/regexpp@^4.5.1", "@eslint-community/regexpp@^4.6.1": + version "4.10.0" + resolved "https://registry.yarnpkg.com/@eslint-community/regexpp/-/regexpp-4.10.0.tgz#548f6de556857c8bb73bbee70c35dc82a2e74d63" + integrity sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA== + +"@eslint/eslintrc@^2.1.4": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@eslint/eslintrc/-/eslintrc-2.1.4.tgz#388a269f0f25c1b6adc317b5a2c55714894c70ad" + integrity sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ== + dependencies: + ajv "^6.12.4" + debug "^4.3.2" + espree "^9.6.0" + globals "^13.19.0" + ignore "^5.2.0" + import-fresh "^3.2.1" + js-yaml "^4.1.0" + minimatch "^3.1.2" + strip-json-comments "^3.1.1" + +"@eslint/js@8.57.0": + version "8.57.0" + resolved "https://registry.yarnpkg.com/@eslint/js/-/js-8.57.0.tgz#a5417ae8427873f1dd08b70b3574b453e67b5f7f" + integrity sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g== + +"@hapi/hoek@^9.0.0", "@hapi/hoek@^9.3.0": + version "9.3.0" + resolved 
"https://registry.yarnpkg.com/@hapi/hoek/-/hoek-9.3.0.tgz#8368869dcb735be2e7f5cb7647de78e167a251fb" + integrity sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ== + +"@hapi/topo@^5.1.0": + version "5.1.0" + resolved "https://registry.yarnpkg.com/@hapi/topo/-/topo-5.1.0.tgz#dc448e332c6c6e37a4dc02fd84ba8d44b9afb012" + integrity sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg== + dependencies: + "@hapi/hoek" "^9.0.0" + +"@humanwhocodes/config-array@^0.11.14": + version "0.11.14" + resolved "https://registry.yarnpkg.com/@humanwhocodes/config-array/-/config-array-0.11.14.tgz#d78e481a039f7566ecc9660b4ea7fe6b1fec442b" + integrity sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg== + dependencies: + "@humanwhocodes/object-schema" "^2.0.2" + debug "^4.3.1" + minimatch "^3.0.5" + +"@humanwhocodes/module-importer@^1.0.1": + version "1.0.1" + resolved "https://registry.yarnpkg.com/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz#af5b2691a22b44be847b0ca81641c5fb6ad0172c" + integrity sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA== + +"@humanwhocodes/object-schema@^2.0.2": + version "2.0.2" + resolved "https://registry.yarnpkg.com/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz#d9fae00a2d5cb40f92cfe64b47ad749fbc38f917" + integrity sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw== + +"@isaacs/ttlcache@^1.4.1": + version "1.4.1" + resolved "https://registry.yarnpkg.com/@isaacs/ttlcache/-/ttlcache-1.4.1.tgz#21fb23db34e9b6220c6ba023a0118a2dd3461ea2" + integrity sha512-RQgQ4uQ+pLbqXfOmieB91ejmLwvSgv9nLx6sT6sD83s7umBypgg+OIBOBbEUiJXrfpnp9j0mRhYYdzp9uqq3lA== + +"@istanbuljs/load-nyc-config@^1.0.0": + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/@istanbuljs/load-nyc-config/-/load-nyc-config-1.1.0.tgz#fd3db1d59ecf7cf121e80650bb86712f9b55eced" + integrity sha512-VjeHSlIzpv/NyD3N0YuHfXOPDIixcA1q2ZV98wsMqcYlPmv2n3Yb2lYP9XMElnaFVXg5A7YLTeLu6V84uQDjmQ== + dependencies: + camelcase "^5.3.1" + find-up "^4.1.0" + get-package-type "^0.1.0" + js-yaml "^3.13.1" + resolve-from "^5.0.0" + +"@istanbuljs/schema@^0.1.2", "@istanbuljs/schema@^0.1.3": + version "0.1.3" + resolved "https://registry.yarnpkg.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98" + integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA== + +"@jest/console@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/console/-/console-29.7.0.tgz#cd4822dbdb84529265c5a2bdb529a3c9cc950ffc" + integrity sha512-5Ni4CU7XHQi32IJ398EEP4RrB8eV09sXP2ROqD4bksHrnTree52PsxvX8tpL8LvTZ3pFzXyPbNQReSN41CAhOg== + dependencies: + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + slash "^3.0.0" + +"@jest/core@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/core/-/core-29.7.0.tgz#b6cccc239f30ff36609658c5a5e2291757ce448f" + integrity sha512-n7aeXWKMnGtDA48y8TLWJPJmLmmZ642Ceo78cYWEpiD7FzDgmNDV/GCVRorPABdXLJZ/9wzzgZAlHjXjxDHGsg== + dependencies: + "@jest/console" "^29.7.0" + "@jest/reporters" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + ansi-escapes "^4.2.1" + chalk "^4.0.0" + ci-info "^3.2.0" + exit "^0.1.2" + graceful-fs "^4.2.9" + jest-changed-files "^29.7.0" + jest-config "^29.7.0" + jest-haste-map "^29.7.0" + jest-message-util "^29.7.0" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-resolve-dependencies "^29.7.0" + jest-runner "^29.7.0" + jest-runtime "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + jest-watcher "^29.7.0" + 
micromatch "^4.0.4" + pretty-format "^29.7.0" + slash "^3.0.0" + strip-ansi "^6.0.0" + +"@jest/create-cache-key-function@^29.6.3": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/create-cache-key-function/-/create-cache-key-function-29.7.0.tgz#793be38148fab78e65f40ae30c36785f4ad859f0" + integrity sha512-4QqS3LY5PBmTRHj9sAg1HLoPzqAI0uOX6wI/TRqHIcOxlFidy6YEmCQJk6FSZjNLGCeubDMfmkWL+qaLKhSGQA== + dependencies: + "@jest/types" "^29.6.3" + +"@jest/environment@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/environment/-/environment-29.7.0.tgz#24d61f54ff1f786f3cd4073b4b94416383baf2a7" + integrity sha512-aQIfHDq33ExsN4jP1NWGXhxgQ/wixs60gDiKO+XVMd8Mn0NWPWgc34ZQDTb2jKaUWQ7MuwoitXAsN2XVXNMpAw== + dependencies: + "@jest/fake-timers" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + jest-mock "^29.7.0" + +"@jest/expect-utils@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/expect-utils/-/expect-utils-29.7.0.tgz#023efe5d26a8a70f21677d0a1afc0f0a44e3a1c6" + integrity sha512-GlsNBWiFQFCVi9QVSx7f5AgMeLxe9YCCs5PuP2O2LdjDAA8Jh9eX7lA1Jq/xdXw3Wb3hyvlFNfZIfcRetSzYcA== + dependencies: + jest-get-type "^29.6.3" + +"@jest/expect@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/expect/-/expect-29.7.0.tgz#76a3edb0cb753b70dfbfe23283510d3d45432bf2" + integrity sha512-8uMeAMycttpva3P1lBHB8VciS9V0XAr3GymPpipdyQXbBcuhkLQOSe8E/p92RyAdToS6ZD1tFkX+CkhoECE0dQ== + dependencies: + expect "^29.7.0" + jest-snapshot "^29.7.0" + +"@jest/fake-timers@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/fake-timers/-/fake-timers-29.7.0.tgz#fd91bf1fffb16d7d0d24a426ab1a47a49881a565" + integrity sha512-q4DH1Ha4TTFPdxLsqDXK1d3+ioSL7yL5oCMJZgDYm6i+6CygW5E5xVr/D1HdsGxjt1ZWSfUAs9OxSB/BNelWrQ== + dependencies: + "@jest/types" "^29.6.3" + "@sinonjs/fake-timers" "^10.0.2" + "@types/node" "*" + jest-message-util "^29.7.0" + jest-mock "^29.7.0" + jest-util "^29.7.0" + +"@jest/globals@^29.7.0": 
+ version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/globals/-/globals-29.7.0.tgz#8d9290f9ec47ff772607fa864ca1d5a2efae1d4d" + integrity sha512-mpiz3dutLbkW2MNFubUGUEVLkTGiqW6yLVTA+JbP6fI6J5iL9Y0Nlg8k95pcF8ctKwCS7WVxteBs29hhfAotzQ== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/expect" "^29.7.0" + "@jest/types" "^29.6.3" + jest-mock "^29.7.0" + +"@jest/reporters@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/reporters/-/reporters-29.7.0.tgz#04b262ecb3b8faa83b0b3d321623972393e8f4c7" + integrity sha512-DApq0KJbJOEzAFYjHADNNxAE3KbhxQB1y5Kplb5Waqw6zVbuWatSnMjE5gs8FUgEPmNsnZA3NCWl9NG0ia04Pg== + dependencies: + "@bcoe/v8-coverage" "^0.2.3" + "@jest/console" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@jridgewell/trace-mapping" "^0.3.18" + "@types/node" "*" + chalk "^4.0.0" + collect-v8-coverage "^1.0.0" + exit "^0.1.2" + glob "^7.1.3" + graceful-fs "^4.2.9" + istanbul-lib-coverage "^3.0.0" + istanbul-lib-instrument "^6.0.0" + istanbul-lib-report "^3.0.0" + istanbul-lib-source-maps "^4.0.0" + istanbul-reports "^3.1.3" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + jest-worker "^29.7.0" + slash "^3.0.0" + string-length "^4.0.1" + strip-ansi "^6.0.0" + v8-to-istanbul "^9.0.1" + +"@jest/schemas@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/schemas/-/schemas-29.6.3.tgz#430b5ce8a4e0044a7e3819663305a7b3091c8e03" + integrity sha512-mo5j5X+jIZmJQveBKeS/clAueipV7KgiX1vMgCxam1RNYiqE1w62n0/tJJnHtjW8ZHcQco5gY85jA3mi0L+nSA== + dependencies: + "@sinclair/typebox" "^0.27.8" + +"@jest/source-map@^29.6.3": + version "29.6.3" + resolved "https://registry.yarnpkg.com/@jest/source-map/-/source-map-29.6.3.tgz#d90ba772095cf37a34a5eb9413f1b562a08554c4" + integrity sha512-MHjT95QuipcPrpLM+8JMSzFx6eHp5Bm+4XeFDJlwsvVBjmKNiIAvasGK2fxz2WbGRlnvqehFbh07MMa7n3YJnw== + dependencies: + "@jridgewell/trace-mapping" "^0.3.18" + callsites "^3.0.0" + graceful-fs 
"^4.2.9" + +"@jest/test-result@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/test-result/-/test-result-29.7.0.tgz#8db9a80aa1a097bb2262572686734baed9b1657c" + integrity sha512-Fdx+tv6x1zlkJPcWXmMDAG2HBnaR9XPSd5aDWQVsfrZmLVT3lU1cwyxLgRmXR9yrq4NBoEm9BMsfgFzTQAbJYA== + dependencies: + "@jest/console" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/istanbul-lib-coverage" "^2.0.0" + collect-v8-coverage "^1.0.0" + +"@jest/test-sequencer@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/test-sequencer/-/test-sequencer-29.7.0.tgz#6cef977ce1d39834a3aea887a1726628a6f072ce" + integrity sha512-GQwJ5WZVrKnOJuiYiAF52UNUJXgTZx1NHjFSEB0qEMmSZKAkdMoIzw/Cj6x6NF4AvV23AUqDpFzQkN/eYCYTxw== + dependencies: + "@jest/test-result" "^29.7.0" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + slash "^3.0.0" + +"@jest/transform@^29.7.0": + version "29.7.0" + resolved "https://registry.yarnpkg.com/@jest/transform/-/transform-29.7.0.tgz#df2dd9c346c7d7768b8a06639994640c642e284c" + integrity sha512-ok/BTPFzFKVMwO5eOHRrvnBVHdRy9IrsrW1GpMaQ9MCnilNLXQKmAX8s1YXDFaai9xJpac2ySzV0YeRRECr2Vw== + dependencies: + "@babel/core" "^7.11.6" + "@jest/types" "^29.6.3" + "@jridgewell/trace-mapping" "^0.3.18" + babel-plugin-istanbul "^6.1.1" + chalk "^4.0.0" + convert-source-map "^2.0.0" + fast-json-stable-stringify "^2.1.0" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + jest-regex-util "^29.6.3" + jest-util "^29.7.0" + micromatch "^4.0.4" + pirates "^4.0.4" + slash "^3.0.0" + write-file-atomic "^4.0.2" + +"@jest/types@^26.6.2": + version "26.6.2" + resolved "https://registry.yarnpkg.com/@jest/types/-/types-26.6.2.tgz#bef5a532030e1d88a2f5a6d933f84e97226ed48e" + integrity sha512-fC6QCp7Sc5sX6g8Tvbmj4XUTbyrik0akgRy03yjXbQaBWWNWGE7SGtJk98m0N8nzegD/7SggrUlivxo5ax4KWQ== + dependencies: + "@types/istanbul-lib-coverage" "^2.0.0" + "@types/istanbul-reports" "^3.0.0" + "@types/node" "*" + "@types/yargs" "^15.0.0" + chalk "^4.0.0" + +"@jest/types@^29.6.3": + version 
"29.6.3" + resolved "https://registry.yarnpkg.com/@jest/types/-/types-29.6.3.tgz#1131f8cf634e7e84c5e77bab12f052af585fba59" + integrity sha512-u3UPsIilWKOM3F9CXtrG8LEJmNxwoCQC/XVj4IKYXvvpx7QIi/Kg1LI5uDmDpKlac62NUtX7eLjRh+jVZcLOzw== + dependencies: + "@jest/schemas" "^29.6.3" + "@types/istanbul-lib-coverage" "^2.0.0" + "@types/istanbul-reports" "^3.0.0" + "@types/node" "*" + "@types/yargs" "^17.0.8" + chalk "^4.0.0" + +"@jridgewell/gen-mapping@^0.3.2", "@jridgewell/gen-mapping@^0.3.5": + version "0.3.5" + resolved "https://registry.yarnpkg.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz#dcce6aff74bdf6dad1a95802b69b04a2fcb1fb36" + integrity sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg== + dependencies: + "@jridgewell/set-array" "^1.2.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.24" + +"@jridgewell/resolve-uri@^3.1.0": + version "3.1.2" + resolved "https://registry.yarnpkg.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== + +"@jridgewell/set-array@^1.2.1": + version "1.2.1" + resolved "https://registry.yarnpkg.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" + integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== + +"@jridgewell/source-map@^0.3.3": + version "0.3.6" + resolved "https://registry.yarnpkg.com/@jridgewell/source-map/-/source-map-0.3.6.tgz#9d71ca886e32502eb9362c9a74a46787c36df81a" + integrity sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ== + dependencies: + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" + +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14": + version "1.4.15" + resolved 
"https://registry.yarnpkg.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz#d7c6e6755c78567a951e04ab52ef0fd26de59f32" + integrity sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg== + +"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.17", "@jridgewell/trace-mapping@^0.3.18", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": + version "0.3.25" + resolved "https://registry.yarnpkg.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" + integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== + dependencies: + "@jridgewell/resolve-uri" "^3.1.0" + "@jridgewell/sourcemap-codec" "^1.4.14" + +"@nicolo-ribaudo/eslint-scope-5-internals@5.1.1-v1": + version "5.1.1-v1" + resolved "https://registry.yarnpkg.com/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz#dbf733a965ca47b1973177dc0bb6c889edcfb129" + integrity sha512-54/JRvkLIzzDWshCWfuhadfrfZVPiElY8Fcgmg1HroEly/EDSszzhBAsarCux+D/kOslTRquNzuyGSmUSTTHGg== + dependencies: + eslint-scope "5.1.1" + +"@nodelib/fs.scandir@2.1.5": + version "2.1.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== + dependencies: + "@nodelib/fs.stat" "2.0.5" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": + version "2.0.5" + resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== + +"@nodelib/fs.walk@^1.2.3", "@nodelib/fs.walk@^1.2.8": + version "1.2.8" + resolved 
"https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" + integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== + dependencies: + "@nodelib/fs.scandir" "2.1.5" + fastq "^1.6.0" + +"@react-native-community/cli-clean@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-clean/-/cli-clean-13.6.4.tgz#53c07c6f2834a971dc40eab290edcf8ccc5d1e00" + integrity sha512-nS1BJ+2Z+aLmqePxB4AYgJ+C/bgQt02xAgSYtCUv+lneRBGhL2tHRrK8/Iolp0y+yQoUtHHf4txYi90zGXLVfw== + dependencies: + "@react-native-community/cli-tools" "13.6.4" + chalk "^4.1.2" + execa "^5.0.0" + fast-glob "^3.3.2" + +"@react-native-community/cli-config@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-config/-/cli-config-13.6.4.tgz#3004c7bca55cb384b3a99c38c1a48dad24533237" + integrity sha512-GGK415WoTx1R9FXtfb/cTnan9JIWwSm+a5UCuFd6+suzS0oIt1Md1vCzjNh6W1CK3b43rZC2e+3ZU7Ljd7YtyQ== + dependencies: + "@react-native-community/cli-tools" "13.6.4" + chalk "^4.1.2" + cosmiconfig "^5.1.0" + deepmerge "^4.3.0" + fast-glob "^3.3.2" + joi "^17.2.1" + +"@react-native-community/cli-debugger-ui@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-debugger-ui/-/cli-debugger-ui-13.6.4.tgz#3881b9cfe14e66b3ee827a84f19ca9d0283fd764" + integrity sha512-9Gs31s6tA1kuEo69ay9qLgM3x2gsN/RI994DCUKnFSW+qSusQJyyrmfllR2mGU3Wl1W09/nYpIg87W9JPf5y4A== + dependencies: + serve-static "^1.13.1" + +"@react-native-community/cli-doctor@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-doctor/-/cli-doctor-13.6.4.tgz#07e5c2f163807e61ce0ba12901903e591177e3d3" + integrity sha512-lWOXCISH/cHtLvO0cWTr+IPSzA54FewVOw7MoCMEvWusH+1n7c3hXTAve78mLozGQ7iuUufkHFWwKf3dzOkflQ== + dependencies: + "@react-native-community/cli-config" "13.6.4" + "@react-native-community/cli-platform-android" 
"13.6.4" + "@react-native-community/cli-platform-apple" "13.6.4" + "@react-native-community/cli-platform-ios" "13.6.4" + "@react-native-community/cli-tools" "13.6.4" + chalk "^4.1.2" + command-exists "^1.2.8" + deepmerge "^4.3.0" + envinfo "^7.10.0" + execa "^5.0.0" + hermes-profile-transformer "^0.0.6" + node-stream-zip "^1.9.1" + ora "^5.4.1" + semver "^7.5.2" + strip-ansi "^5.2.0" + wcwidth "^1.0.1" + yaml "^2.2.1" + +"@react-native-community/cli-hermes@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-hermes/-/cli-hermes-13.6.4.tgz#6d3e9b5c251461e9bb35b04110544db8a4f5968f" + integrity sha512-VIAufA/2wTccbMYBT9o+mQs9baOEpTxCiIdWeVdkPWKzIwtKsLpDZJlUqj4r4rI66mwjFyQ60PhwSzEJ2ApFeQ== + dependencies: + "@react-native-community/cli-platform-android" "13.6.4" + "@react-native-community/cli-tools" "13.6.4" + chalk "^4.1.2" + hermes-profile-transformer "^0.0.6" + +"@react-native-community/cli-platform-android@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-platform-android/-/cli-platform-android-13.6.4.tgz#78ab4c840f4f1f5252ad2fcc5a55f7681ec458cb" + integrity sha512-WhknYwIobKKCqaGCN3BzZEQHTbaZTDiGvcXzevvN867ldfaGdtbH0DVqNunbPoV1RNzeV9qKoQHFdWBkg83tpg== + dependencies: + "@react-native-community/cli-tools" "13.6.4" + chalk "^4.1.2" + execa "^5.0.0" + fast-glob "^3.3.2" + fast-xml-parser "^4.2.4" + logkitty "^0.7.1" + +"@react-native-community/cli-platform-apple@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-platform-apple/-/cli-platform-apple-13.6.4.tgz#4912eaf519800a957745192718822b94655c8119" + integrity sha512-TLBiotdIz0veLbmvNQIdUv9fkBx7m34ANGYqr5nH7TFxdmey+Z+omoBqG/HGpvyR7d0AY+kZzzV4k+HkYHM/aQ== + dependencies: + "@react-native-community/cli-tools" "13.6.4" + chalk "^4.1.2" + execa "^5.0.0" + fast-glob "^3.3.2" + fast-xml-parser "^4.0.12" + ora "^5.4.1" + +"@react-native-community/cli-platform-ios@13.6.4": + version 
"13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-platform-ios/-/cli-platform-ios-13.6.4.tgz#96ec915c6df23b2b7b7e0d8cb3db7368e448d620" + integrity sha512-8Dlva8RY+MY5nhWAj6V7voG3+JOEzDTJmD0FHqL+4p0srvr9v7IEVcxfw5lKBDIUNd0OMAHNevGA+cyz1J60jg== + dependencies: + "@react-native-community/cli-platform-apple" "13.6.4" + +"@react-native-community/cli-server-api@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-server-api/-/cli-server-api-13.6.4.tgz#6bcec7ae387fc3aeb3e78f62561a91962e6fadf7" + integrity sha512-D2qSuYCFwrrUJUM0SDc9l3lEhU02yjf+9Peri/xhspzAhALnsf6Z/H7BCjddMV42g9/eY33LqiGyN5chr83a+g== + dependencies: + "@react-native-community/cli-debugger-ui" "13.6.4" + "@react-native-community/cli-tools" "13.6.4" + compression "^1.7.1" + connect "^3.6.5" + errorhandler "^1.5.1" + nocache "^3.0.1" + pretty-format "^26.6.2" + serve-static "^1.13.1" + ws "^7.5.1" + +"@react-native-community/cli-tools@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-tools/-/cli-tools-13.6.4.tgz#ab396604b6dcf215790807fe89656e779b11f0ec" + integrity sha512-N4oHLLbeTdg8opqJozjClmuTfazo1Mt+oxU7mr7m45VCsFgBqTF70Uwad289TM/3l44PP679NRMAHVYqpIRYtQ== + dependencies: + appdirsjs "^1.2.4" + chalk "^4.1.2" + execa "^5.0.0" + find-up "^5.0.0" + mime "^2.4.1" + node-fetch "^2.6.0" + open "^6.2.0" + ora "^5.4.1" + semver "^7.5.2" + shell-quote "^1.7.3" + sudo-prompt "^9.0.0" + +"@react-native-community/cli-types@13.6.4": + version "13.6.4" + resolved "https://registry.yarnpkg.com/@react-native-community/cli-types/-/cli-types-13.6.4.tgz#e499a3691ee597aa4b93196ff182a4782fae7afb" + integrity sha512-NxGCNs4eYtVC8x0wj0jJ/MZLRy8C+B9l8lY8kShuAcvWTv5JXRqmXjg8uK1aA+xikPh0maq4cc/zLw1roroY/A== + dependencies: + joi "^17.2.1" + +"@react-native-community/cli@13.6.4": + version "13.6.4" + resolved 
"https://registry.yarnpkg.com/@react-native-community/cli/-/cli-13.6.4.tgz#dabe2749470a34533e18aada51d97c94b3568307" + integrity sha512-V7rt2N5JY7M4dJFgdNfR164r3hZdR/Z7V54dv85TFQHRbdwF4QrkG+GeagAU54qrkK/OU8OH3AF2+mKuiNWpGA== + dependencies: + "@react-native-community/cli-clean" "13.6.4" + "@react-native-community/cli-config" "13.6.4" + "@react-native-community/cli-debugger-ui" "13.6.4" + "@react-native-community/cli-doctor" "13.6.4" + "@react-native-community/cli-hermes" "13.6.4" + "@react-native-community/cli-server-api" "13.6.4" + "@react-native-community/cli-tools" "13.6.4" + "@react-native-community/cli-types" "13.6.4" + chalk "^4.1.2" + commander "^9.4.1" + deepmerge "^4.3.0" + execa "^5.0.0" + find-up "^4.1.0" + fs-extra "^8.1.0" + graceful-fs "^4.1.3" + prompts "^2.4.2" + semver "^7.5.2" + +"@react-native/assets-registry@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/assets-registry/-/assets-registry-0.74.81.tgz#76b17f8f79b366ec4f18a0f4e99b7cd466aa5aa7" + integrity sha512-ms+D6pJ6l30epm53pwnAislW79LEUHJxWfe1Cu0LWyTTBlg1OFoqXfB3eIbpe4WyH3nrlkQAh0yyk4huT2mCvw== + +"@react-native/babel-plugin-codegen@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/babel-plugin-codegen/-/babel-plugin-codegen-0.74.81.tgz#80484fb9029038694a92193ae2653529e44aab64" + integrity sha512-Bj6g5/xkLMBAdC6665TbD3uCKCQSmLQpGv3gyqya/ydZpv3dDmDXfkGmO4fqTwEMunzu09Sk55st2ipmuXAaAg== + dependencies: + "@react-native/codegen" "0.74.81" + +"@react-native/babel-preset@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/babel-preset/-/babel-preset-0.74.81.tgz#80d0b96eef35d671f97eaf223c4d770170d7f23f" + integrity sha512-H80B3Y3lBBVC4x9tceTEQq/04lx01gW6ajWCcVbd7sHvGEAxfMFEZUmVZr0451Cafn02wVnDJ8psto1F+0w5lw== + dependencies: + "@babel/core" "^7.20.0" + "@babel/plugin-proposal-async-generator-functions" "^7.0.0" + "@babel/plugin-proposal-class-properties" "^7.18.0" + 
"@babel/plugin-proposal-export-default-from" "^7.0.0" + "@babel/plugin-proposal-logical-assignment-operators" "^7.18.0" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.18.0" + "@babel/plugin-proposal-numeric-separator" "^7.0.0" + "@babel/plugin-proposal-object-rest-spread" "^7.20.0" + "@babel/plugin-proposal-optional-catch-binding" "^7.0.0" + "@babel/plugin-proposal-optional-chaining" "^7.20.0" + "@babel/plugin-syntax-dynamic-import" "^7.8.0" + "@babel/plugin-syntax-export-default-from" "^7.0.0" + "@babel/plugin-syntax-flow" "^7.18.0" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.0.0" + "@babel/plugin-syntax-optional-chaining" "^7.0.0" + "@babel/plugin-transform-arrow-functions" "^7.0.0" + "@babel/plugin-transform-async-to-generator" "^7.20.0" + "@babel/plugin-transform-block-scoping" "^7.0.0" + "@babel/plugin-transform-classes" "^7.0.0" + "@babel/plugin-transform-computed-properties" "^7.0.0" + "@babel/plugin-transform-destructuring" "^7.20.0" + "@babel/plugin-transform-flow-strip-types" "^7.20.0" + "@babel/plugin-transform-function-name" "^7.0.0" + "@babel/plugin-transform-literals" "^7.0.0" + "@babel/plugin-transform-modules-commonjs" "^7.0.0" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.0.0" + "@babel/plugin-transform-parameters" "^7.0.0" + "@babel/plugin-transform-private-methods" "^7.22.5" + "@babel/plugin-transform-private-property-in-object" "^7.22.11" + "@babel/plugin-transform-react-display-name" "^7.0.0" + "@babel/plugin-transform-react-jsx" "^7.0.0" + "@babel/plugin-transform-react-jsx-self" "^7.0.0" + "@babel/plugin-transform-react-jsx-source" "^7.0.0" + "@babel/plugin-transform-runtime" "^7.0.0" + "@babel/plugin-transform-shorthand-properties" "^7.0.0" + "@babel/plugin-transform-spread" "^7.0.0" + "@babel/plugin-transform-sticky-regex" "^7.0.0" + "@babel/plugin-transform-typescript" "^7.5.0" + "@babel/plugin-transform-unicode-regex" "^7.0.0" + "@babel/template" "^7.0.0" + "@react-native/babel-plugin-codegen" 
"0.74.81" + babel-plugin-transform-flow-enums "^0.0.2" + react-refresh "^0.14.0" + +"@react-native/codegen@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/codegen/-/codegen-0.74.81.tgz#1025ffd41f2b4710fd700c9e8e85210b9651a7c4" + integrity sha512-hhXo4ccv2lYWaJrZDsdbRTZ5SzSOdyZ0MY6YXwf3xEFLuSunbUMu17Rz5LXemKXlpVx4KEgJ/TDc2pPVaRPZgA== + dependencies: + "@babel/parser" "^7.20.0" + glob "^7.1.1" + hermes-parser "0.19.1" + invariant "^2.2.4" + jscodeshift "^0.14.0" + mkdirp "^0.5.1" + nullthrows "^1.1.1" + +"@react-native/community-cli-plugin@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/community-cli-plugin/-/community-cli-plugin-0.74.81.tgz#4177207374942c52a86ad52c8c915f46729305ab" + integrity sha512-ezPOwPxbDgrBZLJJMcXryXJXjv3VWt+Mt4jRZiEtvy6pAoi2owSH0b178T5cEZaWsxQN0BbyJ7F/xJsNiF4z0Q== + dependencies: + "@react-native-community/cli-server-api" "13.6.4" + "@react-native-community/cli-tools" "13.6.4" + "@react-native/dev-middleware" "0.74.81" + "@react-native/metro-babel-transformer" "0.74.81" + chalk "^4.0.0" + execa "^5.1.1" + metro "^0.80.3" + metro-config "^0.80.3" + metro-core "^0.80.3" + node-fetch "^2.2.0" + querystring "^0.2.1" + readline "^1.3.0" + +"@react-native/debugger-frontend@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/debugger-frontend/-/debugger-frontend-0.74.81.tgz#17cefe2b3ff485071bd30d819995867fd145da27" + integrity sha512-HCYF1/88AfixG75558HkNh9wcvGweRaSZGBA71KoZj03umXM8XJy0/ZpacGOml2Fwiqpil72gi6uU+rypcc/vw== + +"@react-native/dev-middleware@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/dev-middleware/-/dev-middleware-0.74.81.tgz#120ab62982a48cba90c7724d099ddaa50184c200" + integrity sha512-x2IpvUJN1LJE0WmPsSfQIbQaa9xwH+2VDFOUrzuO9cbQap8rNfZpcvVNbrZgrlKbgS4LXbbsj6VSL8b6SnMKMA== + dependencies: + "@isaacs/ttlcache" "^1.4.1" + "@react-native/debugger-frontend" "0.74.81" + 
"@rnx-kit/chromium-edge-launcher" "^1.0.0" + chrome-launcher "^0.15.2" + connect "^3.6.5" + debug "^2.2.0" + node-fetch "^2.2.0" + nullthrows "^1.1.1" + open "^7.0.3" + selfsigned "^2.4.1" + serve-static "^1.13.1" + temp-dir "^2.0.0" + ws "^6.2.2" + +"@react-native/eslint-config@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/eslint-config/-/eslint-config-0.74.81.tgz#36db043a03a024c0e5daf14707c86acb8218bcd5" + integrity sha512-XIBjvKxNJYzON6dInZcpuVDRNGC4QYXtwFu6KUVpnPbWVmOSP1PzUVy8R+y0Vh2FOpYnKmoCLlMU1V4evnZmpw== + dependencies: + "@babel/core" "^7.20.0" + "@babel/eslint-parser" "^7.20.0" + "@react-native/eslint-plugin" "0.74.81" + "@typescript-eslint/eslint-plugin" "^6.7.4" + "@typescript-eslint/parser" "^6.7.4" + eslint-config-prettier "^8.5.0" + eslint-plugin-eslint-comments "^3.2.0" + eslint-plugin-ft-flow "^2.0.1" + eslint-plugin-jest "^26.5.3" + eslint-plugin-prettier "^4.2.1" + eslint-plugin-react "^7.30.1" + eslint-plugin-react-hooks "^4.6.0" + eslint-plugin-react-native "^4.0.0" + +"@react-native/eslint-plugin@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/eslint-plugin/-/eslint-plugin-0.74.81.tgz#ac53da7c41a35948b0f9d01d88d2a858e879edb1" + integrity sha512-vlbLJ38MFJzcEgNxNswjgDRELvZX5e4SmGhtN9N1ZQpXLkgo3hs+l2m4ulSpKhSmqpbacB5XbuTTMgKOsLj/5w== + +"@react-native/gradle-plugin@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/gradle-plugin/-/gradle-plugin-0.74.81.tgz#aac01999b1005bba3213f504deee7efaadb62c1e" + integrity sha512-7YQ4TLnqfe2kplWWzBWO6k0rPSrWEbuEiRXSJNZQCtCk+t2YX985G62p/9jWm3sGLN4UTcpDXaFNTTPBvlycoQ== + +"@react-native/js-polyfills@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/js-polyfills/-/js-polyfills-0.74.81.tgz#64780497be4ecbff1b27076294e3ebd7df1ba485" + integrity sha512-o4MiR+/kkHoeoQ/zPwt81LnTm6pqdg0wOhU7S7vIZUqzJ7YUpnpaAvF+/z7HzUOPudnavoCN0wvcZPe/AMEyCA== + 
+"@react-native/metro-babel-transformer@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/metro-babel-transformer/-/metro-babel-transformer-0.74.81.tgz#f724eab91e6de82f8d098e6de57f25bb7501d2d6" + integrity sha512-PVcMjj23poAK6Uemflz4MIJdEpONpjqF7JASNqqQkY6wfDdaIiZSNk8EBCWKb0t7nKqhMvtTq11DMzYJ0JFITg== + dependencies: + "@babel/core" "^7.20.0" + "@react-native/babel-preset" "0.74.81" + hermes-parser "0.19.1" + nullthrows "^1.1.1" + +"@react-native/metro-config@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/metro-config/-/metro-config-0.74.81.tgz#3ed605c0bb51081905171af3e0326abd3adc0b27" + integrity sha512-VInZ60cXC/e5MY7kMlrMRn6Mhpj05hJfiJngRKy8BsWnXJNQMv3iggar+XnfSh98saLw1yG96dO3G6s2WQhzOg== + dependencies: + "@react-native/js-polyfills" "0.74.81" + "@react-native/metro-babel-transformer" "0.74.81" + metro-config "^0.80.3" + metro-runtime "^0.80.3" + +"@react-native/normalize-colors@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/normalize-colors/-/normalize-colors-0.74.81.tgz#0b7c440b6e126f79036cbe74a88791aba72b9fcf" + integrity sha512-g3YvkLO7UsSWiDfYAU+gLhRHtEpUyz732lZB+N8IlLXc5MnfXHC8GKneDGY3Mh52I3gBrs20o37D5viQX9E1CA== + +"@react-native/typescript-config@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/typescript-config/-/typescript-config-0.74.81.tgz#a249b6a21b577d572a0a70056d7c48a55fd6662f" + integrity sha512-jk4LJUKdRYmXxxpebRSW8mK9xJPW90W6BE1IE9LdFi0exdsnVv5gXM9QylG+9kDVZj3bltMuMVdijWnU7SRNbg== + +"@react-native/virtualized-lists@0.74.81": + version "0.74.81" + resolved "https://registry.yarnpkg.com/@react-native/virtualized-lists/-/virtualized-lists-0.74.81.tgz#8e43d4c72ec561754491eae731f40877f03d05fb" + integrity sha512-5jF9S10Ug2Wl+L/0+O8WmbC726sMMX8jk/1JrvDDK+0DRLMobfjLc1L26fONlVBF7lE5ctqvKZ9TlKdhPTNOZg== + dependencies: + invariant "^2.2.4" + nullthrows "^1.1.1" + +"@rnx-kit/chromium-edge-launcher@^1.0.0": + 
version "1.0.0" + resolved "https://registry.yarnpkg.com/@rnx-kit/chromium-edge-launcher/-/chromium-edge-launcher-1.0.0.tgz#c0df8ea00a902c7a417cd9655aab06de398b939c" + integrity sha512-lzD84av1ZQhYUS+jsGqJiCMaJO2dn9u+RTT9n9q6D3SaKVwWqv+7AoRKqBu19bkwyE+iFRl1ymr40QS90jVFYg== + dependencies: + "@types/node" "^18.0.0" + escape-string-regexp "^4.0.0" + is-wsl "^2.2.0" + lighthouse-logger "^1.0.0" + mkdirp "^1.0.4" + rimraf "^3.0.2" + +"@sideway/address@^4.1.5": + version "4.1.5" + resolved "https://registry.yarnpkg.com/@sideway/address/-/address-4.1.5.tgz#4bc149a0076623ced99ca8208ba780d65a99b9d5" + integrity sha512-IqO/DUQHUkPeixNQ8n0JA6102hT9CmaljNTPmQ1u8MEhBo/R4Q8eKLN/vGZxuebwOroDB4cbpjheD4+/sKFK4Q== + dependencies: + "@hapi/hoek" "^9.0.0" + +"@sideway/formula@^3.0.1": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@sideway/formula/-/formula-3.0.1.tgz#80fcbcbaf7ce031e0ef2dd29b1bfc7c3f583611f" + integrity sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg== + +"@sideway/pinpoint@^2.0.0": + version "2.0.0" + resolved "https://registry.yarnpkg.com/@sideway/pinpoint/-/pinpoint-2.0.0.tgz#cff8ffadc372ad29fd3f78277aeb29e632cc70df" + integrity sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ== + +"@sinclair/typebox@^0.27.8": + version "0.27.8" + resolved "https://registry.yarnpkg.com/@sinclair/typebox/-/typebox-0.27.8.tgz#6667fac16c436b5434a387a34dedb013198f6e6e" + integrity sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA== + +"@sinonjs/commons@^3.0.0": + version "3.0.1" + resolved "https://registry.yarnpkg.com/@sinonjs/commons/-/commons-3.0.1.tgz#1029357e44ca901a615585f6d27738dbc89084cd" + integrity sha512-K3mCHKQ9sVh8o1C9cxkwxaOmXoAMlDxC1mYyHrjqOWEcBjYr76t96zL2zlj5dUGZ3HSw240X1qgH3Mjf1yJWpQ== + dependencies: + type-detect "4.0.8" + +"@sinonjs/fake-timers@^10.0.2": + version "10.3.0" + resolved 
"https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz#55fdff1ecab9f354019129daf4df0dd4d923ea66" + integrity sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA== + dependencies: + "@sinonjs/commons" "^3.0.0" + +"@types/babel__core@^7.1.14": + version "7.20.5" + resolved "https://registry.yarnpkg.com/@types/babel__core/-/babel__core-7.20.5.tgz#3df15f27ba85319caa07ba08d0721889bb39c017" + integrity sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA== + dependencies: + "@babel/parser" "^7.20.7" + "@babel/types" "^7.20.7" + "@types/babel__generator" "*" + "@types/babel__template" "*" + "@types/babel__traverse" "*" + +"@types/babel__generator@*": + version "7.6.8" + resolved "https://registry.yarnpkg.com/@types/babel__generator/-/babel__generator-7.6.8.tgz#f836c61f48b1346e7d2b0d93c6dacc5b9535d3ab" + integrity sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw== + dependencies: + "@babel/types" "^7.0.0" + +"@types/babel__template@*": + version "7.4.4" + resolved "https://registry.yarnpkg.com/@types/babel__template/-/babel__template-7.4.4.tgz#5672513701c1b2199bc6dad636a9d7491586766f" + integrity sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A== + dependencies: + "@babel/parser" "^7.1.0" + "@babel/types" "^7.0.0" + +"@types/babel__traverse@*", "@types/babel__traverse@^7.0.6": + version "7.20.5" + resolved "https://registry.yarnpkg.com/@types/babel__traverse/-/babel__traverse-7.20.5.tgz#7b7502be0aa80cc4ef22978846b983edaafcd4dd" + integrity sha512-WXCyOcRtH37HAUkpXhUduaxdm82b4GSlyTqajXviN4EfiuPgNYR109xMCKvpl6zPIpua0DGlMEDCq+g8EdoheQ== + dependencies: + "@babel/types" "^7.20.7" + +"@types/graceful-fs@^4.1.3": + version "4.1.9" + resolved "https://registry.yarnpkg.com/@types/graceful-fs/-/graceful-fs-4.1.9.tgz#2a06bc0f68a20ab37b3e36aa238be6abdf49e8b4" + integrity 
sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ== + dependencies: + "@types/node" "*" + +"@types/istanbul-lib-coverage@*", "@types/istanbul-lib-coverage@^2.0.0", "@types/istanbul-lib-coverage@^2.0.1": + version "2.0.6" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7" + integrity sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w== + +"@types/istanbul-lib-report@*": + version "3.0.3" + resolved "https://registry.yarnpkg.com/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.3.tgz#53047614ae72e19fc0401d872de3ae2b4ce350bf" + integrity sha512-NQn7AHQnk/RSLOxrBbGyJM/aVQ+pjj5HCgasFxc0K/KhoATfQ/47AyUl15I2yBUpihjmas+a+VJBOqecrFH+uA== + dependencies: + "@types/istanbul-lib-coverage" "*" + +"@types/istanbul-reports@^3.0.0": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@types/istanbul-reports/-/istanbul-reports-3.0.4.tgz#0f03e3d2f670fbdac586e34b433783070cc16f54" + integrity sha512-pk2B1NWalF9toCRu6gjBzR69syFjP4Od8WRAX+0mmf9lAjCRicLOWc+ZrxZHx/0XRjotgkF9t6iaMJ+aXcOdZQ== + dependencies: + "@types/istanbul-lib-report" "*" + +"@types/json-schema@^7.0.12", "@types/json-schema@^7.0.9": + version "7.0.15" + resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841" + integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA== + +"@types/node-forge@^1.3.0": + version "1.3.11" + resolved "https://registry.yarnpkg.com/@types/node-forge/-/node-forge-1.3.11.tgz#0972ea538ddb0f4d9c2fa0ec5db5724773a604da" + integrity sha512-FQx220y22OKNTqaByeBGqHWYz4cl94tpcxeFdvBo3wjG6XPBuZ0BNgNZRV5J5TFmmcsJ4IzsLkmGRiQbnYsBEQ== + dependencies: + "@types/node" "*" + +"@types/node@*": + version "20.11.27" + resolved 
"https://registry.yarnpkg.com/@types/node/-/node-20.11.27.tgz#debe5cfc8a507dd60fe2a3b4875b1604f215c2ac" + integrity sha512-qyUZfMnCg1KEz57r7pzFtSGt49f6RPkPBis3Vo4PbS7roQEDn22hiHzl/Lo1q4i4hDEgBJmBF/NTNg2XR0HbFg== + dependencies: + undici-types "~5.26.4" + +"@types/node@^18.0.0": + version "18.19.33" + resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.33.tgz#98cd286a1b8a5e11aa06623210240bcc28e95c48" + integrity sha512-NR9+KrpSajr2qBVp/Yt5TU/rp+b5Mayi3+OlMlcg2cVCfRmcG5PWZ7S4+MG9PZ5gWBoc9Pd0BKSRViuBCRPu0A== + dependencies: + undici-types "~5.26.4" + +"@types/prop-types@*": + version "15.7.11" + resolved "https://registry.yarnpkg.com/@types/prop-types/-/prop-types-15.7.11.tgz#2596fb352ee96a1379c657734d4b913a613ad563" + integrity sha512-ga8y9v9uyeiLdpKddhxYQkxNDrfvuPrlFb0N1qnZZByvcElJaXthF1UhvCh9TLWJBEHeNtdnbysW7Y6Uq8CVng== + +"@types/react-test-renderer@^18.0.0": + version "18.0.7" + resolved "https://registry.yarnpkg.com/@types/react-test-renderer/-/react-test-renderer-18.0.7.tgz#2cfe657adb3688cdf543995eceb2e062b5a68728" + integrity sha512-1+ANPOWc6rB3IkSnElhjv6VLlKg2dSv/OWClUyZimbLsQyBn8Js9Vtdsi3UICJ2rIQ3k2la06dkB+C92QfhKmg== + dependencies: + "@types/react" "*" + +"@types/react@*", "@types/react@^18.2.6": + version "18.2.66" + resolved "https://registry.yarnpkg.com/@types/react/-/react-18.2.66.tgz#d2eafc8c4e70939c5432221adb23d32d76bfe451" + integrity sha512-OYTmMI4UigXeFMF/j4uv0lBBEbongSgptPrHBxqME44h9+yNov+oL6Z3ocJKo0WyXR84sQUNeyIp9MRfckvZpg== + dependencies: + "@types/prop-types" "*" + "@types/scheduler" "*" + csstype "^3.0.2" + +"@types/scheduler@*": + version "0.16.8" + resolved "https://registry.yarnpkg.com/@types/scheduler/-/scheduler-0.16.8.tgz#ce5ace04cfeabe7ef87c0091e50752e36707deff" + integrity sha512-WZLiwShhwLRmeV6zH+GkbOFT6Z6VklCItrDioxUnv+u4Ll+8vKeFySoFyK/0ctcRpOmwAicELfmys1sDc/Rw+A== + +"@types/semver@^7.3.12", "@types/semver@^7.5.0": + version "7.5.8" + resolved 
"https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e" + integrity sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ== + +"@types/stack-utils@^2.0.0": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@types/stack-utils/-/stack-utils-2.0.3.tgz#6209321eb2c1712a7e7466422b8cb1fc0d9dd5d8" + integrity sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw== + +"@types/yargs-parser@*": + version "21.0.3" + resolved "https://registry.yarnpkg.com/@types/yargs-parser/-/yargs-parser-21.0.3.tgz#815e30b786d2e8f0dcd85fd5bcf5e1a04d008f15" + integrity sha512-I4q9QU9MQv4oEOz4tAHJtNz1cwuLxn2F3xcc2iV5WdqLPpUnj30aUuxt1mAxYTG+oe8CZMV/+6rU4S4gRDzqtQ== + +"@types/yargs@^15.0.0": + version "15.0.19" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-15.0.19.tgz#328fb89e46109ecbdb70c295d96ff2f46dfd01b9" + integrity sha512-2XUaGVmyQjgyAZldf0D0c14vvo/yv0MhQBSTJcejMMaitsn3nxCB6TmH4G0ZQf+uxROOa9mpanoSm8h6SG/1ZA== + dependencies: + "@types/yargs-parser" "*" + +"@types/yargs@^17.0.8": + version "17.0.32" + resolved "https://registry.yarnpkg.com/@types/yargs/-/yargs-17.0.32.tgz#030774723a2f7faafebf645f4e5a48371dca6229" + integrity sha512-xQ67Yc/laOG5uMfX/093MRlGGCIBzZMarVa+gfNKJxWAIgykYpVGkBdbqEzGDDfCrVUj6Hiff4mTZ5BA6TmAog== + dependencies: + "@types/yargs-parser" "*" + +"@typescript-eslint/eslint-plugin@^6.7.4": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz#30830c1ca81fd5f3c2714e524c4303e0194f9cd3" + integrity sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA== + dependencies: + "@eslint-community/regexpp" "^4.5.1" + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/type-utils" "6.21.0" + "@typescript-eslint/utils" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" + debug "^4.3.4" + graphemer "^1.4.0" + 
ignore "^5.2.4" + natural-compare "^1.4.0" + semver "^7.5.4" + ts-api-utils "^1.0.1" + +"@typescript-eslint/parser@^6.7.4": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/parser/-/parser-6.21.0.tgz#af8fcf66feee2edc86bc5d1cf45e33b0630bf35b" + integrity sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ== + dependencies: + "@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" + debug "^4.3.4" + +"@typescript-eslint/scope-manager@5.62.0": + version "5.62.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-5.62.0.tgz#d9457ccc6a0b8d6b37d0eb252a23022478c5460c" + integrity sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w== + dependencies: + "@typescript-eslint/types" "5.62.0" + "@typescript-eslint/visitor-keys" "5.62.0" + +"@typescript-eslint/scope-manager@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz#ea8a9bfc8f1504a6ac5d59a6df308d3a0630a2b1" + integrity sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg== + dependencies: + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" + +"@typescript-eslint/type-utils@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz#6473281cfed4dacabe8004e8521cee0bd9d4c01e" + integrity sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag== + dependencies: + "@typescript-eslint/typescript-estree" "6.21.0" + "@typescript-eslint/utils" "6.21.0" + debug "^4.3.4" + ts-api-utils "^1.0.1" + +"@typescript-eslint/types@5.62.0": + version "5.62.0" + resolved 
"https://registry.yarnpkg.com/@typescript-eslint/types/-/types-5.62.0.tgz#258607e60effa309f067608931c3df6fed41fd2f" + integrity sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ== + +"@typescript-eslint/types@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/types/-/types-6.21.0.tgz#205724c5123a8fef7ecd195075fa6e85bac3436d" + integrity sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg== + +"@typescript-eslint/typescript-estree@5.62.0": + version "5.62.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-5.62.0.tgz#7d17794b77fabcac615d6a48fb143330d962eb9b" + integrity sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA== + dependencies: + "@typescript-eslint/types" "5.62.0" + "@typescript-eslint/visitor-keys" "5.62.0" + debug "^4.3.4" + globby "^11.1.0" + is-glob "^4.0.3" + semver "^7.3.7" + tsutils "^3.21.0" + +"@typescript-eslint/typescript-estree@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz#c47ae7901db3b8bddc3ecd73daff2d0895688c46" + integrity sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ== + dependencies: + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/visitor-keys" "6.21.0" + debug "^4.3.4" + globby "^11.1.0" + is-glob "^4.0.3" + minimatch "9.0.3" + semver "^7.5.4" + ts-api-utils "^1.0.1" + +"@typescript-eslint/utils@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-6.21.0.tgz#4714e7a6b39e773c1c8e97ec587f520840cd8134" + integrity sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ== + dependencies: + "@eslint-community/eslint-utils" "^4.4.0" + "@types/json-schema" "^7.0.12" + "@types/semver" "^7.5.0" + 
"@typescript-eslint/scope-manager" "6.21.0" + "@typescript-eslint/types" "6.21.0" + "@typescript-eslint/typescript-estree" "6.21.0" + semver "^7.5.4" + +"@typescript-eslint/utils@^5.10.0": + version "5.62.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/utils/-/utils-5.62.0.tgz#141e809c71636e4a75daa39faed2fb5f4b10df86" + integrity sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ== + dependencies: + "@eslint-community/eslint-utils" "^4.2.0" + "@types/json-schema" "^7.0.9" + "@types/semver" "^7.3.12" + "@typescript-eslint/scope-manager" "5.62.0" + "@typescript-eslint/types" "5.62.0" + "@typescript-eslint/typescript-estree" "5.62.0" + eslint-scope "^5.1.1" + semver "^7.3.7" + +"@typescript-eslint/visitor-keys@5.62.0": + version "5.62.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-5.62.0.tgz#2174011917ce582875954ffe2f6912d5931e353e" + integrity sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw== + dependencies: + "@typescript-eslint/types" "5.62.0" + eslint-visitor-keys "^3.3.0" + +"@typescript-eslint/visitor-keys@6.21.0": + version "6.21.0" + resolved "https://registry.yarnpkg.com/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz#87a99d077aa507e20e238b11d56cc26ade45fe47" + integrity sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A== + dependencies: + "@typescript-eslint/types" "6.21.0" + eslint-visitor-keys "^3.4.1" + +"@ungap/structured-clone@^1.2.0": + version "1.2.0" + resolved "https://registry.yarnpkg.com/@ungap/structured-clone/-/structured-clone-1.2.0.tgz#756641adb587851b5ccb3e095daf27ae581c8406" + integrity sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ== + +abort-controller@^3.0.0: + version "3.0.0" + resolved 
"https://registry.yarnpkg.com/abort-controller/-/abort-controller-3.0.0.tgz#eaf54d53b62bae4138e809ca225c8439a6efb392" + integrity sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg== + dependencies: + event-target-shim "^5.0.0" + +accepts@^1.3.7, accepts@~1.3.5, accepts@~1.3.7: + version "1.3.8" + resolved "https://registry.yarnpkg.com/accepts/-/accepts-1.3.8.tgz#0bf0be125b67014adcb0b0921e62db7bffe16b2e" + integrity sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw== + dependencies: + mime-types "~2.1.34" + negotiator "0.6.3" + +acorn-jsx@^5.3.2: + version "5.3.2" + resolved "https://registry.yarnpkg.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937" + integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== + +acorn@^8.8.2, acorn@^8.9.0: + version "8.11.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.11.3.tgz#71e0b14e13a4ec160724b38fb7b0f233b1b81d7a" + integrity sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg== + +ajv@^6.12.4: + version "6.12.6" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +anser@^1.4.9: + version "1.4.10" + resolved "https://registry.yarnpkg.com/anser/-/anser-1.4.10.tgz#befa3eddf282684bd03b63dcda3927aef8c2e35b" + integrity sha512-hCv9AqTQ8ycjpSd3upOJd7vFwW1JaoYQ7tpham03GJ1ca8/65rqn0RpaWpItOAd6ylW9wAw6luXYPJIyPFVOww== + +ansi-escapes@^4.2.1: + version "4.3.2" + resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" + integrity 
sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== + dependencies: + type-fest "^0.21.3" + +ansi-fragments@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/ansi-fragments/-/ansi-fragments-0.2.1.tgz#24409c56c4cc37817c3d7caa99d8969e2de5a05e" + integrity sha512-DykbNHxuXQwUDRv5ibc2b0x7uw7wmwOGLBUd5RmaQ5z8Lhx19vwvKV+FAsM5rEA6dEcHxX+/Ad5s9eF2k2bB+w== + dependencies: + colorette "^1.0.7" + slice-ansi "^2.0.0" + strip-ansi "^5.0.0" + +ansi-regex@^4.1.0: + version "4.1.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.1.tgz#164daac87ab2d6f6db3a29875e2d1766582dabed" + integrity sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g== + +ansi-regex@^5.0.0, ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== + +ansi-styles@^3.2.0, ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.0.0, ansi-styles@^4.1.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== + dependencies: + color-convert "^2.0.1" + +ansi-styles@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-5.2.0.tgz#07449690ad45777d1924ac2abb2fc8895dba836b" + integrity sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA== + +anymatch@^3.0.3: + version 
"3.1.3" + resolved "https://registry.yarnpkg.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" + integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw== + dependencies: + normalize-path "^3.0.0" + picomatch "^2.0.4" + +appdirsjs@^1.2.4: + version "1.2.7" + resolved "https://registry.yarnpkg.com/appdirsjs/-/appdirsjs-1.2.7.tgz#50b4b7948a26ba6090d4aede2ae2dc2b051be3b3" + integrity sha512-Quji6+8kLBC3NnBeo14nPDq0+2jUs5s3/xEye+udFHumHhRk4M7aAMXp/PBJqkKYGuuyR9M/6Dq7d2AViiGmhw== + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +array-buffer-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f" + integrity sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg== + dependencies: + call-bind "^1.0.5" + is-array-buffer "^3.0.4" + +array-includes@^3.1.6, array-includes@^3.1.7: + version "3.1.7" + resolved "https://registry.yarnpkg.com/array-includes/-/array-includes-3.1.7.tgz#8cd2e01b26f7a3086cbc87271593fe921c62abda" + integrity sha512-dlcsNBIiWhPkHdOEEKnehA+RNUWDc4UqFtnIXU4uuYDPtA4LDkr7qip2p0VvFAEXNDr0yWZ9PJyIRiGjRLQzwQ== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + get-intrinsic "^1.2.1" + is-string "^1.0.7" + +array-union@^2.1.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +array.prototype.findlast@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/array.prototype.findlast/-/array.prototype.findlast-1.2.4.tgz#eeb9e45fc894055c82e5675c463e8077b827ad36" + integrity sha512-BMtLxpV+8BD+6ZPFIWmnUBpQoy+A+ujcg4rhp2iwCRJYA7PEh2MS4NL3lz8EiDlLrJPp2hg9qWihr5pd//jcGw== + dependencies: + call-bind "^1.0.5" + define-properties "^1.2.1" + es-abstract "^1.22.3" + es-errors "^1.3.0" + es-shim-unscopables "^1.0.2" + +array.prototype.flat@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/array.prototype.flat/-/array.prototype.flat-1.3.2.tgz#1476217df8cff17d72ee8f3ba06738db5b387d18" + integrity sha512-djYB+Zx2vLewY8RWlNCUdHjDXs2XOgm602S9E7P/UpHgfeHL00cRiIF+IN/G/aUJ7kGPb6yO/ErDI5V2s8iycA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + es-shim-unscopables "^1.0.0" + +array.prototype.flatmap@^1.3.2: + version "1.3.2" + resolved "https://registry.yarnpkg.com/array.prototype.flatmap/-/array.prototype.flatmap-1.3.2.tgz#c9a7c6831db8e719d6ce639190146c24bbd3e527" + integrity sha512-Ewyx0c9PmpcsByhSW4r+9zDU7sGjFc86qf/kKtuSCRdhfbk0SNLLkaT5qvcHnRGgc5NP/ly/y+qkXkqONX54CQ== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + es-shim-unscopables "^1.0.0" + +array.prototype.toreversed@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/array.prototype.toreversed/-/array.prototype.toreversed-1.1.2.tgz#b989a6bf35c4c5051e1dc0325151bf8088954eba" + integrity sha512-wwDCoT4Ck4Cz7sLtgUmzR5UV3YF5mFHUlbChCzZBQZ+0m2cl/DH3tKgvphv1nKgFsJ48oCSg6p91q2Vm0I/ZMA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + es-shim-unscopables "^1.0.0" + +array.prototype.tosorted@^1.1.3: + version "1.1.3" + 
resolved "https://registry.yarnpkg.com/array.prototype.tosorted/-/array.prototype.tosorted-1.1.3.tgz#c8c89348337e51b8a3c48a9227f9ce93ceedcba8" + integrity sha512-/DdH4TiTmOKzyQbp/eadcCVexiCb36xJg7HshYOYJnNZFDj33GEv0P7GxsynpShhq4OLYJzbGcBDkLsDt7MnNg== + dependencies: + call-bind "^1.0.5" + define-properties "^1.2.1" + es-abstract "^1.22.3" + es-errors "^1.1.0" + es-shim-unscopables "^1.0.2" + +arraybuffer.prototype.slice@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz#097972f4255e41bc3425e37dc3f6421cf9aefde6" + integrity sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A== + dependencies: + array-buffer-byte-length "^1.0.1" + call-bind "^1.0.5" + define-properties "^1.2.1" + es-abstract "^1.22.3" + es-errors "^1.2.1" + get-intrinsic "^1.2.3" + is-array-buffer "^3.0.4" + is-shared-array-buffer "^1.0.2" + +asap@~2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/asap/-/asap-2.0.6.tgz#e50347611d7e690943208bbdafebcbc2fb866d46" + integrity sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA== + +ast-types@0.15.2: + version "0.15.2" + resolved "https://registry.yarnpkg.com/ast-types/-/ast-types-0.15.2.tgz#39ae4809393c4b16df751ee563411423e85fb49d" + integrity sha512-c27loCv9QkZinsa5ProX751khO9DJl/AcB5c2KNtA6NRvHKS0PgLfcftz72KVq504vB0Gku5s2kUZzDBvQWvHg== + dependencies: + tslib "^2.0.1" + +astral-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/astral-regex/-/astral-regex-1.0.0.tgz#6c8c3fb827dd43ee3918f27b82782ab7658a6fd9" + integrity sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg== + +async-limiter@~1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/async-limiter/-/async-limiter-1.0.1.tgz#dd379e94f0db8310b08291f9d64c3209766617fd" + integrity 
sha512-csOlWGAcRFJaI6m+F2WKdnMKr4HhdhFVBk0H/QbJFMCr+uO2kwohwXQPxw/9OCxp05r5ghVBFSyioixx3gfkNQ== + +asynciterator.prototype@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/asynciterator.prototype/-/asynciterator.prototype-1.0.0.tgz#8c5df0514936cdd133604dfcc9d3fb93f09b2b62" + integrity sha512-wwHYEIS0Q80f5mosx3L/dfG5t5rjEa9Ft51GTaNt862EnpyGHpgz2RkZvLPp1oF5TnAiTohkEKVEu8pQPJI7Vg== + dependencies: + has-symbols "^1.0.3" + +available-typed-arrays@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846" + integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ== + dependencies: + possible-typed-array-names "^1.0.0" + +babel-core@^7.0.0-bridge.0: + version "7.0.0-bridge.0" + resolved "https://registry.yarnpkg.com/babel-core/-/babel-core-7.0.0-bridge.0.tgz#95a492ddd90f9b4e9a4a1da14eb335b87b634ece" + integrity sha512-poPX9mZH/5CSanm50Q+1toVci6pv5KSRv/5TWCwtzQS5XEwn40BcCrgIeMFWP9CKKIniKXNxoIOnOq4VVlGXhg== + +babel-jest@^29.6.3, babel-jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/babel-jest/-/babel-jest-29.7.0.tgz#f4369919225b684c56085998ac63dbd05be020d5" + integrity sha512-BrvGY3xZSwEcCzKvKsCi2GgHqDqsYkOP4/by5xCgIwGXQxIEh+8ew3gmrE1y7XRR6LHZIj6yLYnUi/mm2KXKBg== + dependencies: + "@jest/transform" "^29.7.0" + "@types/babel__core" "^7.1.14" + babel-plugin-istanbul "^6.1.1" + babel-preset-jest "^29.6.3" + chalk "^4.0.0" + graceful-fs "^4.2.9" + slash "^3.0.0" + +babel-plugin-istanbul@^6.1.1: + version "6.1.1" + resolved "https://registry.yarnpkg.com/babel-plugin-istanbul/-/babel-plugin-istanbul-6.1.1.tgz#fa88ec59232fd9b4e36dbbc540a8ec9a9b47da73" + integrity sha512-Y1IQok9821cC9onCx5otgFfRm7Lm+I+wwxOx738M/WLPZ9Q42m4IG5W0FNX8WLL2gYMZo3JkuXIH2DOpWM+qwA== + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@istanbuljs/load-nyc-config" "^1.0.0" + "@istanbuljs/schema" "^0.1.2" 
+ istanbul-lib-instrument "^5.0.4" + test-exclude "^6.0.0" + +babel-plugin-jest-hoist@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/babel-plugin-jest-hoist/-/babel-plugin-jest-hoist-29.6.3.tgz#aadbe943464182a8922c3c927c3067ff40d24626" + integrity sha512-ESAc/RJvGTFEzRwOTT4+lNDk/GNHMkKbNzsvT0qKRfDyyYTskxB5rnU2njIDYVxXCBHHEI1c0YwHob3WaYujOg== + dependencies: + "@babel/template" "^7.3.3" + "@babel/types" "^7.3.3" + "@types/babel__core" "^7.1.14" + "@types/babel__traverse" "^7.0.6" + +babel-plugin-polyfill-corejs2@^0.4.8: + version "0.4.10" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.10.tgz#276f41710b03a64f6467433cab72cbc2653c38b1" + integrity sha512-rpIuu//y5OX6jVU+a5BCn1R5RSZYWAl2Nar76iwaOdycqb6JPxediskWFMMl7stfwNJR4b7eiQvh5fB5TEQJTQ== + dependencies: + "@babel/compat-data" "^7.22.6" + "@babel/helper-define-polyfill-provider" "^0.6.1" + semver "^6.3.1" + +babel-plugin-polyfill-corejs3@^0.9.0: + version "0.9.0" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.9.0.tgz#9eea32349d94556c2ad3ab9b82ebb27d4bf04a81" + integrity sha512-7nZPG1uzK2Ymhy/NbaOWTg3uibM2BmGASS4vHS4szRZAIR8R6GwA/xAujpdrXU5iyklrimWnLWU+BLF9suPTqg== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.5.0" + core-js-compat "^3.34.0" + +babel-plugin-polyfill-regenerator@^0.5.5: + version "0.5.5" + resolved "https://registry.yarnpkg.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.5.tgz#8b0c8fc6434239e5d7b8a9d1f832bb2b0310f06a" + integrity sha512-OJGYZlhLqBh2DDHeqAxWB1XIvr49CxiJ2gIt61/PU55CQK4Z58OzMqjDe1zwQdQk+rBYsRc+1rJmdajM3gimHg== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.5.0" + +babel-plugin-transform-flow-enums@^0.0.2: + version "0.0.2" + resolved "https://registry.yarnpkg.com/babel-plugin-transform-flow-enums/-/babel-plugin-transform-flow-enums-0.0.2.tgz#d1d0cc9bdc799c850ca110d0ddc9f21b9ec3ef25" + 
integrity sha512-g4aaCrDDOsWjbm0PUUeVnkcVd6AKJsVc/MbnPhEotEpkeJQP6b8nzewohQi7+QS8UyPehOhGWn0nOwjvWpmMvQ== + dependencies: + "@babel/plugin-syntax-flow" "^7.12.1" + +babel-preset-current-node-syntax@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/babel-preset-current-node-syntax/-/babel-preset-current-node-syntax-1.0.1.tgz#b4399239b89b2a011f9ddbe3e4f401fc40cff73b" + integrity sha512-M7LQ0bxarkxQoN+vz5aJPsLBn77n8QgTFmo8WK0/44auK2xlCXrYcUxHFxgU7qW5Yzw/CjmLRK2uJzaCd7LvqQ== + dependencies: + "@babel/plugin-syntax-async-generators" "^7.8.4" + "@babel/plugin-syntax-bigint" "^7.8.3" + "@babel/plugin-syntax-class-properties" "^7.8.3" + "@babel/plugin-syntax-import-meta" "^7.8.3" + "@babel/plugin-syntax-json-strings" "^7.8.3" + "@babel/plugin-syntax-logical-assignment-operators" "^7.8.3" + "@babel/plugin-syntax-nullish-coalescing-operator" "^7.8.3" + "@babel/plugin-syntax-numeric-separator" "^7.8.3" + "@babel/plugin-syntax-object-rest-spread" "^7.8.3" + "@babel/plugin-syntax-optional-catch-binding" "^7.8.3" + "@babel/plugin-syntax-optional-chaining" "^7.8.3" + "@babel/plugin-syntax-top-level-await" "^7.8.3" + +babel-preset-jest@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/babel-preset-jest/-/babel-preset-jest-29.6.3.tgz#fa05fa510e7d493896d7b0dd2033601c840f171c" + integrity sha512-0B3bhxR6snWXJZtR/RliHTDPRgn1sNHOR0yVtq/IiQFyuOVjFS+wuio/R4gSNkyYmKmJB4wGZv2NZanmKmTnNA== + dependencies: + babel-plugin-jest-hoist "^29.6.3" + babel-preset-current-node-syntax "^1.0.0" + +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + +base64-js@^1.3.1, base64-js@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" + integrity 
sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + +bl@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a" + integrity sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w== + dependencies: + buffer "^5.5.0" + inherits "^2.0.4" + readable-stream "^3.4.0" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +brace-expansion@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" + integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== + dependencies: + balanced-match "^1.0.0" + +braces@^3.0.2: + version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== + dependencies: + fill-range "^7.0.1" + +browserslist@^4.22.2, browserslist@^4.22.3: + version "4.23.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.0.tgz#8f3acc2bbe73af7213399430890f86c63a5674ab" + integrity sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ== + dependencies: + caniuse-lite "^1.0.30001587" + electron-to-chromium "^1.4.668" + node-releases "^2.0.14" + update-browserslist-db "^1.0.13" + +bser@2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/bser/-/bser-2.1.1.tgz#e6787da20ece9d07998533cfd9de6f5c38f4bc05" + integrity 
sha512-gQxTNE/GAfIIrmHLUE3oJyp5FO6HRBfhjnw4/wMmA63ZGDJnWBmgY/lyQBpnDUkGmAhbSe39tx2d/iTOAfglwQ== + dependencies: + node-int64 "^0.4.0" + +buffer-from@^1.0.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" + integrity sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ== + +buffer@^5.5.0: + version "5.7.1" + resolved "https://registry.yarnpkg.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" + integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.1.13" + +bytes@3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/bytes/-/bytes-3.0.0.tgz#d32815404d689699f85a4ea4fa8755dd13a96048" + integrity sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw== + +call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9" + integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + set-function-length "^1.2.1" + +caller-callsite@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/caller-callsite/-/caller-callsite-2.0.0.tgz#847e0fce0a223750a9a027c54b33731ad3154134" + integrity sha512-JuG3qI4QOftFsZyOn1qq87fq5grLIyk1JYd5lJmdA+fG7aQ9pA/i3JIJGcO3q0MrRcHlOt1U+ZeHW8Dq9axALQ== + dependencies: + callsites "^2.0.0" + +caller-path@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/caller-path/-/caller-path-2.0.0.tgz#468f83044e369ab2010fac5f06ceee15bb2cb1f4" + integrity 
sha512-MCL3sf6nCSXOwCTzvPKhN18TU7AHTvdtam8DAogxcrJ8Rjfbbg7Lgng64H9Iy+vUV6VGFClN/TyxBkAebLRR4A== + dependencies: + caller-callsite "^2.0.0" + +callsites@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-2.0.0.tgz#06eb84f00eea413da86affefacbffb36093b3c50" + integrity sha512-ksWePWBloaWPxJYQ8TL0JHvtci6G5QTKwQ95RcWAa/lzoAKuAOflGdAK92hpHXjkwb8zLxoLNUoNYZgVsaJzvQ== + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +camelcase@^5.0.0, camelcase@^5.3.1: + version "5.3.1" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== + +camelcase@^6.2.0: + version "6.3.0" + resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" + integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== + +caniuse-lite@^1.0.30001587: + version "1.0.30001597" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001597.tgz#8be94a8c1d679de23b22fbd944232aa1321639e6" + integrity sha512-7LjJvmQU6Sj7bL0j5b5WY/3n7utXUJvAe1lxhsHDbLmwX9mdL86Yjtr+5SRCyf8qME4M7pU2hswj0FpyBVCv9w== + +chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: + version "4.1.2" + resolved 
"https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +char-regex@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/char-regex/-/char-regex-1.0.2.tgz#d744358226217f981ed58f479b1d6bcc29545dcf" + integrity sha512-kWWXztvZ5SBQV+eRgKFeh8q5sLuZY2+8WUIzlxWVTg+oGwY14qylx1KbKzHd8P6ZYkAg0xyIDU9JMHhyJMZ1jw== + +chrome-launcher@^0.15.2: + version "0.15.2" + resolved "https://registry.yarnpkg.com/chrome-launcher/-/chrome-launcher-0.15.2.tgz#4e6404e32200095fdce7f6a1e1004f9bd36fa5da" + integrity sha512-zdLEwNo3aUVzIhKhTtXfxhdvZhUghrnmkvcAq2NoDd+LeOHKf03H5jwZ8T/STsAlzyALkBVK552iaG1fGf1xVQ== + dependencies: + "@types/node" "*" + escape-string-regexp "^4.0.0" + is-wsl "^2.2.0" + lighthouse-logger "^1.0.0" + +ci-info@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" + integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== + +ci-info@^3.2.0: + version "3.9.0" + resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-3.9.0.tgz#4279a62028a7b1f262f3473fc9605f5e218c59b4" + integrity sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ== + +cjs-module-lexer@^1.0.0: + version "1.2.3" + resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz#6c370ab19f8a3394e318fe682686ec0ac684d107" + integrity sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ== + +cli-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" + integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== + 
dependencies: + restore-cursor "^3.1.0" + +cli-spinners@^2.5.0: + version "2.9.2" + resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.9.2.tgz#1773a8f4b9c4d6ac31563df53b3fc1d79462fe41" + integrity sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg== + +cliui@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1" + integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.0" + wrap-ansi "^6.2.0" + +cliui@^8.0.1: + version "8.0.1" + resolved "https://registry.yarnpkg.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.1" + wrap-ansi "^7.0.0" + +clone-deep@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/clone-deep/-/clone-deep-4.0.1.tgz#c19fd9bdbbf85942b4fd979c84dcf7d5f07c2387" + integrity sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ== + dependencies: + is-plain-object "^2.0.4" + kind-of "^6.0.2" + shallow-clone "^3.0.0" + +clone@^1.0.2: + version "1.0.4" + resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" + integrity sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg== + +co@^4.6.0: + version "4.6.0" + resolved "https://registry.yarnpkg.com/co/-/co-4.6.0.tgz#6ea6bdf3d853ae54ccb8e47bfa0bf3f9031fb184" + integrity sha512-QVb0dM5HvG+uaxitm8wONl7jltx8dqhfU33DcqtOZcLSVIKSDDLDi7+0LbAKiyI8hD9u42m2YxXSkMGWThaecQ== + +collect-v8-coverage@^1.0.0: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9" + integrity sha512-lHl4d5/ONEbLlJvaJNtsF/Lz+WvB07u2ycqTYbdrq7UypDXailES4valYb2eWiJFxZlVmpGekfqoxQhzyFdT4Q== + +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== + +color-name@~1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +colorette@^1.0.7: + version "1.4.0" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.4.0.tgz#5190fbb87276259a86ad700bff2c6d6faa3fca40" + integrity sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g== + +command-exists@^1.2.8: + version "1.2.9" + resolved "https://registry.yarnpkg.com/command-exists/-/command-exists-1.2.9.tgz#c50725af3808c8ab0260fd60b01fbfa25b954f69" + integrity sha512-LTQ/SGc+s0Xc0Fu5WaKnR0YiygZkm9eKFvyS+fRsU7/ZWFF8ykFM6Pc9aCVf1+xasOOZpO3BAVgVrKvsqKHV7w== + +commander@^2.20.0: + version "2.20.3" + resolved 
"https://registry.yarnpkg.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== + +commander@^9.4.1: + version "9.5.0" + resolved "https://registry.yarnpkg.com/commander/-/commander-9.5.0.tgz#bc08d1eb5cedf7ccb797a96199d41c7bc3e60d30" + integrity sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ== + +commondir@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/commondir/-/commondir-1.0.1.tgz#ddd800da0c66127393cca5950ea968a3aaf1253b" + integrity sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg== + +compressible@~2.0.16: + version "2.0.18" + resolved "https://registry.yarnpkg.com/compressible/-/compressible-2.0.18.tgz#af53cca6b070d4c3c0750fbd77286a6d7cc46fba" + integrity sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg== + dependencies: + mime-db ">= 1.43.0 < 2" + +compression@^1.7.1: + version "1.7.4" + resolved "https://registry.yarnpkg.com/compression/-/compression-1.7.4.tgz#95523eff170ca57c29a0ca41e6fe131f41e5bb8f" + integrity sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ== + dependencies: + accepts "~1.3.5" + bytes "3.0.0" + compressible "~2.0.16" + debug "2.6.9" + on-headers "~1.0.2" + safe-buffer "5.1.2" + vary "~1.1.2" + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== + +connect@^3.6.5: + version "3.7.0" + resolved "https://registry.yarnpkg.com/connect/-/connect-3.7.0.tgz#5d49348910caa5e07a01800b030d0c35f20484f8" + integrity sha512-ZqRXc+tZukToSNmh5C2iWMSoV3X1YUcPbqEM4DkEG5tNQXrQUZCNVGGv3IuicnkMtPfGf3Xtp8WCXs295iQ1pQ== + 
dependencies: + debug "2.6.9" + finalhandler "1.1.2" + parseurl "~1.3.3" + utils-merge "1.0.1" + +convert-source-map@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" + integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg== + +core-js-compat@^3.31.0, core-js-compat@^3.34.0: + version "3.36.0" + resolved "https://registry.yarnpkg.com/core-js-compat/-/core-js-compat-3.36.0.tgz#087679119bc2fdbdefad0d45d8e5d307d45ba190" + integrity sha512-iV9Pd/PsgjNWBXeq8XRtWVSgz2tKAfhfvBs7qxYty+RlRd+OCksaWmOnc4JKrTc1cToXL1N0s3l/vwlxPtdElw== + dependencies: + browserslist "^4.22.3" + +core-util-is@~1.0.0: + version "1.0.3" + resolved "https://registry.yarnpkg.com/core-util-is/-/core-util-is-1.0.3.tgz#a6042d3634c2b27e9328f837b965fac83808db85" + integrity sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ== + +cosmiconfig@^5.0.5, cosmiconfig@^5.1.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-5.2.1.tgz#040f726809c591e77a17c0a3626ca45b4f168b1a" + integrity sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA== + dependencies: + import-fresh "^2.0.0" + is-directory "^0.3.1" + js-yaml "^3.13.1" + parse-json "^4.0.0" + +create-jest@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/create-jest/-/create-jest-29.7.0.tgz#a355c5b3cb1e1af02ba177fe7afd7feee49a5320" + integrity sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q== + dependencies: + "@jest/types" "^29.6.3" + chalk "^4.0.0" + exit "^0.1.2" + graceful-fs "^4.2.9" + jest-config "^29.7.0" + jest-util "^29.7.0" + prompts "^2.0.1" + +cross-spawn@^7.0.2, cross-spawn@^7.0.3: + version "7.0.3" + resolved 
"https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" + integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +csstype@^3.0.2: + version "3.1.3" + resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" + integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== + +dayjs@^1.8.15: + version "1.11.10" + resolved "https://registry.yarnpkg.com/dayjs/-/dayjs-1.11.10.tgz#68acea85317a6e164457d6d6947564029a6a16a0" + integrity sha512-vjAczensTgRcqDERK0SR2XMwsF/tSvnvlv6VcF2GIhg6Sx4yOIt/irsr1RDJsKiIyBzJDpCoXiWWq28MqH2cnQ== + +debug@2.6.9, debug@^2.2.0, debug@^2.6.9: + version "2.6.9" + resolved "https://registry.yarnpkg.com/debug/-/debug-2.6.9.tgz#5d128515df134ff327e90a4c93f4e077a536341f" + integrity sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA== + dependencies: + ms "2.0.0" + +debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== + dependencies: + ms "2.1.2" + +decamelize@^1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + integrity sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA== + +dedent@^1.0.0: + version "1.5.1" + resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" + integrity sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg== + +deep-is@^0.1.3: + version 
"0.1.4" + resolved "https://registry.yarnpkg.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831" + integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ== + +deepmerge@^4.2.2, deepmerge@^4.3.0: + version "4.3.1" + resolved "https://registry.yarnpkg.com/deepmerge/-/deepmerge-4.3.1.tgz#44b5f2147cd3b00d4b56137685966f26fd25dd4a" + integrity sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A== + +defaults@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/defaults/-/defaults-1.0.4.tgz#b0b02062c1e2aa62ff5d9528f0f98baa90978d7a" + integrity sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A== + dependencies: + clone "^1.0.2" + +define-data-property@^1.0.1, define-data-property@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e" + integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A== + dependencies: + es-define-property "^1.0.0" + es-errors "^1.3.0" + gopd "^1.0.1" + +define-properties@^1.1.3, define-properties@^1.2.0, define-properties@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c" + integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg== + dependencies: + define-data-property "^1.0.1" + has-property-descriptors "^1.0.0" + object-keys "^1.1.1" + +denodeify@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/denodeify/-/denodeify-1.2.1.tgz#3a36287f5034e699e7577901052c2e6c94251631" + integrity sha512-KNTihKNmQENUZeKu5fzfpzRqR5S2VMp4gl9RFHiWzj9DfvYQPMJ6XHKNaQxaGCXwPk6y9yme3aUoaiAe+KX+vg== + +depd@2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/depd/-/depd-2.0.0.tgz#b696163cc757560d09cf22cc8fad1571b79e76df" + integrity sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw== + +destroy@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/destroy/-/destroy-1.2.0.tgz#4803735509ad8be552934c67df614f94e66fa015" + integrity sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg== + +detect-newline@^3.0.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/detect-newline/-/detect-newline-3.1.0.tgz#576f5dfc63ae1a192ff192d8ad3af6308991b651" + integrity sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA== + +diff-sequences@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/diff-sequences/-/diff-sequences-29.6.3.tgz#4deaf894d11407c51efc8418012f9e70b84ea921" + integrity sha512-EjePK1srD3P08o2j4f0ExnylqRs5B9tJjcp9t1krH2qRi8CCdsYfwe9JgSLurFBWwq4uOlipzfk5fHNvwFKr8Q== + +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +doctrine@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-2.1.0.tgz#5cd01fc101621b42c4cd7f5d1a66243716d3f39d" + integrity sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw== + dependencies: + esutils "^2.0.2" + +doctrine@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961" + integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w== + dependencies: + esutils "^2.0.2" + +ee-first@1.1.1: + version "1.1.1" + resolved 
"https://registry.yarnpkg.com/ee-first/-/ee-first-1.1.1.tgz#590c61156b0ae2f4f0255732a158b266bc56b21d" + integrity sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow== + +electron-to-chromium@^1.4.668: + version "1.4.705" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.705.tgz#ef4f912620bd7c9555a20554ffc568184c0ddceb" + integrity sha512-LKqhpwJCLhYId2VVwEzFXWrqQI5n5zBppz1W9ehhTlfYU8CUUW6kClbN8LHF/v7flMgRdETS772nqywJ+ckVAw== + +emittery@^0.13.1: + version "0.13.1" + resolved "https://registry.yarnpkg.com/emittery/-/emittery-0.13.1.tgz#c04b8c3457490e0847ae51fced3af52d338e3dad" + integrity sha512-DeWwawk6r5yR9jFgnDKYt4sLS0LmHJJi3ZOnb5/JdbYwj3nW+FxQnHIjhBKz8YLC7oRNPVM9NQ47I3CVx34eqQ== + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +encodeurl@~1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/encodeurl/-/encodeurl-1.0.2.tgz#ad3ff4c86ec2d029322f5a02c3a9a606c95b3f59" + integrity sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w== + +envinfo@^7.10.0: + version "7.11.1" + resolved "https://registry.yarnpkg.com/envinfo/-/envinfo-7.11.1.tgz#2ffef77591057081b0129a8fd8cf6118da1b94e1" + integrity sha512-8PiZgZNIB4q/Lw4AhOvAfB/ityHAd2bli3lESSWmWSzSsl5dKpy5N1d1Rfkd2teq/g9xN90lc6o98DOjMeYHpg== + +error-ex@^1.3.1: + version "1.3.2" + resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== + dependencies: + is-arrayish "^0.2.1" + +error-stack-parser@^2.0.6: + version "2.1.4" + resolved 
"https://registry.yarnpkg.com/error-stack-parser/-/error-stack-parser-2.1.4.tgz#229cb01cdbfa84440bfa91876285b94680188286" + integrity sha512-Sk5V6wVazPhq5MhpO+AUxJn5x7XSXGl1R93Vn7i+zS15KDVxQijejNCrz8340/2bgLBjR9GtEG8ZVKONDjcqGQ== + dependencies: + stackframe "^1.3.4" + +errorhandler@^1.5.1: + version "1.5.1" + resolved "https://registry.yarnpkg.com/errorhandler/-/errorhandler-1.5.1.tgz#b9ba5d17cf90744cd1e851357a6e75bf806a9a91" + integrity sha512-rcOwbfvP1WTViVoUjcfZicVzjhjTuhSMntHh6mW3IrEiyE6mJyXvsToJUJGlGlw/2xU9P5whlWNGlIDVeCiT4A== + dependencies: + accepts "~1.3.7" + escape-html "~1.0.3" + +es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.22.4: + version "1.22.5" + resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.22.5.tgz#1417df4e97cc55f09bf7e58d1e614bc61cb8df46" + integrity sha512-oW69R+4q2wG+Hc3KZePPZxOiisRIqfKBVo/HLx94QcJeWGU/8sZhCvc829rd1kS366vlJbzBfXf9yWwf0+Ko7w== + dependencies: + array-buffer-byte-length "^1.0.1" + arraybuffer.prototype.slice "^1.0.3" + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + es-define-property "^1.0.0" + es-errors "^1.3.0" + es-set-tostringtag "^2.0.3" + es-to-primitive "^1.2.1" + function.prototype.name "^1.1.6" + get-intrinsic "^1.2.4" + get-symbol-description "^1.0.2" + globalthis "^1.0.3" + gopd "^1.0.1" + has-property-descriptors "^1.0.2" + has-proto "^1.0.3" + has-symbols "^1.0.3" + hasown "^2.0.1" + internal-slot "^1.0.7" + is-array-buffer "^3.0.4" + is-callable "^1.2.7" + is-negative-zero "^2.0.3" + is-regex "^1.1.4" + is-shared-array-buffer "^1.0.3" + is-string "^1.0.7" + is-typed-array "^1.1.13" + is-weakref "^1.0.2" + object-inspect "^1.13.1" + object-keys "^1.1.1" + object.assign "^4.1.5" + regexp.prototype.flags "^1.5.2" + safe-array-concat "^1.1.0" + safe-regex-test "^1.0.3" + string.prototype.trim "^1.2.8" + string.prototype.trimend "^1.0.7" + string.prototype.trimstart "^1.0.7" + typed-array-buffer "^1.0.2" + typed-array-byte-length "^1.0.1" + typed-array-byte-offset "^1.0.2" + 
typed-array-length "^1.0.5" + unbox-primitive "^1.0.2" + which-typed-array "^1.1.14" + +es-define-property@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845" + integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ== + dependencies: + get-intrinsic "^1.2.4" + +es-errors@^1.0.0, es-errors@^1.1.0, es-errors@^1.2.1, es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + +es-iterator-helpers@^1.0.17: + version "1.0.17" + resolved "https://registry.yarnpkg.com/es-iterator-helpers/-/es-iterator-helpers-1.0.17.tgz#123d1315780df15b34eb181022da43e734388bb8" + integrity sha512-lh7BsUqelv4KUbR5a/ZTaGGIMLCjPGPqJ6q+Oq24YP0RdyptX1uzm4vvaqzk7Zx3bpl/76YLTTDj9L7uYQ92oQ== + dependencies: + asynciterator.prototype "^1.0.0" + call-bind "^1.0.7" + define-properties "^1.2.1" + es-abstract "^1.22.4" + es-errors "^1.3.0" + es-set-tostringtag "^2.0.2" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + globalthis "^1.0.3" + has-property-descriptors "^1.0.2" + has-proto "^1.0.1" + has-symbols "^1.0.3" + internal-slot "^1.0.7" + iterator.prototype "^1.1.2" + safe-array-concat "^1.1.0" + +es-set-tostringtag@^2.0.2, es-set-tostringtag@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz#8bb60f0a440c2e4281962428438d58545af39777" + integrity sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ== + dependencies: + get-intrinsic "^1.2.4" + has-tostringtag "^1.0.2" + hasown "^2.0.1" + +es-shim-unscopables@^1.0.0, es-shim-unscopables@^1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/es-shim-unscopables/-/es-shim-unscopables-1.0.2.tgz#1f6942e71ecc7835ed1c8a83006d8771a63a3763" + integrity sha512-J3yBRXCzDu4ULnQwxyToo/OjdMx6akgVC7K6few0a7F/0wLtmKKN7I73AH5T2836UuXRqN7Qg+IIUw/+YJksRw== + dependencies: + hasown "^2.0.0" + +es-to-primitive@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a" + integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA== + dependencies: + is-callable "^1.1.4" + is-date-object "^1.0.1" + is-symbol "^1.0.2" + +escalade@^3.1.1: + version "3.1.2" + resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.2.tgz#54076e9ab29ea5bf3d8f1ed62acffbb88272df27" + integrity sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA== + +escape-html@~1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/escape-html/-/escape-html-1.0.3.tgz#0258eae4d3d0c0974de1c169188ef0051d1d1988" + integrity sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow== + +escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== + +escape-string-regexp@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-2.0.0.tgz#a30304e99daa32e23b2fd20f51babd07cffca344" + integrity sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w== + +escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity 
sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +eslint-config-prettier@^8.5.0: + version "8.10.0" + resolved "https://registry.yarnpkg.com/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz#3a06a662130807e2502fc3ff8b4143d8a0658e11" + integrity sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg== + +eslint-plugin-eslint-comments@^3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-eslint-comments/-/eslint-plugin-eslint-comments-3.2.0.tgz#9e1cd7b4413526abb313933071d7aba05ca12ffa" + integrity sha512-0jkOl0hfojIHHmEHgmNdqv4fmh7300NdpA9FFpF7zaoLvB/QeXOGNLIo86oAveJFrfB1p05kC8hpEMHM8DwWVQ== + dependencies: + escape-string-regexp "^1.0.5" + ignore "^5.0.5" + +eslint-plugin-ft-flow@^2.0.1: + version "2.0.3" + resolved "https://registry.yarnpkg.com/eslint-plugin-ft-flow/-/eslint-plugin-ft-flow-2.0.3.tgz#3b3c113c41902bcbacf0e22b536debcfc3c819e8" + integrity sha512-Vbsd/b+LYA99jUbsL6viEUWShFaYQt2YQs3QN3f+aeszOhh2sgdcU0mjzDyD4yyBvMc8qy2uwvBBWfMzEX06tg== + dependencies: + lodash "^4.17.21" + string-natural-compare "^3.0.1" + +eslint-plugin-jest@^26.5.3: + version "26.9.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-jest/-/eslint-plugin-jest-26.9.0.tgz#7931c31000b1c19e57dbfb71bbf71b817d1bf949" + integrity sha512-TWJxWGp1J628gxh2KhaH1H1paEdgE2J61BBF1I59c6xWeL5+D1BzMxGDN/nXAfX+aSkR5u80K+XhskK6Gwq9ng== + dependencies: + "@typescript-eslint/utils" "^5.10.0" + +eslint-plugin-prettier@^4.2.1: + version "4.2.1" + resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-4.2.1.tgz#651cbb88b1dab98bfd42f017a12fa6b2d993f94b" + integrity sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ== + dependencies: + prettier-linter-helpers "^1.0.0" + +eslint-plugin-react-hooks@^4.6.0: + version "4.6.0" + resolved 
"https://registry.yarnpkg.com/eslint-plugin-react-hooks/-/eslint-plugin-react-hooks-4.6.0.tgz#4c3e697ad95b77e93f8646aaa1630c1ba607edd3" + integrity sha512-oFc7Itz9Qxh2x4gNHStv3BqJq54ExXmfC+a1NjAta66IAN87Wu0R/QArgIS9qKzX3dXKPI9H5crl9QchNMY9+g== + +eslint-plugin-react-native-globals@^0.1.1: + version "0.1.2" + resolved "https://registry.yarnpkg.com/eslint-plugin-react-native-globals/-/eslint-plugin-react-native-globals-0.1.2.tgz#ee1348bc2ceb912303ce6bdbd22e2f045ea86ea2" + integrity sha512-9aEPf1JEpiTjcFAmmyw8eiIXmcNZOqaZyHO77wgm0/dWfT/oxC1SrIq8ET38pMxHYrcB6Uew+TzUVsBeczF88g== + +eslint-plugin-react-native@^4.0.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-react-native/-/eslint-plugin-react-native-4.1.0.tgz#5343acd3b2246bc1b857ac38be708f070d18809f" + integrity sha512-QLo7rzTBOl43FvVqDdq5Ql9IoElIuTdjrz9SKAXCvULvBoRZ44JGSkx9z4999ZusCsb4rK3gjS8gOGyeYqZv2Q== + dependencies: + eslint-plugin-react-native-globals "^0.1.1" + +eslint-plugin-react@^7.30.1: + version "7.34.0" + resolved "https://registry.yarnpkg.com/eslint-plugin-react/-/eslint-plugin-react-7.34.0.tgz#ab71484d54fc409c37025c5eca00eb4177a5e88c" + integrity sha512-MeVXdReleBTdkz/bvcQMSnCXGi+c9kvy51IpinjnJgutl3YTHWsDdke7Z1ufZpGfDG8xduBDKyjtB9JH1eBKIQ== + dependencies: + array-includes "^3.1.7" + array.prototype.findlast "^1.2.4" + array.prototype.flatmap "^1.3.2" + array.prototype.toreversed "^1.1.2" + array.prototype.tosorted "^1.1.3" + doctrine "^2.1.0" + es-iterator-helpers "^1.0.17" + estraverse "^5.3.0" + jsx-ast-utils "^2.4.1 || ^3.0.0" + minimatch "^3.1.2" + object.entries "^1.1.7" + object.fromentries "^2.0.7" + object.hasown "^1.1.3" + object.values "^1.1.7" + prop-types "^15.8.1" + resolve "^2.0.0-next.5" + semver "^6.3.1" + string.prototype.matchall "^4.0.10" + +eslint-scope@5.1.1, eslint-scope@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" + integrity 
sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== + dependencies: + esrecurse "^4.3.0" + estraverse "^4.1.1" + +eslint-scope@^7.2.2: + version "7.2.2" + resolved "https://registry.yarnpkg.com/eslint-scope/-/eslint-scope-7.2.2.tgz#deb4f92563390f32006894af62a22dba1c46423f" + integrity sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg== + dependencies: + esrecurse "^4.3.0" + estraverse "^5.2.0" + +eslint-visitor-keys@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-2.1.0.tgz#f65328259305927392c938ed44eb0a5c9b2bd303" + integrity sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw== + +eslint-visitor-keys@^3.3.0, eslint-visitor-keys@^3.4.1, eslint-visitor-keys@^3.4.3: + version "3.4.3" + resolved "https://registry.yarnpkg.com/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz#0cd72fe8550e3c2eae156a96a4dddcd1c8ac5800" + integrity sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag== + +eslint@^8.19.0: + version "8.57.0" + resolved "https://registry.yarnpkg.com/eslint/-/eslint-8.57.0.tgz#c786a6fd0e0b68941aaf624596fb987089195668" + integrity sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ== + dependencies: + "@eslint-community/eslint-utils" "^4.2.0" + "@eslint-community/regexpp" "^4.6.1" + "@eslint/eslintrc" "^2.1.4" + "@eslint/js" "8.57.0" + "@humanwhocodes/config-array" "^0.11.14" + "@humanwhocodes/module-importer" "^1.0.1" + "@nodelib/fs.walk" "^1.2.8" + "@ungap/structured-clone" "^1.2.0" + ajv "^6.12.4" + chalk "^4.0.0" + cross-spawn "^7.0.2" + debug "^4.3.2" + doctrine "^3.0.0" + escape-string-regexp "^4.0.0" + eslint-scope "^7.2.2" + eslint-visitor-keys "^3.4.3" + espree "^9.6.1" + esquery "^1.4.2" + esutils "^2.0.2" + fast-deep-equal "^3.1.3" + file-entry-cache "^6.0.1" + find-up "^5.0.0" + 
glob-parent "^6.0.2" + globals "^13.19.0" + graphemer "^1.4.0" + ignore "^5.2.0" + imurmurhash "^0.1.4" + is-glob "^4.0.0" + is-path-inside "^3.0.3" + js-yaml "^4.1.0" + json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.4.1" + lodash.merge "^4.6.2" + minimatch "^3.1.2" + natural-compare "^1.4.0" + optionator "^0.9.3" + strip-ansi "^6.0.1" + text-table "^0.2.0" + +espree@^9.6.0, espree@^9.6.1: + version "9.6.1" + resolved "https://registry.yarnpkg.com/espree/-/espree-9.6.1.tgz#a2a17b8e434690a5432f2f8018ce71d331a48c6f" + integrity sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ== + dependencies: + acorn "^8.9.0" + acorn-jsx "^5.3.2" + eslint-visitor-keys "^3.4.1" + +esprima@^4.0.0, esprima@~4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== + +esquery@^1.4.2: + version "1.5.0" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.5.0.tgz#6ce17738de8577694edd7361c57182ac8cb0db0b" + integrity sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg== + dependencies: + estraverse "^5.1.0" + +esrecurse@^4.3.0: + version "4.3.0" + resolved "https://registry.yarnpkg.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921" + integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== + dependencies: + estraverse "^5.2.0" + +estraverse@^4.1.1: + version "4.3.0" + resolved "https://registry.yarnpkg.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" + integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== + +estraverse@^5.1.0, estraverse@^5.2.0, estraverse@^5.3.0: + version "5.3.0" + resolved 
"https://registry.yarnpkg.com/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123" + integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== + +esutils@^2.0.2: + version "2.0.3" + resolved "https://registry.yarnpkg.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== + +etag@~1.8.1: + version "1.8.1" + resolved "https://registry.yarnpkg.com/etag/-/etag-1.8.1.tgz#41ae2eeb65efa62268aebfea83ac7d79299b0887" + integrity sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg== + +event-target-shim@^5.0.0, event-target-shim@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/event-target-shim/-/event-target-shim-5.0.1.tgz#5d4d3ebdf9583d63a5333ce2deb7480ab2b05789" + integrity sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ== + +execa@^5.0.0, execa@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" + integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== + dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.0" + human-signals "^2.1.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.1" + onetime "^5.1.2" + signal-exit "^3.0.3" + strip-final-newline "^2.0.0" + +exit@^0.1.2: + version "0.1.2" + resolved "https://registry.yarnpkg.com/exit/-/exit-0.1.2.tgz#0632638f8d877cc82107d30a0fff1a17cba1cd0c" + integrity sha512-Zk/eNKV2zbjpKzrsQ+n1G6poVbErQxJ0LBOJXaKZ1EViLzH+hrLu9cdXI4zw9dBQJslwBEpbQ2P1oS7nDxs6jQ== + +expect@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/expect/-/expect-29.7.0.tgz#578874590dcb3214514084c08115d8aee61e11bc" + integrity 
sha512-2Zks0hf1VLFYI1kbh0I5jP3KHHyCHpkfyHBzsSXRFgl/Bg9mWYfMW8oD+PdMPlEwy5HNsR9JutYy6pMeOh61nw== + dependencies: + "@jest/expect-utils" "^29.7.0" + jest-get-type "^29.6.3" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + +fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: + version "3.1.3" + resolved "https://registry.yarnpkg.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-diff@^1.1.2: + version "1.3.0" + resolved "https://registry.yarnpkg.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0" + integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw== + +fast-glob@^3.2.9, fast-glob@^3.3.2: + version "3.3.2" + resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129" + integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.4" + +fast-json-stable-stringify@^2.0.0, fast-json-stable-stringify@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fast-levenshtein@^2.0.6: + version "2.0.6" + resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== + +fast-xml-parser@^4.0.12, fast-xml-parser@^4.2.4: + version "4.3.5" + resolved 
"https://registry.yarnpkg.com/fast-xml-parser/-/fast-xml-parser-4.3.5.tgz#e2f2a2ae8377e9c3dc321b151e58f420ca7e5ccc" + integrity sha512-sWvP1Pl8H03B8oFJpFR3HE31HUfwtX7Rlf9BNsvdpujD4n7WMhfmu8h9wOV2u+c1k0ZilTADhPqypzx2J690ZQ== + dependencies: + strnum "^1.0.5" + +fastq@^1.6.0: + version "1.17.1" + resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" + integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w== + dependencies: + reusify "^1.0.4" + +fb-watchman@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/fb-watchman/-/fb-watchman-2.0.2.tgz#e9524ee6b5c77e9e5001af0f85f3adbb8623255c" + integrity sha512-p5161BqbuCaSnB8jIbzQHOlpgsPmK5rJVDfDKO91Axs5NC1uu3HRQm6wt9cd9/+GtQQIO53JdGXXoyDpTAsgYA== + dependencies: + bser "2.1.1" + +file-entry-cache@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/file-entry-cache/-/file-entry-cache-6.0.1.tgz#211b2dd9659cb0394b073e7323ac3c933d522027" + integrity sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg== + dependencies: + flat-cache "^3.0.4" + +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== + dependencies: + to-regex-range "^5.0.1" + +finalhandler@1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/finalhandler/-/finalhandler-1.1.2.tgz#b7e7d000ffd11938d0fdb053506f6ebabe9f587d" + integrity sha512-aAWcW57uxVNrQZqFXjITpW3sIUQmHGG3qSb9mUah9MgMC4NeWhNOlNjXEYq3HjRAvL6arUviZGGJsBg6z0zsWA== + dependencies: + debug "2.6.9" + encodeurl "~1.0.2" + escape-html "~1.0.3" + on-finished "~2.3.0" + parseurl "~1.3.3" + statuses "~1.5.0" + unpipe "~1.0.0" + +find-cache-dir@^2.0.0: + version "2.1.0" + resolved 
"https://registry.yarnpkg.com/find-cache-dir/-/find-cache-dir-2.1.0.tgz#8d0f94cd13fe43c6c7c261a0d86115ca918c05f7" + integrity sha512-Tq6PixE0w/VMFfCgbONnkiQIVol/JJL7nRMi20fqzA4NRs9AfeqMGeRdPi3wIhYkxjeBaWh2rxwapn5Tu3IqOQ== + dependencies: + commondir "^1.0.1" + make-dir "^2.0.0" + pkg-dir "^3.0.0" + +find-up@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-3.0.0.tgz#49169f1d7993430646da61ecc5ae355c21c97b73" + integrity sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg== + dependencies: + locate-path "^3.0.0" + +find-up@^4.0.0, find-up@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +find-up@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" + integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== + dependencies: + locate-path "^6.0.0" + path-exists "^4.0.0" + +flat-cache@^3.0.4: + version "3.2.0" + resolved "https://registry.yarnpkg.com/flat-cache/-/flat-cache-3.2.0.tgz#2c0c2d5040c99b1632771a9d105725c0115363ee" + integrity sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw== + dependencies: + flatted "^3.2.9" + keyv "^4.5.3" + rimraf "^3.0.2" + +flatted@^3.2.9: + version "3.3.1" + resolved "https://registry.yarnpkg.com/flatted/-/flatted-3.3.1.tgz#21db470729a6734d4997002f439cb308987f567a" + integrity sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw== + +flow-enums-runtime@^0.0.6: + version "0.0.6" + resolved 
"https://registry.yarnpkg.com/flow-enums-runtime/-/flow-enums-runtime-0.0.6.tgz#5bb0cd1b0a3e471330f4d109039b7eba5cb3e787" + integrity sha512-3PYnM29RFXwvAN6Pc/scUfkI7RwhQ/xqyLUyPNlXUp9S40zI8nup9tUSrTLSVnWGBN38FNiGWbwZOB6uR4OGdw== + +flow-parser@0.*: + version "0.231.0" + resolved "https://registry.yarnpkg.com/flow-parser/-/flow-parser-0.231.0.tgz#13daa172b3c06ffacbb31025592dc0db41fe28f3" + integrity sha512-WVzuqwq7ZnvBceCG0DGeTQebZE+iIU0mlk5PmJgYj9DDrt+0isGC2m1ezW9vxL4V+HERJJo9ExppOnwKH2op6Q== + +for-each@^0.3.3: + version "0.3.3" + resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e" + integrity sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw== + dependencies: + is-callable "^1.1.3" + +fresh@0.5.2: + version "0.5.2" + resolved "https://registry.yarnpkg.com/fresh/-/fresh-0.5.2.tgz#3d8cadd90d976569fa835ab1f8e4b23a105605a7" + integrity sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q== + +fs-extra@^8.1.0: + version "8.1.0" + resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-8.1.0.tgz#49d43c45a88cd9677668cb7be1b46efdb8d2e1c0" + integrity sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^4.0.0" + universalify "^0.1.0" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== + +fsevents@^2.3.2: + version "2.3.3" + resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" + integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== + +function-bind@^1.1.2: + version "1.1.2" + resolved 
"https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== + +function.prototype.name@^1.1.5, function.prototype.name@^1.1.6: + version "1.1.6" + resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd" + integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + functions-have-names "^1.2.3" + +functions-have-names@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" + integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== + +gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" + resolved "https://registry.yarnpkg.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== + +get-caller-file@^2.0.1, get-caller-file@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd" + integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + has-proto "^1.0.1" + has-symbols "^1.0.3" 
+ hasown "^2.0.0" + +get-package-type@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/get-package-type/-/get-package-type-0.1.0.tgz#8de2d803cff44df3bc6c456e6668b36c3926e11a" + integrity sha512-pjzuKtY64GYfWizNAJ0fr9VqttZkNiK2iS430LtIHzjBEr6bX8Am2zm4sW4Ro5wjWW5cAlRL1qAMTcXbjNAO2Q== + +get-stream@^6.0.0: + version "6.0.1" + resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" + integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== + +get-symbol-description@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.2.tgz#533744d5aa20aca4e079c8e5daf7fd44202821f5" + integrity sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg== + dependencies: + call-bind "^1.0.5" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + +glob-parent@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== + dependencies: + is-glob "^4.0.1" + +glob-parent@^6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3" + integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== + dependencies: + is-glob "^4.0.3" + +glob@^7.1.1, glob@^7.1.3, glob@^7.1.4: + version "7.2.3" + resolved "https://registry.yarnpkg.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" + integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.1.1" + once "^1.3.0" + path-is-absolute "^1.0.0" + +globals@^11.1.0: + version 
"11.12.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +globals@^13.19.0: + version "13.24.0" + resolved "https://registry.yarnpkg.com/globals/-/globals-13.24.0.tgz#8432a19d78ce0c1e833949c36adb345400bb1171" + integrity sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ== + dependencies: + type-fest "^0.20.2" + +globalthis@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.3.tgz#5852882a52b80dc301b0660273e1ed082f0b6ccf" + integrity sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA== + dependencies: + define-properties "^1.1.3" + +globby@^11.1.0: + version "11.1.0" + resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b" + integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g== + dependencies: + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.2.9" + ignore "^5.2.0" + merge2 "^1.4.1" + slash "^3.0.0" + +gopd@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" + integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== + dependencies: + get-intrinsic "^1.1.3" + +graceful-fs@^4.1.11, graceful-fs@^4.1.3, graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.4, graceful-fs@^4.2.9: + version "4.2.11" + resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" + integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== + +graphemer@^1.4.0: + version "1.4.0" + resolved 
"https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" + integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag== + +has-bigints@^1.0.1, has-bigints@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa" + integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ== + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854" + integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg== + dependencies: + es-define-property "^1.0.0" + +has-proto@^1.0.1, has-proto@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd" + integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q== + +has-symbols@^1.0.2, has-symbols@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" + integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== + +has-tostringtag@^1.0.0, 
has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + +hasown@^2.0.0, hasown@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== + dependencies: + function-bind "^1.1.2" + +hermes-estree@0.19.1: + version "0.19.1" + resolved "https://registry.yarnpkg.com/hermes-estree/-/hermes-estree-0.19.1.tgz#d5924f5fac2bf0532547ae9f506d6db8f3c96392" + integrity sha512-daLGV3Q2MKk8w4evNMKwS8zBE/rcpA800nu1Q5kM08IKijoSnPe9Uo1iIxzPKRkn95IxxsgBMPeYHt3VG4ej2g== + +hermes-parser@0.19.1: + version "0.19.1" + resolved "https://registry.yarnpkg.com/hermes-parser/-/hermes-parser-0.19.1.tgz#1044348097165b7c93dc198a80b04ed5130d6b1a" + integrity sha512-Vp+bXzxYJWrpEuJ/vXxUsLnt0+y4q9zyi4zUlkLqD8FKv4LjIfOvP69R/9Lty3dCyKh0E2BU7Eypqr63/rKT/A== + dependencies: + hermes-estree "0.19.1" + +hermes-profile-transformer@^0.0.6: + version "0.0.6" + resolved "https://registry.yarnpkg.com/hermes-profile-transformer/-/hermes-profile-transformer-0.0.6.tgz#bd0f5ecceda80dd0ddaae443469ab26fb38fc27b" + integrity sha512-cnN7bQUm65UWOy6cbGcCcZ3rpwW8Q/j4OP5aWRhEry4Z2t2aR1cjrbp0BS+KiBN0smvP1caBgAuxutvyvJILzQ== + dependencies: + source-map "^0.7.3" + +html-escaper@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" + integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg== + +http-errors@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/http-errors/-/http-errors-2.0.0.tgz#b7774a1486ef73cf7667ac9ae0858c012c57b9d3" 
+ integrity sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ== + dependencies: + depd "2.0.0" + inherits "2.0.4" + setprototypeof "1.2.0" + statuses "2.0.1" + toidentifier "1.0.1" + +human-signals@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" + integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== + +ieee754@^1.1.13: + version "1.2.1" + resolved "https://registry.yarnpkg.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" + integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== + +ignore@^5.0.5, ignore@^5.2.0, ignore@^5.2.4: + version "5.3.1" + resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.1.tgz#5073e554cd42c5b33b394375f538b8593e34d4ef" + integrity sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw== + +image-size@^1.0.2: + version "1.1.1" + resolved "https://registry.yarnpkg.com/image-size/-/image-size-1.1.1.tgz#ddd67d4dc340e52ac29ce5f546a09f4e29e840ac" + integrity sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ== + dependencies: + queue "6.0.2" + +import-fresh@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-2.0.0.tgz#d81355c15612d386c61f9ddd3922d4304822a546" + integrity sha512-eZ5H8rcgYazHbKC3PG4ClHNykCSxtAhxSSEM+2mb+7evD2CKF5V7c0dNum7AdpDh0ZdICwZY9sRSn8f+KH96sg== + dependencies: + caller-path "^2.0.0" + resolve-from "^3.0.0" + +import-fresh@^3.2.1: + version "3.3.0" + resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.3.0.tgz#37162c25fcb9ebaa2e6e53d5b4d88ce17d9e0c2b" + integrity sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw== + dependencies: + parent-module "^1.0.0" + resolve-from 
"^4.0.0" + +import-local@^3.0.2: + version "3.1.0" + resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.1.0.tgz#b4479df8a5fd44f6cdce24070675676063c95cb4" + integrity sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg== + dependencies: + pkg-dir "^4.2.0" + resolve-cwd "^3.0.0" + +imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@2.0.4, inherits@^2.0.3, inherits@^2.0.4, inherits@~2.0.3: + version "2.0.4" + resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +internal-slot@^1.0.5, internal-slot@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.7.tgz#c06dcca3ed874249881007b0a5523b172a190802" + integrity sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g== + dependencies: + es-errors "^1.3.0" + hasown "^2.0.0" + side-channel "^1.0.4" + +invariant@^2.2.4: + version "2.2.4" + resolved "https://registry.yarnpkg.com/invariant/-/invariant-2.2.4.tgz#610f3c92c9359ce1db616e538008d23ff35158e6" + integrity sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA== + dependencies: + loose-envify "^1.0.0" + +is-array-buffer@^3.0.4: + version "3.0.4" + resolved 
"https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.4.tgz#7a1f92b3d61edd2bc65d24f130530ea93d7fae98" + integrity sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw== + dependencies: + call-bind "^1.0.2" + get-intrinsic "^1.2.1" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== + +is-async-function@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-async-function/-/is-async-function-2.0.0.tgz#8e4418efd3e5d3a6ebb0164c05ef5afb69aa9646" + integrity sha512-Y1JXKrfykRJGdlDwdKlLpLyMIiWqWvuSd17TvZk68PLAOGOoF4Xyav1z0Xhoi+gCYjZVeC5SI+hYFOfvXmGRCA== + dependencies: + has-tostringtag "^1.0.0" + +is-bigint@^1.0.1: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3" + integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg== + dependencies: + has-bigints "^1.0.1" + +is-boolean-object@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719" + integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7: + version "1.2.7" + resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055" + integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== + +is-core-module@^2.13.0: + version "2.13.1" + resolved 
"https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" + integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw== + dependencies: + hasown "^2.0.0" + +is-date-object@^1.0.1, is-date-object@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f" + integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ== + dependencies: + has-tostringtag "^1.0.0" + +is-directory@^0.3.1: + version "0.3.1" + resolved "https://registry.yarnpkg.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1" + integrity sha512-yVChGzahRFvbkscn2MlwGismPO12i9+znNruC5gVEntG3qu0xQMzsGg/JFbrsqDOHtHFPci+V5aP5T9I+yeKqw== + +is-docker@^2.0.0: + version "2.2.1" + resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" + integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== + +is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== + +is-finalizationregistry@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-finalizationregistry/-/is-finalizationregistry-1.0.2.tgz#c8749b65f17c133313e661b1289b95ad3dbd62e6" + integrity sha512-0by5vtUJs8iFQb5TYUHHPudOR+qXYIMKtiUzvLIZITZUjknFmziyBJuLhVRc+Ds0dREFlskDNJKYIdIzu/9pfw== + dependencies: + call-bind "^1.0.2" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity 
sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w== + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-generator-fn@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/is-generator-fn/-/is-generator-fn-2.1.0.tgz#7d140adc389aaf3011a8f2a2a4cfa6faadffb118" + integrity sha512-cTIB4yPYL/Grw0EaSzASzg6bBy9gqCofvWN8okThAYIxKJZC+udlRAmGbM0XLeniEJSs8uEgHPGuHSe1XsOLSQ== + +is-generator-function@^1.0.10: + version "1.0.10" + resolved "https://registry.yarnpkg.com/is-generator-function/-/is-generator-function-1.0.10.tgz#f1558baf1ac17e0deea7c0415c438351ff2b3c72" + integrity sha512-jsEjy9l3yiXEQ+PsXdmBwEPcOxaXWLspKdplFUVI9vq1iZgIekeC0L167qeu86czQaxed3q/Uzuw0swL0irL8A== + dependencies: + has-tostringtag "^1.0.0" + +is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3: + version "4.0.3" + resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" + integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== + dependencies: + is-extglob "^2.1.1" + +is-interactive@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" + integrity sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w== + +is-map@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-map/-/is-map-2.0.3.tgz#ede96b7fe1e270b3c4465e3a465658764926d62e" + integrity sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw== + +is-negative-zero@^2.0.3: + version "2.0.3" + resolved 
"https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747" + integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw== + +is-number-object@^1.0.4: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc" + integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ== + dependencies: + has-tostringtag "^1.0.0" + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-path-inside@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283" + integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== + +is-plain-object@^2.0.4: + version "2.0.4" + resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-2.0.4.tgz#2c163b3fafb1b606d9d17928f05c2a1c38e07677" + integrity sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og== + dependencies: + isobject "^3.0.1" + +is-regex@^1.1.4: + version "1.1.4" + resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958" + integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg== + dependencies: + call-bind "^1.0.2" + has-tostringtag "^1.0.0" + +is-set@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-set/-/is-set-2.0.3.tgz#8ab209ea424608141372ded6e0cb200ef1d9d01d" + integrity sha512-iPAjerrse27/ygGLxw+EBR9agv9Y6uLeYVJMu+QNCoouJ1/1ri0mGrcWpfCqFZuzzx3WjtwxG098X+n4OuRkPg== + 
+is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz#1237f1cba059cdb62431d378dcc37d9680181688" + integrity sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg== + dependencies: + call-bind "^1.0.7" + +is-stream@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" + integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== + +is-string@^1.0.5, is-string@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd" + integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg== + dependencies: + has-tostringtag "^1.0.0" + +is-symbol@^1.0.2, is-symbol@^1.0.3: + version "1.0.4" + resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c" + integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg== + dependencies: + has-symbols "^1.0.2" + +is-typed-array@^1.1.13: + version "1.1.13" + resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.13.tgz#d6c5ca56df62334959322d7d7dd1cca50debe229" + integrity sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw== + dependencies: + which-typed-array "^1.1.14" + +is-unicode-supported@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" + integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== + +is-weakmap@^2.0.2: + version "2.0.2" + resolved 
"https://registry.yarnpkg.com/is-weakmap/-/is-weakmap-2.0.2.tgz#bf72615d649dfe5f699079c54b83e47d1ae19cfd" + integrity sha512-K5pXYOm9wqY1RgjpL3YTkF39tni1XajUIkawTLUo9EZEVUFga5gSQJF8nNS7ZwJQ02y+1YCNYcMh+HIf1ZqE+w== + +is-weakref@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2" + integrity sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ== + dependencies: + call-bind "^1.0.2" + +is-weakset@^2.0.3: + version "2.0.3" + resolved "https://registry.yarnpkg.com/is-weakset/-/is-weakset-2.0.3.tgz#e801519df8c0c43e12ff2834eead84ec9e624007" + integrity sha512-LvIm3/KWzS9oRFHugab7d+M/GcBXuXX5xZkzPmN+NxihdQlZUQ4dWuSV1xR/sq6upL1TJEDrfBgRepHFdBtSNQ== + dependencies: + call-bind "^1.0.7" + get-intrinsic "^1.2.4" + +is-wsl@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-1.1.0.tgz#1f16e4aa22b04d1336b66188a66af3c600c3a66d" + integrity sha512-gfygJYZ2gLTDlmbWMI0CE2MwnFzSN/2SZfkMlItC4K/JBlsWVDB0bO6XhqcY13YXE7iMcAJnzTCJjPiTeJJ0Mw== + +is-wsl@^2.1.1, is-wsl@^2.2.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/is-wsl/-/is-wsl-2.2.0.tgz#74a4c76e77ca9fd3f932f290c17ea326cd157271" + integrity sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww== + dependencies: + is-docker "^2.0.0" + +isarray@^2.0.5: + version "2.0.5" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723" + integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw== + +isarray@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/isarray/-/isarray-1.0.0.tgz#bb935d48582cba168c06834957a54a3e07124f11" + integrity sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ== + +isexe@^2.0.0: + version "2.0.0" + resolved 
"https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== + +isobject@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/isobject/-/isobject-3.0.1.tgz#4e431e92b11a9731636aa1f9c8d1ccbcfdab78df" + integrity sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg== + +istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0: + version "3.2.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz#2d166c4b0644d43a39f04bf6c2edd1e585f31756" + integrity sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg== + +istanbul-lib-instrument@^5.0.4: + version "5.2.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-5.2.1.tgz#d10c8885c2125574e1c231cacadf955675e1ce3d" + integrity sha512-pzqtp31nLv/XFOzXGuvhCb8qhjmTVo5vjVk19XE4CRlSWz0KoeJ3bw9XsA7nOp9YBf4qHjwBxkDzKcME/J29Yg== + dependencies: + "@babel/core" "^7.12.3" + "@babel/parser" "^7.14.7" + "@istanbuljs/schema" "^0.1.2" + istanbul-lib-coverage "^3.2.0" + semver "^6.3.0" + +istanbul-lib-instrument@^6.0.0: + version "6.0.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.2.tgz#91655936cf7380e4e473383081e38478b69993b1" + integrity sha512-1WUsZ9R1lA0HtBSohTkm39WTPlNKSJ5iFk7UwqXkBLoHQT+hfqPsfsTDVuZdKGaBwn7din9bS7SsnoAr943hvw== + dependencies: + "@babel/core" "^7.23.9" + "@babel/parser" "^7.23.9" + "@istanbuljs/schema" "^0.1.3" + istanbul-lib-coverage "^3.2.0" + semver "^7.5.4" + +istanbul-lib-report@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz#908305bac9a5bd175ac6a74489eafd0fc2445a7d" + integrity sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw== + 
dependencies: + istanbul-lib-coverage "^3.0.0" + make-dir "^4.0.0" + supports-color "^7.1.0" + +istanbul-lib-source-maps@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/istanbul-lib-source-maps/-/istanbul-lib-source-maps-4.0.1.tgz#895f3a709fcfba34c6de5a42939022f3e4358551" + integrity sha512-n3s8EwkdFIJCG3BPKBYvskgXGoy88ARzvegkitk60NxRdwltLOTaH7CUiMRXvwYorl0Q712iEjcWB+fK/MrWVw== + dependencies: + debug "^4.1.1" + istanbul-lib-coverage "^3.0.0" + source-map "^0.6.1" + +istanbul-reports@^3.1.3: + version "3.1.7" + resolved "https://registry.yarnpkg.com/istanbul-reports/-/istanbul-reports-3.1.7.tgz#daed12b9e1dca518e15c056e1e537e741280fa0b" + integrity sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g== + dependencies: + html-escaper "^2.0.0" + istanbul-lib-report "^3.0.0" + +iterator.prototype@^1.1.2: + version "1.1.2" + resolved "https://registry.yarnpkg.com/iterator.prototype/-/iterator.prototype-1.1.2.tgz#5e29c8924f01916cb9335f1ff80619dcff22b0c0" + integrity sha512-DR33HMMr8EzwuRL8Y9D3u2BMj8+RqSE850jfGu59kS7tbmPLzGkZmVSfyCFSDxuZiEY6Rzt3T2NA/qU+NwVj1w== + dependencies: + define-properties "^1.2.1" + get-intrinsic "^1.2.1" + has-symbols "^1.0.3" + reflect.getprototypeof "^1.0.4" + set-function-name "^2.0.1" + +jest-changed-files@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a" + integrity sha512-fEArFiwf1BpQ+4bXSprcDc3/x4HSzL4al2tozwVpDFpsxALjLYdyiIK4e5Vz66GQJIbXJ82+35PtysofptNX2w== + dependencies: + execa "^5.0.0" + jest-util "^29.7.0" + p-limit "^3.1.0" + +jest-circus@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-circus/-/jest-circus-29.7.0.tgz#b6817a45fcc835d8b16d5962d0c026473ee3668a" + integrity sha512-3E1nCMgipcTkCocFwM90XXQab9bS+GMsjdpmPrlelaxwD93Ad8iVEjX/vvHPdLPnFf+L40u+5+iutRdA1N9myw== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/expect" "^29.7.0" 
+ "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + co "^4.6.0" + dedent "^1.0.0" + is-generator-fn "^2.0.0" + jest-each "^29.7.0" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-runtime "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + p-limit "^3.1.0" + pretty-format "^29.7.0" + pure-rand "^6.0.0" + slash "^3.0.0" + stack-utils "^2.0.3" + +jest-cli@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-cli/-/jest-cli-29.7.0.tgz#5592c940798e0cae677eec169264f2d839a37995" + integrity sha512-OVVobw2IubN/GSYsxETi+gOe7Ka59EFMR/twOU3Jb2GnKKeMGJB5SGUUrEz3SFVmJASUdZUzy83sLNNQ2gZslg== + dependencies: + "@jest/core" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" + chalk "^4.0.0" + create-jest "^29.7.0" + exit "^0.1.2" + import-local "^3.0.2" + jest-config "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + yargs "^17.3.1" + +jest-config@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-config/-/jest-config-29.7.0.tgz#bcbda8806dbcc01b1e316a46bb74085a84b0245f" + integrity sha512-uXbpfeQ7R6TZBqI3/TxCU4q4ttk3u0PJeC+E0zbfSoSjq6bJ7buBPxzQPL0ifrkY4DNu4JUdk0ImlBUYi840eQ== + dependencies: + "@babel/core" "^7.11.6" + "@jest/test-sequencer" "^29.7.0" + "@jest/types" "^29.6.3" + babel-jest "^29.7.0" + chalk "^4.0.0" + ci-info "^3.2.0" + deepmerge "^4.2.2" + glob "^7.1.3" + graceful-fs "^4.2.9" + jest-circus "^29.7.0" + jest-environment-node "^29.7.0" + jest-get-type "^29.6.3" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-runner "^29.7.0" + jest-util "^29.7.0" + jest-validate "^29.7.0" + micromatch "^4.0.4" + parse-json "^5.2.0" + pretty-format "^29.7.0" + slash "^3.0.0" + strip-json-comments "^3.1.1" + +jest-diff@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-diff/-/jest-diff-29.7.0.tgz#017934a66ebb7ecf6f205e84699be10afd70458a" + integrity 
sha512-LMIgiIrhigmPrs03JHpxUh2yISK3vLFPkAodPeo0+BuF7wA2FoQbkEg1u8gBYBThncu7e1oEDUfIXVuTqLRUjw== + dependencies: + chalk "^4.0.0" + diff-sequences "^29.6.3" + jest-get-type "^29.6.3" + pretty-format "^29.7.0" + +jest-docblock@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-docblock/-/jest-docblock-29.7.0.tgz#8fddb6adc3cdc955c93e2a87f61cfd350d5d119a" + integrity sha512-q617Auw3A612guyaFgsbFeYpNP5t2aoUNLwBUbc/0kD1R4t9ixDbyFTHd1nok4epoVFpr7PmeWHrhvuV3XaJ4g== + dependencies: + detect-newline "^3.0.0" + +jest-each@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-each/-/jest-each-29.7.0.tgz#162a9b3f2328bdd991beaabffbb74745e56577d1" + integrity sha512-gns+Er14+ZrEoC5fhOfYCY1LOHHr0TI+rQUHZS8Ttw2l7gl+80eHc/gFf2Ktkw0+SIACDTeWvpFcv3B04VembQ== + dependencies: + "@jest/types" "^29.6.3" + chalk "^4.0.0" + jest-get-type "^29.6.3" + jest-util "^29.7.0" + pretty-format "^29.7.0" + +jest-environment-node@^29.6.3, jest-environment-node@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376" + integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/fake-timers" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + jest-mock "^29.7.0" + jest-util "^29.7.0" + +jest-get-type@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/jest-get-type/-/jest-get-type-29.6.3.tgz#36f499fdcea197c1045a127319c0481723908fd1" + integrity sha512-zrteXnqYxfQh7l5FHyL38jL39di8H8rHoecLH3JNxH3BwOrBsNeabdap5e0I23lD4HHI8W5VFBZqG4Eaq5LNcw== + +jest-haste-map@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-haste-map/-/jest-haste-map-29.7.0.tgz#3c2396524482f5a0506376e6c858c3bbcc17b104" + integrity sha512-fP8u2pyfqx0K1rGn1R9pyE0/KTn+G7PxktWidOBTqFPLYX0b9ksaMFkhK5vrS3DVun09pckLdlx90QthlW7AmA== + dependencies: 
+ "@jest/types" "^29.6.3" + "@types/graceful-fs" "^4.1.3" + "@types/node" "*" + anymatch "^3.0.3" + fb-watchman "^2.0.0" + graceful-fs "^4.2.9" + jest-regex-util "^29.6.3" + jest-util "^29.7.0" + jest-worker "^29.7.0" + micromatch "^4.0.4" + walker "^1.0.8" + optionalDependencies: + fsevents "^2.3.2" + +jest-leak-detector@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-leak-detector/-/jest-leak-detector-29.7.0.tgz#5b7ec0dadfdfec0ca383dc9aa016d36b5ea4c728" + integrity sha512-kYA8IJcSYtST2BY9I+SMC32nDpBT3J2NvWJx8+JCuCdl/CR1I4EKUJROiP8XtCcxqgTTBGJNdbB1A8XRKbTetw== + dependencies: + jest-get-type "^29.6.3" + pretty-format "^29.7.0" + +jest-matcher-utils@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-matcher-utils/-/jest-matcher-utils-29.7.0.tgz#ae8fec79ff249fd592ce80e3ee474e83a6c44f12" + integrity sha512-sBkD+Xi9DtcChsI3L3u0+N0opgPYnCRPtGcQYrgXmR+hmt/fYfWAL0xRXYU8eWOdfuLgBe0YCW3AFtnRLagq/g== + dependencies: + chalk "^4.0.0" + jest-diff "^29.7.0" + jest-get-type "^29.6.3" + pretty-format "^29.7.0" + +jest-message-util@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-message-util/-/jest-message-util-29.7.0.tgz#8bc392e204e95dfe7564abbe72a404e28e51f7f3" + integrity sha512-GBEV4GRADeP+qtB2+6u61stea8mGcOT4mCtrYISZwfu9/ISHFJ/5zOMXYbpBE9RsS5+Gb63DW4FgmnKJ79Kf6w== + dependencies: + "@babel/code-frame" "^7.12.13" + "@jest/types" "^29.6.3" + "@types/stack-utils" "^2.0.0" + chalk "^4.0.0" + graceful-fs "^4.2.9" + micromatch "^4.0.4" + pretty-format "^29.7.0" + slash "^3.0.0" + stack-utils "^2.0.3" + +jest-mock@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-mock/-/jest-mock-29.7.0.tgz#4e836cf60e99c6fcfabe9f99d017f3fdd50a6347" + integrity sha512-ITOMZn+UkYS4ZFh83xYAOzWStloNzJFO2s8DWrE4lhtGD+AorgnbkiKERe4wQVBydIGPx059g6riW5Btp6Llnw== + dependencies: + "@jest/types" "^29.6.3" + "@types/node" "*" + jest-util "^29.7.0" + +jest-pnp-resolver@^1.2.2: + version "1.2.3" + resolved 
"https://registry.yarnpkg.com/jest-pnp-resolver/-/jest-pnp-resolver-1.2.3.tgz#930b1546164d4ad5937d5540e711d4d38d4cad2e" + integrity sha512-+3NpwQEnRoIBtx4fyhblQDPgJI0H1IEIkX7ShLUjPGA7TtUTvI1oiKi3SR4oBR0hQhQR80l4WAe5RrXBwWMA8w== + +jest-regex-util@^29.6.3: + version "29.6.3" + resolved "https://registry.yarnpkg.com/jest-regex-util/-/jest-regex-util-29.6.3.tgz#4a556d9c776af68e1c5f48194f4d0327d24e8a52" + integrity sha512-KJJBsRCyyLNWCNBOvZyRDnAIfUiRJ8v+hOBQYGn8gDyF3UegwiP4gwRR3/SDa42g1YbVycTidUF3rKjyLFDWbg== + +jest-resolve-dependencies@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-resolve-dependencies/-/jest-resolve-dependencies-29.7.0.tgz#1b04f2c095f37fc776ff40803dc92921b1e88428" + integrity sha512-un0zD/6qxJ+S0et7WxeI3H5XSe9lTBBR7bOHCHXkKR6luG5mwDDlIzVQ0V5cZCuoTgEdcdwzTghYkTWfubi+nA== + dependencies: + jest-regex-util "^29.6.3" + jest-snapshot "^29.7.0" + +jest-resolve@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-resolve/-/jest-resolve-29.7.0.tgz#64d6a8992dd26f635ab0c01e5eef4399c6bcbc30" + integrity sha512-IOVhZSrg+UvVAshDSDtHyFCCBUl/Q3AAJv8iZ6ZjnZ74xzvwuzLXid9IIIPgTnY62SJjfuupMKZsZQRsCvxEgA== + dependencies: + chalk "^4.0.0" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + jest-pnp-resolver "^1.2.2" + jest-util "^29.7.0" + jest-validate "^29.7.0" + resolve "^1.20.0" + resolve.exports "^2.0.0" + slash "^3.0.0" + +jest-runner@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-runner/-/jest-runner-29.7.0.tgz#809af072d408a53dcfd2e849a4c976d3132f718e" + integrity sha512-fsc4N6cPCAahybGBfTRcq5wFR6fpLznMg47sY5aDpsoejOcVYFb07AHuSnR0liMcPTgBsA3ZJL6kFOjPdoNipQ== + dependencies: + "@jest/console" "^29.7.0" + "@jest/environment" "^29.7.0" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + emittery "^0.13.1" + graceful-fs "^4.2.9" + jest-docblock "^29.7.0" + jest-environment-node "^29.7.0" + jest-haste-map "^29.7.0" + 
jest-leak-detector "^29.7.0" + jest-message-util "^29.7.0" + jest-resolve "^29.7.0" + jest-runtime "^29.7.0" + jest-util "^29.7.0" + jest-watcher "^29.7.0" + jest-worker "^29.7.0" + p-limit "^3.1.0" + source-map-support "0.5.13" + +jest-runtime@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-runtime/-/jest-runtime-29.7.0.tgz#efecb3141cf7d3767a3a0cc8f7c9990587d3d817" + integrity sha512-gUnLjgwdGqW7B4LvOIkbKs9WGbn+QLqRQQ9juC6HndeDiezIwhDP+mhMwHWCEcfQ5RUXa6OPnFF8BJh5xegwwQ== + dependencies: + "@jest/environment" "^29.7.0" + "@jest/fake-timers" "^29.7.0" + "@jest/globals" "^29.7.0" + "@jest/source-map" "^29.6.3" + "@jest/test-result" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + cjs-module-lexer "^1.0.0" + collect-v8-coverage "^1.0.0" + glob "^7.1.3" + graceful-fs "^4.2.9" + jest-haste-map "^29.7.0" + jest-message-util "^29.7.0" + jest-mock "^29.7.0" + jest-regex-util "^29.6.3" + jest-resolve "^29.7.0" + jest-snapshot "^29.7.0" + jest-util "^29.7.0" + slash "^3.0.0" + strip-bom "^4.0.0" + +jest-snapshot@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-snapshot/-/jest-snapshot-29.7.0.tgz#c2c574c3f51865da1bb329036778a69bf88a6be5" + integrity sha512-Rm0BMWtxBcioHr1/OX5YCP8Uov4riHvKPknOGs804Zg9JGZgmIBkbtlxJC/7Z4msKYVbIJtfU+tKb8xlYNfdkw== + dependencies: + "@babel/core" "^7.11.6" + "@babel/generator" "^7.7.2" + "@babel/plugin-syntax-jsx" "^7.7.2" + "@babel/plugin-syntax-typescript" "^7.7.2" + "@babel/types" "^7.3.3" + "@jest/expect-utils" "^29.7.0" + "@jest/transform" "^29.7.0" + "@jest/types" "^29.6.3" + babel-preset-current-node-syntax "^1.0.0" + chalk "^4.0.0" + expect "^29.7.0" + graceful-fs "^4.2.9" + jest-diff "^29.7.0" + jest-get-type "^29.6.3" + jest-matcher-utils "^29.7.0" + jest-message-util "^29.7.0" + jest-util "^29.7.0" + natural-compare "^1.4.0" + pretty-format "^29.7.0" + semver "^7.5.3" + +jest-util@^29.7.0: + version "29.7.0" + resolved 
"https://registry.yarnpkg.com/jest-util/-/jest-util-29.7.0.tgz#23c2b62bfb22be82b44de98055802ff3710fc0bc" + integrity sha512-z6EbKajIpqGKU56y5KBUgy1dt1ihhQJgWzUlZHArA/+X2ad7Cb5iF+AK1EWVL/Bo7Rz9uurpqw6SiBCefUbCGA== + dependencies: + "@jest/types" "^29.6.3" + "@types/node" "*" + chalk "^4.0.0" + ci-info "^3.2.0" + graceful-fs "^4.2.9" + picomatch "^2.2.3" + +jest-validate@^29.6.3, jest-validate@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-validate/-/jest-validate-29.7.0.tgz#7bf705511c64da591d46b15fce41400d52147d9c" + integrity sha512-ZB7wHqaRGVw/9hST/OuFUReG7M8vKeq0/J2egIGLdvjHCmYqGARhzXmtgi+gVeZ5uXFF219aOc3Ls2yLg27tkw== + dependencies: + "@jest/types" "^29.6.3" + camelcase "^6.2.0" + chalk "^4.0.0" + jest-get-type "^29.6.3" + leven "^3.1.0" + pretty-format "^29.7.0" + +jest-watcher@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-watcher/-/jest-watcher-29.7.0.tgz#7810d30d619c3a62093223ce6bb359ca1b28a2f2" + integrity sha512-49Fg7WXkU3Vl2h6LbLtMQ/HyB6rXSIX7SqvBLQmssRBGN9I0PNvPmAmCWSOY6SOvrjhI/F7/bGAv9RtnsPA03g== + dependencies: + "@jest/test-result" "^29.7.0" + "@jest/types" "^29.6.3" + "@types/node" "*" + ansi-escapes "^4.2.1" + chalk "^4.0.0" + emittery "^0.13.1" + jest-util "^29.7.0" + string-length "^4.0.1" + +jest-worker@^29.6.3, jest-worker@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest-worker/-/jest-worker-29.7.0.tgz#acad073acbbaeb7262bd5389e1bcf43e10058d4a" + integrity sha512-eIz2msL/EzL9UFTFFx7jBTkeZfku0yUAyZZZmJ93H2TYEiroIx2PQjEXcwYtYl8zXCxb+PAmA2hLIt/6ZEkPHw== + dependencies: + "@types/node" "*" + jest-util "^29.7.0" + merge-stream "^2.0.0" + supports-color "^8.0.0" + +jest@^29.6.3: + version "29.7.0" + resolved "https://registry.yarnpkg.com/jest/-/jest-29.7.0.tgz#994676fc24177f088f1c5e3737f5697204ff2613" + integrity sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw== + dependencies: + "@jest/core" "^29.7.0" + "@jest/types" "^29.6.3" + 
import-local "^3.0.2" + jest-cli "^29.7.0" + +joi@^17.2.1: + version "17.12.2" + resolved "https://registry.yarnpkg.com/joi/-/joi-17.12.2.tgz#283a664dabb80c7e52943c557aab82faea09f521" + integrity sha512-RonXAIzCiHLc8ss3Ibuz45u28GOsWE1UpfDXLbN/9NKbL4tCJf8TWYVKsoYuuh+sAUt7fsSNpA+r2+TBA6Wjmw== + dependencies: + "@hapi/hoek" "^9.3.0" + "@hapi/topo" "^5.1.0" + "@sideway/address" "^4.1.5" + "@sideway/formula" "^3.0.1" + "@sideway/pinpoint" "^2.0.0" + +"js-tokens@^3.0.0 || ^4.0.0", js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +js-yaml@^3.13.1: + version "3.14.1" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" + integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +js-yaml@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-4.1.0.tgz#c1fb65f8f5017901cdd2c951864ba18458a10602" + integrity sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA== + dependencies: + argparse "^2.0.1" + +jsc-android@^250231.0.0: + version "250231.0.0" + resolved "https://registry.yarnpkg.com/jsc-android/-/jsc-android-250231.0.0.tgz#91720f8df382a108872fa4b3f558f33ba5e95262" + integrity sha512-rS46PvsjYmdmuz1OAWXY/1kCYG7pnf1TBqeTiOJr1iDz7s5DLxxC9n/ZMknLDxzYzNVfI7R95MH10emSSG1Wuw== + +jsc-safe-url@^0.2.2: + version "0.2.4" + resolved "https://registry.yarnpkg.com/jsc-safe-url/-/jsc-safe-url-0.2.4.tgz#141c14fbb43791e88d5dc64e85a374575a83477a" + integrity sha512-0wM3YBWtYePOjfyXQH5MWQ8H7sdk5EXSwZvmSLKk2RboVQ2Bu239jycHDz5J/8Blf3K0Qnoy2b6xD+z10MFB+Q== + +jscodeshift@^0.14.0: + version "0.14.0" + resolved 
"https://registry.yarnpkg.com/jscodeshift/-/jscodeshift-0.14.0.tgz#7542e6715d6d2e8bde0b4e883f0ccea358b46881" + integrity sha512-7eCC1knD7bLUPuSCwXsMZUH51O8jIcoVyKtI6P0XM0IVzlGjckPy3FIwQlorzbN0Sg79oK+RlohN32Mqf/lrYA== + dependencies: + "@babel/core" "^7.13.16" + "@babel/parser" "^7.13.16" + "@babel/plugin-proposal-class-properties" "^7.13.0" + "@babel/plugin-proposal-nullish-coalescing-operator" "^7.13.8" + "@babel/plugin-proposal-optional-chaining" "^7.13.12" + "@babel/plugin-transform-modules-commonjs" "^7.13.8" + "@babel/preset-flow" "^7.13.13" + "@babel/preset-typescript" "^7.13.0" + "@babel/register" "^7.13.16" + babel-core "^7.0.0-bridge.0" + chalk "^4.1.2" + flow-parser "0.*" + graceful-fs "^4.2.4" + micromatch "^4.0.4" + neo-async "^2.5.0" + node-dir "^0.1.17" + recast "^0.21.0" + temp "^0.8.4" + write-file-atomic "^2.3.0" + +jsesc@^2.5.1: + version "2.5.2" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-2.5.2.tgz#80564d2e483dacf6e8ef209650a67df3f0c283a4" + integrity sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA== + +jsesc@~0.5.0: + version "0.5.0" + resolved "https://registry.yarnpkg.com/jsesc/-/jsesc-0.5.0.tgz#e7dee66e35d6fc16f710fe91d5cf69f70f08911d" + integrity sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA== + +json-buffer@3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13" + integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== + +json-parse-better-errors@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== + +json-parse-even-better-errors@^2.3.0: + version "2.3.1" + resolved 
"https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== + +json5@^2.2.3: + version "2.2.3" + resolved "https://registry.yarnpkg.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" + integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== + +jsonfile@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/jsonfile/-/jsonfile-4.0.0.tgz#8771aae0799b64076b76640fca058f9c10e33ecb" + integrity sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg== + optionalDependencies: + graceful-fs "^4.1.6" + +"jsx-ast-utils@^2.4.1 || ^3.0.0": + version "3.3.5" + resolved "https://registry.yarnpkg.com/jsx-ast-utils/-/jsx-ast-utils-3.3.5.tgz#4766bd05a8e2a11af222becd19e15575e52a853a" + integrity sha512-ZZow9HBI5O6EPgSJLUb8n2NKgmVWTwCvHGwFuJlMjvLFqlGG6pjirPhtdsseaLZjSibD8eegzmYpUZwoIlj2cQ== + dependencies: + array-includes "^3.1.6" + array.prototype.flat "^1.3.1" + object.assign "^4.1.4" + object.values "^1.1.6" + +keyv@^4.5.3: + version "4.5.4" + resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.5.4.tgz#a879a99e29452f942439f2a405e3af8b31d4de93" + 
integrity sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw== + dependencies: + json-buffer "3.0.1" + +kind-of@^6.0.2: + version "6.0.3" + resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd" + integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw== + +kleur@^3.0.3: + version "3.0.3" + resolved "https://registry.yarnpkg.com/kleur/-/kleur-3.0.3.tgz#a79c9ecc86ee1ce3fa6206d1216c501f147fc07e" + integrity sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w== + +leven@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/leven/-/leven-3.1.0.tgz#77891de834064cccba82ae7842bb6b14a13ed7f2" + integrity sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A== + +levn@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/levn/-/levn-0.4.1.tgz#ae4562c007473b932a6200d403268dd2fffc6ade" + integrity sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ== + dependencies: + prelude-ls "^1.2.1" + type-check "~0.4.0" + +lighthouse-logger@^1.0.0: + version "1.4.2" + resolved "https://registry.yarnpkg.com/lighthouse-logger/-/lighthouse-logger-1.4.2.tgz#aef90f9e97cd81db367c7634292ee22079280aaa" + integrity sha512-gPWxznF6TKmUHrOQjlVo2UbaL2EJ71mb2CCeRs/2qBpi4L/g4LUVc9+3lKQ6DTUZwJswfM7ainGrLO1+fOqa2g== + dependencies: + debug "^2.6.9" + marky "^1.2.2" + +lines-and-columns@^1.1.6: + version "1.2.4" + resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" + integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== + +locate-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-3.0.0.tgz#dbec3b3ab759758071b58fe59fc41871af21400e" + 
integrity sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A== + dependencies: + p-locate "^3.0.0" + path-exists "^3.0.0" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +locate-path@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" + integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== + dependencies: + p-locate "^5.0.0" + +lodash.debounce@^4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== + +lodash.merge@^4.6.2: + version "4.6.2" + resolved "https://registry.yarnpkg.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" + integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== + +lodash.throttle@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/lodash.throttle/-/lodash.throttle-4.1.1.tgz#c23e91b710242ac70c37f1e1cda9274cc39bf2f4" + integrity sha512-wIkUCfVKpVsWo3JSZlc+8MB5it+2AN5W8J7YVMST30UrvcQNZ1Okbj+rbVniijTWE6FGYy4XJq/rHkas8qJMLQ== + +lodash@^4.17.21: + version "4.17.21" + resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +log-symbols@^4.1.0: + version "4.1.0" + resolved 
"https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" + integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== + dependencies: + chalk "^4.1.0" + is-unicode-supported "^0.1.0" + +logkitty@^0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/logkitty/-/logkitty-0.7.1.tgz#8e8d62f4085a826e8d38987722570234e33c6aa7" + integrity sha512-/3ER20CTTbahrCrpYfPn7Xavv9diBROZpoXGVZDWMw4b/X4uuUwAC0ki85tgsdMRONURyIJbcOvS94QsUBYPbQ== + dependencies: + ansi-fragments "^0.2.1" + dayjs "^1.8.15" + yargs "^15.1.0" + +loose-envify@^1.0.0, loose-envify@^1.1.0, loose-envify@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/loose-envify/-/loose-envify-1.4.0.tgz#71ee51fa7be4caec1a63839f7e682d8132d30caf" + integrity sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q== + dependencies: + js-tokens "^3.0.0 || ^4.0.0" + +lru-cache@^5.1.1: + version "5.1.1" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== + dependencies: + yallist "^3.0.2" + +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + +make-dir@^2.0.0, make-dir@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-2.1.0.tgz#5f0310e18b8be898cc07009295a30ae41e91e6f5" + integrity sha512-LS9X+dc8KLxXCb8dni79fLIIUA5VyZoyjSMCwTluaXA0o27cCK0bhXkpgw+sTXVpPy/lSO57ilRixqk0vDmtRA== + dependencies: + pify "^4.0.1" + semver "^5.6.0" + +make-dir@^4.0.0: + version "4.0.0" + resolved 
"https://registry.yarnpkg.com/make-dir/-/make-dir-4.0.0.tgz#c3c2307a771277cd9638305f915c29ae741b614e" + integrity sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw== + dependencies: + semver "^7.5.3" + +makeerror@1.0.12: + version "1.0.12" + resolved "https://registry.yarnpkg.com/makeerror/-/makeerror-1.0.12.tgz#3e5dd2079a82e812e983cc6610c4a2cb0eaa801a" + integrity sha512-JmqCvUhmt43madlpFzG4BQzG2Z3m6tvQDNKdClZnO3VbIudJYmxsT0FNJMeiB2+JTSlTQTSbU8QdesVmwJcmLg== + dependencies: + tmpl "1.0.5" + +marky@^1.2.2: + version "1.2.5" + resolved "https://registry.yarnpkg.com/marky/-/marky-1.2.5.tgz#55796b688cbd72390d2d399eaaf1832c9413e3c0" + integrity sha512-q9JtQJKjpsVxCRVgQ+WapguSbKC3SQ5HEzFGPAJMStgh3QjCawp00UKv3MTTAArTmGmmPUvllHZoNbZ3gs0I+Q== + +memoize-one@^5.0.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/memoize-one/-/memoize-one-5.2.1.tgz#8337aa3c4335581839ec01c3d594090cebe8f00e" + integrity sha512-zYiwtZUcYyXKo/np96AGZAckk+FWWsUdJ3cHGGmld7+AhvcWmQyGCYUh1hc4Q/pkOhb65dQR/pqCyK0cOaHz4Q== + +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== + +merge2@^1.3.0, merge2@^1.4.1: + version "1.4.1" + resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + +metro-babel-transformer@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-babel-transformer/-/metro-babel-transformer-0.80.6.tgz#49df74af71ecc9871636cf469726debcb5a1c858" + integrity sha512-ssuoVC4OzqaOt3LpwfUbDfBlFGRu9v1Yf2JJnKPz0ROYHNjSBws4aUesqQQ/Ea8DbiH7TK4j4cJmm+XjdHmgqA== + dependencies: + "@babel/core" "^7.20.0" + hermes-parser "0.19.1" + nullthrows "^1.1.1" + 
+metro-cache-key@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-cache-key/-/metro-cache-key-0.80.6.tgz#48fe84477f6408478a33c363a8f5eaceea5cf853" + integrity sha512-DFmjQacC8m/S3HpELklLMWkPGP/fZPX3BSgjd0xQvwIvWyFwk8Nn/lfp/uWdEVDtDSIr64/anXU5uWohGwlWXw== + +metro-cache@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-cache/-/metro-cache-0.80.6.tgz#05fdd83482f4132243b27713716c289532bd41c3" + integrity sha512-NP81pHSPkzs+iNlpVkJqijrpcd6lfuDAunYH9/Rn8oLNz0yLfkl8lt+xOdUU4IkFt3oVcTBEFCnzAzv4B8YhyA== + dependencies: + metro-core "0.80.6" + rimraf "^3.0.2" + +metro-config@0.80.6, metro-config@^0.80.3: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-config/-/metro-config-0.80.6.tgz#b404e2f24b22c9c683abcf8da3efa8c87e382ad7" + integrity sha512-vHYYvJpRTWYbmvqlR7i04xQpZCHJ6yfZ/xIcPdz2ssbdJGGJbiT1Aar9wr8RAhsccSxdJgfE5B1DB8Mo+DnhIg== + dependencies: + connect "^3.6.5" + cosmiconfig "^5.0.5" + jest-validate "^29.6.3" + metro "0.80.6" + metro-cache "0.80.6" + metro-core "0.80.6" + metro-runtime "0.80.6" + +metro-core@0.80.6, metro-core@^0.80.3: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-core/-/metro-core-0.80.6.tgz#b13fa98417e70203d2533c5d0f5c4d541f3d9fbe" + integrity sha512-fn4rryTUAwzFJWj7VIPDH4CcW/q7MV4oGobqR6NsuxZoIGYrVpK7pBasumu5YbCqifuErMs5s23BhmrDNeZURw== + dependencies: + lodash.throttle "^4.1.1" + metro-resolver "0.80.6" + +metro-file-map@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-file-map/-/metro-file-map-0.80.6.tgz#9d96e54bd3bde6747b6860702a098a333599bba2" + integrity sha512-S3CUqvpXpc+q3q+hCEWvFKhVqgq0VmXdZQDF6u7ue86E2elq1XLnfLOt9JSpwyhpMQRyysjSCnd/Yh6GZMNHoQ== + dependencies: + anymatch "^3.0.3" + debug "^2.2.0" + fb-watchman "^2.0.0" + graceful-fs "^4.2.4" + invariant "^2.2.4" + jest-worker "^29.6.3" + micromatch "^4.0.4" + node-abort-controller "^3.1.1" + nullthrows "^1.1.1" + walker "^1.0.7" + optionalDependencies: + fsevents "^2.3.2" + 
+metro-minify-terser@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-minify-terser/-/metro-minify-terser-0.80.6.tgz#27193867ec177c5a9b636725ff1c94c65ce701cc" + integrity sha512-83eZaH2+B+jP92KuodPqXknzwmiboKAuZY4doRfTEEXAG57pNVNN6cqSRJlwDnmaTBKRffxoncBXbYqHQgulgg== + dependencies: + terser "^5.15.0" + +metro-resolver@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-resolver/-/metro-resolver-0.80.6.tgz#b648b8c661bc4cf091efd11affa010dd11f58bec" + integrity sha512-R7trfglG4zY4X9XyM9cvuffAhQ9W1reWoahr1jdEWa6rOI8PyM0qXjcsb8l+fsOQhdSiVlkKcYAmkyrs1S/zrA== + +metro-runtime@0.80.6, metro-runtime@^0.80.3: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-runtime/-/metro-runtime-0.80.6.tgz#efd566a02e63e6f2bd08b5e2a8fe57333f1a2c4e" + integrity sha512-21GQVd0pp2nACoK0C2PL8mBsEhIFUFFntYrWRlYNHtPQoqDzddrPEIgkyaABGXGued+dZoBlFQl+LASlmmfkvw== + dependencies: + "@babel/runtime" "^7.0.0" + +metro-source-map@0.80.6, metro-source-map@^0.80.3: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-source-map/-/metro-source-map-0.80.6.tgz#f129a36bb5b74e3ae0d4cbbcdc62904fa0161fb1" + integrity sha512-lqDuSLctWy9Qccu4Zl0YB1PzItpsqcKGb1nK0aDY+lzJ26X65OCib2VzHlj+xj7e4PiIKOfsvDCczCBz4cnxdg== + dependencies: + "@babel/traverse" "^7.20.0" + "@babel/types" "^7.20.0" + invariant "^2.2.4" + metro-symbolicate "0.80.6" + nullthrows "^1.1.1" + ob1 "0.80.6" + source-map "^0.5.6" + vlq "^1.0.0" + +metro-symbolicate@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-symbolicate/-/metro-symbolicate-0.80.6.tgz#8690af051f33c98c0e8efcd779aebbfdea9fabef" + integrity sha512-SGwKeBi+lK7NmM5+EcW6DyRRa9HmGSvH0LJtlT4XoRMbpxzsLYs0qUEA+olD96pOIP+ta7I8S30nQr2ttqgO8A== + dependencies: + invariant "^2.2.4" + metro-source-map "0.80.6" + nullthrows "^1.1.1" + source-map "^0.5.6" + through2 "^2.0.1" + vlq "^1.0.0" + +metro-transform-plugins@0.80.6: + version "0.80.6" + resolved 
"https://registry.yarnpkg.com/metro-transform-plugins/-/metro-transform-plugins-0.80.6.tgz#f9039384692fc8cd51a67d1cd7c35964e7d374e8" + integrity sha512-e04tdTC5Fy1vOQrTTXb5biao0t7nR/h+b1IaBTlM5UaHaAJZr658uVOoZhkRxKjbhF2mIwJ/8DdorD2CA15BCg== + dependencies: + "@babel/core" "^7.20.0" + "@babel/generator" "^7.20.0" + "@babel/template" "^7.0.0" + "@babel/traverse" "^7.20.0" + nullthrows "^1.1.1" + +metro-transform-worker@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro-transform-worker/-/metro-transform-worker-0.80.6.tgz#fc09822ce360eaa929b14408e4af97a2fa8feba6" + integrity sha512-jV+VgCLiCj5jQadW/h09qJaqDreL6XcBRY52STCoz2xWn6WWLLMB5nXzQtvFNPmnIOps+Xu8+d5hiPcBNOhYmA== + dependencies: + "@babel/core" "^7.20.0" + "@babel/generator" "^7.20.0" + "@babel/parser" "^7.20.0" + "@babel/types" "^7.20.0" + metro "0.80.6" + metro-babel-transformer "0.80.6" + metro-cache "0.80.6" + metro-cache-key "0.80.6" + metro-minify-terser "0.80.6" + metro-source-map "0.80.6" + metro-transform-plugins "0.80.6" + nullthrows "^1.1.1" + +metro@0.80.6, metro@^0.80.3: + version "0.80.6" + resolved "https://registry.yarnpkg.com/metro/-/metro-0.80.6.tgz#11cf77700b8be767f6663c1d6f6ed287dd686535" + integrity sha512-f6Nhnht9TxVRP6zdBq9J2jNdeDBxRmJFnjxhQS1GeCpokBvI6fTXq+wHTLz5jZA+75fwbkPSzBxBJzQa6xi0AQ== + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/core" "^7.20.0" + "@babel/generator" "^7.20.0" + "@babel/parser" "^7.20.0" + "@babel/template" "^7.0.0" + "@babel/traverse" "^7.20.0" + "@babel/types" "^7.20.0" + accepts "^1.3.7" + chalk "^4.0.0" + ci-info "^2.0.0" + connect "^3.6.5" + debug "^2.2.0" + denodeify "^1.2.1" + error-stack-parser "^2.0.6" + graceful-fs "^4.2.4" + hermes-parser "0.19.1" + image-size "^1.0.2" + invariant "^2.2.4" + jest-worker "^29.6.3" + jsc-safe-url "^0.2.2" + lodash.throttle "^4.1.1" + metro-babel-transformer "0.80.6" + metro-cache "0.80.6" + metro-cache-key "0.80.6" + metro-config "0.80.6" + metro-core "0.80.6" + metro-file-map "0.80.6" + 
metro-resolver "0.80.6" + metro-runtime "0.80.6" + metro-source-map "0.80.6" + metro-symbolicate "0.80.6" + metro-transform-plugins "0.80.6" + metro-transform-worker "0.80.6" + mime-types "^2.1.27" + node-fetch "^2.2.0" + nullthrows "^1.1.1" + rimraf "^3.0.2" + serialize-error "^2.1.0" + source-map "^0.5.6" + strip-ansi "^6.0.0" + throat "^5.0.0" + ws "^7.5.1" + yargs "^17.6.2" + +micromatch@^4.0.4: + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== + dependencies: + braces "^3.0.2" + picomatch "^2.3.1" + +mime-db@1.52.0, "mime-db@>= 1.43.0 < 2": + version "1.52.0" + resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== + +mime-types@^2.1.27, mime-types@~2.1.34: + version "2.1.35" + resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== + dependencies: + mime-db "1.52.0" + +mime@1.6.0: + version "1.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-1.6.0.tgz#32cd9e5c64553bd58d19a568af452acff04981b1" + integrity sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg== + +mime@^2.4.1: + version "2.6.0" + resolved "https://registry.yarnpkg.com/mime/-/mime-2.6.0.tgz#a2a682a95cd4d0cb1d6257e28f83da7e35800367" + integrity sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg== + +mimic-fn@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + integrity 
sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== + +minimatch@9.0.3: + version "9.0.3" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" + integrity sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg== + dependencies: + brace-expansion "^2.0.1" + +minimatch@^3.0.2, minimatch@^3.0.4, minimatch@^3.0.5, minimatch@^3.1.1, minimatch@^3.1.2: + version "3.1.2" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + +minimist@^1.2.6: + version "1.2.8" + resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" + integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== + +mkdirp@^0.5.1: + version "0.5.6" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6" + integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== + dependencies: + minimist "^1.2.6" + +mkdirp@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-1.0.4.tgz#3eb5ed62622756d79a5f0e2a221dfebad75c2f7e" + integrity sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw== + +ms@2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.0.0.tgz#5608aeadfc00be6c2901df5f9861788de0d597c8" + integrity sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A== + +ms@2.1.2: + version "2.1.2" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" + integrity 
sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== + +ms@2.1.3: + version "2.1.3" + resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" + integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== + +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== + +negotiator@0.6.3: + version "0.6.3" + resolved "https://registry.yarnpkg.com/negotiator/-/negotiator-0.6.3.tgz#58e323a72fedc0d6f9cd4d31fe49f51479590ccd" + integrity sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg== + +neo-async@^2.5.0: + version "2.6.2" + resolved "https://registry.yarnpkg.com/neo-async/-/neo-async-2.6.2.tgz#b4aafb93e3aeb2d8174ca53cf163ab7d7308305f" + integrity sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw== + +nocache@^3.0.1: + version "3.0.4" + resolved "https://registry.yarnpkg.com/nocache/-/nocache-3.0.4.tgz#5b37a56ec6e09fc7d401dceaed2eab40c8bfdf79" + integrity sha512-WDD0bdg9mbq6F4mRxEYcPWwfA1vxd0mrvKOyxI7Xj/atfRHVeutzuWByG//jfm4uPzp0y4Kj051EORCBSQMycw== + +node-abort-controller@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/node-abort-controller/-/node-abort-controller-3.1.1.tgz#a94377e964a9a37ac3976d848cb5c765833b8548" + integrity sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ== + +node-dir@^0.1.17: + version "0.1.17" + resolved "https://registry.yarnpkg.com/node-dir/-/node-dir-0.1.17.tgz#5f5665d93351335caabef8f1c554516cf5f1e4e5" + integrity sha512-tmPX422rYgofd4epzrNoOXiE8XFZYOcCq1vD7MAXCDO+O+zndlA2ztdKKMa+EeuBG5tHETpr4ml4RGgpqDCCAg== + dependencies: + minimatch "^3.0.2" + 
+node-fetch@^2.2.0, node-fetch@^2.6.0: + version "2.7.0" + resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" + integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== + dependencies: + whatwg-url "^5.0.0" + +node-forge@^1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/node-forge/-/node-forge-1.3.1.tgz#be8da2af243b2417d5f646a770663a92b7e9ded3" + integrity sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA== + +node-int64@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/node-int64/-/node-int64-0.4.0.tgz#87a9065cdb355d3182d8f94ce11188b825c68a3b" + integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw== + +node-releases@^2.0.14: + version "2.0.14" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.14.tgz#2ffb053bceb8b2be8495ece1ab6ce600c4461b0b" + integrity sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw== + +node-stream-zip@^1.9.1: + version "1.15.0" + resolved "https://registry.yarnpkg.com/node-stream-zip/-/node-stream-zip-1.15.0.tgz#158adb88ed8004c6c49a396b50a6a5de3bca33ea" + integrity sha512-LN4fydt9TqhZhThkZIVQnF9cwjU3qmUH9h78Mx/K7d3VvfRqqwthLwJEUOEL0QPZ0XQmNN7be5Ggit5+4dq3Bw== + +normalize-path@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== + +npm-run-path@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" + integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== + dependencies: + path-key "^3.0.0" + 
+nullthrows@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/nullthrows/-/nullthrows-1.1.1.tgz#7818258843856ae971eae4208ad7d7eb19a431b1" + integrity sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw== + +ob1@0.80.6: + version "0.80.6" + resolved "https://registry.yarnpkg.com/ob1/-/ob1-0.80.6.tgz#61d7881f458333ed2a73b90cea4aa62f8ca9e045" + integrity sha512-nlLGZPMQ/kbmkdIb5yvVzep1jKUII2x6ehNsHpgy71jpnJMW7V+KsB3AjYI2Ajb7UqMAMNjlssg6FUodrEMYzg== + +object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.yarnpkg.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + +object-inspect@^1.13.1: + version "1.13.1" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2" + integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ== + +object-keys@^1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e" + integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA== + +object.assign@^4.1.4, object.assign@^4.1.5: + version "4.1.5" + resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0" + integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ== + dependencies: + call-bind "^1.0.5" + define-properties "^1.2.1" + has-symbols "^1.0.3" + object-keys "^1.1.1" + +object.entries@^1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/object.entries/-/object.entries-1.1.7.tgz#2b47760e2a2e3a752f39dd874655c61a7f03c131" + integrity 
sha512-jCBs/0plmPsOnrKAfFQXRG2NFjlhZgjjcBLSmTnEhU8U6vVTsVe8ANeQJCHTl3gSsI4J+0emOoCgoKlmQPMgmA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + +object.fromentries@^2.0.7: + version "2.0.7" + resolved "https://registry.yarnpkg.com/object.fromentries/-/object.fromentries-2.0.7.tgz#71e95f441e9a0ea6baf682ecaaf37fa2a8d7e616" + integrity sha512-UPbPHML6sL8PI/mOqPwsH4G6iyXcCGzLin8KvEPenOZN5lpCNBZZQ+V62vdjB1mQHrmqGQt5/OJzemUA+KJmEA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + +object.hasown@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/object.hasown/-/object.hasown-1.1.3.tgz#6a5f2897bb4d3668b8e79364f98ccf971bda55ae" + integrity sha512-fFI4VcYpRHvSLXxP7yiZOMAd331cPfd2p7PFDVbgUsYOfCT3tICVqXWngbjr4m49OvsBwUBQ6O2uQoJvy3RexA== + dependencies: + define-properties "^1.2.0" + es-abstract "^1.22.1" + +object.values@^1.1.6, object.values@^1.1.7: + version "1.1.7" + resolved "https://registry.yarnpkg.com/object.values/-/object.values-1.1.7.tgz#617ed13272e7e1071b43973aa1655d9291b8442a" + integrity sha512-aU6xnDFYT3x17e/f0IiiwlGPTy2jzMySGfUB4fq6z7CV8l85CWHDk5ErhyhpfDHhrOMwGFhSQkhMGHaIotA6Ng== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + +on-finished@2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.4.1.tgz#58c8c44116e54845ad57f14ab10b03533184ac3f" + integrity sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg== + dependencies: + ee-first "1.1.1" + +on-finished@~2.3.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/on-finished/-/on-finished-2.3.0.tgz#20f1336481b083cd75337992a16971aa2d906947" + integrity sha512-ikqdkGAAyf/X/gPhXGvfgAytDZtDbr+bkNUJ0N9h5MI/dmdgCs3l6hoHrcUv41sRKew3jIwrp4qQDXiK99Utww== + dependencies: + ee-first "1.1.1" + +on-headers@~1.0.2: + version "1.0.2" + resolved 
"https://registry.yarnpkg.com/on-headers/-/on-headers-1.0.2.tgz#772b0ae6aaa525c399e489adfad90c403eb3c28f" + integrity sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA== + +once@^1.3.0: + version "1.4.0" + resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== + dependencies: + wrappy "1" + +onetime@^5.1.0, onetime@^5.1.2: + version "5.1.2" + resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" + integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== + dependencies: + mimic-fn "^2.1.0" + +open@^6.2.0: + version "6.4.0" + resolved "https://registry.yarnpkg.com/open/-/open-6.4.0.tgz#5c13e96d0dc894686164f18965ecfe889ecfc8a9" + integrity sha512-IFenVPgF70fSm1keSd2iDBIDIBZkroLeuffXq+wKTzTJlBpesFWojV9lb8mzOfaAzM1sr7HQHuO0vtV0zYekGg== + dependencies: + is-wsl "^1.1.0" + +open@^7.0.3: + version "7.4.2" + resolved "https://registry.yarnpkg.com/open/-/open-7.4.2.tgz#b8147e26dcf3e426316c730089fd71edd29c2321" + integrity sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q== + dependencies: + is-docker "^2.0.0" + is-wsl "^2.1.1" + +optionator@^0.9.3: + version "0.9.3" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.3.tgz#007397d44ed1872fdc6ed31360190f81814e2c64" + integrity sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg== + dependencies: + "@aashutoshrathi/word-wrap" "^1.2.3" + deep-is "^0.1.3" + fast-levenshtein "^2.0.6" + levn "^0.4.1" + prelude-ls "^1.2.1" + type-check "^0.4.0" + +ora@^5.4.1: + version "5.4.1" + resolved "https://registry.yarnpkg.com/ora/-/ora-5.4.1.tgz#1b2678426af4ac4a509008e5e4ac9e9959db9e18" + integrity 
sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ== + dependencies: + bl "^4.1.0" + chalk "^4.1.0" + cli-cursor "^3.1.0" + cli-spinners "^2.5.0" + is-interactive "^1.0.0" + is-unicode-supported "^0.1.0" + log-symbols "^4.1.0" + strip-ansi "^6.0.0" + wcwidth "^1.0.1" + +p-limit@^2.0.0, p-limit@^2.2.0: + version "2.3.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-limit@^3.0.2, p-limit@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" + +p-locate@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-3.0.0.tgz#322d69a05c0264b25997d9f40cd8a891ab0064a4" + integrity sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ== + dependencies: + p-limit "^2.0.0" + +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + +p-locate@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" + integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== + dependencies: + p-limit "^3.0.2" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity 
sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + integrity sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw== + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + +parse-json@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" + integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-even-better-errors "^2.3.0" + lines-and-columns "^1.1.6" + +parseurl@~1.3.3: + version "1.3.3" + resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" + integrity sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ== + +path-exists@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-3.0.0.tgz#ce0ebeaa5f78cb18925ea7d810d7b59b010fd515" + integrity sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ== + +path-exists@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved 
"https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg== + +path-key@^3.0.0, path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-parse@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== + +picomatch@^2.0.4, picomatch@^2.2.3, picomatch@^2.3.1: + version "2.3.1" + resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + +pify@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/pify/-/pify-4.0.1.tgz#4b2cd25c50d598735c50292224fd8c6df41e3231" + integrity sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g== + +pirates@^4.0.4, pirates@^4.0.6: + version "4.0.6" + resolved 
"https://registry.yarnpkg.com/pirates/-/pirates-4.0.6.tgz#3018ae32ecfcff6c29ba2267cbf21166ac1f36b9" + integrity sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg== + +pkg-dir@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-3.0.0.tgz#2749020f239ed990881b1f71210d51eb6523bea3" + integrity sha512-/E57AYkoeQ25qkxMj5PBOVgF8Kiu/h7cYS30Z5+R7WaiCCBfLq58ZI/dSeaEKb9WVJV5n/03QwrN3IeWIFllvw== + dependencies: + find-up "^3.0.0" + +pkg-dir@^4.2.0: + version "4.2.0" + resolved "https://registry.yarnpkg.com/pkg-dir/-/pkg-dir-4.2.0.tgz#f099133df7ede422e81d1d8448270eeb3e4261f3" + integrity sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ== + dependencies: + find-up "^4.0.0" + +possible-typed-array-names@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f" + integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q== + +prelude-ls@^1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/prelude-ls/-/prelude-ls-1.2.1.tgz#debc6489d7a6e6b0e7611888cec880337d316396" + integrity sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g== + +prettier-linter-helpers@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz#d23d41fe1375646de2d0104d3454a3008802cf7b" + integrity sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w== + dependencies: + fast-diff "^1.1.2" + +prettier@2.8.8: + version "2.8.8" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.8.8.tgz#e8c5d7e98a4305ffe3de2e1fc4aca1a71c28b1da" + integrity sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q== + +pretty-format@^26.5.2, 
pretty-format@^26.6.2: + version "26.6.2" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-26.6.2.tgz#e35c2705f14cb7fe2fe94fa078345b444120fc93" + integrity sha512-7AeGuCYNGmycyQbCqd/3PWH4eOoX/OiCa0uphp57NVTeAGdJGaAliecxwBDHYQCIvrW7aDBZCYeNTP/WX69mkg== + dependencies: + "@jest/types" "^26.6.2" + ansi-regex "^5.0.0" + ansi-styles "^4.0.0" + react-is "^17.0.1" + +pretty-format@^29.7.0: + version "29.7.0" + resolved "https://registry.yarnpkg.com/pretty-format/-/pretty-format-29.7.0.tgz#ca42c758310f365bfa71a0bda0a807160b776812" + integrity sha512-Pdlw/oPxN+aXdmM9R00JVC9WVFoCLTKJvDVLgmJ+qAffBMxsV85l/Lu7sNx4zSzPyoL2euImuEwHhOXdEgNFZQ== + dependencies: + "@jest/schemas" "^29.6.3" + ansi-styles "^5.0.0" + react-is "^18.0.0" + +process-nextick-args@~2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/process-nextick-args/-/process-nextick-args-2.0.1.tgz#7820d9b16120cc55ca9ae7792680ae7dba6d7fe2" + integrity sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag== + +promise@^8.3.0: + version "8.3.0" + resolved "https://registry.yarnpkg.com/promise/-/promise-8.3.0.tgz#8cb333d1edeb61ef23869fbb8a4ea0279ab60e0a" + integrity sha512-rZPNPKTOYVNEEKFaq1HqTgOwZD+4/YHS5ukLzQCypkj+OkYx7iv0mA91lJlpPPZ8vMau3IIGj5Qlwrx+8iiSmg== + dependencies: + asap "~2.0.6" + +prompts@^2.0.1, prompts@^2.4.2: + version "2.4.2" + resolved "https://registry.yarnpkg.com/prompts/-/prompts-2.4.2.tgz#7b57e73b3a48029ad10ebd44f74b01722a4cb069" + integrity sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q== + dependencies: + kleur "^3.0.3" + sisteransi "^1.0.5" + +prop-types@^15.8.1: + version "15.8.1" + resolved "https://registry.yarnpkg.com/prop-types/-/prop-types-15.8.1.tgz#67d87bf1a694f48435cf332c24af10214a3140b5" + integrity sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg== + dependencies: + loose-envify "^1.4.0" + object-assign "^4.1.1" + 
react-is "^16.13.1" + +punycode@^2.1.0: + version "2.3.1" + resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" + integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== + +pure-rand@^6.0.0: + version "6.0.4" + resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.0.4.tgz#50b737f6a925468679bff00ad20eade53f37d5c7" + integrity sha512-LA0Y9kxMYv47GIPJy6MI84fqTd2HmYZI83W/kM/SkKfDlajnZYfmXFTxkbY+xSBPkLJxltMa9hIkmdc29eguMA== + +querystring@^0.2.1: + version "0.2.1" + resolved "https://registry.yarnpkg.com/querystring/-/querystring-0.2.1.tgz#40d77615bb09d16902a85c3e38aa8b5ed761c2dd" + integrity sha512-wkvS7mL/JMugcup3/rMitHmd9ecIGd2lhFhK9N3UUQ450h66d1r3Y9nvXzQAW1Lq+wyx61k/1pfKS5KuKiyEbg== + +queue-microtask@^1.2.2: + version "1.2.3" + resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== + +queue@6.0.2: + version "6.0.2" + resolved "https://registry.yarnpkg.com/queue/-/queue-6.0.2.tgz#b91525283e2315c7553d2efa18d83e76432fed65" + integrity sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA== + dependencies: + inherits "~2.0.3" + +range-parser@~1.2.1: + version "1.2.1" + resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" + integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== + +react-devtools-core@^5.0.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/react-devtools-core/-/react-devtools-core-5.2.0.tgz#072ecd2d84d3653817cc11e4b16f60a3c2b705f9" + integrity sha512-vZK+/gvxxsieAoAyYaiRIVFxlajb7KXhgBDV7OsoMzaAE+IqGpoxusBjIgq5ibqA2IloKu0p9n7tE68z1xs18A== + dependencies: + shell-quote "^1.6.1" + ws "^7" + 
+"react-is@^16.12.0 || ^17.0.0 || ^18.0.0", react-is@^18.0.0, react-is@^18.2.0: + version "18.2.0" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.2.0.tgz#199431eeaaa2e09f86427efbb4f1473edb47609b" + integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w== + +react-is@^16.13.1: + version "16.13.1" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-16.13.1.tgz#789729a4dc36de2999dc156dd6c1d9c18cea56a4" + integrity sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ== + +react-is@^17.0.1: + version "17.0.2" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-17.0.2.tgz#e691d4a8e9c789365655539ab372762b0efb54f0" + integrity sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w== + +react-native@0.74.0: + version "0.74.0" + resolved "https://registry.yarnpkg.com/react-native/-/react-native-0.74.0.tgz#9f0901139424152216e1ae1b32773787a0158d41" + integrity sha512-Vpp9WPmkCm4TUH5YDxwQhqktGVon/yLpjbTgjgLqup3GglOgWagYCX3MlmK1iksIcqtyMJHMEWa+UEzJ3G9T8w== + dependencies: + "@jest/create-cache-key-function" "^29.6.3" + "@react-native-community/cli" "13.6.4" + "@react-native-community/cli-platform-android" "13.6.4" + "@react-native-community/cli-platform-ios" "13.6.4" + "@react-native/assets-registry" "0.74.81" + "@react-native/codegen" "0.74.81" + "@react-native/community-cli-plugin" "0.74.81" + "@react-native/gradle-plugin" "0.74.81" + "@react-native/js-polyfills" "0.74.81" + "@react-native/normalize-colors" "0.74.81" + "@react-native/virtualized-lists" "0.74.81" + abort-controller "^3.0.0" + anser "^1.4.9" + ansi-regex "^5.0.0" + base64-js "^1.5.1" + chalk "^4.0.0" + event-target-shim "^5.0.1" + flow-enums-runtime "^0.0.6" + invariant "^2.2.4" + jest-environment-node "^29.6.3" + jsc-android "^250231.0.0" + memoize-one "^5.0.0" + metro-runtime "^0.80.3" + metro-source-map "^0.80.3" + mkdirp "^0.5.1" + nullthrows 
"^1.1.1" + pretty-format "^26.5.2" + promise "^8.3.0" + react-devtools-core "^5.0.0" + react-refresh "^0.14.0" + react-shallow-renderer "^16.15.0" + regenerator-runtime "^0.13.2" + scheduler "0.24.0-canary-efb381bbf-20230505" + stacktrace-parser "^0.1.10" + whatwg-fetch "^3.0.0" + ws "^6.2.2" + yargs "^17.6.2" + +react-refresh@^0.14.0: + version "0.14.0" + resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.14.0.tgz#4e02825378a5f227079554d4284889354e5f553e" + integrity sha512-wViHqhAd8OHeLS/IRMJjTSDHF3U9eWi62F/MledQGPdJGDhodXJ9PBLNGr6WWL7qlH12Mt3TyTpbS+hGXMjCzQ== + +react-shallow-renderer@^16.15.0: + version "16.15.0" + resolved "https://registry.yarnpkg.com/react-shallow-renderer/-/react-shallow-renderer-16.15.0.tgz#48fb2cf9b23d23cde96708fe5273a7d3446f4457" + integrity sha512-oScf2FqQ9LFVQgA73vr86xl2NaOIX73rh+YFqcOp68CWj56tSfgtGKrEbyhCj0rSijyG9M1CYprTh39fBi5hzA== + dependencies: + object-assign "^4.1.1" + react-is "^16.12.0 || ^17.0.0 || ^18.0.0" + +react-test-renderer@18.2.0: + version "18.2.0" + resolved "https://registry.yarnpkg.com/react-test-renderer/-/react-test-renderer-18.2.0.tgz#1dd912bd908ff26da5b9fca4fd1c489b9523d37e" + integrity sha512-JWD+aQ0lh2gvh4NM3bBM42Kx+XybOxCpgYK7F8ugAlpaTSnWsX+39Z4XkOykGZAHrjwwTZT3x3KxswVWxHPUqA== + dependencies: + react-is "^18.2.0" + react-shallow-renderer "^16.15.0" + scheduler "^0.23.0" + +react@18.2.0: + version "18.2.0" + resolved "https://registry.yarnpkg.com/react/-/react-18.2.0.tgz#555bd98592883255fa00de14f1151a917b5d77d5" + integrity sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ== + dependencies: + loose-envify "^1.1.0" + +readable-stream@^3.4.0: + version "3.6.2" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" + integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== + dependencies: + inherits "^2.0.3" + string_decoder 
"^1.1.1" + util-deprecate "^1.0.1" + +readable-stream@~2.3.6: + version "2.3.8" + resolved "https://registry.yarnpkg.com/readable-stream/-/readable-stream-2.3.8.tgz#91125e8042bba1b9887f49345f6277027ce8be9b" + integrity sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA== + dependencies: + core-util-is "~1.0.0" + inherits "~2.0.3" + isarray "~1.0.0" + process-nextick-args "~2.0.0" + safe-buffer "~5.1.1" + string_decoder "~1.1.1" + util-deprecate "~1.0.1" + +readline@^1.3.0: + version "1.3.0" + resolved "https://registry.yarnpkg.com/readline/-/readline-1.3.0.tgz#c580d77ef2cfc8752b132498060dc9793a7ac01c" + integrity sha512-k2d6ACCkiNYz222Fs/iNze30rRJ1iIicW7JuX/7/cozvih6YCkFZH+J6mAFDVgv0dRBaAyr4jDqC95R2y4IADg== + +recast@^0.21.0: + version "0.21.5" + resolved "https://registry.yarnpkg.com/recast/-/recast-0.21.5.tgz#e8cd22bb51bcd6130e54f87955d33a2b2e57b495" + integrity sha512-hjMmLaUXAm1hIuTqOdeYObMslq/q+Xff6QE3Y2P+uoHAg2nmVlLBps2hzh1UJDdMtDTMXOFewK6ky51JQIeECg== + dependencies: + ast-types "0.15.2" + esprima "~4.0.0" + source-map "~0.6.1" + tslib "^2.0.1" + +reflect.getprototypeof@^1.0.4: + version "1.0.5" + resolved "https://registry.yarnpkg.com/reflect.getprototypeof/-/reflect.getprototypeof-1.0.5.tgz#e0bd28b597518f16edaf9c0e292c631eb13e0674" + integrity sha512-62wgfC8dJWrmxv44CA36pLDnP6KKl3Vhxb7PL+8+qrrFMMoJij4vgiMP8zV4O8+CBMXY1mHxI5fITGHXFHVmQQ== + dependencies: + call-bind "^1.0.5" + define-properties "^1.2.1" + es-abstract "^1.22.3" + es-errors "^1.0.0" + get-intrinsic "^1.2.3" + globalthis "^1.0.3" + which-builtin-type "^1.1.3" + +regenerate-unicode-properties@^10.1.0: + version "10.1.1" + resolved "https://registry.yarnpkg.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.1.tgz#6b0e05489d9076b04c436f318d9b067bba459480" + integrity sha512-X007RyZLsCJVVrjgEFVpLUTZwyOZk3oiL75ZcuYjlIWd6rNJtOjkBwQc5AsRrpbKVkxN6sklw/k/9m2jJYOf8Q== + dependencies: + regenerate "^1.4.2" + +regenerate@^1.4.2: + version 
"1.4.2" + resolved "https://registry.yarnpkg.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" + integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== + +regenerator-runtime@^0.13.2: + version "0.13.11" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" + integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== + +regenerator-runtime@^0.14.0: + version "0.14.1" + resolved "https://registry.yarnpkg.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" + integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== + +regenerator-transform@^0.15.2: + version "0.15.2" + resolved "https://registry.yarnpkg.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" + integrity sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg== + dependencies: + "@babel/runtime" "^7.8.4" + +regexp.prototype.flags@^1.5.0, regexp.prototype.flags@^1.5.2: + version "1.5.2" + resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz#138f644a3350f981a858c44f6bb1a61ff59be334" + integrity sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw== + dependencies: + call-bind "^1.0.6" + define-properties "^1.2.1" + es-errors "^1.3.0" + set-function-name "^2.0.1" + +regexpu-core@^5.3.1: + version "5.3.2" + resolved "https://registry.yarnpkg.com/regexpu-core/-/regexpu-core-5.3.2.tgz#11a2b06884f3527aec3e93dbbf4a3b958a95546b" + integrity sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ== + dependencies: + "@babel/regjsgen" "^0.8.0" + regenerate "^1.4.2" + regenerate-unicode-properties "^10.1.0" + 
regjsparser "^0.9.1" + unicode-match-property-ecmascript "^2.0.0" + unicode-match-property-value-ecmascript "^2.1.0" + +regjsparser@^0.9.1: + version "0.9.1" + resolved "https://registry.yarnpkg.com/regjsparser/-/regjsparser-0.9.1.tgz#272d05aa10c7c1f67095b1ff0addae8442fc5709" + integrity sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ== + dependencies: + jsesc "~0.5.0" + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.yarnpkg.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q== + +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== + +resolve-cwd@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-cwd/-/resolve-cwd-3.0.0.tgz#0f0075f1bb2544766cf73ba6a6e2adfebcb13f2d" + integrity sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg== + dependencies: + resolve-from "^5.0.0" + +resolve-from@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-3.0.0.tgz#b22c7af7d9d6881bc8b6e653335eebcb0a188748" + integrity sha512-GnlH6vxLymXJNMBo7XP1fJIzBFbdYt49CuTwmB/6N53t+kMPRMFKz783LlQ4tv28XoQfMWinAJX6WCGf2IlaIw== + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve-from@^5.0.0: + version "5.0.0" + resolved 
"https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" + integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== + +resolve.exports@^2.0.0: + version "2.0.2" + resolved "https://registry.yarnpkg.com/resolve.exports/-/resolve.exports-2.0.2.tgz#f8c934b8e6a13f539e38b7098e2e36134f01e800" + integrity sha512-X2UW6Nw3n/aMgDVy+0rSqgHlv39WZAlZrXCdnbyEiKm17DSqHX4MmQMaST3FbeWR5FTuRcUwYAziZajji0Y7mg== + +resolve@^1.14.2, resolve@^1.20.0: + version "1.22.8" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d" + integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw== + dependencies: + is-core-module "^2.13.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +resolve@^2.0.0-next.5: + version "2.0.0-next.5" + resolved "https://registry.yarnpkg.com/resolve/-/resolve-2.0.0-next.5.tgz#6b0ec3107e671e52b68cd068ef327173b90dc03c" + integrity sha512-U7WjGVG9sH8tvjW5SmGbQuui75FiyjAX72HX15DwBBwF9dNiQZRQAg9nnPhYy+TUnE0+VcrttuvNI8oSxZcocA== + dependencies: + is-core-module "^2.13.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +restore-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" + integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== + dependencies: + onetime "^5.1.0" + signal-exit "^3.0.2" + +reusify@^1.0.4: + version "1.0.4" + resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" + integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== + +rimraf@^3.0.2: + version "3.0.2" + resolved 
"https://registry.yarnpkg.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" + +rimraf@~2.6.2: + version "2.6.3" + resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab" + integrity sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA== + dependencies: + glob "^7.1.3" + +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" + +safe-array-concat@^1.1.0: + version "1.1.2" + resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.2.tgz#81d77ee0c4e8b863635227c721278dd524c20edb" + integrity sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q== + dependencies: + call-bind "^1.0.7" + get-intrinsic "^1.2.4" + has-symbols "^1.0.3" + isarray "^2.0.5" + +safe-buffer@5.1.2, safe-buffer@~5.1.0, safe-buffer@~5.1.1: + version "5.1.2" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.1.2.tgz#991ec69d296e0313747d59bdfd2b745c35f8828d" + integrity sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g== + +safe-buffer@~5.2.0: + version "5.2.1" + resolved "https://registry.yarnpkg.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +safe-regex-test@^1.0.3: + version "1.0.3" + resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.3.tgz#a5b4c0f06e0ab50ea2c395c14d8371232924c377" + integrity 
sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw== + dependencies: + call-bind "^1.0.6" + es-errors "^1.3.0" + is-regex "^1.1.4" + +scheduler@0.24.0-canary-efb381bbf-20230505: + version "0.24.0-canary-efb381bbf-20230505" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.24.0-canary-efb381bbf-20230505.tgz#5dddc60e29f91cd7f8b983d7ce4a99c2202d178f" + integrity sha512-ABvovCDe/k9IluqSh4/ISoq8tIJnW8euVAWYt5j/bg6dRnqwQwiGO1F/V4AyK96NGF/FB04FhOUDuWj8IKfABA== + dependencies: + loose-envify "^1.1.0" + +scheduler@^0.23.0: + version "0.23.0" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.23.0.tgz#ba8041afc3d30eb206a487b6b384002e4e61fdfe" + integrity sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw== + dependencies: + loose-envify "^1.1.0" + +selfsigned@^2.4.1: + version "2.4.1" + resolved "https://registry.yarnpkg.com/selfsigned/-/selfsigned-2.4.1.tgz#560d90565442a3ed35b674034cec4e95dceb4ae0" + integrity sha512-th5B4L2U+eGLq1TVh7zNRGBapioSORUeymIydxgFpwww9d2qyKvtuPU2jJuHvYAwwqi2Y596QBL3eEqcPEYL8Q== + dependencies: + "@types/node-forge" "^1.3.0" + node-forge "^1" + +semver@^5.6.0: + version "5.7.2" + resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8" + integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== + +semver@^6.3.0, semver@^6.3.1: + version "6.3.1" + resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== + +semver@^7.3.7, semver@^7.5.2, semver@^7.5.3, semver@^7.5.4: + version "7.6.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.0.tgz#1a46a4db4bffcccd97b743b5005c8325f23d4e2d" + integrity 
sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg== + dependencies: + lru-cache "^6.0.0" + +send@0.18.0: + version "0.18.0" + resolved "https://registry.yarnpkg.com/send/-/send-0.18.0.tgz#670167cc654b05f5aa4a767f9113bb371bc706be" + integrity sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg== + dependencies: + debug "2.6.9" + depd "2.0.0" + destroy "1.2.0" + encodeurl "~1.0.2" + escape-html "~1.0.3" + etag "~1.8.1" + fresh "0.5.2" + http-errors "2.0.0" + mime "1.6.0" + ms "2.1.3" + on-finished "2.4.1" + range-parser "~1.2.1" + statuses "2.0.1" + +serialize-error@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/serialize-error/-/serialize-error-2.1.0.tgz#50b679d5635cdf84667bdc8e59af4e5b81d5f60a" + integrity sha512-ghgmKt5o4Tly5yEG/UJp8qTd0AN7Xalw4XBtDEKP655B699qMEtra1WlXeE6WIvdEG481JvRxULKsInq/iNysw== + +serve-static@^1.13.1: + version "1.15.0" + resolved "https://registry.yarnpkg.com/serve-static/-/serve-static-1.15.0.tgz#faaef08cffe0a1a62f60cad0c4e513cff0ac9540" + integrity sha512-XGuRDNjXUijsUL0vl6nSD7cwURuzEgglbOaFuZM9g3kwDXOWVTck0jLzjPzGD+TazWbboZYu52/9/XPdUgne9g== + dependencies: + encodeurl "~1.0.2" + escape-html "~1.0.3" + parseurl "~1.3.3" + send "0.18.0" + +set-blocking@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + integrity sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw== + +set-function-length@^1.2.1: + version "1.2.2" + resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" + integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + function-bind "^1.1.2" + get-intrinsic "^1.2.4" + gopd "^1.0.1" + 
has-property-descriptors "^1.0.2" + +set-function-name@^2.0.0, set-function-name@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985" + integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ== + dependencies: + define-data-property "^1.1.4" + es-errors "^1.3.0" + functions-have-names "^1.2.3" + has-property-descriptors "^1.0.2" + +setprototypeof@1.2.0: + version "1.2.0" + resolved "https://registry.yarnpkg.com/setprototypeof/-/setprototypeof-1.2.0.tgz#66c9a24a73f9fc28cbe66b09fed3d33dcaf1b424" + integrity sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw== + +shallow-clone@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/shallow-clone/-/shallow-clone-3.0.1.tgz#8f2981ad92531f55035b01fb230769a40e02efa3" + integrity sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA== + dependencies: + kind-of "^6.0.2" + +shebang-command@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +shell-quote@^1.6.1, shell-quote@^1.7.3: + version "1.8.1" + resolved "https://registry.yarnpkg.com/shell-quote/-/shell-quote-1.8.1.tgz#6dbf4db75515ad5bac63b4f1894c3a154c766680" + integrity sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA== + +side-channel@^1.0.4: + version "1.0.6" + 
resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2" + integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA== + dependencies: + call-bind "^1.0.7" + es-errors "^1.3.0" + get-intrinsic "^1.2.4" + object-inspect "^1.13.1" + +signal-exit@^3.0.2, signal-exit@^3.0.3, signal-exit@^3.0.7: + version "3.0.7" + resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" + integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== + +sisteransi@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/sisteransi/-/sisteransi-1.0.5.tgz#134d681297756437cc05ca01370d3a7a571075ed" + integrity sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg== + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +slice-ansi@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/slice-ansi/-/slice-ansi-2.1.0.tgz#cacd7693461a637a5788d92a7dd4fba068e81636" + integrity sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ== + dependencies: + ansi-styles "^3.2.0" + astral-regex "^1.0.0" + is-fullwidth-code-point "^2.0.0" + +source-map-support@0.5.13: + version "0.5.13" + resolved "https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.13.tgz#31b24a9c2e73c2de85066c0feb7d44767ed52932" + integrity sha512-SHSKFHadjVA5oR4PPqhtAVdcBWwRYVd6g6cAXnIbRiIwc2EhPrTuKUBdSLvlEKyIP3GCf89fltvcZiP9MMFA1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map-support@^0.5.16, source-map-support@~0.5.20: + version "0.5.21" + resolved 
"https://registry.yarnpkg.com/source-map-support/-/source-map-support-0.5.21.tgz#04fe7c7f9e1ed2d662233c28cb2b35b9f63f6e4f" + integrity sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w== + dependencies: + buffer-from "^1.0.0" + source-map "^0.6.0" + +source-map@^0.5.6: + version "0.5.7" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.5.7.tgz#8a039d2d1021d22d1ea14c80d8ea468ba2ef3fcc" + integrity sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ== + +source-map@^0.6.0, source-map@^0.6.1, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +source-map@^0.7.3: + version "0.7.4" + resolved "https://registry.yarnpkg.com/source-map/-/source-map-0.7.4.tgz#a9bbe705c9d8846f4e08ff6765acf0f1b0898656" + integrity sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA== + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== + +stack-utils@^2.0.3: + version "2.0.6" + resolved "https://registry.yarnpkg.com/stack-utils/-/stack-utils-2.0.6.tgz#aaf0748169c02fc33c8232abccf933f54a1cc34f" + integrity sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ== + dependencies: + escape-string-regexp "^2.0.0" + +stackframe@^1.3.4: + version "1.3.4" + resolved "https://registry.yarnpkg.com/stackframe/-/stackframe-1.3.4.tgz#b881a004c8c149a5e8efef37d51b16e412943310" + integrity sha512-oeVtt7eWQS+Na6F//S4kJ2K2VbRlS9D43mAlMyVpVWovy9o+jfgH8O9agzANzaiLjclA0oYzUXEM4PurhSUChw== + 
+stacktrace-parser@^0.1.10: + version "0.1.10" + resolved "https://registry.yarnpkg.com/stacktrace-parser/-/stacktrace-parser-0.1.10.tgz#29fb0cae4e0d0b85155879402857a1639eb6051a" + integrity sha512-KJP1OCML99+8fhOHxwwzyWrlUuVX5GQ0ZpJTd1DFXhdkrvg1szxfHhawXUZ3g9TkXORQd4/WG68jMlQZ2p8wlg== + dependencies: + type-fest "^0.7.1" + +statuses@2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" + integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== + +statuses@~1.5.0: + version "1.5.0" + resolved "https://registry.yarnpkg.com/statuses/-/statuses-1.5.0.tgz#161c7dac177659fd9811f43771fa99381478628c" + integrity sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA== + +string-length@^4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/string-length/-/string-length-4.0.2.tgz#a8a8dc7bd5c1a82b9b3c8b87e125f66871b6e57a" + integrity sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ== + dependencies: + char-regex "^1.0.2" + strip-ansi "^6.0.0" + +string-natural-compare@^3.0.1: + version "3.0.1" + resolved "https://registry.yarnpkg.com/string-natural-compare/-/string-natural-compare-3.0.1.tgz#7a42d58474454963759e8e8b7ae63d71c1e7fdf4" + integrity sha512-n3sPwynL1nwKi3WJ6AIsClwBMa0zTi54fn2oLU6ndfTSIO05xaznjSf15PcBZU6FNWbmN5Q6cxT4V5hGvB4taw== + +string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string.prototype.matchall@^4.0.10: + version "4.0.10" + resolved 
"https://registry.yarnpkg.com/string.prototype.matchall/-/string.prototype.matchall-4.0.10.tgz#a1553eb532221d4180c51581d6072cd65d1ee100" + integrity sha512-rGXbGmOEosIQi6Qva94HUjgPs9vKW+dkG7Y8Q5O2OYkWL6wFaTRZO8zM4mhP94uX55wgyrXzfS2aGtGzUL7EJQ== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + get-intrinsic "^1.2.1" + has-symbols "^1.0.3" + internal-slot "^1.0.5" + regexp.prototype.flags "^1.5.0" + set-function-name "^2.0.0" + side-channel "^1.0.4" + +string.prototype.trim@^1.2.8: + version "1.2.8" + resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.8.tgz#f9ac6f8af4bd55ddfa8895e6aea92a96395393bd" + integrity sha512-lfjY4HcixfQXOfaqCvcBuOIapyaroTXhbkfJN3gcB1OtyupngWK4sEET9Knd0cXd28kTUqu/kHoV4HKSJdnjiQ== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + +string.prototype.trimend@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.7.tgz#1bb3afc5008661d73e2dc015cd4853732d6c471e" + integrity sha512-Ni79DqeB72ZFq1uH/L6zJ+DKZTkOtPIHovb3YZHQViE+HDouuU4mBrLOLDn5Dde3RF8qw5qVETEjhu9locMLvA== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + +string.prototype.trimstart@^1.0.7: + version "1.0.7" + resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.7.tgz#d4cdb44b83a4737ffbac2d406e405d43d0184298" + integrity sha512-NGhtDFu3jCEm7B4Fy0DpLewdJQOZcQ0rGbwQ/+stjnrp2i+rlKeCvos9hOIeCmqwratM47OBxY7uFZzjxHXmrg== + dependencies: + call-bind "^1.0.2" + define-properties "^1.2.0" + es-abstract "^1.22.1" + +string_decoder@^1.1.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" 
+ +string_decoder@~1.1.1: + version "1.1.1" + resolved "https://registry.yarnpkg.com/string_decoder/-/string_decoder-1.1.1.tgz#9cf1611ba62685d7030ae9e4ba34149c3af03fc8" + integrity sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg== + dependencies: + safe-buffer "~5.1.0" + +strip-ansi@^5.0.0, strip-ansi@^5.2.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" + integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== + dependencies: + ansi-regex "^4.1.0" + +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-bom@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/strip-bom/-/strip-bom-4.0.0.tgz#9c3505c1db45bcedca3d9cf7a16f5c5aa3901878" + integrity sha512-3xurFv5tEgii33Zi8Jtp55wEIILR9eh34FAW00PZf+JnSsTmV/ioewSgQl97JHvgjoRGwPShsWm+IdrxB35d0w== + +strip-final-newline@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" + integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== + +strip-json-comments@^3.1.1: + version "3.1.1" + resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" + integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== + +strnum@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/strnum/-/strnum-1.0.5.tgz#5c4e829fe15ad4ff0d20c3db5ac97b73c9b072db" + integrity 
sha512-J8bbNyKKXl5qYcR36TIO8W3mVGVHrmmxsd5PAItGkmyzwJvybiw2IVq5nqd0i4LSNSkB/sx9VHllbfFdr9k1JA== + +sudo-prompt@^9.0.0: + version "9.2.1" + resolved "https://registry.yarnpkg.com/sudo-prompt/-/sudo-prompt-9.2.1.tgz#77efb84309c9ca489527a4e749f287e6bdd52afd" + integrity sha512-Mu7R0g4ig9TUuGSxJavny5Rv0egCEtpZRNMrZaYS1vxkiIxGiGUwoezU3LazIQ+KE04hTrTfNPgxU5gzi7F5Pw== + +supports-color@^5.3.0: + version "5.5.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.1.0: + version "7.2.0" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== + dependencies: + has-flag "^4.0.0" + +supports-color@^8.0.0: + version "8.1.1" + resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" + integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== + dependencies: + has-flag "^4.0.0" + +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + +temp-dir@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/temp-dir/-/temp-dir-2.0.0.tgz#bde92b05bdfeb1516e804c9c00ad45177f31321e" + integrity sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg== + +temp@^0.8.4: + version "0.8.4" + resolved 
"https://registry.yarnpkg.com/temp/-/temp-0.8.4.tgz#8c97a33a4770072e0a05f919396c7665a7dd59f2" + integrity sha512-s0ZZzd0BzYv5tLSptZooSjK8oj6C+c19p7Vqta9+6NPOf7r+fxq0cJe6/oN4LTC79sy5NY8ucOJNgwsKCSbfqg== + dependencies: + rimraf "~2.6.2" + +terser@^5.15.0: + version "5.29.2" + resolved "https://registry.yarnpkg.com/terser/-/terser-5.29.2.tgz#c17d573ce1da1b30f21a877bffd5655dd86fdb35" + integrity sha512-ZiGkhUBIM+7LwkNjXYJq8svgkd+QK3UUr0wJqY4MieaezBSAIPgbSPZyIx0idM6XWK5CMzSWa8MJIzmRcB8Caw== + dependencies: + "@jridgewell/source-map" "^0.3.3" + acorn "^8.8.2" + commander "^2.20.0" + source-map-support "~0.5.20" + +test-exclude@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e" + integrity sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w== + dependencies: + "@istanbuljs/schema" "^0.1.2" + glob "^7.1.4" + minimatch "^3.0.4" + +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.yarnpkg.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== + +throat@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/throat/-/throat-5.0.0.tgz#c5199235803aad18754a667d659b5e72ce16764b" + integrity sha512-fcwX4mndzpLQKBS1DVYhGAcYaYt7vsHNIvQV+WXMvnow5cgjPphq5CaayLaGsjRdSCKZFNGt7/GYAuXaNOiYCA== + +through2@^2.0.1: + version "2.0.5" + resolved "https://registry.yarnpkg.com/through2/-/through2-2.0.5.tgz#01c1e39eb31d07cb7d03a96a70823260b23132cd" + integrity sha512-/mrRod8xqpA+IHSLyGCQ2s8SPHiCDEeQJSep1jqLYeEUClOFG2Qsh+4FU6G9VeqpZnGW/Su8LQGc4YKni5rYSQ== + dependencies: + readable-stream "~2.3.6" + xtend "~4.0.1" + +tmpl@1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/tmpl/-/tmpl-1.0.5.tgz#8683e0b902bb9c20c4f726e3c0b69f36518c07cc" + integrity 
sha512-3f0uOEAQwIqGuWW2MVzYg8fV/QNnc/IpuJNG837rLuczAaLVHslWHZQj4IGiEl5Hs3kkbhwL9Ab7Hrsmuj+Smw== + +to-fast-properties@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/to-fast-properties/-/to-fast-properties-2.0.0.tgz#dc5e698cbd079265bc73e0377681a4e4e83f616e" + integrity sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog== + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +toidentifier@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/toidentifier/-/toidentifier-1.0.1.tgz#3be34321a88a820ed1bd80dfaa33e479fbb8dd35" + integrity sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA== + +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== + +ts-api-utils@^1.0.1: + version "1.3.0" + resolved "https://registry.yarnpkg.com/ts-api-utils/-/ts-api-utils-1.3.0.tgz#4b490e27129f1e8e686b45cc4ab63714dc60eea1" + integrity sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ== + +tslib@^1.8.1: + version "1.14.1" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" + integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== + +tslib@^2.0.1: + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== + 
+tsutils@^3.21.0: + version "3.21.0" + resolved "https://registry.yarnpkg.com/tsutils/-/tsutils-3.21.0.tgz#b48717d394cea6c1e096983eed58e9d61715b623" + integrity sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA== + dependencies: + tslib "^1.8.1" + +type-check@^0.4.0, type-check@~0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/type-check/-/type-check-0.4.0.tgz#07b8203bfa7056c0657050e3ccd2c37730bab8f1" + integrity sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew== + dependencies: + prelude-ls "^1.2.1" + +type-detect@4.0.8: + version "4.0.8" + resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" + integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== + +type-fest@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.20.2.tgz#1bf207f4b28f91583666cb5fbd327887301cd5f4" + integrity sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ== + +type-fest@^0.21.3: + version "0.21.3" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" + integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== + +type-fest@^0.7.1: + version "0.7.1" + resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.7.1.tgz#8dda65feaf03ed78f0a3f9678f1869147f7c5c48" + integrity sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg== + +typed-array-buffer@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz#1867c5d83b20fcb5ccf32649e5e2fc7424474ff3" + integrity sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ== + dependencies: + call-bind "^1.0.7" + 
es-errors "^1.3.0" + is-typed-array "^1.1.13" + +typed-array-byte-length@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz#d92972d3cff99a3fa2e765a28fcdc0f1d89dec67" + integrity sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw== + dependencies: + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + +typed-array-byte-offset@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz#f9ec1acb9259f395093e4567eb3c28a580d02063" + integrity sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA== + dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + +typed-array-length@^1.0.5: + version "1.0.5" + resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.5.tgz#57d44da160296d8663fd63180a1802ebf25905d5" + integrity sha512-yMi0PlwuznKHxKmcpoOdeLwxBoVPkqZxd7q2FgMkmD3bNwvF5VW0+UlUQ1k1vmktTu4Yu13Q0RIxEP8+B+wloA== + dependencies: + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-proto "^1.0.3" + is-typed-array "^1.1.13" + possible-typed-array-names "^1.0.0" + +typescript@5.0.4: + version "5.0.4" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.0.4.tgz#b217fd20119bd61a94d4011274e0ab369058da3b" + integrity sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw== + +unbox-primitive@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e" + integrity sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw== + dependencies: + call-bind "^1.0.2" + has-bigints "^1.0.2" + has-symbols 
"^1.0.3" + which-boxed-primitive "^1.0.2" + +undici-types@~5.26.4: + version "5.26.5" + resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" + integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== + +unicode-canonical-property-names-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz#301acdc525631670d39f6146e0e77ff6bbdebddc" + integrity sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ== + +unicode-match-property-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz#54fd16e0ecb167cf04cf1f756bdcc92eba7976c3" + integrity sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q== + dependencies: + unicode-canonical-property-names-ecmascript "^2.0.0" + unicode-property-aliases-ecmascript "^2.0.0" + +unicode-match-property-value-ecmascript@^2.1.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz#cb5fffdcd16a05124f5a4b0bf7c3770208acbbe0" + integrity sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA== + +unicode-property-aliases-ecmascript@^2.0.0: + version "2.1.0" + resolved "https://registry.yarnpkg.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz#43d41e3be698bd493ef911077c9b131f827e8ccd" + integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w== + +universalify@^0.1.0: + version "0.1.2" + resolved "https://registry.yarnpkg.com/universalify/-/universalify-0.1.2.tgz#b646f69be3942dabcecc9d6639c80dc105efaa66" + integrity 
sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg== + +unpipe@~1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/unpipe/-/unpipe-1.0.0.tgz#b2bf4ee8514aae6165b4817829d21b2ef49904ec" + integrity sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ== + +update-browserslist-db@^1.0.13: + version "1.0.13" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz#3c5e4f5c083661bd38ef64b6328c26ed6c8248c4" + integrity sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg== + dependencies: + escalade "^3.1.1" + picocolors "^1.0.0" + +uri-js@^4.2.2: + version "4.4.1" + resolved "https://registry.yarnpkg.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +util-deprecate@^1.0.1, util-deprecate@~1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== + +utils-merge@1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/utils-merge/-/utils-merge-1.0.1.tgz#9f95710f50a267947b2ccc124741c1028427e713" + integrity sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA== + +v8-to-istanbul@^9.0.1: + version "9.2.0" + resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz#2ed7644a245cddd83d4e087b9b33b3e62dfd10ad" + integrity sha512-/EH/sDgxU2eGxajKdwLCDmQ4FWq+kpi3uCmBGpw1xJtnAxEjlD8j8PEiGWpCIMIs3ciNAgH0d3TTJiUkYzyZjA== + dependencies: + "@jridgewell/trace-mapping" "^0.3.12" + "@types/istanbul-lib-coverage" "^2.0.1" + convert-source-map "^2.0.0" + +vary@~1.1.2: + version 
"1.1.2" + resolved "https://registry.yarnpkg.com/vary/-/vary-1.1.2.tgz#2299f02c6ded30d4a5961b0b9f74524a18f634fc" + integrity sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg== + +vlq@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/vlq/-/vlq-1.0.1.tgz#c003f6e7c0b4c1edd623fd6ee50bbc0d6a1de468" + integrity sha512-gQpnTgkubC6hQgdIcRdYGDSDc+SaujOdyesZQMv6JlfQee/9Mp0Qhnys6WxDWvQnL5WZdT7o2Ul187aSt0Rq+w== + +walker@^1.0.7, walker@^1.0.8: + version "1.0.8" + resolved "https://registry.yarnpkg.com/walker/-/walker-1.0.8.tgz#bd498db477afe573dc04185f011d3ab8a8d7653f" + integrity sha512-ts/8E8l5b7kY0vlWLewOkDXMmPdLcVV4GmOQLyxuSswIJsweeFZtAsMF7k1Nszz+TYBQrlYRmzOnr398y1JemQ== + dependencies: + makeerror "1.0.12" + +wcwidth@^1.0.1: + version "1.0.1" + resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" + integrity sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg== + dependencies: + defaults "^1.0.3" + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== + +whatwg-fetch@^3.0.0: + version "3.6.20" + resolved "https://registry.yarnpkg.com/whatwg-fetch/-/whatwg-fetch-3.6.20.tgz#580ce6d791facec91d37c72890995a0b48d31c70" + integrity sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg== + +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + +which-boxed-primitive@^1.0.2: + version 
"1.0.2" + resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6" + integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg== + dependencies: + is-bigint "^1.0.1" + is-boolean-object "^1.1.0" + is-number-object "^1.0.4" + is-string "^1.0.5" + is-symbol "^1.0.3" + +which-builtin-type@^1.1.3: + version "1.1.3" + resolved "https://registry.yarnpkg.com/which-builtin-type/-/which-builtin-type-1.1.3.tgz#b1b8443707cc58b6e9bf98d32110ff0c2cbd029b" + integrity sha512-YmjsSMDBYsM1CaFiayOVT06+KJeXf0o5M/CAd4o1lTadFAtacTUM49zoYxr/oroopFDfhvN6iEcBxUyc3gvKmw== + dependencies: + function.prototype.name "^1.1.5" + has-tostringtag "^1.0.0" + is-async-function "^2.0.0" + is-date-object "^1.0.5" + is-finalizationregistry "^1.0.2" + is-generator-function "^1.0.10" + is-regex "^1.1.4" + is-weakref "^1.0.2" + isarray "^2.0.5" + which-boxed-primitive "^1.0.2" + which-collection "^1.0.1" + which-typed-array "^1.1.9" + +which-collection@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/which-collection/-/which-collection-1.0.2.tgz#627ef76243920a107e7ce8e96191debe4b16c2a0" + integrity sha512-K4jVyjnBdgvc86Y6BkaLZEN933SwYOuBFkdmBu9ZfkcAbdVbpITnDmjvZ/aQjRXQrv5EPkTnD1s39GiiqbngCw== + dependencies: + is-map "^2.0.3" + is-set "^2.0.3" + is-weakmap "^2.0.2" + is-weakset "^2.0.3" + +which-module@^2.0.0: + version "2.0.1" + resolved "https://registry.yarnpkg.com/which-module/-/which-module-2.0.1.tgz#776b1fe35d90aebe99e8ac15eb24093389a4a409" + integrity sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ== + +which-typed-array@^1.1.14, which-typed-array@^1.1.9: + version "1.1.15" + resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d" + integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA== + 
dependencies: + available-typed-arrays "^1.0.7" + call-bind "^1.0.7" + for-each "^0.3.3" + gopd "^1.0.1" + has-tostringtag "^1.0.2" + +which@^2.0.1: + version "2.0.2" + resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrappy@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== + +write-file-atomic@^2.3.0: + version "2.4.3" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-2.4.3.tgz#1fd2e9ae1df3e75b8d8c367443c692d4ca81f481" + integrity sha512-GaETH5wwsX+GcnzhPgKcKjJ6M2Cq3/iZp1WyY/X1CSqrW+jVNM9Y7D8EC2sM4ZG/V8wZlSniJnCKWPmBYAucRQ== + dependencies: + graceful-fs "^4.1.11" + imurmurhash "^0.1.4" + signal-exit "^3.0.2" + +write-file-atomic@^4.0.2: + version "4.0.2" + resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-4.0.2.tgz#a9df01ae5b77858a027fd2e80768ee433555fcfd" + integrity 
sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg== + dependencies: + imurmurhash "^0.1.4" + signal-exit "^3.0.7" + +ws@^6.2.2: + version "6.2.2" + resolved "https://registry.yarnpkg.com/ws/-/ws-6.2.2.tgz#dd5cdbd57a9979916097652d78f1cc5faea0c32e" + integrity sha512-zmhltoSR8u1cnDsD43TX59mzoMZsLKqUweyYBAIvTngR3shc0W6aOZylZmq/7hqyVxPdi+5Ud2QInblgyE72fw== + dependencies: + async-limiter "~1.0.0" + +ws@^7, ws@^7.5.1: + version "7.5.9" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591" + integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== + +xtend@~4.0.1: + version "4.0.2" + resolved "https://registry.yarnpkg.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" + integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== + +y18n@^4.0.0: + version "4.0.3" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf" + integrity sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== + +y18n@^5.0.5: + version "5.0.8" + resolved "https://registry.yarnpkg.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== + +yallist@^3.0.2: + version "3.1.1" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" + integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== + +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + +yaml@^2.2.1: + version "2.4.1" + resolved 
"https://registry.yarnpkg.com/yaml/-/yaml-2.4.1.tgz#2e57e0b5e995292c25c75d2658f0664765210eed" + integrity sha512-pIXzoImaqmfOrL7teGUBt/T7ZDnyeGBWyXQBvOVhLkWLN37GXv8NMLK406UY6dS51JfcQHsmcW5cJ441bHg6Lg== + +yargs-parser@^18.1.2: + version "18.1.3" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0" + integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^21.1.1: + version "21.1.1" + resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + +yargs@^15.1.0: + version "15.4.1" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8" + integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A== + dependencies: + cliui "^6.0.0" + decamelize "^1.2.0" + find-up "^4.1.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^4.2.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^18.1.2" + +yargs@^17.3.1, yargs@^17.6.2: + version "17.7.2" + resolved "https://registry.yarnpkg.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== + dependencies: + cliui "^8.0.1" + escalade "^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.3" + y18n "^5.0.5" + yargs-parser "^21.1.1" + +yocto-queue@^0.1.0: + version "0.1.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + integrity 
sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== diff --git a/third-party/nwaku/examples/nim.cfg b/third-party/nwaku/examples/nim.cfg new file mode 100644 index 0000000..2f8d454 --- /dev/null +++ b/third-party/nwaku/examples/nim.cfg @@ -0,0 +1,5 @@ +-d:chronicles_line_numbers +-d:chronicles_log_level="DEBUG" +-d:chronicles_runtime_filtering=on +-d:discv5_protocol_id="d5waku" +path = "../" diff --git a/third-party/nwaku/examples/nodejs/binding.gyp b/third-party/nwaku/examples/nodejs/binding.gyp new file mode 100644 index 0000000..38a1656 --- /dev/null +++ b/third-party/nwaku/examples/nodejs/binding.gyp @@ -0,0 +1,9 @@ +{ + "targets": [ + { + "target_name": "waku", + "sources": [ "waku_addon.c", "../cbindings/base64.c" ], + "libraries": [ "-lwaku", "-L../../../build/" ] + } + ] +} diff --git a/third-party/nwaku/examples/nodejs/waku.js b/third-party/nwaku/examples/nodejs/waku.js new file mode 100644 index 0000000..258e348 --- /dev/null +++ b/third-party/nwaku/examples/nodejs/waku.js @@ -0,0 +1,78 @@ + +var express = require('express'); +var app = express(); + +function create_random_string(length) { + let result = ''; + const characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; + const charactersLength = characters.length; + let counter = 0; + while (counter < length) { + result += characters.charAt(Math.floor(Math.random() * charactersLength)); + counter += 1; + } + return result; +} + +var wakuMod = require('bindings')('waku'); + +var cfg = `{ + "host": "0.0.0.0", + "port": 60001, + "key": "364d111d729a6eb6d3e6113e163f017b5ef03a6f94c9b5b7bb1bb36fa5cb07a9", + "relay": true + "logLevel": "DEBUG" +}` + +function event_handler(event) { + console.log("evento NodeJs: " + event) +} + +wakuMod.wakuNew(cfg) + +wakuMod.wakuVersion(function(msg){ console.log("Waku Version: " + msg) }) + +// Example on how to retrieve a value from the waku library +var defaultPubsubTopic = "" 
+wakuMod.wakuDefaultPubsubTopic(function(msg){ defaultPubsubTopic = msg }) + +console.log("Default pubsub topic: " + defaultPubsubTopic) + +console.log("Setting callback event callback function") +wakuMod.wakuSetEventCallback(event_handler) + +wakuMod.wakuStart() + +wakuMod.wakuConnect("/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN", + 10000, + function onErr(msg) { + console.log("Error connecting node: " + msg) + }) + +wakuMod.wakuRelaySubscribe(defaultPubsubTopic, + function onErr(msg) { + console.log("Error subscribing: " + msg) + }) + +app.post('/publish', + function (req, res) { + // First read existing users. + console.log("Publish event received") + + wakuMod.wakuRelayPublish(defaultPubsubTopic, + "content_topic_name", + create_random_string(10), + 10000, + function onError(msg) { + console.log("Error: " + msg) + process.exit(-1) + }); + + res.end( JSON.stringify("OK publish")) + }) + +var server = app.listen(8081, function () { + var host = server.address().address + var port = server.address().port + console.log("Example waku listening at http://%s:%s", host, port) +}) diff --git a/third-party/nwaku/examples/nodejs/waku_addon.c b/third-party/nwaku/examples/nodejs/waku_addon.c new file mode 100644 index 0000000..643c90d --- /dev/null +++ b/third-party/nwaku/examples/nodejs/waku_addon.c @@ -0,0 +1,586 @@ +#include +#include +#include +#include +#include + +#include "../cbindings/base64.h" +#include "../../library/libwaku.h" + +// Reference to the NodeJs function to be called when a waku event occurs. +// static napi_ref ref_event_callback = NULL; +static napi_ref ref_version_callback = NULL; +static napi_ref ref_def_pubsub_topic_callback = NULL; +static napi_ref ref_on_error_callback = NULL; +static napi_threadsafe_function thsafe_fn = NULL; + +// As a convenience, wrap N-API calls such that they cause Node.js to abort +// when they are unsuccessful. 
+#define NAPI_CALL(call) \ +do { \ + napi_status status = call; \ + if (status != napi_ok) { \ + napi_fatal_error(#call, NAPI_AUTO_LENGTH, "failed", NAPI_AUTO_LENGTH); \ + } \ +} while (0) + +#define WAKU_CALL(call) \ +do { \ + int ret = call; \ + if (ret != 0) { \ + char msg[128]; \ + snprintf(msg, 128, "WAKU_CALL failed with code %d", ret); \ + napi_fatal_error(#call, NAPI_AUTO_LENGTH, "failed", NAPI_AUTO_LENGTH); \ + } \ +} while (0) + +// libwaku Context +void* ctx; + +// For the case of C language we don't need to store a particular userData +void* userData = NULL; + +static napi_env my_env; + +// This function is responsible for converting data coming in from the worker +// thread to napi_value items that can be passed into JavaScript, and for +// calling the JavaScript function. +static void CallJs(napi_env env, napi_value js_cb, void* context, void* data) { + + // This parameter is not used. + (void) context; + // napi_status status; + + // Retrieve the message item created by the worker thread. + char* msg = (char*) data; + + // env and js_cb may both be NULL if Node.js is in its cleanup phase, and + // items are left over from earlier thread-safe calls from the worker thread. + // When env is NULL, we simply skip over the call into Javascript and free the + // items. + if (env != NULL) { + napi_value undefined; + + // Convert the integer to a napi_value. + napi_value napi_msg; + NAPI_CALL(napi_create_string_utf8(my_env, + msg, + NAPI_AUTO_LENGTH, + &napi_msg)); + + // Retrieve the JavaScript `undefined` value so we can use it as the `this` + // value of the JavaScript function call. + NAPI_CALL(napi_get_undefined(env, &undefined)); + + // Call the JavaScript function and pass it the message generated in the + // working thread. + NAPI_CALL(napi_call_function(env, + undefined, + js_cb, + 1, + &napi_msg, + NULL)); + } + + // Free the item created by the worker thread. 
+ free(data); +} + +void handle_waku_version(int callerRet, const char* msg, size_t len) { + if (ref_version_callback == NULL) { + napi_throw_type_error(my_env, NULL, "ERROR in event_handler. ref_version_callback == NULL"); + } + + napi_value callback; + NAPI_CALL(napi_get_reference_value(my_env, ref_version_callback, &callback)); + + size_t argc = 2; + napi_value napi_msg; + NAPI_CALL(napi_create_string_utf8(my_env, + msg, + NAPI_AUTO_LENGTH, + &napi_msg)); + napi_value napi_len; + NAPI_CALL(napi_create_int32(my_env, + len, + &napi_len)); + + napi_value global; + NAPI_CALL(napi_get_global(my_env, &global)); + NAPI_CALL(napi_call_function(my_env, global, callback, argc, &napi_msg, NULL)); +} + +// This function is directly passed as a callback to the libwaku and it +// calls a NodeJs function if it has been set. +void event_handler(int callerRet, const char* msg, size_t len) { + if (thsafe_fn == NULL) { + // if (ref_event_callback == NULL) { + napi_throw_type_error(my_env, NULL, "ERROR in event_handler. ref_event_callback == NULL"); + } + + char* allocated_msg = malloc(len + 1); + strcpy(allocated_msg, msg); + + NAPI_CALL(napi_call_threadsafe_function(thsafe_fn, allocated_msg, napi_tsfn_nonblocking)); +} + +void handle_error(int callerRet, const char* msg, size_t len) { + if (ref_on_error_callback == NULL) { + napi_throw_type_error(my_env, NULL, "ERROR in event_handler. 
ref_on_error_callback == NULL"); + } + + napi_value callback; + NAPI_CALL(napi_get_reference_value(my_env, + ref_on_error_callback, + &callback)); + size_t argc = 2; + napi_value napi_msg; + NAPI_CALL(napi_create_string_utf8(my_env, + msg, + NAPI_AUTO_LENGTH, + &napi_msg)); + napi_value global; + NAPI_CALL(napi_get_global(my_env, &global)); + NAPI_CALL(napi_call_function(my_env, global, callback, argc, &napi_msg, NULL)); +} + +char* contentTopic = NULL; +void handle_content_topic(int callerRet, const char* msg, size_t len) { + if (contentTopic != NULL) { + free(contentTopic); + } + + contentTopic = malloc(len * sizeof(char) + 1); + strcpy(contentTopic, msg); +} + +void handle_default_pubsub_topic(int callerRet, const char* msg, size_t len) { + if (ref_def_pubsub_topic_callback == NULL) { + napi_throw_type_error(my_env, NULL, + "ERROR in event_handler. ref_def_pubsub_topic_callback == NULL"); + } + + napi_value callback; + NAPI_CALL(napi_get_reference_value(my_env, + ref_def_pubsub_topic_callback, + &callback)); + size_t argc = 2; + napi_value napi_msg; + NAPI_CALL(napi_create_string_utf8(my_env, + msg, + NAPI_AUTO_LENGTH, + &napi_msg)); + napi_value napi_len; + NAPI_CALL(napi_create_int32(my_env, + len, + &napi_len)); + + napi_value global; + NAPI_CALL(napi_get_global(my_env, &global)); + NAPI_CALL(napi_call_function(my_env, global, callback, argc, &napi_msg, NULL)); +} + +// The next should be called always, at the beginning +static napi_value WakuNew(napi_env env, napi_callback_info info) { + + size_t argc = 1; + napi_value args[1]; + NAPI_CALL(napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + if (argc < 1) { + napi_throw_type_error(env, NULL, "Wrong number of arguments"); + return NULL; + } + + size_t str_size; + size_t str_size_read; + napi_get_value_string_utf8(env, args[0], NULL, 0, &str_size); + char* jsonConfig; + jsonConfig = malloc(str_size + 1); + str_size = str_size + 1; + napi_get_value_string_utf8(env, args[0], jsonConfig, str_size, 
&str_size_read); + + ctx = waku_new(jsonConfig, event_handler, userData); + + free(jsonConfig); + + return NULL; +} + +static napi_value WakuVersion(napi_env env, napi_callback_info info) { + size_t argc = 1; + napi_value args[1]; + NAPI_CALL(napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + if (argc < 1) { + napi_throw_type_error(env, NULL, "Wrong number of arguments"); + return NULL; + } + + napi_value cb = args[0]; + + napi_valuetype valueType; + NAPI_CALL(napi_typeof(env, cb, &valueType)); + + if (valueType != napi_function) { + napi_throw_type_error(env, NULL, "The argument should be a napi_function"); + return NULL; + } + + my_env = env; + if(ref_version_callback != NULL) { + NAPI_CALL(napi_delete_reference(env, ref_version_callback)); + } + + NAPI_CALL(napi_create_reference(env, cb, 1, &ref_version_callback)); + + WAKU_CALL( waku_version(ctx, handle_waku_version, userData) ); + + return NULL; +} + +static napi_value WakuSetEventCallback(napi_env env, napi_callback_info info) { + + size_t argc = 1; + napi_value args[1]; + NAPI_CALL(napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + if (argc < 1) { + napi_throw_type_error(env, NULL, "Wrong number of arguments"); + return NULL; + } + + napi_value cb = args[0]; + + napi_valuetype valueType; + NAPI_CALL(napi_typeof(env, cb, &valueType)); + + if (valueType != napi_function) { + napi_throw_type_error(env, NULL, "The argument should be a napi_function"); + return NULL; + } + + my_env = env; + + napi_value work_name; + NAPI_CALL(napi_create_string_utf8(env, + "worker_name", + NAPI_AUTO_LENGTH, + &work_name)); + + NAPI_CALL( + napi_create_threadsafe_function(env, + cb, + NULL, + work_name, + 0, + 1, + NULL, + NULL, + NULL, + CallJs, // the C/C++ callback function + // out: the asynchronous thread-safe JavaScript function + &thsafe_fn)); + + // Inside 'event_handler', the event will be dispatched to the NodeJs + // if there is a proper napi_function (ref_event_callback) being set. 
+ waku_set_event_callback(event_handler, userData); + + return NULL; +} + +static napi_value WakuStart(napi_env env, napi_callback_info info) { + waku_start(ctx, event_handler, userData); + return NULL; +} + +static napi_value WakuConnect(napi_env env, napi_callback_info info) { + size_t argc = 3; + napi_value args[3]; + NAPI_CALL(napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + if (argc < 3) { + napi_throw_type_error(env, NULL, "Wrong number of arguments"); + return NULL; + } + + // Getting the peers param + napi_value napiPeers = args[0]; + napi_valuetype valueType; + NAPI_CALL(napi_typeof(env, napiPeers, &valueType)); + + if (valueType != napi_string) { + napi_throw_type_error(env, NULL, "The peers attribute should be a string"); + return NULL; + } + + size_t str_size; + size_t str_size_read; + napi_get_value_string_utf8(env, napiPeers, NULL, 0, &str_size); + char* peers; + peers = malloc(str_size + 1); + str_size = str_size + 1; + napi_get_value_string_utf8(env, napiPeers, peers, str_size, &str_size_read); + + // Getting the timeout param + napi_value napiTimeout = args[1]; + NAPI_CALL(napi_typeof(env, napiTimeout, &valueType)); + + if (valueType != napi_number) { + napi_throw_type_error(env, NULL, "The timeout attribute should be a number"); + return NULL; + } + + int32_t timeoutMs; + NAPI_CALL(napi_get_value_int32(env, napiTimeout, &timeoutMs)); + + // Getting the 'onError' callback + napi_value cb = args[2]; + + NAPI_CALL(napi_typeof(env, cb, &valueType)); + + if (valueType != napi_function) { + napi_throw_type_error(env, NULL, "The argument should be a napi_function"); + return NULL; + } + + my_env = env; + NAPI_CALL(napi_create_reference(env, cb, 1, &ref_on_error_callback)); + + WAKU_CALL(waku_connect(ctx, peers, timeoutMs, handle_error, userData)); + + // Free allocated memory + free(peers); + + return NULL; +} + +static napi_value WakuRelayPublish(napi_env env, napi_callback_info info) { + size_t argc = 5; + napi_value args[5]; + 
NAPI_CALL(napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + if (argc < 5) { + napi_throw_type_error(env, NULL, "Wrong number of arguments"); + return NULL; + } + + // pubsubtopic + napi_value napiPubsubTopic = args[0]; + napi_valuetype valueType; + NAPI_CALL(napi_typeof(env, napiPubsubTopic, &valueType)); + + if (valueType != napi_string) { + napi_throw_type_error(env, NULL, "The napiPubsubTopic attribute should be a string"); + return NULL; + } + + size_t str_size; + size_t str_size_read; + napi_get_value_string_utf8(env, napiPubsubTopic, NULL, 0, &str_size); + char* pubsub_topic; + pubsub_topic = malloc(str_size + 1); + str_size = str_size + 1; + napi_get_value_string_utf8(env, napiPubsubTopic, pubsub_topic, str_size, &str_size_read); + + // content topic + napi_value napiContentTopic = args[1]; + NAPI_CALL(napi_typeof(env, napiContentTopic, &valueType)); + + if (valueType != napi_string) { + napi_throw_type_error(env, NULL, "The content topic attribute should be a string"); + return NULL; + } + + napi_get_value_string_utf8(env, napiContentTopic, NULL, 0, &str_size); + char* content_topic_name = malloc(str_size + 1); + str_size = str_size + 1; + napi_get_value_string_utf8(env, napiContentTopic, content_topic_name, str_size, &str_size_read); + + // message + napi_value napiMessage = args[2]; + NAPI_CALL(napi_typeof(env, napiMessage, &valueType)); + + if (valueType != napi_string) { + napi_throw_type_error(env, NULL, "The message attribute should be a string"); + return NULL; + } + + char msg[2048]; + // TODO: check the correct message size limit + size_t lengthMsg; + NAPI_CALL(napi_get_value_string_utf8(env, + napiMessage, + msg, + 2048, + &lengthMsg)); + char jsonWakuMsg[1024]; + char *msgPayload = b64_encode((unsigned char*) msg, strlen(msg)); + + // TODO: move all the 'waku_content_topic' logic inside the libwaku + WAKU_CALL( waku_content_topic(ctx, + "appName", + 1, + content_topic_name, + "encoding", + handle_content_topic, + userData) ); + 
snprintf(jsonWakuMsg, + 1024, + "{\"payload\":\"%s\",\"content_topic\":\"%s\"}", + msgPayload, contentTopic); + free(msgPayload); + + // Getting the timeout parameter + unsigned int timeoutMs; + napi_value timeout = args[3]; + + NAPI_CALL(napi_typeof(env, timeout, &valueType)); + + if (valueType != napi_number) { + napi_throw_type_error(env, NULL, "The argument should be a napi_number"); + return NULL; + } + + NAPI_CALL(napi_get_value_int64(env, timeout, (int64_t *) &timeoutMs)); + + // Getting the 'onError' callback + napi_value cb = args[4]; + + NAPI_CALL(napi_typeof(env, cb, &valueType)); + + if (valueType != napi_function) { + napi_throw_type_error(env, NULL, "The argument should be a napi_function"); + return NULL; + } + + NAPI_CALL(napi_create_reference(env, cb, 1, &ref_on_error_callback)); + + // Perform the actual 'publish' + WAKU_CALL( waku_relay_publish(ctx, + pubsub_topic, + jsonWakuMsg, + timeoutMs, + handle_error, + userData) ); + free(pubsub_topic); + free(content_topic_name); + + return NULL; +} + +static napi_value WakuDefaultPubsubTopic(napi_env env, napi_callback_info info) { + size_t argc = 1; + napi_value args[1]; + NAPI_CALL(napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + if (argc < 1) { + napi_throw_type_error(env, NULL, "Wrong number of arguments"); + return NULL; + } + + napi_value cb = args[0]; + + napi_valuetype valueType; + NAPI_CALL(napi_typeof(env, cb, &valueType)); + + if (valueType != napi_function) { + napi_throw_type_error(env, NULL, "The argument should be a napi_function"); + return NULL; + } + + my_env = env; + if(ref_def_pubsub_topic_callback != NULL) { + NAPI_CALL(napi_delete_reference(env, ref_def_pubsub_topic_callback)); + } + + NAPI_CALL(napi_create_reference(env, cb, 1, &ref_def_pubsub_topic_callback)); + + WAKU_CALL( waku_default_pubsub_topic(ctx, handle_default_pubsub_topic, userData) ); + + return NULL; +} + +static napi_value WakuRelaySubscribe(napi_env env, napi_callback_info info) { + size_t argc = 2; + 
napi_value args[2]; + NAPI_CALL(napi_get_cb_info(env, info, &argc, args, NULL, NULL)); + + if (argc < 2) { + napi_throw_type_error(env, NULL, "Wrong number of arguments"); + return NULL; + } + + // Getting the pubsub topic param + napi_value topic = args[0]; + napi_valuetype valueType; + NAPI_CALL(napi_typeof(env, topic, &valueType)); + + if (valueType != napi_string) { + napi_throw_type_error(env, NULL, "The topic attribute should be a string"); + return NULL; + } + + size_t str_size; + size_t str_size_read; + napi_get_value_string_utf8(env, topic, NULL, 0, &str_size); + char* pubsub_topic; + pubsub_topic = malloc(str_size + 1); + str_size = str_size + 1; + napi_get_value_string_utf8(env, topic, pubsub_topic, str_size, &str_size_read); + + // Getting the 'onError' callback + napi_value cb = args[1]; + + NAPI_CALL(napi_typeof(env, cb, &valueType)); + + if (valueType != napi_function) { + napi_throw_type_error(env, NULL, "The argument should be a napi_function"); + return NULL; + } + + my_env = env; + NAPI_CALL(napi_create_reference(env, cb, 1, &ref_on_error_callback)); + + // Calling the actual 'subscribe' waku function + WAKU_CALL( waku_relay_subscribe(ctx, pubsub_topic, handle_error, userData) ); + + free(pubsub_topic); + + return NULL; +} + +#define DECLARE_NAPI_METHOD(name, func) \ + { name, 0, func, 0, 0, 0, napi_default, 0 } + +static napi_value Init(napi_env env, napi_value exports) { + // DECLARE_NAPI_METHOD("", ); + + napi_property_descriptor wakuVersion = DECLARE_NAPI_METHOD("wakuVersion", WakuVersion); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuVersion)); + + napi_property_descriptor wakuNew = DECLARE_NAPI_METHOD("wakuNew", WakuNew); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuNew)); + + napi_property_descriptor wakuStart = DECLARE_NAPI_METHOD("wakuStart", WakuStart); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuStart)); + + napi_property_descriptor wakuSetEventCallback = DECLARE_NAPI_METHOD("wakuSetEventCallback", 
WakuSetEventCallback); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuSetEventCallback)); + + napi_property_descriptor wakuDefaultPubsubTopic = DECLARE_NAPI_METHOD("wakuDefaultPubsubTopic", WakuDefaultPubsubTopic); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuDefaultPubsubTopic)); + + napi_property_descriptor wakuRelaySubscribe = DECLARE_NAPI_METHOD("wakuRelaySubscribe", WakuRelaySubscribe); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuRelaySubscribe)); + + napi_property_descriptor wakuConnect = DECLARE_NAPI_METHOD("wakuConnect", WakuConnect); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuConnect)); + + napi_property_descriptor wakuRelayPublish = DECLARE_NAPI_METHOD("wakuRelayPublish", WakuRelayPublish); + NAPI_CALL(napi_define_properties(env, exports, 1, &wakuRelayPublish)); + + return exports; +} + +NAPI_MODULE(NODE_GYP_MODULE_NAME, Init) diff --git a/third-party/nwaku/examples/publisher.nim b/third-party/nwaku/examples/publisher.nim new file mode 100644 index 0000000..8c2d036 --- /dev/null +++ b/third-party/nwaku/examples/publisher.nim @@ -0,0 +1,142 @@ +import + std/[tables, times, sequtils], + stew/byteutils, + chronicles, + chronos, + confutils, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr + +import + waku/[ + common/logging, + node/peer_manager, + waku_core, + waku_node, + waku_enr, + discovery/waku_discv5, + factory/builder, + ] + +proc now*(): Timestamp = + getNanosecondTime(getTime().toUnixFloat()) + +# An accesible bootstrap node. 
See waku.sandbox fleets.status.im +const bootstrapNode = + "enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ6" & + "6F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZI" & + "J2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjL" & + "WNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2" & + "XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5" & + "kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQ" & + "AGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQU" & + "lqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw" + +# careful if running pub and sub in the same machine +const wakuPort = 60000 +const discv5Port = 9000 + +proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} = + # use notice to filter all waku messaging + setupLog(logging.LogLevel.NOTICE, logging.LogFormat.TEXT) + + notice "starting publisher", wakuPort = wakuPort, discv5Port = discv5Port + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get() + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init(relay = true) + + var enrBuilder = EnrBuilder.init(nodeKey) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + quit(QuitFailure) + else: + recordRes.get() + + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() + + var bootstrapNodeEnr: enr.Record + discard bootstrapNodeEnr.fromURI(bootstrapNode) + + let discv5Conf = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: ip, + port: Port(discv5Port), + privateKey: keys.PrivateKey(nodeKey.skkey), + bootstrapRecords: @[bootstrapNodeEnr], + autoupdateRecord: true, + ) + + # assumes behind a firewall, so not care about being discoverable + let wakuDiscv5 = WakuDiscoveryV5.new( + node.rng, + discv5Conf, + some(node.enr), + some(node.peerManager), + node.topicSubscriptionQueue, + ) + + 
await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + quit(1) + + node.peerManager.start() + + (await wakuDiscv5.start()).isOkOr: + error "failed to start discv5", error = error + quit(1) + + # wait for a minimum of peers to be connected, otherwise messages wont be gossiped + while true: + let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book + .values() + .countIt(it == Connected) + if numConnectedPeers >= 6: + notice "publisher is ready", connectedPeers = numConnectedPeers, required = 6 + break + notice "waiting to be ready", connectedPeers = numConnectedPeers, required = 6 + await sleepAsync(5000) + + # Make sure it matches the publisher. Use default value + # see spec: https://rfc.vac.dev/spec/23/ + let pubSubTopic = PubsubTopic("/waku/2/rs/0/0") + + # any content topic can be chosen + let contentTopic = ContentTopic("/examples/1/pubsub-example/proto") + + notice "publisher service started" + while true: + let text = "hi there i'm a publisher" + let message = WakuMessage( + payload: toBytes(text), # content of the message + contentTopic: contentTopic, # content topic to publish to + ephemeral: true, # tell store nodes to not store it + timestamp: now(), + ) # current timestamp + + let res = await node.publish(some(pubSubTopic), message) + + if res.isOk: + notice "published message", + text = text, + timestamp = message.timestamp, + psTopic = pubSubTopic, + contentTopic = contentTopic + else: + error "failed to publish message", error = res.error + + await sleepAsync(5000) + +when isMainModule: + let rng = crypto.newRng() + asyncSpawn setupAndPublish(rng) + runForever() diff --git a/third-party/nwaku/examples/python/requirements.txt b/third-party/nwaku/examples/python/requirements.txt new file mode 100644 index 0000000..b2c3a0d --- /dev/null +++ b/third-party/nwaku/examples/python/requirements.txt @@ -0,0 +1,7 @@ +blinker==1.6.2 +click==8.1.6 +Flask==2.3.2 +itsdangerous==2.1.2 
+Jinja2==3.1.2 +MarkupSafe==2.1.3 +Werkzeug==2.3.6 diff --git a/third-party/nwaku/examples/python/waku.py b/third-party/nwaku/examples/python/waku.py new file mode 100644 index 0000000..4d5f564 --- /dev/null +++ b/third-party/nwaku/examples/python/waku.py @@ -0,0 +1,154 @@ +from flask import Flask +import ctypes +import argparse + +libwaku = object +try: + # This python script should be run from the root repo folder + libwaku = ctypes.CDLL("build/libwaku.so") +except Exception as e: + print("Exception: ", e) + print(""" +The 'libwaku.so' library can be created with the next command from +the repo's root folder: `make libwaku`. + +And it should build the library in 'build/libwaku.so'. + +Therefore, make sure the LD_LIBRARY_PATH env var points at the location that +contains the 'libwaku.so' library. +""") + exit(-1) + +def handle_event(ret, msg, user_data): + print("Event received: %s" % msg) + +def call_waku(func): + ret = func() + if (ret != 0): + print("Error in %s. Error code: %d" % (locals().keys(), ret)) + exit(1) + +# Parse params +parser = argparse.ArgumentParser(description='libwaku integration in Python.') +parser.add_argument('-d', '--host', dest='host', default='0.0.0.0', + help='Address this node will listen to. [=0.0.0.0]') +parser.add_argument('-p', '--port', dest='port', default=60000, required=True, + help='Port this node will listen to. [=60000]') +parser.add_argument('-k', '--key', dest='key', default="", required=True, + help="""P2P node private key as 64 char hex string. +e.g.: 364d111d729a6eb6d2e6113e163f017b5ef03a6f94c9b5b7bb1bb36fa5cb07a9""") +parser.add_argument('-r', '--relay', dest='relay', default="true", + help="Enable relay protocol: true|false [=true]") +parser.add_argument('--peer', dest='peer', default="", + help="Multiqualified libp2p address") + +args = parser.parse_args() + +# The next 'json_config' is the item passed to the 'waku_new'. 
+json_config = "{ \ + \"host\": \"%s\", \ + \"port\": %d, \ + \"key\": \"%s\", \ + \"relay\": %s ,\ + \"logLevel\": \"DEBUG\" \ + }" % (args.host, + int(args.port), + args.key, + "true" if args.relay else "false") + +callback_type = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_size_t) + +# Node creation +libwaku.waku_new.restype = ctypes.c_void_p +libwaku.waku_new.argtypes = [ctypes.c_char_p, + callback_type, + ctypes.c_void_p] + +ctx = libwaku.waku_new(bytes(json_config, 'utf-8'), + callback_type( + #onErrCb + lambda ret, msg, len: + print("Error calling waku_new: %s", + msg.decode('utf-8')) + ), + ctypes.c_void_p(0)) + +# Retrieve the current version of the library +libwaku.waku_version.argtypes = [ctypes.c_void_p, + callback_type, + ctypes.c_void_p] +libwaku.waku_version(ctx, + callback_type(lambda ret, msg, len: + print("Git Version: %s" % + msg.decode('utf-8'))), + ctypes.c_void_p(0)) + +# Retrieve the default pubsub topic +default_pubsub_topic = "" +libwaku.waku_default_pubsub_topic.argtypes = [ctypes.c_void_p, + callback_type, + ctypes.c_void_p] +libwaku.waku_default_pubsub_topic(ctx, + callback_type( + lambda ret, msg, len: ( + globals().update(default_pubsub_topic = msg.decode('utf-8')), + print("Default pubsub topic: %s" % msg.decode('utf-8'))) + ), + ctypes.c_void_p(0)) + +print("Bind addr: {}:{}".format(args.host, args.port)) +print("Waku Relay enabled: {}".format(args.relay)) + +# Set the event callback +callback = callback_type(handle_event) # This line is important so that the callback is not gc'ed + +libwaku.waku_set_event_callback.argtypes = [callback_type, ctypes.c_void_p] +libwaku.waku_set_event_callback(callback, ctypes.c_void_p(0)) + +# Start the node +libwaku.waku_start.argtypes = [ctypes.c_void_p, + callback_type, + ctypes.c_void_p] +libwaku.waku_start(ctx, + callback_type(lambda ret, msg, len: + print("Error in waku_start: %s" % + msg.decode('utf-8'))), + ctypes.c_void_p(0)) + +# Subscribe to the default pubsub topic 
+libwaku.waku_relay_subscribe.argtypes = [ctypes.c_void_p, + ctypes.c_char_p, + callback_type, + ctypes.c_void_p] +libwaku.waku_relay_subscribe(ctx, + default_pubsub_topic.encode('utf-8'), + callback_type( + #onErrCb + lambda ret, msg, len: + print("Error calling waku_relay_subscribe: %s" % + msg.decode('utf-8')) + ), + ctypes.c_void_p(0)) + +libwaku.waku_connect.argtypes = [ctypes.c_void_p, + ctypes.c_char_p, + ctypes.c_int, + callback_type, + ctypes.c_void_p] +libwaku.waku_connect(ctx, + args.peer.encode('utf-8'), + 10000, + # onErrCb + callback_type( + lambda ret, msg, len: + print("Error calling waku_connect: %s" % msg.decode('utf-8'))), + ctypes.c_void_p(0)) + +# app = Flask(__name__) +# @app.route("/") +# def hello_world(): +# return "Hello, World!" + +# Simply avoid the app to +a = input() + diff --git a/third-party/nwaku/examples/qt/Makefile b/third-party/nwaku/examples/qt/Makefile new file mode 100644 index 0000000..aa2147e --- /dev/null +++ b/third-party/nwaku/examples/qt/Makefile @@ -0,0 +1,26 @@ + +## Has been compiled with Qt 5.15.2 + +## If change the main.qml, the qmake should be called +## This may be needed in Ubuntu: sudo apt install qtdeclarative5-dev qtquickcontrols2-5-dev + +CXX = g++ +CXXFLAGS = -g3 -fpermissive -fPIC `pkg-config --cflags Qt5Core Qt5Gui Qt5Qml Qt5Quick` +LDFLAGS = `pkg-config --libs Qt5Core Qt5Gui Qt5Qml Qt5Quick` -lwaku -L../../build/ +MOC = moc + +TARGET = main-qt +SRC = main_qt.cpp +MOC_SRC = waku_handler.moc.cpp +HEADERS = waku_handler.h + +all: $(TARGET) + +$(MOC_SRC): $(HEADERS) + $(MOC) $< -o $@ + +$(TARGET): $(SRC) $(MOC_SRC) + $(CXX) $(CXXFLAGS) -o $(TARGET) $(SRC) $(MOC_SRC) $(LDFLAGS) + +clean: + rm -f $(TARGET) $(MOC_SRC) diff --git a/third-party/nwaku/examples/qt/main.qml b/third-party/nwaku/examples/qt/main.qml new file mode 100644 index 0000000..7ef2dcc --- /dev/null +++ b/third-party/nwaku/examples/qt/main.qml @@ -0,0 +1,64 @@ +import QtQuick 2.15 +import QtQuick.Controls 2.15 + +ApplicationWindow { + visible: 
true + width: 400 + height: 300 + title: "Hello, World!" + + Column { + anchors.centerIn: parent + spacing: 20 + + Label { + text: "Hello, World!" + font.pixelSize: 24 + horizontalAlignment: Text.AlignHCenter + } + } + + Rectangle { + width: parent.width + height: 60 + anchors.bottom: parent.bottom + color: "transparent" + + Row { + anchors.centerIn: parent + spacing: 30 + + Button { + text: "Start Waku Node" + width: 150 + height: 40 + font.pixelSize: 16 + MouseArea { + anchors.fill: parent + cursorShape: Qt.PointingHandCursor + onClicked: wakuHandler.start() + } + background: Rectangle { + color: "#2196F3" + radius: 10 + } + } + + Button { + text: "Stop Waku Node" + width: 150 + height: 40 + font.pixelSize: 16 + MouseArea { + anchors.fill: parent + cursorShape: Qt.PointingHandCursor + onClicked: wakuHandler.stop() + } + background: Rectangle { + color: "#F44336" + radius: 10 + } + } + } + } +} diff --git a/third-party/nwaku/examples/qt/main_qt.cpp b/third-party/nwaku/examples/qt/main_qt.cpp new file mode 100644 index 0000000..f16660c --- /dev/null +++ b/third-party/nwaku/examples/qt/main_qt.cpp @@ -0,0 +1,46 @@ +#include +#include +#include + +#include "waku_handler.h" + +void event_handler(int callerRet, const char* msg, size_t len, void* userData) { + printf("Receiving message %s\n", msg); +} + +int main(int argc, char *argv[]) { + QGuiApplication app(argc, argv); + QQmlApplicationEngine engine; + + WakuHandler wakuHandler; + void* userData = nullptr; + + QString jsonConfig = R"( + { + "tcpPort": 60000, + "relay": true, + "logLevel": "TRACE", + "discv5Discovery": true, + "discv5BootstrapNodes": [ + 
"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw", + "enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw" + ], + "discv5UdpPort": 9999, + "dnsDiscovery": true, + "dnsDiscoveryUrl": "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im", + "dnsDiscoveryNameServers": ["8.8.8.8", "1.0.0.1"] + } + )"; + + wakuHandler.initialize(jsonConfig, event_handler, userData); + + engine.rootContext()->setContextProperty("wakuHandler", &wakuHandler); + + engine.load(QUrl::fromLocalFile("main.qml")); + + if (engine.rootObjects().isEmpty()) + return -1; + + return app.exec(); +} + diff --git a/third-party/nwaku/examples/qt/qt.pro b/third-party/nwaku/examples/qt/qt.pro new file mode 100644 index 0000000..7e1770d --- /dev/null +++ b/third-party/nwaku/examples/qt/qt.pro @@ -0,0 +1,18 @@ +###################################################################### +# Automatically generated by qmake (3.1) Thu Feb 27 21:42:11 2025 +###################################################################### + +TEMPLATE = app +TARGET = qt +INCLUDEPATH += . + +# You can make your code fail to compile if you use deprecated APIs. +# In order to do so, uncomment the following line. +# Please consult the documentation of the deprecated API in order to know +# how to port your code away from it. 
+# You can also select to disable deprecated APIs only up to a certain version of Qt. +#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000 # disables all the APIs deprecated before Qt 6.0.0 + +# Input +HEADERS += waku_handler.h +SOURCES += main_qt.cpp waku_hand.moc.cpp waku_handler.moc.cpp diff --git a/third-party/nwaku/examples/qt/waku_handler.h b/third-party/nwaku/examples/qt/waku_handler.h new file mode 100644 index 0000000..161a17c --- /dev/null +++ b/third-party/nwaku/examples/qt/waku_handler.h @@ -0,0 +1,56 @@ +#include +#include +#include + +#include "../../library/libwaku.h" + +class WakuHandler : public QObject { + Q_OBJECT +private: + static void event_handler(int callerRet, const char* msg, size_t len, void* userData) { + printf("Receiving message %s\n", msg); + } + + static void on_event_received(int callerRet, const char* msg, size_t len, void* userData) { + if (callerRet == RET_ERR) { + printf("Error: %s\n", msg); + exit(1); + } + else if (callerRet == RET_OK) { + printf("Receiving event: %s\n", msg); + } + } + +public: + WakuHandler() : QObject(), ctx(nullptr) {} + + void initialize(const QString& jsonConfig, WakuCallBack event_handler, void* userData) { + ctx = waku_new(jsonConfig.toUtf8().constData(), WakuCallBack(event_handler), userData); + + waku_set_event_callback(ctx, on_event_received, userData); + qDebug() << "Waku context initialized, ready to start."; + } + + Q_INVOKABLE void start() { + if (ctx) { + waku_start(ctx, event_handler, nullptr); + qDebug() << "Waku start called with event_handler and userData."; + } else { + qDebug() << "Context is not initialized in start."; + } + } + + Q_INVOKABLE void stop() { + if (ctx) { + waku_stop(ctx, event_handler, nullptr); + qDebug() << "Waku stop called with event_handler and userData."; + } else { + qDebug() << "Context is not initialized in stop."; + } + } + + virtual ~WakuHandler() {} + +private: + void* ctx; +}; diff --git a/third-party/nwaku/examples/rust/Cargo.lock 
b/third-party/nwaku/examples/rust/Cargo.lock new file mode 100644 index 0000000..a4fea40 --- /dev/null +++ b/third-party/nwaku/examples/rust/Cargo.lock @@ -0,0 +1,25 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "libc" +version = "0.2.148" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" + +[[package]] +name = "waku-rust-simple-example" +version = "0.1.0" +dependencies = [ + "cc", +] diff --git a/third-party/nwaku/examples/rust/Cargo.toml b/third-party/nwaku/examples/rust/Cargo.toml new file mode 100644 index 0000000..c9ad9fa --- /dev/null +++ b/third-party/nwaku/examples/rust/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "waku-rust-simple-example" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[build-dependencies] +cc = "1.0.52" diff --git a/third-party/nwaku/examples/rust/README.md b/third-party/nwaku/examples/rust/README.md new file mode 100644 index 0000000..dc7bce3 --- /dev/null +++ b/third-party/nwaku/examples/rust/README.md @@ -0,0 +1,6 @@ + +This represents a very simple example on how to integrate the `libwaku` library in Rust, and then, only a few `libwaku` functions are being wrapped. + +In [waku-rust-bindings](https://github.com/waku-org/waku-rust-bindings) you will find a complete Waku integration in Rust. 
+ + diff --git a/third-party/nwaku/examples/rust/build.rs b/third-party/nwaku/examples/rust/build.rs new file mode 100644 index 0000000..b0398c6 --- /dev/null +++ b/third-party/nwaku/examples/rust/build.rs @@ -0,0 +1,5 @@ + +fn main() { + println!("cargo:rustc-link-arg=-lwaku"); + println!("cargo:rustc-link-arg=-L../../build/"); +} diff --git a/third-party/nwaku/examples/rust/src/main.rs b/third-party/nwaku/examples/rust/src/main.rs new file mode 100644 index 0000000..926d0e3 --- /dev/null +++ b/third-party/nwaku/examples/rust/src/main.rs @@ -0,0 +1,111 @@ +use std::cell::OnceCell; +use std::ffi::CString; +use std::os::raw::{c_char, c_int, c_void}; +use std::{slice, thread, time}; + +pub type WakuCallback = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void); + +extern "C" { + pub fn waku_new( + config_json: *const u8, + cb: WakuCallback, + user_data: *const c_void, + ) -> *mut c_void; + + pub fn waku_version(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int; + + pub fn waku_start(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int; + + pub fn waku_default_pubsub_topic( + ctx: *mut c_void, + cb: WakuCallback, + user_data: *const c_void, + ) -> *mut c_void; +} + +pub unsafe extern "C" fn trampoline( + return_val: c_int, + buffer: *const c_char, + buffer_len: usize, + data: *const c_void, +) where + C: FnMut(i32, &str), +{ + let closure = &mut *(data as *mut C); + + let buffer_utf8 = + String::from_utf8(slice::from_raw_parts(buffer as *mut u8, buffer_len).to_vec()) + .expect("valid utf8"); + + closure(return_val, &buffer_utf8); +} + +pub fn get_trampoline(_closure: &C) -> WakuCallback +where + C: FnMut(i32, &str), +{ + trampoline:: +} + +fn main() { + let config_json = "\ + { \ + \"host\": \"127.0.0.1\",\ + \"port\": 60000, \ + \"key\": \"0d714a1fada214dead6dc9c7274581ec20ff292451866e7d6d677dc818e8ccd2\", \ + \"relay\": true ,\ + \"logLevel\": \"DEBUG\" + }"; + + unsafe { + // Create the waku node + let closure 
= |ret: i32, data: &str| { + println!("Ret {ret}. waku_new closure called {data}"); + }; + let cb = get_trampoline(&closure); + let config_json_str = CString::new(config_json).unwrap(); + let ctx = waku_new( + config_json_str.as_ptr() as *const u8, + cb, + &closure as *const _ as *const c_void, + ); + + // Extracting the current waku version + let version: OnceCell = OnceCell::new(); + let closure = |ret: i32, data: &str| { + println!("version_closure. Ret: {ret}. Data: {data}"); + let _ = version.set(data.to_string()); + }; + let cb = get_trampoline(&closure); + let _ret = waku_version( + &ctx as *const _ as *const c_void, + cb, + &closure as *const _ as *const c_void, + ); + + // Extracting the default pubsub topic + let default_pubsub_topic: OnceCell = OnceCell::new(); + let closure = |_ret: i32, data: &str| { + let _ = default_pubsub_topic.set(data.to_string()); + }; + let cb = get_trampoline(&closure); + let _ret = waku_default_pubsub_topic(ctx, cb, &closure as *const _ as *const c_void); + + println!("Version: {}", version.get_or_init(|| unreachable!())); + println!( + "Default pubsubTopic: {}", + default_pubsub_topic.get_or_init(|| unreachable!()) + ); + + // Start the Waku node + let closure = |ret: i32, data: &str| { + println!("Ret {ret}. 
waku_start closure called {data}"); + }; + let cb = get_trampoline(&closure); + let _ret = waku_start(ctx, cb, &closure as *const _ as *const c_void); + } + + loop { + thread::sleep(time::Duration::from_millis(10000)); + } +} diff --git a/third-party/nwaku/examples/subscriber.nim b/third-party/nwaku/examples/subscriber.nim new file mode 100644 index 0000000..fb040b0 --- /dev/null +++ b/third-party/nwaku/examples/subscriber.nim @@ -0,0 +1,129 @@ +import + std/[tables, sequtils], + stew/byteutils, + chronicles, + chronos, + confutils, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr + +import + waku/[ + common/logging, + node/peer_manager, + waku_core, + waku_node, + waku_enr, + discovery/waku_discv5, + factory/builder, + waku_relay, + ] + +# An accesible bootstrap node. See waku.sandbox fleets.status.im +const bootstrapNode = + "enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ6" & + "6F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZI" & + "J2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjL" & + "WNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2" & + "XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5" & + "kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQ" & + "AGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQU" & + "lqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw" + +# careful if running pub and sub in the same machine +const wakuPort = 50000 +const discv5Port = 8000 + +proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} = + # use notice to filter all waku messaging + setupLog(logging.LogLevel.NOTICE, logging.LogFormat.TEXT) + + notice "starting subscriber", wakuPort = wakuPort, discv5Port = discv5Port + let + nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[] + ip = parseIpAddress("0.0.0.0") + flags = CapabilitiesBitfield.init(relay = true) + + var enrBuilder = EnrBuilder.init(nodeKey) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create enr record", error = recordRes.error + 
quit(QuitFailure) + else: + recordRes.get() + + var builder = WakuNodeBuilder.init() + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet() + let node = builder.build().tryGet() + + var bootstrapNodeEnr: enr.Record + discard bootstrapNodeEnr.fromURI(bootstrapNode) + + let discv5Conf = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: ip, + port: Port(discv5Port), + privateKey: keys.PrivateKey(nodeKey.skkey), + bootstrapRecords: @[bootstrapNodeEnr], + autoupdateRecord: true, + ) + + # assumes behind a firewall, so not care about being discoverable + let wakuDiscv5 = WakuDiscoveryV5.new( + node.rng, + discv5Conf, + some(node.enr), + some(node.peerManager), + node.topicSubscriptionQueue, + ) + + await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + quit(1) + node.peerManager.start() + + (await wakuDiscv5.start()).isOkOr: + error "failed to start discv5", error = error + quit(1) + + # wait for a minimum of peers to be connected, otherwise messages wont be gossiped + while true: + let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book + .values() + .countIt(it == Connected) + if numConnectedPeers >= 6: + notice "subscriber is ready", connectedPeers = numConnectedPeers, required = 6 + break + notice "waiting to be ready", connectedPeers = numConnectedPeers, required = 6 + await sleepAsync(5000) + + # Make sure it matches the publisher. Use default value + # see spec: https://rfc.vac.dev/spec/23/ + let pubSubTopic = PubsubTopic("/waku/2/rs/0/0") + + # any content topic can be chosen. 
make sure it matches the publisher + let contentTopic = ContentTopic("/examples/1/pubsub-example/proto") + + proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + let payloadStr = string.fromBytes(msg.payload) + if msg.contentTopic == contentTopic: + notice "message received", + payload = payloadStr, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr: + error "failed to subscribe to pubsub topic", pubsubTopic, error + quit(1) + +when isMainModule: + let rng = crypto.newRng() + asyncSpawn setupAndSubscribe(rng) + runForever() diff --git a/third-party/nwaku/examples/waku_example.nim b/third-party/nwaku/examples/waku_example.nim new file mode 100644 index 0000000..03964ca --- /dev/null +++ b/third-party/nwaku/examples/waku_example.nim @@ -0,0 +1,38 @@ +import std/options +import chronos, results, confutils, confutils/defs +import waku + +type CliArgs = object + ethRpcEndpoint* {. + defaultValue: "", desc: "ETH RPC Endpoint, if passed, RLN is enabled" + .}: string + +when isMainModule: + let args = CliArgs.load() + + echo "Starting Waku node..." + + let config = + if (args.ethRpcEndpoint == ""): + # Create a basic configuration for the Waku node + # No RLN as we don't have an ETH RPC Endpoint + NodeConfig.init(wakuConfig = WakuConfig.init(entryNodes = @[], clusterId = 42)) + else: + # Connect to TWN, use ETH RPC Endpoint for RLN + NodeConfig.init(ethRpcEndpoints = @[args.ethRpcEndpoint]) + + # Create the node using the library API's createNode function + let node = (waitFor createNode(config)).valueOr: + echo "Failed to create node: ", error + quit(QuitFailure) + + echo("Waku node created successfully!") + + # Start the node + (waitFor startWaku(addr node)).isOkOr: + echo "Failed to start node: ", error + quit(QuitFailure) + + echo "Node started successfully!" 
+ + runForever() diff --git a/third-party/nwaku/examples/wakustealthcommitments/README.md b/third-party/nwaku/examples/wakustealthcommitments/README.md new file mode 100644 index 0000000..f577188 --- /dev/null +++ b/third-party/nwaku/examples/wakustealthcommitments/README.md @@ -0,0 +1,38 @@ +# wakustealthcommitments + +This application/tool/protocol is used to securely communicate requests and responses for the [Stealth Address Scheme](https://eips.ethereum.org/EIPS/eip-5564) + +Uses TWN config as default, and content topic: `/wakustealthcommitments/1/app/proto` + +## Usage + +1. Clone the erc-5564-bn254 repo and build the static lib +```sh +gh repo clone rymnc/erc-5564-bn254 +cd erc-5564-bn254 +cargo build --release --all-features +cp ./target/release/liberc_5564_bn254.a +``` + +> ![NOTE] +> This static library also includes the rln ffi library, so you don't need to build it separately. +> This is because using both of them separately brings in a lot of duplicate symbols. + +2. Build the wakustealthcommitments app +```sh +cd +source env.sh +nim c --out:build/wakustealthcommitments --verbosity:0 --hints:off -d:chronicles_log_level=INFO -d:git_version="v0.24.0-rc.0-62-g7da25c" -d:release --passL:-lm --passL:liberc_5564_bn254.a --debugger:native examples/wakustealthcommitments/wakustealthcommitments.nim +``` + +3. +```sh +./build/wakustealthcommitments \ + --rln-relay-eth-client-address: \ + --rln-relay-cred-path: \ + --rln-relay-cred-password: +``` + +This service listens for requests for stealth commitment/address generation, +partakes in the generation of said stealth commitment and then distributes the response to the mesh. 
+ diff --git a/third-party/nwaku/examples/wakustealthcommitments/erc_5564_interface.nim b/third-party/nwaku/examples/wakustealthcommitments/erc_5564_interface.nim new file mode 100644 index 0000000..a6785f8 --- /dev/null +++ b/third-party/nwaku/examples/wakustealthcommitments/erc_5564_interface.nim @@ -0,0 +1,167 @@ +## Nim wrappers for the functions defined in librln +{.push raises: [].} + +import results + +###################################################################### +## ERC-5564-BN254 module APIs +###################################################################### + +type CErrorCode* = uint8 + +type CG1Projective* = object + x0: array[32, uint8] + +type CReturn*[T] = object + value: T + err_code: CErrorCode + +type CFr* = object + x0: array[32, uint8] + +type CStealthCommitment* = object + stealth_commitment: CG1Projective + view_tag: uint64 + +type CKeyPair* = object + private_key: CFr + public_key: CG1Projective + +proc drop_ffi_derive_public_key*( + ptrx: ptr CReturn[CG1Projective] +) {.importc: "drop_ffi_derive_public_key".} + +proc drop_ffi_generate_random_fr*( + ptrx: ptr CReturn[CFr] +) {.importc: "drop_ffi_generate_random_fr".} + +proc drop_ffi_generate_stealth_commitment*( + ptrx: ptr CReturn[CStealthCommitment] +) {.importc: "drop_ffi_generate_stealth_commitment".} + +proc drop_ffi_generate_stealth_private_key*( + ptrx: ptr CReturn[CFr] +) {.importc: "drop_ffi_generate_stealth_private_key".} + +proc drop_ffi_random_keypair*( + ptrx: ptr CReturn[CKeyPair] +) {.importc: "drop_ffi_random_keypair".} + +proc ffi_derive_public_key*( + private_key: ptr CFr +): (ptr CReturn[CG1Projective]) {.importc: "ffi_derive_public_key".} + +proc ffi_generate_random_fr*(): (ptr CReturn[CFr]) {.importc: "ffi_generate_random_fr".} + +proc ffi_generate_stealth_commitment*( + viewing_public_key: ptr CG1Projective, + spending_public_key: ptr CG1Projective, + ephemeral_private_key: ptr CFr, +): (ptr CReturn[CStealthCommitment]) {.importc: 
"ffi_generate_stealth_commitment".} + +proc ffi_generate_stealth_private_key*( + ephemeral_public_key: ptr CG1Projective, + spending_key: ptr CFr, + viewing_key: ptr CFr, + view_tag: ptr uint64, +): (ptr CReturn[CFr]) {.importc: "ffi_generate_stealth_private_key".} + +proc ffi_random_keypair*(): (ptr CReturn[CKeyPair]) {.importc: "ffi_random_keypair".} + +## Nim wrappers and types for the ERC-5564-BN254 module + +type FFIResult[T] = Result[T, string] +type Fr = array[32, uint8] +type G1Projective = array[32, uint8] +type KeyPair* = object + private_key*: Fr + public_key*: G1Projective + +type StealthCommitment* = object + stealth_commitment*: G1Projective + view_tag*: uint64 + +type PrivateKey* = Fr +type PublicKey* = G1Projective + +proc generateRandomFr*(): FFIResult[Fr] = + let res_ptr = (ffi_generate_random_fr()) + let res_value = res_ptr[] + if res_value.err_code != 0: + drop_ffi_generate_random_fr(res_ptr) + return err("Error generating random field element: " & $res_value.err_code) + + let ret = res_value.value.x0 + drop_ffi_generate_random_fr(res_ptr) + return ok(ret) + +proc generateKeypair*(): FFIResult[KeyPair] = + let res_ptr = (ffi_random_keypair()) + let res_value = res_ptr[] + if res_value.err_code != 0: + drop_ffi_random_keypair(res_ptr) + return err("Error generating random keypair: " & $res_value.err_code) + + let ret = KeyPair( + private_key: res_value.value.private_key.x0, + public_key: res_value.value.public_key.x0, + ) + drop_ffi_random_keypair(res_ptr) + return ok(ret) + +proc generateStealthCommitment*( + viewing_public_key: G1Projective, + spending_public_key: G1Projective, + ephemeral_private_key: Fr, +): FFIResult[StealthCommitment] = + let viewing_public_key = CG1Projective(x0: viewing_public_key) + let viewing_public_key_ptr = unsafeAddr(viewing_public_key) + let spending_public_key = CG1Projective(x0: spending_public_key) + let spending_public_key_ptr = unsafeAddr(spending_public_key) + let ephemeral_private_key = CFr(x0: 
ephemeral_private_key) + let ephemeral_private_key_ptr = unsafeAddr(ephemeral_private_key) + + let res_ptr = ( + ffi_generate_stealth_commitment( + viewing_public_key_ptr, spending_public_key_ptr, ephemeral_private_key_ptr + ) + ) + let res_value = res_ptr[] + if res_value.err_code != 0: + drop_ffi_generate_stealth_commitment(res_ptr) + return err("Error generating stealth commitment: " & $res_value.err_code) + + let ret = StealthCommitment( + stealth_commitment: res_value.value.stealth_commitment.x0, + view_tag: res_value.value.view_tag, + ) + drop_ffi_generate_stealth_commitment(res_ptr) + return ok(ret) + +proc generateStealthPrivateKey*( + ephemeral_public_key: G1Projective, + spending_key: Fr, + viewing_key: Fr, + view_tag: uint64, +): FFIResult[Fr] = + let ephemeral_public_key = CG1Projective(x0: ephemeral_public_key) + let ephemeral_public_key_ptr = unsafeAddr(ephemeral_public_key) + let spending_key = CFr(x0: spending_key) + let spending_key_ptr = unsafeAddr(spending_key) + let viewing_key = CFr(x0: viewing_key) + let viewing_key_ptr = unsafeAddr(viewing_key) + let view_tag_ptr = unsafeAddr(view_tag) + + let res_ptr = ( + ffi_generate_stealth_private_key( + ephemeral_public_key_ptr, spending_key_ptr, viewing_key_ptr, view_tag_ptr + ) + ) + let res_value = res_ptr[] + if res_value.err_code != 0: + drop_ffi_generate_stealth_private_key(res_ptr) + return err("Error generating stealth private key: " & $res_value.err_code) + + let ret = res_value.value.x0 + drop_ffi_generate_stealth_private_key(res_ptr) + return ok(ret) diff --git a/third-party/nwaku/examples/wakustealthcommitments/nim.cfg b/third-party/nwaku/examples/wakustealthcommitments/nim.cfg new file mode 100644 index 0000000..219c620 --- /dev/null +++ b/third-party/nwaku/examples/wakustealthcommitments/nim.cfg @@ -0,0 +1,11 @@ +-d:chronicles_line_numbers +-d:discv5_protocol_id="d5waku" +-d:chronicles_runtime_filtering=on +-d:chronicles_sinks="textlines,json" +-d:chronicles_default_output_device=dynamic 
+# Disabling the following topics from nim-eth and nim-dnsdisc since some types cannot be serialized +-d:chronicles_disabled_topics="eth,dnsdisc.client" +-d:chronicles_log_level=INFO +# Results in empty output for some reason +#-d:"chronicles_enabled_topics=GossipSub:TRACE,WakuRelay:TRACE" +path = "../../" diff --git a/third-party/nwaku/examples/wakustealthcommitments/node_spec.nim b/third-party/nwaku/examples/wakustealthcommitments/node_spec.nim new file mode 100644 index 0000000..bf61c28 --- /dev/null +++ b/third-party/nwaku/examples/wakustealthcommitments/node_spec.nim @@ -0,0 +1,62 @@ +{.push raises: [].} + +import ../../apps/wakunode2/cli_args +import waku/[common/logging, factory/[waku, networks_config]] +import + std/[options, strutils, os, sequtils], + chronicles, + chronos, + metrics, + libbacktrace, + libp2p/crypto/crypto + +export + networks_config, waku, logging, options, strutils, os, sequtils, stewNet, chronicles, + chronos, metrics, libbacktrace, crypto + +proc setup*(): Waku = + const versionString = "version / git commit hash: " & waku.git_version + let rng = crypto.newRng() + + let confRes = WakuNodeConf.load(version = versionString) + if confRes.isErr(): + error "failure while loading the configuration", error = $confRes.error + quit(QuitFailure) + + var conf = confRes.get() + + let twnNetworkConf = NetworkConf.TheWakuNetworkConf() + if len(conf.shards) != 0: + conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16]) + else: + conf.pubsubTopics = twnNetworkConf.pubsubTopics + + # Override configuration + conf.maxMessageSize = twnNetworkConf.maxMessageSize + conf.clusterId = twnNetworkConf.clusterId + conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress + conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic + conf.discv5Discovery = twnNetworkConf.discv5Discovery + conf.discv5BootstrapNodes = + conf.discv5BootstrapNodes & twnNetworkConf.discv5BootstrapNodes + conf.rlnEpochSizeSec = 
twnNetworkConf.rlnEpochSizeSec + conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit + + # Only set rlnRelay to true if relay is configured + if conf.relay: + conf.rlnRelay = twnNetworkConf.rlnRelay + + debug "Starting node" + var waku = (waitFor Waku.new(conf)).valueOr: + error "Waku initialization failed", error = error + quit(QuitFailure) + + (waitFor startWaku(addr waku)).isOkOr: + error "Starting waku failed", error = error + quit(QuitFailure) + + # set triggerSelf to false, we don't want to process our own stealthCommitments + waku.node.wakuRelay.triggerSelf = false + + info "Node setup complete" + return waku diff --git a/third-party/nwaku/examples/wakustealthcommitments/stealth_commitment_protocol.nim b/third-party/nwaku/examples/wakustealthcommitments/stealth_commitment_protocol.nim new file mode 100644 index 0000000..7da6bff --- /dev/null +++ b/third-party/nwaku/examples/wakustealthcommitments/stealth_commitment_protocol.nim @@ -0,0 +1,193 @@ +{.push raises: [].} + +import + results, + waku/[common/logging, waku_node, waku_rln_relay], + ./erc_5564_interface as StealthCommitmentFFI, + ./node_spec, + ./wire_spec + +export wire_spec, logging + +type StealthCommitmentProtocol* = object + waku: Waku + contentTopic: string + spendingKeyPair: StealthCommitmentFFI.KeyPair + viewingKeyPair: StealthCommitmentFFI.KeyPair + +proc deserialize( + T: type StealthCommitmentFFI.PublicKey, v: SerializedKey +): Result[T, string] = + # deserialize seq[byte] into array[32, uint8] + if v.len != 32: + return err("invalid key length") + var buf: array[32, uint8] + for i in 0 ..< v.len: + buf[i] = v[i] + return ok(buf) + +proc serialize( + v: StealthCommitmentFFI.PublicKey | StealthCommitmentFFI.PrivateKey +): SerializedKey = + # serialize array[32, uint8] into seq[byte] + var buf = newSeq[byte](v.len) + for i in 0 ..< v.len: + buf[i] = v[i] + return buf + +proc sendThruWaku*( + self: StealthCommitmentProtocol, msg: seq[byte] +): Future[Result[void, string]] 
{.async.} = + let time = getTime().toUnix() + var message = WakuMessage( + payload: msg, + contentTopic: self.contentTopic, + version: 0, + timestamp: getNanosecondTime(time), + ) + + (self.waku.node.wakuRlnRelay.appendRLNProof(message, float64(time))).isOkOr: + return err("could not append rate limit proof to the message: " & $error) + + (await self.waku.node.publish(some(DefaultPubsubTopic), message)).isOkOr: + return err("failed to publish message: " & $error) + + debug "rate limit proof is appended to the message" + + return ok() + +proc sendRequest*( + self: StealthCommitmentProtocol +): Future[Result[void, string]] {.async.} = + let request = constructRequest( + serialize(self.spendingKeyPair.publicKey), + serialize(self.viewingKeyPair.publicKey), + ) + .encode() + try: + (await self.sendThruWaku(request.buffer)).isOkOr: + return err("Could not send stealth commitment payload thru waku: " & $error) + except CatchableError: + return err( + "Could not send stealth commitment payload thru waku: " & getCurrentExceptionMsg() + ) + return ok() + +proc sendResponse*( + self: StealthCommitmentProtocol, + stealthCommitment: StealthCommitmentFFI.PublicKey, + ephemeralPubKey: StealthCommitmentFFI.PublicKey, + viewTag: uint64, +): Future[Result[void, string]] {.async.} = + let response = constructResponse( + serialize(stealthCommitment), serialize(ephemeralPubKey), viewTag + ) + .encode() + try: + (await self.sendThruWaku(response.buffer)).isOkOr: + return err("Could not send stealth commitment payload thru waku: " & $error) + except CatchableError: + return err( + "Could not send stealth commitment payload thru waku: " & getCurrentExceptionMsg() + ) + return ok() + +type SCPHandler* = proc(msg: WakuMessage): Future[void] {.async.} +proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler = + let handler = proc(msg: WakuMessage): Future[void] {.async.} = + let decodedRes = WakuStealthCommitmentMsg.decode(msg.payload) + if decodedRes.isErr(): + error "could not 
decode scp message" + let decoded = decodedRes.get() + if decoded.request == false: + # check if the generated stealth commitment belongs to the receiver + # if not, continue + let ephemeralPubKeyRes = + deserialize(StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get()) + if ephemeralPubKeyRes.isErr(): + error "could not deserialize ephemeral public key: ", + err = ephemeralPubKeyRes.error() + let ephemeralPubKey = ephemeralPubKeyRes.get() + let stealthCommitmentPrivateKeyRes = StealthCommitmentFFI.generateStealthPrivateKey( + ephemeralPubKey, + self.spendingKeyPair.privateKey, + self.viewingKeyPair.privateKey, + decoded.viewTag.get(), + ) + if stealthCommitmentPrivateKeyRes.isErr(): + info "received stealth commitment does not belong to the receiver: ", + err = stealthCommitmentPrivateKeyRes.error() + + let stealthCommitmentPrivateKey = stealthCommitmentPrivateKeyRes.get() + info "received stealth commitment belongs to the receiver: ", + stealthCommitmentPrivateKey, + stealthCommitmentPubKey = decoded.stealthCommitment.get() + return + # send response + # deseralize the keys + let spendingKeyRes = + deserialize(StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get()) + if spendingKeyRes.isErr(): + error "could not deserialize spending key: ", err = spendingKeyRes.error() + let spendingKey = spendingKeyRes.get() + let viewingKeyRes = + (deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get())) + if viewingKeyRes.isErr(): + error "could not deserialize viewing key: ", err = viewingKeyRes.error() + let viewingKey = viewingKeyRes.get() + + info "received spending key", spendingKey + info "received viewing key", viewingKey + let ephemeralKeyPairRes = StealthCommitmentFFI.generateKeyPair() + if ephemeralKeyPairRes.isErr(): + error "could not generate ephemeral key pair: ", err = ephemeralKeyPairRes.error() + let ephemeralKeyPair = ephemeralKeyPairRes.get() + + let stealthCommitmentRes = StealthCommitmentFFI.generateStealthCommitment( + 
spendingKey, viewingKey, ephemeralKeyPair.privateKey + ) + if stealthCommitmentRes.isErr(): + error "could not generate stealth commitment: ", + err = stealthCommitmentRes.error() + let stealthCommitment = stealthCommitmentRes.get() + + ( + await self.sendResponse( + stealthCommitment.stealthCommitment, ephemeralKeyPair.publicKey, + stealthCommitment.viewTag, + ) + ).isOkOr: + error "could not send response: ", err = $error + + return handler + +proc new*( + waku: Waku, contentTopic = ContentTopic("/wakustealthcommitments/1/app/proto") +): Result[StealthCommitmentProtocol, string] = + let spendingKeyPair = StealthCommitmentFFI.generateKeyPair().valueOr: + return err("could not generate spending key pair: " & $error) + let viewingKeyPair = StealthCommitmentFFI.generateKeyPair().valueOr: + return err("could not generate viewing key pair: " & $error) + + info "spending public key", publicKey = spendingKeyPair.publicKey + info "viewing public key", publicKey = viewingKeyPair.publicKey + + let SCP = StealthCommitmentProtocol( + waku: waku, + contentTopic: contentTopic, + spendingKeyPair: spendingKeyPair, + viewingKeyPair: viewingKeyPair, + ) + + proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + let scpHandler = getSCPHandler(SCP) + if msg.contentTopic == contentTopic: + try: + await scpHandler(msg) + except CatchableError: + error "could not handle SCP message: ", err = getCurrentExceptionMsg() + + waku.node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler)).isOkOr: + error "could not subscribe to pubsub topic: ", err = $error + return err("could not subscribe to pubsub topic: " & $error) + return ok(SCP) diff --git a/third-party/nwaku/examples/wakustealthcommitments/wakustealthcommitments.nim b/third-party/nwaku/examples/wakustealthcommitments/wakustealthcommitments.nim new file mode 100644 index 0000000..d2fe98b --- /dev/null +++ b/third-party/nwaku/examples/wakustealthcommitments/wakustealthcommitments.nim @@ 
-0,0 +1,27 @@ +{.push raises: [].} + +import results, chronicles, ./node_spec as Waku, ./stealth_commitment_protocol as SCP + +logScope: + topics = "waku stealthcommitments" + +when isMainModule: + ## Logging setup + setupLog(logging.LogLevel.NOTICE, logging.LogFormat.TEXT) + + info "Starting Waku Stealth Commitment Protocol" + info "Starting Waku Node" + let node = Waku.setup() + info "Waku Node started, listening for StealthCommitmentMessages" + let scp = SCP.new(node).valueOr: + error "Could not start Stealth Commitment Protocol", error = $error + quit(1) + + try: + info "Sending stealth commitment request" + (waitFor scp.sendRequest()).isOkOr: + error "Could not send stealth commitment request", error = $error + except: + error "Could not send stealth commitment request", error = getCurrentExceptionMsg() + + runForever() diff --git a/third-party/nwaku/examples/wakustealthcommitments/wire_spec.nim b/third-party/nwaku/examples/wakustealthcommitments/wire_spec.nim new file mode 100644 index 0000000..fa3d5a8 --- /dev/null +++ b/third-party/nwaku/examples/wakustealthcommitments/wire_spec.nim @@ -0,0 +1,133 @@ +import std/[times, options] +import confutils, chronicles, chronos, results + +import waku/[waku_core, common/protobuf] +import libp2p/protobuf/minprotobuf + +export + times, options, confutils, chronicles, chronos, results, waku_core, protobuf, + minprotobuf + +type SerializedKey* = seq[byte] + +type WakuStealthCommitmentMsg* = object + request*: bool + spendingPubKey*: Option[SerializedKey] + viewingPubKey*: Option[SerializedKey] + ephemeralPubKey*: Option[SerializedKey] + stealthCommitment*: Option[SerializedKey] + viewTag*: Option[uint64] + +proc decode*(T: type WakuStealthCommitmentMsg, buffer: seq[byte]): ProtoResult[T] = + var msg = WakuStealthCommitmentMsg() + let pb = initProtoBuffer(buffer) + + var request: uint64 + discard ?pb.getField(1, request) + msg.request = request == 1 + var spendingPubKey = newSeq[byte]() + discard ?pb.getField(2, 
spendingPubKey) + msg.spendingPubKey = + if spendingPubKey.len > 0: + some(spendingPubKey) + else: + none(SerializedKey) + var viewingPubKey = newSeq[byte]() + discard ?pb.getField(3, viewingPubKey) + msg.viewingPubKey = + if viewingPubKey.len > 0: + some(viewingPubKey) + else: + none(SerializedKey) + + if msg.spendingPubKey.isSome() and msg.viewingPubKey.isSome(): + msg.stealthCommitment = none(SerializedKey) + msg.viewTag = none(uint64) + return ok(msg) + if msg.spendingPubKey.isSome() and msg.viewingPubKey.isNone(): + return err(ProtoError.RequiredFieldMissing) + if msg.spendingPubKey.isNone() and msg.viewingPubKey.isSome(): + return err(ProtoError.RequiredFieldMissing) + if msg.request == true and msg.spendingPubKey.isNone() and msg.viewingPubKey.isNone(): + return err(ProtoError.RequiredFieldMissing) + + var stealthCommitment = newSeq[byte]() + discard ?pb.getField(4, stealthCommitment) + msg.stealthCommitment = + if stealthCommitment.len > 0: + some(stealthCommitment) + else: + none(SerializedKey) + + var ephemeralPubKey = newSeq[byte]() + discard ?pb.getField(5, ephemeralPubKey) + msg.ephemeralPubKey = + if ephemeralPubKey.len > 0: + some(ephemeralPubKey) + else: + none(SerializedKey) + + var viewTag: uint64 + discard ?pb.getField(6, viewTag) + msg.viewTag = + if viewTag != 0: + some(viewTag) + else: + none(uint64) + + if msg.stealthCommitment.isNone() and msg.viewTag.isNone() and + msg.ephemeralPubKey.isNone(): + return err(ProtoError.RequiredFieldMissing) + + if msg.stealthCommitment.isSome() and msg.viewTag.isNone(): + return err(ProtoError.RequiredFieldMissing) + + if msg.stealthCommitment.isNone() and msg.viewTag.isSome(): + return err(ProtoError.RequiredFieldMissing) + + if msg.stealthCommitment.isSome() and msg.viewTag.isSome(): + msg.spendingPubKey = none(SerializedKey) + msg.viewingPubKey = none(SerializedKey) + + ok(msg) + +proc encode*(msg: WakuStealthCommitmentMsg): ProtoBuffer = + var serialised = initProtoBuffer() + + serialised.write(1, 
uint64(msg.request)) + + if msg.spendingPubKey.isSome(): + serialised.write(2, msg.spendingPubKey.get()) + if msg.viewingPubKey.isSome(): + serialised.write(3, msg.viewingPubKey.get()) + if msg.stealthCommitment.isSome(): + serialised.write(4, msg.stealthCommitment.get()) + if msg.ephemeralPubKey.isSome(): + serialised.write(5, msg.ephemeralPubKey.get()) + if msg.viewTag.isSome(): + serialised.write(6, msg.viewTag.get()) + + return serialised + +func toByteSeq*(str: string): seq[byte] {.inline.} = + ## Converts a string to the corresponding byte sequence. + @(str.toOpenArrayByte(0, str.high)) + +proc constructRequest*( + spendingPubKey: SerializedKey, viewingPubKey: SerializedKey +): WakuStealthCommitmentMsg = + WakuStealthCommitmentMsg( + request: true, + spendingPubKey: some(spendingPubKey), + viewingPubKey: some(viewingPubKey), + ) + +proc constructResponse*( + stealthCommitment: SerializedKey, ephemeralPubKey: SerializedKey, viewTag: uint64 +): WakuStealthCommitmentMsg = + WakuStealthCommitmentMsg( + request: false, + stealthCommitment: some(stealthCommitment), + ephemeralPubKey: some(ephemeralPubKey), + viewTag: some(viewTag), + ) diff --git a/third-party/nwaku/flake.lock b/third-party/nwaku/flake.lock new file mode 100644 index 0000000..359ae25 --- /dev/null +++ b/third-party/nwaku/flake.lock @@ -0,0 +1,49 @@ +{ + "nodes": { + "nixpkgs": { + "locked": { + "lastModified": 1740603184, + "narHash": "sha256-t+VaahjQAWyA+Ctn2idyo1yxRIYpaDxMgHkgCNiMJa4=", + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49", + "type": "github" + }, + "original": { + "owner": "NixOS", + "repo": "nixpkgs", + "rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49", + "type": "github" + } + }, + "root": { + "inputs": { + "nixpkgs": "nixpkgs", + "zerokit": "zerokit" + } + }, + "zerokit": { + "inputs": { + "nixpkgs": [ + "nixpkgs" + ] + }, + "locked": { + "lastModified": 1743756626, + "narHash": "sha256-SvhfEl0bJcRsCd79jYvZbxQecGV2aT+TXjJ57WVv7Aw=", + 
"owner": "vacp2p", + "repo": "zerokit", + "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582", + "type": "github" + }, + "original": { + "owner": "vacp2p", + "repo": "zerokit", + "rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582", + "type": "github" + } + } + }, + "root": "root", + "version": 7 +} diff --git a/third-party/nwaku/flake.nix b/third-party/nwaku/flake.nix new file mode 100644 index 0000000..760f493 --- /dev/null +++ b/third-party/nwaku/flake.nix @@ -0,0 +1,64 @@ +{ + description = "NWaku build flake"; + + nixConfig = { + extra-substituters = [ "https://nix-cache.status.im/" ]; + extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ]; + }; + + inputs = { + nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49"; + zerokit = { + url = "github:vacp2p/zerokit?rev=c60e0c33fc6350a4b1c20e6b6727c44317129582"; + inputs.nixpkgs.follows = "nixpkgs"; + }; + }; + + outputs = { self, nixpkgs, zerokit }: + let + stableSystems = [ + "x86_64-linux" "aarch64-linux" + "x86_64-darwin" "aarch64-darwin" + "x86_64-windows" "i686-linux" + "i686-windows" + ]; + + forAllSystems = f: nixpkgs.lib.genAttrs stableSystems (system: f system); + + pkgsFor = forAllSystems ( + system: import nixpkgs { + inherit system; + config = { + android_sdk.accept_license = true; + allowUnfree = true; + }; + overlays = [ + (final: prev: { + androidEnvCustom = prev.callPackage ./nix/pkgs/android-sdk { }; + androidPkgs = final.androidEnvCustom.pkgs; + androidShell = final.androidEnvCustom.shell; + }) + ]; + } + ); + + in rec { + packages = forAllSystems (system: let + pkgs = pkgsFor.${system}; + in rec { + libwaku-android-arm64 = pkgs.callPackage ./nix/default.nix { + inherit stableSystems; + src = self; + targets = ["libwaku-android-arm64"]; + androidArch = "aarch64-linux-android"; + abidir = "arm64-v8a"; + zerokitPkg = zerokit.packages.${system}.zerokit-android-arm64; + }; + default = libwaku-android-arm64; + }); + + devShells = 
forAllSystems (system: { + default = pkgsFor.${system}.callPackage ./nix/shell.nix {}; + }); + }; +} \ No newline at end of file diff --git a/third-party/nwaku/library/alloc.nim b/third-party/nwaku/library/alloc.nim new file mode 100644 index 0000000..1a6f118 --- /dev/null +++ b/third-party/nwaku/library/alloc.nim @@ -0,0 +1,42 @@ +## Can be shared safely between threads +type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int] + +proc alloc*(str: cstring): cstring = + # Byte allocation from the given address. + # There should be the corresponding manual deallocation with deallocShared ! + if str.isNil(): + var ret = cast[cstring](allocShared(1)) # Allocate memory for the null terminator + ret[0] = '\0' # Set the null terminator + return ret + + let ret = cast[cstring](allocShared(len(str) + 1)) + copyMem(ret, str, len(str) + 1) + return ret + +proc alloc*(str: string): cstring = + ## Byte allocation from the given address. + ## There should be the corresponding manual deallocation with deallocShared ! + var ret = cast[cstring](allocShared(str.len + 1)) + let s = cast[seq[char]](str) + for i in 0 ..< str.len: + ret[i] = s[i] + ret[str.len] = '\0' + return ret + +proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] = + let data = allocShared(sizeof(T) * s.len) + if s.len != 0: + copyMem(data, unsafeAddr s[0], s.len) + return (cast[ptr UncheckedArray[T]](data), s.len) + +proc deallocSharedSeq*[T](s: var SharedSeq[T]) = + deallocShared(s.data) + s.len = 0 + +proc toSeq*[T](s: SharedSeq[T]): seq[T] = + ## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required + ## as req[T] is a GC managed type. 
+ var ret = newSeq[T]() + for i in 0 ..< s.len: + ret.add(s.data[i]) + return ret diff --git a/third-party/nwaku/library/events/json_base_event.nim b/third-party/nwaku/library/events/json_base_event.nim new file mode 100644 index 0000000..8c51d2c --- /dev/null +++ b/third-party/nwaku/library/events/json_base_event.nim @@ -0,0 +1,6 @@ +type JsonEvent* = ref object of RootObj # https://rfc.vac.dev/spec/36/#jsonsignal-type + eventType* {.requiresInit.}: string + +method `$`*(jsonEvent: JsonEvent): string {.base.} = + discard + # All events should implement this diff --git a/third-party/nwaku/library/events/json_connection_change_event.nim b/third-party/nwaku/library/events/json_connection_change_event.nim new file mode 100644 index 0000000..ff28236 --- /dev/null +++ b/third-party/nwaku/library/events/json_connection_change_event.nim @@ -0,0 +1,17 @@ +import system, std/json, libp2p/[connmanager, peerid] + +import ../../waku/common/base64, ./json_base_event + +type JsonConnectionChangeEvent* = ref object of JsonEvent + peerId*: string + peerEvent*: PeerEventKind + +proc new*( + T: type JsonConnectionChangeEvent, peerId: string, peerEvent: PeerEventKind +): T = + return JsonConnectionChangeEvent( + eventType: "connection_change", peerId: peerId, peerEvent: peerEvent + ) + +method `$`*(jsonConnectionChangeEvent: JsonConnectionChangeEvent): string = + $(%*jsonConnectionChangeEvent) diff --git a/third-party/nwaku/library/events/json_message_event.nim b/third-party/nwaku/library/events/json_message_event.nim new file mode 100644 index 0000000..f79fef8 --- /dev/null +++ b/third-party/nwaku/library/events/json_message_event.nim @@ -0,0 +1,106 @@ +import system, results, std/json, std/strutils +import stew/byteutils +import + ../../waku/common/base64, + ../../waku/waku_core/message, + ../../waku/waku_core/message/message, + ../utils, + ./json_base_event + +type JsonMessage* = ref object # https://rfc.vac.dev/spec/36/#jsonmessage-type + payload*: Base64String + contentTopic*: 
string + version*: uint + timestamp*: int64 + ephemeral*: bool + meta*: Base64String + proof*: Base64String + +func fromJsonNode*( + T: type JsonMessage, jsonContent: JsonNode +): Result[JsonMessage, string] = + # Visit https://rfc.vac.dev/spec/14/ for further details + + # Check if required fields exist + if not jsonContent.hasKey("payload"): + return err("Missing required field in WakuMessage: payload") + if not jsonContent.hasKey("contentTopic"): + return err("Missing required field in WakuMessage: contentTopic") + + ok( + JsonMessage( + payload: Base64String(jsonContent["payload"].getStr()), + contentTopic: jsonContent["contentTopic"].getStr(), + version: uint32(jsonContent{"version"}.getInt()), + timestamp: (?jsonContent.getProtoInt64("timestamp")).get(0), + ephemeral: jsonContent{"ephemeral"}.getBool(), + meta: Base64String(jsonContent{"meta"}.getStr()), + proof: Base64String(jsonContent{"proof"}.getStr()), + ) + ) + +proc toWakuMessage*(self: JsonMessage): Result[WakuMessage, string] = + let payload = base64.decode(self.payload).valueOr: + return err("invalid payload format: " & error) + + let meta = base64.decode(self.meta).valueOr: + return err("invalid meta format: " & error) + + let proof = base64.decode(self.proof).valueOr: + return err("invalid proof format: " & error) + + ok( + WakuMessage( + payload: payload, + meta: meta, + contentTopic: self.contentTopic, + version: uint32(self.version), + timestamp: self.timestamp, + ephemeral: self.ephemeral, + proof: proof, + ) + ) + +proc `%`*(value: Base64String): JsonNode = + %(value.string) + +type JsonMessageEvent* = ref object of JsonEvent + pubsubTopic*: string + messageHash*: string + wakuMessage*: JsonMessage + +proc new*(T: type JsonMessageEvent, pubSubTopic: string, msg: WakuMessage): T = + # Returns a WakuMessage event as indicated in + # https://github.com/vacp2p/rfc/blob/master/content/docs/rfcs/36/README.md#jsonmessageevent-type + + var payload = newSeq[byte](len(msg.payload)) + if 
len(msg.payload) != 0: + copyMem(addr payload[0], unsafeAddr msg.payload[0], len(msg.payload)) + + var meta = newSeq[byte](len(msg.meta)) + if len(msg.meta) != 0: + copyMem(addr meta[0], unsafeAddr msg.meta[0], len(msg.meta)) + + var proof = newSeq[byte](len(msg.proof)) + if len(msg.proof) != 0: + copyMem(addr proof[0], unsafeAddr msg.proof[0], len(msg.proof)) + + let msgHash = computeMessageHash(pubSubTopic, msg) + + return JsonMessageEvent( + eventType: "message", + pubSubTopic: pubSubTopic, + messageHash: msgHash.to0xHex(), + wakuMessage: JsonMessage( + payload: base64.encode(payload), + contentTopic: msg.contentTopic, + version: msg.version, + timestamp: int64(msg.timestamp), + ephemeral: msg.ephemeral, + meta: base64.encode(meta), + proof: base64.encode(proof), + ), + ) + +method `$`*(jsonMessage: JsonMessageEvent): string = + $(%*jsonMessage) diff --git a/third-party/nwaku/library/events/json_topic_health_change_event.nim b/third-party/nwaku/library/events/json_topic_health_change_event.nim new file mode 100644 index 0000000..c194e89 --- /dev/null +++ b/third-party/nwaku/library/events/json_topic_health_change_event.nim @@ -0,0 +1,20 @@ +import system, results, std/json +import stew/byteutils +import ../../waku/common/base64, ./json_base_event +import ../../waku/waku_relay + +type JsonTopicHealthChangeEvent* = ref object of JsonEvent + pubsubTopic*: string + topicHealth*: TopicHealth + +proc new*( + T: type JsonTopicHealthChangeEvent, pubsubTopic: string, topicHealth: TopicHealth +): T = + return JsonTopicHealthChangeEvent( + eventType: "relay_topic_health_change", + pubsubTopic: pubsubTopic, + topicHealth: topicHealth, + ) + +method `$`*(jsonTopicHealthChange: JsonTopicHealthChangeEvent): string = + $(%*jsonTopicHealthChange) diff --git a/third-party/nwaku/library/events/json_waku_not_responding_event.nim b/third-party/nwaku/library/events/json_waku_not_responding_event.nim new file mode 100644 index 0000000..1e1d5fc --- /dev/null +++ 
b/third-party/nwaku/library/events/json_waku_not_responding_event.nim @@ -0,0 +1,9 @@ +import system, std/json, ./json_base_event + +type JsonWakuNotRespondingEvent* = ref object of JsonEvent + +proc new*(T: type JsonWakuNotRespondingEvent): T = + return JsonWakuNotRespondingEvent(eventType: "waku_not_responding") + +method `$`*(event: JsonWakuNotRespondingEvent): string = + $(%*event) diff --git a/third-party/nwaku/library/ffi_types.nim b/third-party/nwaku/library/ffi_types.nim new file mode 100644 index 0000000..a5eeb97 --- /dev/null +++ b/third-party/nwaku/library/ffi_types.nim @@ -0,0 +1,30 @@ +################################################################################ +### Exported types + +type WakuCallBack* = proc( + callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer +) {.cdecl, gcsafe, raises: [].} + +const RET_OK*: cint = 0 +const RET_ERR*: cint = 1 +const RET_MISSING_CALLBACK*: cint = 2 + +### End of exported types +################################################################################ + +################################################################################ +### FFI utils + +template foreignThreadGc*(body: untyped) = + when declared(setupForeignThreadGc): + setupForeignThreadGc() + + body + + when declared(tearDownForeignThreadGc): + tearDownForeignThreadGc() + +type onDone* = proc() + +### End of FFI utils +################################################################################ diff --git a/third-party/nwaku/library/libwaku.h b/third-party/nwaku/library/libwaku.h new file mode 100644 index 0000000..b5d6c9b --- /dev/null +++ b/third-party/nwaku/library/libwaku.h @@ -0,0 +1,253 @@ + +// Generated manually and inspired by the one generated by the Nim Compiler. 
+// In order to see the header file generated by Nim just run `make libwaku` +// from the root repo folder and the header should be created in +// nimcache/release/libwaku/libwaku.h +#ifndef __libwaku__ +#define __libwaku__ + +#include +#include + +// The possible returned values for the functions that return int +#define RET_OK 0 +#define RET_ERR 1 +#define RET_MISSING_CALLBACK 2 + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void (*WakuCallBack) (int callerRet, const char* msg, size_t len, void* userData); + +// Creates a new instance of the waku node. +// Sets up the waku node from the given configuration. +// Returns a pointer to the Context needed by the rest of the API functions. +void* waku_new( + const char* configJson, + WakuCallBack callback, + void* userData); + +int waku_start(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_stop(void* ctx, + WakuCallBack callback, + void* userData); + +// Destroys an instance of a waku node created with waku_new +int waku_destroy(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_version(void* ctx, + WakuCallBack callback, + void* userData); + +// Sets a callback that will be invoked whenever an event occurs. +// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe. 
+void waku_set_event_callback(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_content_topic(void* ctx, + const char* appName, + unsigned int appVersion, + const char* contentTopicName, + const char* encoding, + WakuCallBack callback, + void* userData); + +int waku_pubsub_topic(void* ctx, + const char* topicName, + WakuCallBack callback, + void* userData); + +int waku_default_pubsub_topic(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_relay_publish(void* ctx, + const char* pubSubTopic, + const char* jsonWakuMessage, + unsigned int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_lightpush_publish(void* ctx, + const char* pubSubTopic, + const char* jsonWakuMessage, + WakuCallBack callback, + void* userData); + +int waku_relay_subscribe(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_add_protected_shard(void* ctx, + int clusterId, + int shardId, + char* publicKey, + WakuCallBack callback, + void* userData); + +int waku_relay_unsubscribe(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_filter_subscribe(void* ctx, + const char* pubSubTopic, + const char* contentTopics, + WakuCallBack callback, + void* userData); + +int waku_filter_unsubscribe(void* ctx, + const char* pubSubTopic, + const char* contentTopics, + WakuCallBack callback, + void* userData); + +int waku_filter_unsubscribe_all(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_relay_get_num_connected_peers(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_get_connected_peers(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_get_num_peers_in_mesh(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + +int waku_relay_get_peers_in_mesh(void* ctx, + const char* pubSubTopic, + WakuCallBack callback, + void* userData); + 
+int waku_store_query(void* ctx, + const char* jsonQuery, + const char* peerAddr, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_connect(void* ctx, + const char* peerMultiAddr, + unsigned int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_disconnect_peer_by_id(void* ctx, + const char* peerId, + WakuCallBack callback, + void* userData); + +int waku_disconnect_all_peers(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_dial_peer(void* ctx, + const char* peerMultiAddr, + const char* protocol, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_dial_peer_by_id(void* ctx, + const char* peerId, + const char* protocol, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_get_peerids_from_peerstore(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_connected_peers_info(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_peerids_by_protocol(void* ctx, + const char* protocol, + WakuCallBack callback, + void* userData); + +int waku_listen_addresses(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_connected_peers(void* ctx, + WakuCallBack callback, + void* userData); + +// Returns a list of multiaddress given a url to a DNS discoverable ENR tree +// Parameters +// char* entTreeUrl: URL containing a discoverable ENR tree +// char* nameDnsServer: The nameserver to resolve the ENR tree url. +// int timeoutMs: Timeout value in milliseconds to execute the call. +int waku_dns_discovery(void* ctx, + const char* entTreeUrl, + const char* nameDnsServer, + int timeoutMs, + WakuCallBack callback, + void* userData); + +// Updates the bootnode list used for discovering new peers via DiscoveryV5 +// bootnodes - JSON array containing the bootnode ENRs i.e. 
`["enr:...", "enr:..."]` +int waku_discv5_update_bootnodes(void* ctx, + char* bootnodes, + WakuCallBack callback, + void* userData); + +int waku_start_discv5(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_stop_discv5(void* ctx, + WakuCallBack callback, + void* userData); + +// Retrieves the ENR information +int waku_get_my_enr(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_my_peerid(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_get_metrics(void* ctx, + WakuCallBack callback, + void* userData); + +int waku_peer_exchange_request(void* ctx, + int numPeers, + WakuCallBack callback, + void* userData); + +int waku_ping_peer(void* ctx, + const char* peerAddr, + int timeoutMs, + WakuCallBack callback, + void* userData); + +int waku_is_online(void* ctx, + WakuCallBack callback, + void* userData); + +#ifdef __cplusplus +} +#endif + +#endif /* __libwaku__ */ diff --git a/third-party/nwaku/library/libwaku.nim b/third-party/nwaku/library/libwaku.nim new file mode 100644 index 0000000..ad3afa1 --- /dev/null +++ b/third-party/nwaku/library/libwaku.nim @@ -0,0 +1,853 @@ +{.pragma: exported, exportc, cdecl, raises: [].} +{.pragma: callback, cdecl, raises: [], gcsafe.} +{.passc: "-fPIC".} + +when defined(linux): + {.passl: "-Wl,-soname,libwaku.so".} + +import std/[json, atomics, strformat, options, atomics] +import chronicles, chronos, chronos/threadsync +import + waku/common/base64, + waku/waku_core/message/message, + waku/node/waku_node, + waku/node/peer_manager, + waku/waku_core/topics/pubsub_topic, + waku/waku_core/subscription/push_handler, + waku/waku_relay, + ./events/json_message_event, + ./waku_context, + ./waku_thread_requests/requests/node_lifecycle_request, + ./waku_thread_requests/requests/peer_manager_request, + ./waku_thread_requests/requests/protocols/relay_request, + ./waku_thread_requests/requests/protocols/store_request, + ./waku_thread_requests/requests/protocols/lightpush_request, + 
./waku_thread_requests/requests/protocols/filter_request, + ./waku_thread_requests/requests/debug_node_request, + ./waku_thread_requests/requests/discovery_request, + ./waku_thread_requests/requests/ping_request, + ./waku_thread_requests/waku_thread_request, + ./alloc, + ./ffi_types, + ../waku/factory/app_callbacks + +################################################################################ +### Wrapper around the waku node +################################################################################ + +################################################################################ +### Not-exported components + +template checkLibwakuParams*( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +) = + if not isNil(ctx): + ctx[].userData = userData + + if isNil(callback): + return RET_MISSING_CALLBACK + +proc handleRequest( + ctx: ptr WakuContext, + requestType: RequestType, + content: pointer, + callback: WakuCallBack, + userData: pointer, +): cint = + waku_context.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr: + let msg = "libwaku error: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + + return RET_OK + +### End of not-exported components +################################################################################ + +################################################################################ +### Library setup + +# Every Nim library must have this function called - the name is derived from +# the `--nimMainPrefix` command line option +proc libwakuNimMain() {.importc.} + +# To control when the library has been initialized +var initialized: Atomic[bool] + +if defined(android): + # Redirect chronicles to Android System logs + when compiles(defaultChroniclesStream.outputs[0].writer): + defaultChroniclesStream.outputs[0].writer = proc( + logLevel: LogLevel, msg: LogOutputStr + ) {.raises: [].} = + echo logLevel, msg + +proc initializeLibrary() 
{.exported.} = + if not initialized.exchange(true): + ## Every Nim library needs to call `NimMain` once exactly, to initialize the Nim runtime. + ## Being `` the value given in the optional compilation flag --nimMainPrefix:yourprefix + libwakuNimMain() + when declared(setupForeignThreadGc): + setupForeignThreadGc() + when declared(nimGC_setStackBottom): + var locals {.volatile, noinit.}: pointer + locals = addr(locals) + nimGC_setStackBottom(locals) + +### End of library setup +################################################################################ + +################################################################################ +### Exported procs + +proc waku_new( + configJson: cstring, callback: WakuCallback, userData: pointer +): pointer {.dynlib, exportc, cdecl.} = + initializeLibrary() + + ## Creates a new instance of the WakuNode. + if isNil(callback): + echo "error: missing callback in waku_new" + return nil + + ## Create the Waku thread that will keep waiting for req from the main thread. 
+ var ctx = waku_context.createWakuContext().valueOr: + let msg = "Error in createWakuContext: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return nil + + ctx.userData = userData + + let appCallbacks = AppCallbacks( + relayHandler: onReceivedMessage(ctx), + topicHealthChangeHandler: onTopicHealthChange(ctx), + connectionChangeHandler: onConnectionChange(ctx), + ) + + let retCode = handleRequest( + ctx, + RequestType.LIFECYCLE, + NodeLifecycleRequest.createShared( + NodeLifecycleMsgType.CREATE_NODE, configJson, appCallbacks + ), + callback, + userData, + ) + + if retCode == RET_ERR: + return nil + + return ctx + +proc waku_destroy( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + waku_context.destroyWakuContext(ctx).isOkOr: + let msg = "libwaku error: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + + ## always need to invoke the callback although we don't retrieve value to the caller + callback(RET_OK, nil, 0, userData) + + return RET_OK + +proc waku_version( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + callback( + RET_OK, + cast[ptr cchar](WakuNodeVersionString), + cast[csize_t](len(WakuNodeVersionString)), + userData, + ) + + return RET_OK + +proc waku_set_event_callback( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +) {.dynlib, exportc.} = + initializeLibrary() + ctx[].eventCallback = cast[pointer](callback) + ctx[].eventUserData = userData + +proc waku_content_topic( + ctx: ptr WakuContext, + appName: cstring, + appVersion: cuint, + contentTopicName: cstring, + encoding: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + # 
https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding + + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + let contentTopic = fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}" + callback( + RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData + ) + + return RET_OK + +proc waku_pubsub_topic( + ctx: ptr WakuContext, topicName: cstring, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc, cdecl.} = + # https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding + + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + let outPubsubTopic = fmt"/waku/2/{$topicName}" + callback( + RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData + ) + + return RET_OK + +proc waku_default_pubsub_topic( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + # https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic + + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + callback( + RET_OK, + cast[ptr cchar](DefaultPubsubTopic), + cast[csize_t](len(DefaultPubsubTopic)), + userData, + ) + + return RET_OK + +proc waku_relay_publish( + ctx: ptr WakuContext, + pubSubTopic: cstring, + jsonWakuMessage: cstring, + timeoutMs: cuint, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc, cdecl.} = + # https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms + + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + var jsonMessage: JsonMessage + try: + let jsonContent = parseJson($jsonWakuMessage) + jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: + raise newException(JsonParsingError, $error) + except JsonParsingError: + let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}" + 
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + + let wakuMessage = jsonMessage.toWakuMessage().valueOr: + let msg = "Problem building the WakuMessage: " & $error + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.PUBLISH, pubSubTopic, nil, wakuMessage), + callback, + userData, + ) + +proc waku_start( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + handleRequest( + ctx, + RequestType.LIFECYCLE, + NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE), + callback, + userData, + ) + +proc waku_stop( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + handleRequest( + ctx, + RequestType.LIFECYCLE, + NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP_NODE), + callback, + userData, + ) + +proc waku_relay_subscribe( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + var cb = onReceivedMessage(ctx) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)), + callback, + userData, + ) + +proc waku_relay_add_protected_shard( + ctx: ptr WakuContext, + clusterId: cint, + shardId: cint, + publicKey: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc, cdecl.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared( + RelayMsgType.ADD_PROTECTED_SHARD, + clusterId = clusterId, + shardId = shardId, + publicKey = publicKey, + 
), + callback, + userData, + ) + +proc waku_relay_unsubscribe( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared( + RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx)) + ), + callback, + userData, + ) + +proc waku_relay_get_num_connected_peers( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pubSubTopic), + callback, + userData, + ) + +proc waku_relay_get_connected_peers( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pubSubTopic), + callback, + userData, + ) + +proc waku_relay_get_num_peers_in_mesh( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pubSubTopic), + callback, + userData, + ) + +proc waku_relay_get_peers_in_mesh( + ctx: ptr WakuContext, + pubSubTopic: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.RELAY, + RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pubSubTopic), + callback, + userData, + ) + +proc 
waku_filter_subscribe( + ctx: ptr WakuContext, + pubSubTopic: cstring, + contentTopics: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.FILTER, + FilterRequest.createShared( + FilterMsgType.SUBSCRIBE, + pubSubTopic, + contentTopics, + FilterPushHandler(onReceivedMessage(ctx)), + ), + callback, + userData, + ) + +proc waku_filter_unsubscribe( + ctx: ptr WakuContext, + pubSubTopic: cstring, + contentTopics: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.FILTER, + FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE, pubSubTopic, contentTopics), + callback, + userData, + ) + +proc waku_filter_unsubscribe_all( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.FILTER, + FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE_ALL), + callback, + userData, + ) + +proc waku_lightpush_publish( + ctx: ptr WakuContext, + pubSubTopic: cstring, + jsonWakuMessage: cstring, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc, cdecl.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + var jsonMessage: JsonMessage + try: + let jsonContent = parseJson($jsonWakuMessage) + jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr: + raise newException(JsonParsingError, $error) + except JsonParsingError: + let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}" + callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + + let wakuMessage = jsonMessage.toWakuMessage().valueOr: + let msg = "Problem building the WakuMessage: " & $error + callback(RET_ERR, 
unsafeAddr msg[0], cast[csize_t](len(msg)), userData) + return RET_ERR + + handleRequest( + ctx, + RequestType.LIGHTPUSH, + LightpushRequest.createShared(LightpushMsgType.PUBLISH, pubSubTopic, wakuMessage), + callback, + userData, + ) + +proc waku_connect( + ctx: ptr WakuContext, + peerMultiAddr: cstring, + timeoutMs: cuint, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared( + PeerManagementMsgType.CONNECT_TO, $peerMultiAddr, chronos.milliseconds(timeoutMs) + ), + callback, + userData, + ) + +proc waku_disconnect_peer_by_id( + ctx: ptr WakuContext, peerId: cstring, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared( + op = PeerManagementMsgType.DISCONNECT_PEER_BY_ID, peerId = $peerId + ), + callback, + userData, + ) + +proc waku_disconnect_all_peers( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared(op = PeerManagementMsgType.DISCONNECT_ALL_PEERS), + callback, + userData, + ) + +proc waku_dial_peer( + ctx: ptr WakuContext, + peerMultiAddr: cstring, + protocol: cstring, + timeoutMs: cuint, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared( + op = PeerManagementMsgType.DIAL_PEER, + peerMultiAddr = $peerMultiAddr, + protocol = $protocol, + ), + callback, + userData, + ) + +proc waku_dial_peer_by_id( + ctx: ptr 
WakuContext, + peerId: cstring, + protocol: cstring, + timeoutMs: cuint, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared( + op = PeerManagementMsgType.DIAL_PEER_BY_ID, peerId = $peerId, protocol = $protocol + ), + callback, + userData, + ) + +proc waku_get_peerids_from_peerstore( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared(PeerManagementMsgType.GET_ALL_PEER_IDS), + callback, + userData, + ) + +proc waku_get_connected_peers_info( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS_INFO), + callback, + userData, + ) + +proc waku_get_connected_peers( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS), + callback, + userData, + ) + +proc waku_get_peerids_by_protocol( + ctx: ptr WakuContext, protocol: cstring, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PEER_MANAGER, + PeerManagementRequest.createShared( + op = PeerManagementMsgType.GET_PEER_IDS_BY_PROTOCOL, protocol = $protocol + ), + callback, + userData, + ) + +proc waku_store_query( + ctx: ptr 
WakuContext, + jsonQuery: cstring, + peerAddr: cstring, + timeoutMs: cint, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.STORE, + StoreRequest.createShared(StoreReqType.REMOTE_QUERY, jsonQuery, peerAddr, timeoutMs), + callback, + userData, + ) + +proc waku_listen_addresses( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_LISTENING_ADDRESSES), + callback, + userData, + ) + +proc waku_dns_discovery( + ctx: ptr WakuContext, + entTreeUrl: cstring, + nameDnsServer: cstring, + timeoutMs: cint, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DISCOVERY, + DiscoveryRequest.createRetrieveBootstrapNodesRequest( + DiscoveryMsgType.GET_BOOTSTRAP_NODES, entTreeUrl, nameDnsServer, timeoutMs + ), + callback, + userData, + ) + +proc waku_discv5_update_bootnodes( + ctx: ptr WakuContext, bootnodes: cstring, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + ## Updates the bootnode list used for discovering new peers via DiscoveryV5 + ## bootnodes - JSON array containing the bootnode ENRs i.e. 
`["enr:...", "enr:..."]` + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DISCOVERY, + DiscoveryRequest.createUpdateBootstrapNodesRequest( + DiscoveryMsgType.UPDATE_DISCV5_BOOTSTRAP_NODES, bootnodes + ), + callback, + userData, + ) + +proc waku_get_my_enr( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_ENR), + callback, + userData, + ) + +proc waku_get_my_peerid( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_PEER_ID), + callback, + userData, + ) + +proc waku_get_metrics( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_METRICS), + callback, + userData, + ) + +proc waku_start_discv5( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DISCOVERY, + DiscoveryRequest.createDiscV5StartRequest(), + callback, + userData, + ) + +proc waku_stop_discv5( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DISCOVERY, + DiscoveryRequest.createDiscV5StopRequest(), + callback, + userData, + ) + +proc waku_peer_exchange_request( + ctx: ptr WakuContext, numPeers: 
uint64, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DISCOVERY, + DiscoveryRequest.createPeerExchangeRequest(numPeers), + callback, + userData, + ) + +proc waku_ping_peer( + ctx: ptr WakuContext, + peerAddr: cstring, + timeoutMs: cuint, + callback: WakuCallBack, + userData: pointer, +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.PING, + PingRequest.createShared(peerAddr, chronos.milliseconds(timeoutMs)), + callback, + userData, + ) + +proc waku_is_online( + ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer +): cint {.dynlib, exportc.} = + initializeLibrary() + checkLibwakuParams(ctx, callback, userData) + + handleRequest( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_ONLINE_STATE), + callback, + userData, + ) + +### End of exported procs +################################################################################ diff --git a/third-party/nwaku/library/nim.cfg b/third-party/nwaku/library/nim.cfg new file mode 100644 index 0000000..3dc6221 --- /dev/null +++ b/third-party/nwaku/library/nim.cfg @@ -0,0 +1 @@ +path = "../" diff --git a/third-party/nwaku/library/utils.nim b/third-party/nwaku/library/utils.nim new file mode 100644 index 0000000..926ec4e --- /dev/null +++ b/third-party/nwaku/library/utils.nim @@ -0,0 +1,20 @@ +import std/[json, options, strutils] +import results + +proc getProtoInt64*(node: JsonNode, key: string): Result[Option[int64], string] = + try: + let (value, ok) = + if node.hasKey(key): + if node[key].kind == JString: + (parseBiggestInt(node[key].getStr()), true) + else: + (node[key].getBiggestInt(), true) + else: + (0, false) + + if ok: + return ok(some(value)) + + return ok(none(int64)) + except CatchableError: + return err("Invalid int64 value in `" & key & "`") 
diff --git a/third-party/nwaku/library/waku_context.nim b/third-party/nwaku/library/waku_context.nim new file mode 100644 index 0000000..c227551 --- /dev/null +++ b/third-party/nwaku/library/waku_context.nim @@ -0,0 +1,226 @@ +{.pragma: exported, exportc, cdecl, raises: [].} +{.pragma: callback, cdecl, raises: [], gcsafe.} +{.passc: "-fPIC".} + +import std/[options, atomics, os, net, locks] +import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results +import + waku/common/logging, + waku/factory/waku, + waku/node/peer_manager, + waku/waku_relay/[protocol, topic_health], + waku/waku_core/[topics/pubsub_topic, message], + ./waku_thread_requests/[waku_thread_request, requests/debug_node_request], + ./ffi_types, + ./events/[ + json_message_event, json_topic_health_change_event, json_connection_change_event, + json_waku_not_responding_event, + ] + +type WakuContext* = object + wakuThread: Thread[(ptr WakuContext)] + watchdogThread: Thread[(ptr WakuContext)] + # monitors the Waku thread and notifies the Waku SDK consumer if it hangs + lock: Lock + reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest] + reqSignal: ThreadSignalPtr + # to inform The Waku Thread (a.k.a TWT) that a new request is sent + reqReceivedSignal: ThreadSignalPtr + # to inform the main thread that the request is rx by TWT + userData*: pointer + eventCallback*: pointer + eventUserdata*: pointer + running: Atomic[bool] # To control when the threads are running + +const git_version* {.strdefine.} = "n/a" +const versionString = "version / git commit hash: " & waku.git_version + +template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) = + if isNil(ctx[].eventCallback): + error eventName & " - eventCallback is nil" + return + + foreignThreadGc: + try: + let event = body + cast[WakuCallBack](ctx[].eventCallback)( + RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData + ) + except Exception, CatchableError: + let msg = + "Exception " 
& eventName & " when calling 'eventCallBack': " & + getCurrentExceptionMsg() + cast[WakuCallBack](ctx[].eventCallback)( + RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData + ) + +proc onConnectionChange*(ctx: ptr WakuContext): ConnectionChangeHandler = + return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} = + callEventCallback(ctx, "onConnectionChange"): + $JsonConnectionChangeEvent.new($peerId, peerEvent) + +proc onReceivedMessage*(ctx: ptr WakuContext): WakuRelayHandler = + return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} = + callEventCallback(ctx, "onReceivedMessage"): + $JsonMessageEvent.new(pubsubTopic, msg) + +proc onTopicHealthChange*(ctx: ptr WakuContext): TopicHealthChangeHandler = + return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} = + callEventCallback(ctx, "onTopicHealthChange"): + $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth) + +proc onWakuNotResponding*(ctx: ptr WakuContext) = + callEventCallback(ctx, "onWakuNotResponsive"): + $JsonWakuNotRespondingEvent.new() + +proc sendRequestToWakuThread*( + ctx: ptr WakuContext, + reqType: RequestType, + reqContent: pointer, + callback: WakuCallBack, + userData: pointer, + timeout = InfiniteDuration, +): Result[void, string] = + ctx.lock.acquire() + # This lock is only necessary while we use a SP Channel and while the signalling + # between threads assumes that there aren't concurrent requests. 
+ # Rearchitecting the signaling + migrating to a MP Channel will allow us to receive + # requests concurrently and spare us the need of locks + defer: + ctx.lock.release() + + let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData) + ## Sending the request + let sentOk = ctx.reqChannel.trySend(req) + if not sentOk: + deallocShared(req) + return err("Couldn't send a request to the waku thread: " & $req[]) + + let fireSyncRes = ctx.reqSignal.fireSync() + if fireSyncRes.isErr(): + deallocShared(req) + return err("failed fireSync: " & $fireSyncRes.error) + + if fireSyncRes.get() == false: + deallocShared(req) + return err("Couldn't fireSync in time") + + ## wait until the Waku Thread properly received the request + let res = ctx.reqReceivedSignal.waitSync(timeout) + if res.isErr(): + deallocShared(req) + return err("Couldn't receive reqReceivedSignal signal") + + ## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the + ## process proc. See the 'waku_thread_request.nim' module for more details. + ok() + +proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} = + ## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs. + + let watchdogRun = proc(ctx: ptr WakuContext) {.async.} = + const WatchdogStartDelay = 10.seconds + const WatchdogTimeinterval = 1.seconds + const WakuNotRespondingTimeout = 3.seconds + + # Give time for the node to be created and up before sending watchdog requests + await sleepAsync(WatchdogStartDelay) + while true: + await sleepAsync(WatchdogTimeinterval) + + if ctx.running.load == false: + debug "Watchdog thread exiting because WakuContext is not running" + break + + let wakuCallback = proc( + callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer + ) {.cdecl, gcsafe, raises: [].} = + discard ## Don't do anything. Just respecting the callback signature. 
+ const nilUserData = nil + + trace "Sending watchdog request to Waku thread" + + sendRequestToWakuThread( + ctx, + RequestType.DEBUG, + DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED), + wakuCallback, + nilUserData, + WakuNotRespondingTimeout, + ).isOkOr: + error "Failed to send watchdog request to Waku thread", error = $error + onWakuNotResponding(ctx) + + waitFor watchdogRun(ctx) + +proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} = + ## Waku thread that attends library user requests (stop, connect_to, etc.) + + logging.setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT) + + let wakuRun = proc(ctx: ptr WakuContext) {.async.} = + var waku: Waku + while true: + await ctx.reqSignal.wait() + + if ctx.running.load == false: + break + + ## Trying to get a request from the libwaku requestor thread + var request: ptr WakuThreadRequest + let recvOk = ctx.reqChannel.tryRecv(request) + if not recvOk: + error "waku thread could not receive a request" + continue + + ## Handle the request + asyncSpawn WakuThreadRequest.process(request, addr waku) + + let fireRes = ctx.reqReceivedSignal.fireSync() + if fireRes.isErr(): + error "could not fireSync back to requester thread", error = fireRes.error + + waitFor wakuRun(ctx) + +proc createWakuContext*(): Result[ptr WakuContext, string] = + ## This proc is called from the main thread and it creates + ## the Waku working thread. 
+ var ctx = createShared(WakuContext, 1) + ctx.reqSignal = ThreadSignalPtr.new().valueOr: + return err("couldn't create reqSignal ThreadSignalPtr") + ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr: + return err("couldn't create reqReceivedSignal ThreadSignalPtr") + ctx.lock.initLock() + + ctx.running.store(true) + + try: + createThread(ctx.wakuThread, wakuThreadBody, ctx) + except ValueError, ResourceExhaustedError: + freeShared(ctx) + return err("failed to create the Waku thread: " & getCurrentExceptionMsg()) + + try: + createThread(ctx.watchdogThread, watchdogThreadBody, ctx) + except ValueError, ResourceExhaustedError: + freeShared(ctx) + return err("failed to create the watchdog thread: " & getCurrentExceptionMsg()) + + return ok(ctx) + +proc destroyWakuContext*(ctx: ptr WakuContext): Result[void, string] = + ctx.running.store(false) + + let signaledOnTime = ctx.reqSignal.fireSync().valueOr: + return err("error in destroyWakuContext: " & $error) + if not signaledOnTime: + return err("failed to signal reqSignal on time in destroyWakuContext") + + joinThread(ctx.wakuThread) + joinThread(ctx.watchdogThread) + ctx.lock.deinitLock() + ?ctx.reqSignal.close() + ?ctx.reqReceivedSignal.close() + freeShared(ctx) + + return ok() diff --git a/third-party/nwaku/library/waku_thread_requests/requests/debug_node_request.nim b/third-party/nwaku/library/waku_thread_requests/requests/debug_node_request.nim new file mode 100644 index 0000000..c9aa5a7 --- /dev/null +++ b/third-party/nwaku/library/waku_thread_requests/requests/debug_node_request.nim @@ -0,0 +1,63 @@ +import std/json +import + chronicles, + chronos, + results, + eth/p2p/discoveryv5/enr, + strutils, + libp2p/peerid, + metrics +import + ../../../waku/factory/waku, + ../../../waku/node/waku_node, + ../../../waku/node/health_monitor + +type DebugNodeMsgType* = enum + RETRIEVE_LISTENING_ADDRESSES + RETRIEVE_MY_ENR + RETRIEVE_MY_PEER_ID + RETRIEVE_METRICS + RETRIEVE_ONLINE_STATE + CHECK_WAKU_NOT_BLOCKED + +type 
DebugNodeRequest* = object + operation: DebugNodeMsgType + +proc createShared*(T: type DebugNodeRequest, op: DebugNodeMsgType): ptr type T = + var ret = createShared(T) + ret[].operation = op + return ret + +proc destroyShared(self: ptr DebugNodeRequest) = + deallocShared(self) + +proc getMultiaddresses(node: WakuNode): seq[string] = + return node.info().listenAddresses + +proc getMetrics(): string = + {.gcsafe.}: + return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module + +proc process*( + self: ptr DebugNodeRequest, waku: Waku +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) + + case self.operation + of RETRIEVE_LISTENING_ADDRESSES: + ## returns a comma-separated string of the listen addresses + return ok(waku.node.getMultiaddresses().join(",")) + of RETRIEVE_MY_ENR: + return ok(waku.node.enr.toURI()) + of RETRIEVE_MY_PEER_ID: + return ok($waku.node.peerId()) + of RETRIEVE_METRICS: + return ok(getMetrics()) + of RETRIEVE_ONLINE_STATE: + return ok($waku.healthMonitor.onlineMonitor.amIOnline()) + of CHECK_WAKU_NOT_BLOCKED: + return ok("waku thread is not blocked") + + error "unsupported operation in DebugNodeRequest" + return err("unsupported operation in DebugNodeRequest") diff --git a/third-party/nwaku/library/waku_thread_requests/requests/discovery_request.nim b/third-party/nwaku/library/waku_thread_requests/requests/discovery_request.nim new file mode 100644 index 0000000..8fec0dd --- /dev/null +++ b/third-party/nwaku/library/waku_thread_requests/requests/discovery_request.nim @@ -0,0 +1,150 @@ +import std/json +import chronos, chronicles, results, strutils, libp2p/multiaddress +import + ../../../waku/factory/waku, + ../../../waku/discovery/waku_dnsdisc, + ../../../waku/discovery/waku_discv5, + ../../../waku/waku_core/peers, + ../../../waku/node/waku_node, + ../../alloc + +type DiscoveryMsgType* = enum + GET_BOOTSTRAP_NODES + UPDATE_DISCV5_BOOTSTRAP_NODES + START_DISCV5 + STOP_DISCV5 + PEER_EXCHANGE + 
+type DiscoveryRequest* = object + operation: DiscoveryMsgType + + ## used in GET_BOOTSTRAP_NODES + enrTreeUrl: cstring + nameDnsServer: cstring + timeoutMs: cint + + ## used in UPDATE_DISCV5_BOOTSTRAP_NODES + nodes: cstring + + ## used in PEER_EXCHANGE + numPeers: uint64 + +proc createShared( + T: type DiscoveryRequest, + op: DiscoveryMsgType, + enrTreeUrl: cstring, + nameDnsServer: cstring, + timeoutMs: cint, + nodes: cstring, + numPeers: uint64, +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].enrTreeUrl = enrTreeUrl.alloc() + ret[].nameDnsServer = nameDnsServer.alloc() + ret[].timeoutMs = timeoutMs + ret[].nodes = nodes.alloc() + ret[].numPeers = numPeers + return ret + +proc createRetrieveBootstrapNodesRequest*( + T: type DiscoveryRequest, + op: DiscoveryMsgType, + enrTreeUrl: cstring, + nameDnsServer: cstring, + timeoutMs: cint, +): ptr type T = + return T.createShared(op, enrTreeUrl, nameDnsServer, timeoutMs, "", 0) + +proc createUpdateBootstrapNodesRequest*( + T: type DiscoveryRequest, op: DiscoveryMsgType, nodes: cstring +): ptr type T = + return T.createShared(op, "", "", 0, nodes, 0) + +proc createDiscV5StartRequest*(T: type DiscoveryRequest): ptr type T = + return T.createShared(START_DISCV5, "", "", 0, "", 0) + +proc createDiscV5StopRequest*(T: type DiscoveryRequest): ptr type T = + return T.createShared(STOP_DISCV5, "", "", 0, "", 0) + +proc createPeerExchangeRequest*( + T: type DiscoveryRequest, numPeers: uint64 +): ptr type T = + return T.createShared(PEER_EXCHANGE, "", "", 0, "", numPeers) + +proc destroyShared(self: ptr DiscoveryRequest) = + deallocShared(self[].enrTreeUrl) + deallocShared(self[].nameDnsServer) + deallocShared(self[].nodes) + deallocShared(self) + +proc retrieveBootstrapNodes( + enrTreeUrl: string, ipDnsServer: string +): Future[Result[seq[string], string]] {.async.} = + let dnsNameServers = @[parseIpAddress(ipDnsServer)] + let discoveredPeers: seq[RemotePeerInfo] = ( + await 
retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers) + ).valueOr: + return err("failed discovering peers from DNS: " & $error) + + var multiAddresses = newSeq[string]() + + for discPeer in discoveredPeers: + for address in discPeer.addrs: + multiAddresses.add($address & "/p2p/" & $discPeer) + + return ok(multiAddresses) + +proc updateDiscv5BootstrapNodes(nodes: string, waku: ptr Waku): Result[void, string] = + waku.wakuDiscv5.updateBootstrapRecords(nodes).isOkOr: + return err("error in updateDiscv5BootstrapNodes: " & $error) + return ok() + +proc performPeerExchangeRequestTo( + numPeers: uint64, waku: ptr Waku +): Future[Result[int, string]] {.async.} = + let numPeersRecv = (await waku.node.fetchPeerExchangePeers(numPeers)).valueOr: + return err($error) + return ok(numPeersRecv) + +proc process*( + self: ptr DiscoveryRequest, waku: ptr Waku +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) + + case self.operation + of START_DISCV5: + let res = await waku.wakuDiscv5.start() + res.isOkOr: + error "START_DISCV5 failed", error = error + return err($error) + + return ok("discv5 started correctly") + of STOP_DISCV5: + await waku.wakuDiscv5.stop() + + return ok("discv5 stopped correctly") + of GET_BOOTSTRAP_NODES: + let nodes = ( + await retrieveBootstrapNodes($self[].enrTreeUrl, $self[].nameDnsServer) + ).valueOr: + error "GET_BOOTSTRAP_NODES failed", error = error + return err($error) + + ## returns a comma-separated string of bootstrap nodes' multiaddresses + return ok(nodes.join(",")) + of UPDATE_DISCV5_BOOTSTRAP_NODES: + updateDiscv5BootstrapNodes($self[].nodes, waku).isOkOr: + error "UPDATE_DISCV5_BOOTSTRAP_NODES failed", error = error + return err($error) + + return ok("discovery request processed correctly") + of PEER_EXCHANGE: + let numValidPeers = (await performPeerExchangeRequestTo(self[].numPeers, waku)).valueOr: + error "PEER_EXCHANGE failed", error = error + return err($error) + return ok($numValidPeers) + + error "discovery 
request not handled" + return err("discovery request not handled") diff --git a/third-party/nwaku/library/waku_thread_requests/requests/node_lifecycle_request.nim b/third-party/nwaku/library/waku_thread_requests/requests/node_lifecycle_request.nim new file mode 100644 index 0000000..270bdf1 --- /dev/null +++ b/third-party/nwaku/library/waku_thread_requests/requests/node_lifecycle_request.nim @@ -0,0 +1,110 @@ +import std/[options, json, strutils, net] +import chronos, chronicles, results, confutils, confutils/std/net + +import + ../../../waku/node/peer_manager/peer_manager, + ../../../tools/confutils/cli_args, + ../../../waku/factory/waku, + ../../../waku/factory/node_factory, + ../../../waku/factory/networks_config, + ../../../waku/factory/app_callbacks, + ../../../waku/waku_api/rest/builder, + ../../alloc + +type NodeLifecycleMsgType* = enum + CREATE_NODE + START_NODE + STOP_NODE + +type NodeLifecycleRequest* = object + operation: NodeLifecycleMsgType + configJson: cstring ## Only used in 'CREATE_NODE' operation + appCallbacks: AppCallbacks + +proc createShared*( + T: type NodeLifecycleRequest, + op: NodeLifecycleMsgType, + configJson: cstring = "", + appCallbacks: AppCallbacks = nil, +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].appCallbacks = appCallbacks + ret[].configJson = configJson.alloc() + return ret + +proc destroyShared(self: ptr NodeLifecycleRequest) = + deallocShared(self[].configJson) + deallocShared(self) + +proc createWaku( + configJson: cstring, appCallbacks: AppCallbacks = nil +): Future[Result[Waku, string]] {.async.} = + var conf = defaultWakuNodeConf().valueOr: + return err("Failed creating node: " & error) + + var errorResp: string + + var jsonNode: JsonNode + try: + jsonNode = parseJson($configJson) + except Exception: + return err( + "exception in createWaku when calling parseJson: " & getCurrentExceptionMsg() & + " configJson string: " & $configJson + ) + + for confField, confValue in fieldPairs(conf): + if 
jsonNode.contains(confField): + # Make sure string doesn't contain the leading or trailing " character + let formattedString = ($jsonNode[confField]).strip(chars = {'\"'}) + # Override conf field with the value set in the json-string + try: + confValue = parseCmdArg(typeof(confValue), formattedString) + except Exception: + return err( + "exception in createWaku when parsing configuration. exc: " & + getCurrentExceptionMsg() & ". string that could not be parsed: " & + formattedString & ". expected type: " & $typeof(confValue) + ) + + # Don't send relay app callbacks if relay is disabled + if not conf.relay and not appCallbacks.isNil(): + appCallbacks.relayHandler = nil + appCallbacks.topicHealthChangeHandler = nil + + # TODO: Convert `confJson` directly to `WakuConf` + var wakuConf = conf.toWakuConf().valueOr: + return err("Configuration error: " & $error) + + wakuConf.restServerConf = none(RestServerConf) ## don't want REST in libwaku + + let wakuRes = (await Waku.new(wakuConf, appCallbacks)).valueOr: + error "waku initialization failed", error = error + return err("Failed setting up Waku: " & $error) + + return ok(wakuRes) + +proc process*( + self: ptr NodeLifecycleRequest, waku: ptr Waku +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) + + case self.operation + of CREATE_NODE: + waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr: + error "CREATE_NODE failed", error = error + return err($error) + of START_NODE: + (await waku.startWaku()).isOkOr: + error "START_NODE failed", error = error + return err($error) + of STOP_NODE: + try: + await waku[].stop() + except Exception: + error "STOP_NODE failed", error = getCurrentExceptionMsg() + return err(getCurrentExceptionMsg()) + + return ok("") diff --git a/third-party/nwaku/library/waku_thread_requests/requests/peer_manager_request.nim b/third-party/nwaku/library/waku_thread_requests/requests/peer_manager_request.nim new file mode 100644 index 0000000..cac5ca3 --- 
/dev/null +++ b/third-party/nwaku/library/waku_thread_requests/requests/peer_manager_request.nim @@ -0,0 +1,135 @@ +import std/[sequtils, strutils, tables] +import chronicles, chronos, results, options, json +import + ../../../waku/factory/waku, + ../../../waku/node/waku_node, + ../../alloc, + ../../../waku/node/peer_manager + +type PeerManagementMsgType* {.pure.} = enum + CONNECT_TO + GET_ALL_PEER_IDS + GET_CONNECTED_PEERS_INFO + GET_PEER_IDS_BY_PROTOCOL + DISCONNECT_PEER_BY_ID + DISCONNECT_ALL_PEERS + DIAL_PEER + DIAL_PEER_BY_ID + GET_CONNECTED_PEERS + +type PeerManagementRequest* = object + operation: PeerManagementMsgType + peerMultiAddr: cstring + dialTimeout: Duration + protocol: cstring + peerId: cstring + +type PeerInfo = object + protocols: seq[string] + addresses: seq[string] + +proc createShared*( + T: type PeerManagementRequest, + op: PeerManagementMsgType, + peerMultiAddr = "", + dialTimeout = chronos.milliseconds(0), ## arbitrary Duration as not all ops needs dialTimeout + peerId = "", + protocol = "", +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].peerMultiAddr = peerMultiAddr.alloc() + ret[].peerId = peerId.alloc() + ret[].protocol = protocol.alloc() + ret[].dialTimeout = dialTimeout + return ret + +proc destroyShared(self: ptr PeerManagementRequest) = + if not isNil(self[].peerMultiAddr): + deallocShared(self[].peerMultiAddr) + + if not isNil(self[].peerId): + deallocShared(self[].peerId) + + if not isNil(self[].protocol): + deallocShared(self[].protocol) + + deallocShared(self) + +proc process*( + self: ptr PeerManagementRequest, waku: Waku +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) + + case self.operation + of CONNECT_TO: + let peers = ($self[].peerMultiAddr).split(",").mapIt(strip(it)) + await waku.node.connectToNodes(peers, source = "static") + return ok("") + of GET_ALL_PEER_IDS: + ## returns a comma-separated string of peerIDs + let peerIDs = + 
waku.node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",") + return ok(peerIDs) + of GET_CONNECTED_PEERS_INFO: + ## returns a JSON string mapping peerIDs to objects with protocols and addresses + + var peersMap = initTable[string, PeerInfo]() + let peers = waku.node.peerManager.switch.peerStore.peers().filterIt( + it.connectedness == Connected + ) + + # Build a map of peer IDs to peer info objects + for peer in peers: + let peerIdStr = $peer.peerId + peersMap[peerIdStr] = + PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it)) + + # Convert the map to JSON string + let jsonObj = %*peersMap + let jsonStr = $jsonObj + return ok(jsonStr) + of GET_PEER_IDS_BY_PROTOCOL: + ## returns a comma-separated string of peerIDs that mount the given protocol + let connectedPeers = waku.node.peerManager.switch.peerStore + .peers($self[].protocol) + .filterIt(it.connectedness == Connected) + .mapIt($it.peerId) + .join(",") + return ok(connectedPeers) + of DISCONNECT_PEER_BY_ID: + let peerId = PeerId.init($self[].peerId).valueOr: + error "DISCONNECT_PEER_BY_ID failed", error = $error + return err($error) + await waku.node.peerManager.disconnectNode(peerId) + return ok("") + of DISCONNECT_ALL_PEERS: + await waku.node.peerManager.disconnectAllPeers() + return ok("") + of DIAL_PEER: + let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr: + error "DIAL_PEER failed", error = $error + return err($error) + let conn = await waku.node.peerManager.dialPeer(remotePeerInfo, $self[].protocol) + if conn.isNone(): + let msg = "failed dialing peer" + error "DIAL_PEER failed", error = msg, peerId = $remotePeerInfo.peerId + return err(msg) + of DIAL_PEER_BY_ID: + let peerId = PeerId.init($self[].peerId).valueOr: + error "DIAL_PEER_BY_ID failed", error = $error + return err($error) + let conn = await waku.node.peerManager.dialPeer(peerId, $self[].protocol) + if conn.isNone(): + let msg = "failed dialing peer" + error "DIAL_PEER_BY_ID failed", error = msg, 
peerId = $peerId + return err(msg) + of GET_CONNECTED_PEERS: + ## returns a comma-separated string of peerIDs + let + (inPeerIds, outPeerIds) = waku.node.peerManager.connectedPeers() + connectedPeerids = concat(inPeerIds, outPeerIds) + return ok(connectedPeerids.mapIt($it).join(",")) + + return ok("") diff --git a/third-party/nwaku/library/waku_thread_requests/requests/ping_request.nim b/third-party/nwaku/library/waku_thread_requests/requests/ping_request.nim new file mode 100644 index 0000000..53d3396 --- /dev/null +++ b/third-party/nwaku/library/waku_thread_requests/requests/ping_request.nim @@ -0,0 +1,56 @@ +import std/[json, strutils] +import chronos, results +import libp2p/[protocols/ping, switch, multiaddress, multicodec] +import ../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../alloc + +type PingRequest* = object + peerAddr: cstring + timeout: Duration + +proc createShared*( + T: type PingRequest, peerAddr: cstring, timeout: Duration +): ptr type T = + var ret = createShared(T) + ret[].peerAddr = peerAddr.alloc() + ret[].timeout = timeout + return ret + +proc destroyShared(self: ptr PingRequest) = + deallocShared(self[].peerAddr) + deallocShared(self) + +proc process*( + self: ptr PingRequest, waku: ptr Waku +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) + + let peerInfo = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr: + return err("PingRequest failed to parse peer addr: " & $error) + + proc ping(): Future[Result[Duration, string]] {.async, gcsafe.} = + try: + let conn = await waku.node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec) + defer: + await conn.close() + + let pingRTT = await waku.node.libp2pPing.ping(conn) + if pingRTT == 0.nanos: + return err("could not ping peer: rtt-0") + return ok(pingRTT) + except CatchableError: + return err("could not ping peer: " & getCurrentExceptionMsg()) + + let pingFuture = ping() + let pingRTT: Duration = + if self[].timeout == 
chronos.milliseconds(0): # No timeout expected + (await pingFuture).valueOr: + return err(error) + else: + let timedOut = not (await pingFuture.withTimeout(self[].timeout)) + if timedOut: + return err("ping timed out") + pingFuture.read().valueOr: + return err(error) + + ok($(pingRTT.nanos)) diff --git a/third-party/nwaku/library/waku_thread_requests/requests/protocols/filter_request.nim b/third-party/nwaku/library/waku_thread_requests/requests/protocols/filter_request.nim new file mode 100644 index 0000000..274ec32 --- /dev/null +++ b/third-party/nwaku/library/waku_thread_requests/requests/protocols/filter_request.nim @@ -0,0 +1,105 @@ +import options, std/[strutils, sequtils] +import chronicles, chronos, results +import + ../../../../waku/waku_filter_v2/client, + ../../../../waku/waku_core/message/message, + ../../../../waku/factory/waku, + ../../../../waku/waku_filter_v2/common, + ../../../../waku/waku_core/subscription/push_handler, + ../../../../waku/node/peer_manager/peer_manager, + ../../../../waku/node/waku_node, + ../../../../waku/waku_core/topics/pubsub_topic, + ../../../../waku/waku_core/topics/content_topic, + ../../../alloc + +type FilterMsgType* = enum + SUBSCRIBE + UNSUBSCRIBE + UNSUBSCRIBE_ALL + +type FilterRequest* = object + operation: FilterMsgType + pubsubTopic: cstring + contentTopics: cstring ## comma-separated list of content-topics + filterPushEventCallback: FilterPushHandler ## handles incoming filter pushed msgs + +proc createShared*( + T: type FilterRequest, + op: FilterMsgType, + pubsubTopic: cstring = "", + contentTopics: cstring = "", + filterPushEventCallback: FilterPushHandler = nil, +): ptr type T = + var ret = createShared(T) + ret[].operation = op + ret[].pubsubTopic = pubsubTopic.alloc() + ret[].contentTopics = contentTopics.alloc() + ret[].filterPushEventCallback = filterPushEventCallback + + return ret + +proc destroyShared(self: ptr FilterRequest) = + deallocShared(self[].pubsubTopic) + deallocShared(self[].contentTopics) + 
deallocShared(self) + +proc process*( + self: ptr FilterRequest, waku: ptr Waku +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) + + const FilterOpTimeout = 5.seconds + if waku.node.wakuFilterClient.isNil(): + let errorMsg = "FilterRequest waku.node.wakuFilterClient is nil" + error "fail filter process", error = errorMsg, op = $(self.operation) + return err(errorMsg) + + case self.operation + of SUBSCRIBE: + waku.node.wakuFilterClient.registerPushHandler(self.filterPushEventCallback) + + let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let errorMsg = + "could not find peer with WakuFilterSubscribeCodec when subscribing" + error "fail filter process", error = errorMsg, op = $(self.operation) + return err(errorMsg) + + let pubsubTopic = some(PubsubTopic($self[].pubsubTopic)) + let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it)) + + let subFut = waku.node.filterSubscribe(pubsubTopic, contentTopics, peer) + if not await subFut.withTimeout(FilterOpTimeout): + let errorMsg = "filter subscription timed out" + error "fail filter process", error = errorMsg, op = $(self.operation) + return err(errorMsg) + of UNSUBSCRIBE: + let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let errorMsg = + "could not find peer with WakuFilterSubscribeCodec when unsubscribing" + error "fail filter process", error = errorMsg, op = $(self.operation) + return err(errorMsg) + + let pubsubTopic = some(PubsubTopic($self[].pubsubTopic)) + let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it)) + + let subFut = waku.node.filterUnsubscribe(pubsubTopic, contentTopics, peer) + if not await subFut.withTimeout(FilterOpTimeout): + let errorMsg = "filter un-subscription timed out" + error "fail filter process", error = errorMsg, op = $(self.operation) + return err(errorMsg) + of UNSUBSCRIBE_ALL: + let peer = 
waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let errorMsg = + "could not find peer with WakuFilterSubscribeCodec when unsubscribing all" + error "fail filter process", error = errorMsg, op = $(self.operation) + return err(errorMsg) + + let unsubFut = waku.node.filterUnsubscribeAll(peer) + + if not await unsubFut.withTimeout(FilterOpTimeout): + let errorMsg = "filter un-subscription all timed out" + error "fail filter process", error = errorMsg, op = $(self.operation) + return err(errorMsg) + + return ok("") diff --git a/third-party/nwaku/library/waku_thread_requests/requests/protocols/lightpush_request.nim b/third-party/nwaku/library/waku_thread_requests/requests/protocols/lightpush_request.nim new file mode 100644 index 0000000..bc3d9de --- /dev/null +++ b/third-party/nwaku/library/waku_thread_requests/requests/protocols/lightpush_request.nim @@ -0,0 +1,109 @@ +import options +import chronicles, chronos, results +import + ../../../../waku/waku_core/message/message, + ../../../../waku/waku_core/codecs, + ../../../../waku/factory/waku, + ../../../../waku/waku_core/message, + ../../../../waku/waku_core/time, # Timestamp + ../../../../waku/waku_core/topics/pubsub_topic, + ../../../../waku/waku_lightpush_legacy/client, + ../../../../waku/waku_lightpush_legacy/common, + ../../../../waku/node/peer_manager/peer_manager, + ../../../alloc + +type LightpushMsgType* = enum + PUBLISH + +type ThreadSafeWakuMessage* = object + payload: SharedSeq[byte] + contentTopic: cstring + meta: SharedSeq[byte] + version: uint32 + timestamp: Timestamp + ephemeral: bool + when defined(rln): + proof: SharedSeq[byte] + +type LightpushRequest* = object + operation: LightpushMsgType + pubsubTopic: cstring + message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests + +proc createShared*( + T: type LightpushRequest, + op: LightpushMsgType, + pubsubTopic: cstring, + m = WakuMessage(), +): ptr type T = + var ret = createShared(T) + ret[].operation = op + 
ret[].pubsubTopic = pubsubTopic.alloc() + ret[].message = ThreadSafeWakuMessage( + payload: allocSharedSeq(m.payload), + contentTopic: m.contentTopic.alloc(), + meta: allocSharedSeq(m.meta), + version: m.version, + timestamp: m.timestamp, + ephemeral: m.ephemeral, + ) + when defined(rln): + ret[].message.proof = allocSharedSeq(m.proof) + + return ret + +proc destroyShared(self: ptr LightpushRequest) = + deallocSharedSeq(self[].message.payload) + deallocShared(self[].message.contentTopic) + deallocSharedSeq(self[].message.meta) + when defined(rln): + deallocSharedSeq(self[].message.proof) + + deallocShared(self) + +proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage = + var wakuMessage = WakuMessage() + + wakuMessage.payload = m.payload.toSeq() + wakuMessage.contentTopic = $m.contentTopic + wakuMessage.meta = m.meta.toSeq() + wakuMessage.version = m.version + wakuMessage.timestamp = m.timestamp + wakuMessage.ephemeral = m.ephemeral + + when defined(rln): + wakuMessage.proof = m.proof + + return wakuMessage + +proc process*( + self: ptr LightpushRequest, waku: ptr Waku +): Future[Result[string, string]] {.async.} = + defer: + destroyShared(self) + + case self.operation + of PUBLISH: + let msg = self.message.toWakuMessage() + let pubsubTopic = $self.pubsubTopic + + if waku.node.wakuLightpushClient.isNil(): + let errorMsg = "LightpushRequest waku.node.wakuLightpushClient is nil" + error "PUBLISH failed", error = errorMsg + return err(errorMsg) + + let peerOpt = waku.node.peerManager.selectPeer(WakuLightPushCodec) + if peerOpt.isNone(): + let errorMsg = "failed to lightpublish message, no suitable remote peers" + error "PUBLISH failed", error = errorMsg + return err(errorMsg) + + let msgHashHex = ( + await waku.node.wakuLegacyLightpushClient.publish( + pubsubTopic, msg, peer = peerOpt.get() + ) + ).valueOr: + error "PUBLISH failed", error = error + return err($error) + + return ok(msgHashHex) diff --git 
import std/[net, sequtils, strutils]
import chronicles, chronos, stew/byteutils, results
import
  ../../../../waku/waku_core/message/message,
  ../../../../waku/factory/[validator_signed, waku],
  ../../../../tools/confutils/cli_args,
  ../../../../waku/waku_node,
  ../../../../waku/waku_core/message,
  ../../../../waku/waku_core/time, # Timestamp
  ../../../../waku/waku_core/topics/pubsub_topic,
  ../../../../waku/waku_core/topics,
  ../../../../waku/waku_relay/protocol,
  ../../../../waku/node/peer_manager,
  ../../../alloc

type RelayMsgType* = enum
  SUBSCRIBE
  UNSUBSCRIBE
  PUBLISH
  NUM_CONNECTED_PEERS
  LIST_CONNECTED_PEERS
    ## to return the list of all connected peers to an specific pubsub topic
  NUM_MESH_PEERS
  LIST_MESH_PEERS
    ## to return the list of only the peers that conform the mesh for a particular pubsub topic
  ADD_PROTECTED_SHARD ## Protects a shard with a public key

type ThreadSafeWakuMessage* = object
  ## Mirror of `WakuMessage` whose variable-length fields live on the shared
  ## heap (`SharedSeq`/`cstring`) so the value can be handed from the main
  ## thread to the Waku thread without sharing GC-managed memory.
  payload: SharedSeq[byte]
  contentTopic: cstring
  meta: SharedSeq[byte]
  version: uint32
  timestamp: Timestamp
  ephemeral: bool
  when defined(rln):
    proof: SharedSeq[byte]

type RelayRequest* = object
  operation: RelayMsgType
  pubsubTopic: cstring
  relayEventCallback: WakuRelayHandler # not used in 'PUBLISH' requests
  message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests
  clusterId: cint # only used in 'ADD_PROTECTED_SHARD' requests
  shardId: cint # only used in 'ADD_PROTECTED_SHARD' requests
  publicKey: cstring # only used in 'ADD_PROTECTED_SHARD' requests

proc createShared*(
    T: type RelayRequest,
    op: RelayMsgType,
    pubsubTopic: cstring = nil,
    relayEventCallback: WakuRelayHandler = nil,
    m = WakuMessage(),
    clusterId: cint = 0,
    shardId: cint = 0,
    publicKey: cstring = nil,
): ptr type T =
  ## Allocates a RelayRequest on the shared heap, deep-copying every argument
  ## so no pointer into the caller's GC'd memory escapes the calling thread.
  ## Ownership passes to `process`, which always frees it via `destroyShared`.
  var ret = createShared(T)
  ret[].operation = op
  ret[].pubsubTopic = pubsubTopic.alloc()
  ret[].clusterId = clusterId
  ret[].shardId = shardId
  ret[].publicKey = publicKey.alloc()
  ret[].relayEventCallback = relayEventCallback
  ret[].message = ThreadSafeWakuMessage(
    payload: allocSharedSeq(m.payload),
    contentTopic: m.contentTopic.alloc(),
    meta: allocSharedSeq(m.meta),
    version: m.version,
    timestamp: m.timestamp,
    ephemeral: m.ephemeral,
  )
  when defined(rln):
    ret[].message.proof = allocSharedSeq(m.proof)

  return ret

proc destroyShared(self: ptr RelayRequest) =
  ## Frees every shared-heap allocation made by `createShared`, then the
  ## request object itself. Must mirror `createShared` field-for-field.
  deallocSharedSeq(self[].message.payload)
  deallocShared(self[].message.contentTopic)
  deallocSharedSeq(self[].message.meta)
  when defined(rln):
    deallocSharedSeq(self[].message.proof)
  deallocShared(self[].pubsubTopic)
  deallocShared(self[].publicKey)
  deallocShared(self)

proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage =
  ## Converts the shared-heap copy back into a regular GC-managed
  ## `WakuMessage` (deep-copies all shared buffers).
  var wakuMessage = WakuMessage()

  wakuMessage.payload = m.payload.toSeq()
  wakuMessage.contentTopic = $m.contentTopic
  wakuMessage.meta = m.meta.toSeq()
  wakuMessage.version = m.version
  wakuMessage.timestamp = m.timestamp
  wakuMessage.ephemeral = m.ephemeral

  when defined(rln):
    # Fix: `proof` is a SharedSeq and must be copied into a GC'd seq just
    # like `payload` and `meta`; the previous direct assignment leaked the
    # shared-seq representation into the message.
    wakuMessage.proof = m.proof.toSeq()

  return wakuMessage

proc process*(
    self: ptr RelayRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
  ## Executes the requested relay operation on the Waku thread.
  ## Takes ownership of `self` and frees it on every path (defer).
  ## Returns the operation's textual result, or an error string.
  defer:
    destroyShared(self)

  if waku.node.wakuRelay.isNil():
    return err("Operation not supported without Waku Relay enabled.")

  case self.operation
  of SUBSCRIBE:
    waku.node.subscribe(
      (kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic),
      handler = self.relayEventCallback,
    ).isOkOr:
      error "SUBSCRIBE failed", error = error
      return err($error)
  of UNSUBSCRIBE:
    waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr:
      error "UNSUBSCRIBE failed", error = error
      return err($error)
  of PUBLISH:
    let msg = self.message.toWakuMessage()
    let pubsubTopic = $self.pubsubTopic

    (await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
      error "PUBLISH failed", error = error
      return err($error)

    # On success the 0x-prefixed message hash is returned to the caller.
    let msgHash = computeMessageHash(pubsubTopic, msg).to0xHex
    return ok(msgHash)
  of NUM_CONNECTED_PEERS:
    let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr:
      error "NUM_CONNECTED_PEERS failed", error = error
      return err($error)
    return ok($numConnPeers)
  of LIST_CONNECTED_PEERS:
    let connPeers = waku.node.wakuRelay.getConnectedPeers($self.pubsubTopic).valueOr:
      error "LIST_CONNECTED_PEERS failed", error = error
      return err($error)
    ## returns a comma-separated string of peerIDs
    return ok(connPeers.mapIt($it).join(","))
  of NUM_MESH_PEERS:
    let numPeersInMesh = waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr:
      error "NUM_MESH_PEERS failed", error = error
      return err($error)
    return ok($numPeersInMesh)
  of LIST_MESH_PEERS:
    let meshPeers = waku.node.wakuRelay.getPeersInMesh($self.pubsubTopic).valueOr:
      error "LIST_MESH_PEERS failed", error = error
      return err($error)
    ## returns a comma-separated string of peerIDs
    return ok(meshPeers.mapIt($it).join(","))
  of ADD_PROTECTED_SHARD:
    try:
      let relayShard =
        RelayShard(clusterId: uint16(self.clusterId), shardId: uint16(self.shardId))
      # parseCmdArg expects the "<pubsub-topic>:<public-key>" CLI format and
      # raises ValueError on a malformed string.
      let protectedShard =
        ProtectedShard.parseCmdArg($relayShard & ":" & $self.publicKey)
      waku.node.wakuRelay.addSignedShardsValidator(
        @[protectedShard], uint16(self.clusterId)
      )
    except ValueError:
      return err(getCurrentExceptionMsg())
    return ok("")
import std/[json, sugar, strutils, options]
import chronos, chronicles, results, stew/byteutils
import
  ../../../../waku/factory/waku,
  ../../../alloc,
  ../../../utils,
  ../../../../waku/waku_core/peers,
  ../../../../waku/waku_core/time,
  ../../../../waku/waku_core/message/digest,
  ../../../../waku/waku_store/common,
  ../../../../waku/waku_store/client,
  ../../../../waku/common/paging

type StoreReqType* = enum
  REMOTE_QUERY ## to perform a query to another Store node

type StoreRequest* = object
  operation: StoreReqType
  jsonQuery: cstring
  peerAddr: cstring
  timeoutMs: cint

func fromJsonNode(
    T: type StoreRequest, jsonContent: JsonNode
): Result[StoreQueryRequest, string] =
  ## Builds a StoreQueryRequest out of the JSON document received over the
  ## FFI boundary. Missing optional fields fall back to their defaults;
  ## malformed hex hashes return err() instead of raising.
  var contentTopics: seq[string]
  if jsonContent.contains("contentTopics"):
    contentTopics = collect(newSeq):
      for cTopic in jsonContent["contentTopics"].getElems():
        cTopic.getStr()

  var msgHashes: seq[WakuMessageHash]
  if jsonContent.contains("messageHashes"):
    for hashJsonObj in jsonContent["messageHashes"].getElems():
      let hash = hashJsonObj.getStr().hexToHash().valueOr:
        return err("Failed converting message hash hex string to bytes: " & error)
      msgHashes.add(hash)

  let pubsubTopic =
    if jsonContent.contains("pubsubTopic"):
      some(jsonContent["pubsubTopic"].getStr())
    else:
      none(string)

  let paginationCursor =
    if jsonContent.contains("paginationCursor"):
      let hash = jsonContent["paginationCursor"].getStr().hexToHash().valueOr:
        return err("Failed converting paginationCursor hex string to bytes: " & error)
      some(hash)
    else:
      none(WakuMessageHash)

  # Fix: use std/json's lenient `{}` accessor so an absent "paginationForward"
  # (and "requestId"/"includeData" below) yields its zero default instead of
  # raising KeyError, which would escape this func as an exception rather
  # than the Result error every other field produces.
  let paginationForwardBool = jsonContent{"paginationForward"}.getBool()
  let paginationForward =
    if paginationForwardBool: PagingDirection.FORWARD else: PagingDirection.BACKWARD

  let paginationLimit =
    if jsonContent.contains("paginationLimit"):
      some(uint64(jsonContent["paginationLimit"].getInt()))
    else:
      none(uint64)

  let startTime = ?jsonContent.getProtoInt64("timeStart")
  let endTime = ?jsonContent.getProtoInt64("timeEnd")

  return ok(
    StoreQueryRequest(
      requestId: jsonContent{"requestId"}.getStr(),
      includeData: jsonContent{"includeData"}.getBool(),
      pubsubTopic: pubsubTopic,
      contentTopics: contentTopics,
      startTime: startTime,
      endTime: endTime,
      messageHashes: msgHashes,
      paginationCursor: paginationCursor,
      paginationForward: paginationForward,
      paginationLimit: paginationLimit,
    )
  )

proc createShared*(
    T: type StoreRequest,
    op: StoreReqType,
    jsonQuery: cstring,
    peerAddr: cstring,
    timeoutMs: cint,
): ptr type T =
  ## Allocates a StoreRequest on the shared heap, deep-copying the cstring
  ## arguments. Ownership passes to `process`, which frees it.
  var ret = createShared(T)
  ret[].operation = op
  ret[].timeoutMs = timeoutMs
  ret[].jsonQuery = jsonQuery.alloc()
  ret[].peerAddr = peerAddr.alloc()
  return ret

proc destroyShared(self: ptr StoreRequest) =
  ## Frees the shared-heap cstrings allocated by `createShared`, then the
  ## request object itself.
  deallocShared(self[].jsonQuery)
  deallocShared(self[].peerAddr)
  deallocShared(self)

proc process_remote_query(
    self: ptr StoreRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
  ## Parses the JSON query, resolves the target peer and performs the store
  ## query against it. Returns the response serialized as a JSON string.
  let jsonContentRes = catch:
    parseJson($self[].jsonQuery)

  if jsonContentRes.isErr():
    return err("StoreRequest failed parsing store request: " & jsonContentRes.error.msg)

  let storeQueryRequest = ?StoreRequest.fromJsonNode(jsonContentRes.get())

  let peer = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr:
    return err("StoreRequest failed to parse peer addr: " & $error)

  let queryResponse = (await waku.node.wakuStoreClient.query(storeQueryRequest, peer)).valueOr:
    return err("StoreRequest failed store query: " & $error)

  let res = $(%*(queryResponse.toHex()))
  return ok(res) ## returning the response in json format

proc process*(
    self: ptr StoreRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
  ## Dispatches the store request. Takes ownership of `self`.
  ## Fix: free via `destroyShared` — the previous bare `deallocShared(self)`
  ## released only the outer object and leaked the shared-heap `jsonQuery`
  ## and `peerAddr` buffers on every request.
  defer:
    destroyShared(self)

  case self.operation
  of REMOTE_QUERY:
    return await self.process_remote_query(waku)

  # Unreachable while REMOTE_QUERY is the only member, kept as a safety net
  # for future StoreReqType additions.
  error "store request not handled at all"
  return err("store request not handled at all")
## This file contains the base message request type that will be handled.
## The requests are created by the main thread and processed by
## the Waku Thread.

import std/json, results
import chronos, chronos/threadsync
import
  ../../waku/factory/waku,
  ../ffi_types,
  ./requests/node_lifecycle_request,
  ./requests/peer_manager_request,
  ./requests/protocols/relay_request,
  ./requests/protocols/store_request,
  ./requests/protocols/lightpush_request,
  ./requests/protocols/filter_request,
  ./requests/debug_node_request,
  ./requests/discovery_request,
  ./requests/ping_request

type RequestType* {.pure.} = enum
  ## Discriminates which concrete request object `reqContent` points at.
  LIFECYCLE
  PEER_MANAGER
  PING
  RELAY
  STORE
  DEBUG
  DISCOVERY
  LIGHTPUSH
  FILTER

type WakuThreadRequest* = object
  reqType: RequestType # selects the cast applied to reqContent in `process`
  reqContent: pointer # shared-heap concrete request; freed by its own process proc
  callback: WakuCallBack # C callback invoked with the outcome
  userData: pointer # opaque caller context forwarded to callback

proc createShared*(
    T: type WakuThreadRequest,
    reqType: RequestType,
    reqContent: pointer,
    callback: WakuCallBack,
    userData: pointer,
): ptr type T =
  ## Allocates the envelope on the shared heap so it can cross from the main
  ## thread to the Waku thread. Freed by `handleRes` after the callback runs.
  var ret = createShared(T)
  ret[].reqType = reqType
  ret[].reqContent = reqContent
  ret[].callback = callback
  ret[].userData = userData
  return ret

proc handleRes[T: string | void](
    res: Result[T, string], request: ptr WakuThreadRequest
) =
  ## Handles the Result responses, which can either be Result[string, string] or
  ## Result[void, string].
  ## Invokes the caller-supplied callback with RET_ERR/RET_OK and the payload
  ## bytes, then frees the request envelope (defer).
  ## NOTE(review): `unsafeAddr msg[0]` hands the callback a pointer into a
  ## GC-managed temporary — presumably only valid for the duration of the
  ## callback invocation; confirm callers copy the buffer before returning.

  defer:
    deallocShared(request)

  if res.isErr():
    foreignThreadGc:
      # foreignThreadGc sets up/tears down the GC for this foreign thread
      # around the callback — TODO confirm against ffi_types.
      let msg = "libwaku error: handleRes fireSyncRes error: " & $res.error
      request[].callback(
        RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
      )
    return

  foreignThreadGc:
    # For Result[void, string] the payload is an empty string (len 0).
    var msg: cstring = ""
    when T is string:
      msg = res.get().cstring()
    request[].callback(
      RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
    )
  return

proc process*(
    T: type WakuThreadRequest, request: ptr WakuThreadRequest, waku: ptr Waku
) {.async.} =
  ## Dispatches `request` to the concrete handler selected by reqType, awaits
  ## it and reports the outcome through the callback. Each concrete `process`
  ## is responsible for freeing `reqContent`; `handleRes` frees the envelope.
  let retFut =
    case request[].reqType
    of LIFECYCLE:
      cast[ptr NodeLifecycleRequest](request[].reqContent).process(waku)
    of PEER_MANAGER:
      cast[ptr PeerManagementRequest](request[].reqContent).process(waku[])
    of PING:
      cast[ptr PingRequest](request[].reqContent).process(waku)
    of RELAY:
      cast[ptr RelayRequest](request[].reqContent).process(waku)
    of STORE:
      cast[ptr StoreRequest](request[].reqContent).process(waku)
    of DEBUG:
      cast[ptr DebugNodeRequest](request[].reqContent).process(waku[])
    of DISCOVERY:
      cast[ptr DiscoveryRequest](request[].reqContent).process(waku)
    of LIGHTPUSH:
      cast[ptr LightpushRequest](request[].reqContent).process(waku)
    of FILTER:
      cast[ptr FilterRequest](request[].reqContent).process(waku)

  handleRes(await retFut, request)

proc `$`*(self: WakuThreadRequest): string =
  ## Human-readable request kind, e.g. "RELAY".
  return $self.reqType
"type": "dashboard" + } + ] + }, + "description": "Metrics for Waku nodes written in Nim", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 36, + "links": [], + "panels": [ + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 56, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 150, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "green", + "value": 2 + }, + { + "color": "#EAB839", + "value": 120 + }, + { + "color": "red", + "value": 149 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 52, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto", + "text": {} + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "libp2p_pubsub_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{fleet}}: {{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Libp2p PubSub Peers", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "dateTimeAsIso" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, 
+ "x": 4, + "y": 1 + }, + "id": 46, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "titleSize": 18, + "valueSize": 20 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "process_start_time_seconds{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} * 1000", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Node start times (UTC)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 8, + "x": 11, + "y": 1 + }, + "id": 58, + "options": { + "displayMode": "lcd", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "text": {}, + "valueMode": "color" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "avg by (instance)(netdata_cpu_cpu_percentage_average{dimension=\"user\", 
instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "CPU Usage", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "source" + }, + "properties": [ + { + "id": "custom.width", + "value": 166 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.width", + "value": 181 + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 85, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_version{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "version", + "source", + "Time" + ] + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 197 + }, + "id": 81, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "libp2p_autonat_reachability_confidence{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", reachability=\"Reachable\"}", + "legendFormat": "{{instance}}:{{reachability}}", + "range": true, + "refId": "A" + } + ], + "title": "Node is Reachable (Experimental)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 197 + }, + "id": 82, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "libp2p_autonat_reachability_confidence{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", reachability=\"NotReachable\"}", + "legendFormat": "{{instance}}:{{reachability}}", + "range": true, + "refId": "A" + } + ], + "title": "Node is NotReachable (Experimental)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + 
"mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 206 + }, + "id": 78, + "interval": "15s", + "maxDataPoints": 1000, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(libp2p_network_bytes_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", direction=\"in\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{instance}}:{{direction}}", + "range": true, + "refId": "A" + } + ], + "title": "Inbound Traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + 
}, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 206 + }, + "id": 79, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(libp2p_network_bytes_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", direction=\"out\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{instance}}:{{direction}}", + "range": true, + "refId": "A" + } + ], + "title": "Outbound Traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 215 + }, + "id": 124, + "options": { + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + 
}, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg by(topic, type)(rate(waku_relay_network_bytes_total{direction=\"in\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval]))", + "instant": false, + "legendFormat": "{{topic}} - {{type}}", + "range": true, + "refId": "A" + } + ], + "title": "Relay traffic per shard (in) - average of all peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 215 + }, + "id": 126, + "options": { + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + 
"uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "avg by(topic, type)(rate(waku_relay_network_bytes_total{direction=\"out\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval]))", + "legendFormat": "{{topic}}", + "range": true, + "refId": "A" + } + ], + "title": "Relay traffic per shard (out) - average of all peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 225 + }, + "id": 169, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg by (protocol)(waku_connected_peers{direction=\"In\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "instant": false, + "legendFormat": 
"{{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "# peers per protocol (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 225 + }, + "id": 170, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg by (protocol)(waku_connected_peers{direction=\"Out\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "instant": false, + "legendFormat": "{{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "# peers per protocol (out)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 235 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "(increase(waku_node_messages_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1m]))", + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Messages (1m rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 4, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 235 + }, + "id": 54, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "libp2p_pubsub_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "peer {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Connected Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 244 + }, + "id": 66, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "count(count by (contentTopic)(waku_node_messages_total))", + "interval": "", + "legendFormat": "content topics", + "refId": "A" + } + ], + "title": "Total Content Topics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 9, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 244 + }, + "id": 122, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": 
true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (type)(increase(waku_peers_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "peer {{type}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (type)(increase(waku_store_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "store {{type}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (type)(increase(waku_node_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "node {{type}}", + "range": true, + "refId": "C" + } + ], + "title": "Waku Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 5, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepBefore", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + 
"spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 253 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_version{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}:{{version}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku version", + "type": "timeseries" + } + ], + "title": "At a glance", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 17, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 1559 + }, + "id": 48, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_node_filters{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku Node Filters", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 1559 + }, + "id": 50, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": 
"none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_node_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "refId": "A" + } + ], + "title": "Waku Node Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 2461 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_topics {instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "Topics: {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_subscriptions_total {instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Subscriptions: {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_unsubscriptions_total {instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Unsubscriptions: {{instance}}", + "refId": "C" + } + ], + "title": "Pubsub Topics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 2461 + }, + "id": 8, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": 
"prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(libp2p_pubsub_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "LibP2P PubSub Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 2467 + }, + "id": 2, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(libp2p_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "LibP2P Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 2467 + }, + "id": 83, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_peer_store_size{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Peer Store Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + 
"drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 2473 + }, + "id": 3, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (type)(libp2p_open_streams{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "LibP2P Open Streams", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + 
"group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 2473 + }, + "id": 9, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_validation_success_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "success {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_validation_failure_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "failure {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_validation_ignore_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "ignore {{instance}}", + "refId": "C" + } + ], + "title": "LibP2P Validations", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 
0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 2479 + }, + "id": 6, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(process_open_fds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Open File Descriptors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 2479 + }, + "id": 7, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_total_dial_attempts_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "time_series", + "hide": false, + "interval": "", + "legendFormat": "Attempts: {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_failed_dials_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Failed: {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_successful_dials_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Successful: {{instance}}", + "refId": "C" + } + ], + "title": "LibP2P Dials", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 4, + "gradientMode": "none", 
+ "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 2485 + }, + "id": 44, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_mem_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "Nim total memory: {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_mem_occupied_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Nim occupied memory: {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_heap_instance_occupied_summed_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Nim total heap: {{instance}}", + "refId": "C" + } + ], + "title": "Nim Memory Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 2485 + }, + "id": 10, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "avg by (instance)(netdata_cpu_cpu_percentage_average{dimension=\"user\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "hide": false, + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "CPU Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 2491 + }, + "id": 64, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_heap_instance_occupied_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}} {{type_name}}", + "refId": "A" + } + ], + "title": "Heap allocation", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 
false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 2493 + }, + "id": 4, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(process_virtual_memory_bytes{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Virtual Memory", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + 
"y": 2499 + }, + "id": 5, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(process_resident_memory_bytes{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Resident Memory", + "type": "timeseries" + } + ], + "title": "General", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 2 + }, + "id": 111, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "total number of proofs generated since the node started", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 1560 + }, + "id": 159, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + 
"showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_total_generated_proofs", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Total Generated RLN Proofs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 1560 + }, + "id": 117, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_proof_verification_total_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "legendFormat": "{{instance}}", + "range": true, + 
"refId": "A" + } + ], + "title": "RLN proofs verified", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "number of proofs remaining to be generated for the current epoch", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 1560 + }, + "id": 160, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_remaining_proofs_per_epoch", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Remaining RLN Proofs per epoch", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + 
"axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 3, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1818 + }, + "id": 119, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_proof_generation_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "RLN Proof Generation Time (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 
1, + "pointSize": 3, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 1818 + }, + "id": 121, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_proof_verification_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "RLN Proof verification Time (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + 
"value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 1826 + }, + "id": 113, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "changes(waku_rln_proof_generation_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[14m])", + "hide": false, + "legendFormat": "Proofs generated: {{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "RLN Proof Generation TIme Changes per epoch ( Entropy )", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 1826 + }, + "id": 115, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": 
false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_invalid_messages_total_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_spam_messages_total_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "legendFormat": "spam : {{instance}}", + "range": true, + "refId": "C" + } + ], + "title": "RLN Message Counter (Invalid & Spam)", + "type": "timeseries" + } + ], + "title": "RLN", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 34, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 4 + }, + "id": 36, + "options": { + 
"legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_store_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Store Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 2, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 4 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + 
"exemplar": true, + "expr": "waku_store_messages{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_archive_messages{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "legendFormat": "{{type}}: {{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Waku Archive Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 10 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": 
true, + "expr": "increase(waku_store_queries_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1m])", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Store Queries (1m rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 10 + }, + "id": 40, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (type)(increase(waku_archive_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "{{type}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" 
+ }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (type)(increase(waku_store_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "{{type}}", + "range": true, + "refId": "B" + } + ], + "title": "Waku Archive Errors (1m rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 6, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 1, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 144, + "options": { + "legend": { + "calcs": [ + "mean", + "max", + "min", + "stdDev" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "query_time_secs{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", phase=\"sendQuery\"} and 
deriv(query_time_secs{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", phase=\"sendQuery\"}[1m]) != 0", + "interval": "", + "legendFormat": "{{query}}", + "range": true, + "refId": "A" + } + ], + "title": "Time Send Query To DB (sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "Shows the time spent while waiting for feedback from the database. That time includes the database query time plus the time spent waiting for the response from the database.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 6, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 145, + "options": { + "legend": { + "calcs": [ + "mean", + "max", + "min", + "stdDev" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": 
"query_time_secs{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", phase=\"waitFinish\"} and deriv(query_time_secs{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", phase=\"waitFinish\"}[45s]) != 0", + "hide": false, + "interval": "", + "legendFormat": "{{query}}", + "range": true, + "refId": "A" + } + ], + "title": "Wait Queries To Finish (sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 10, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 27 + }, + "id": 146, + "options": { + "legend": { + "calcs": [ + "mean", + "max", + "min", + "stdDev" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_legacy_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"} 
and deriv(waku_legacy_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[45s]) != 0", + "interval": "", + "legendFormat": "{{phase}}", + "range": true, + "refId": "A" + } + ], + "title": "Store V2 Times (sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "query-db-time: time for DB to complete query\nsend-store-resp-time: time for nwaku to process DB response and forward to the wire", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 27 + }, + "id": 148, + "options": { + "legend": { + "calcs": [ + "mean", + "max", + "min", + "stdDev" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"} and 
deriv(waku_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[45s]) != 0", + "interval": "", + "legendFormat": "{{phase}}", + "range": true, + "refId": "A" + } + ], + "title": "Store V3 Times details", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "Total time from moment query hits nwaku node to response sent over wire. This represents an aggregation and helps to get an idea of current node's response time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 10, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 158, + "options": { + "legend": { + "calcs": [ + "mean", + "max", + "min", + "stdDev" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "sum by(instance) 
(waku_legacy_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"} unless deriv(waku_legacy_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[45s]) == 0)\n", + "hide": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Store V2 Times wire-to-wire", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "Total time from moment query hits nwaku node to response sent over wire. This represents an aggregation and helps to get an idea of current node's response time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 4, + "scaleDistribution": { + "log": 10, + "type": "log" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 157, + "options": { + "legend": { + "calcs": [ + "mean", + "max", + "min", + "stdDev" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + 
"editorMode": "builder", + "expr": "sum by(instance) (waku_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"} unless deriv(waku_store_time_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[45s]) == 0)\n", + "hide": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Store V3 Times wire-to-wire", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 4, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 43 + }, + "id": 149, + "options": { + "legend": { + "calcs": [ + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(query_count_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", query!=\"InsertRow\"}[1m]) and 
rate(query_count_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", query!=\"InsertRowMessagesLookup\"}[1m]) ", + "interval": "", + "legendFormat": "{{query}}", + "range": true, + "refId": "A" + } + ], + "title": "Not-Insert Queries Rate (query/sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "points", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 4, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "__systemRef": "hideSeriesFrom", + "matcher": { + "id": "byNames", + "options": { + "mode": "exclude", + "names": [ + "InsertRow" + ], + "prefix": "All except:", + "readOnly": true + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": false, + "tooltip": false, + "viz": true + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 43 + }, + "id": 147, + "options": { + "legend": { + "calcs": [ + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": 
"P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(query_count_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", query=\"InsertRow\"}[5m])", + "interval": "", + "legendFormat": "{{query}}", + "range": true, + "refId": "A" + } + ], + "title": "Insert Queries Rate (insert/sec)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 51 + }, + "id": 77, + "maxDataPoints": 60, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Turbo", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "sum(increase(waku_archive_query_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "legendFormat": "{{le}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": 
"sum(increase(waku_store_query_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": true, + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Archive Query Duration", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 51 + }, + "id": 75, + "maxDataPoints": 60, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Turbo", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(increase(waku_archive_insert_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": 
"sum(increase(waku_store_insert_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": true, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(increase(waku_legacy_archive_insert_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "C" + } + ], + "title": "Waku Archive Insert Duration", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 0, + "y": 58 + }, + "id": 142, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": 
"bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{service=~\"/vac/waku/store/2.*\", direction=\"in\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Store v2 protocol traffic (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 12, + "y": 58 + }, + "id": 130, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", 
+ "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{service=\"/vac/waku/store-query/3.0.0\", direction=\"in\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Store v3 protocol traffic (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 0, + "y": 71 + }, + "id": 132, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{service=~\"/vac/waku/store/2.*\", direction=\"out\", 
instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Store v2 protocol traffic (out)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 12, + "y": 71 + }, + "id": 143, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{service=\"/vac/waku/store-query/3.0.0\", direction=\"out\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + 
"title": "Store v3 protocol traffic (out)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 0, + "y": 84 + }, + "id": 128, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_requests_total{service =~\"/vac/waku/store/2.*\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}} - {{state}}", + "range": true, + "refId": "A" + } + ], + "title": "Store v2 query request rates", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 12, + "y": 84 + }, + "id": 141, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_requests_total{service =~\"/vac/waku/store-query/3.*\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}} - {{state}}", + "range": true, + "refId": "A" + } + ], + "title": "Store v3 query request rates", + "type": "timeseries" + } + ], + "title": "Store/Archive", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 4 + }, + "id": 162, + "panels": [], + "title": "Store Sync", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"P6693426190CB2316" + }, + "description": "the number of bytes sent and received by the protocols.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 5 + }, + "id": 166, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "sum by(direction, protocol) (total_bytes_exchanged_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "fullMetaSearch": false, + "includeNullMetadata": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Bytes Exchanged", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "the number of messages sent and received by the transfer protocol.", + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 5 + }, + "id": 168, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "disableTextWrap": false, + "editorMode": "code", + "expr": "sum by(direction) (total_transfer_messages_exchanged_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Messages Exchanged", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "the number of roundtrips for each reconciliation", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 
12, + "x": 0, + "y": 13 + }, + "id": 164, + "options": { + "displayMode": "lcd", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "left", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "hidden" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "sum by(le) (reconciliation_roundtrips_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "format": "heatmap", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": false, + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Distribution of Round-Trips per Reconciliation", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 13 + }, + "id": 171, + "options": { + "displayMode": "lcd", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 16, + "minVizWidth": 8, + "namePlacement": "left", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "hidden" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "disableTextWrap": false, + "editorMode": 
"builder", + "expr": "sum by(le) (reconciliation_differences_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "format": "heatmap", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "__auto", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Distribution of differences per reconciliation.", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "The total number of messages cached by nodes.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 172, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "disableTextWrap": false, + "editorMode": "code", + "exemplar": false, + "expr": "total_messages_cached{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + 
"legendFormat": "{{instance}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Messages Cached", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 29 + }, + "id": 87, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 4386 + }, + "id": 93, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "rate(waku_filter_requests_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 4386 + }, + "id": 89, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_filter_subscriptions{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Subscriptions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 4386 + }, + "id": 91, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "rate(waku_filter_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4394 + }, + "id": 95, + "options": { + "calculate": false, + "cellGap": 2, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "RdYlGn", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + 
"mode": "single", + "showColorScale": false, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "sum(increase(waku_filter_request_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "interval": "", + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Request Duration", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4394 + }, + "id": 97, + "options": { + "calculate": false, + "cellGap": 2, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "RdYlGn", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "sum(increase(waku_filter_handle_message_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + 
"title": "Waku Filter Handle Message Duration", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 0, + "y": 4402 + }, + "id": 134, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_requests_total{service = \"/vac/waku/filter-subscribe/2.0.0-beta1\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}} - {{state}}", + "range": true, + "refId": "A" + } + ], + "title": "Filter subscribe request rates", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 13, + "w": 12, + "x": 12, + "y": 4402 + }, + "id": 136, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{service=\"/vac/waku/filter-push/2.0.0-beta1\", direction=\"out\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Filter protocol message push traffic (out)", + "type": "timeseries" + } + ], + "title": "Filter", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 30 + }, + "id": 28, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4461 + }, + "id": 30, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_lightpush_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku Lightpush Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4461 + }, + "id": 32, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_lightpush_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "refId": "A" + } + ], + "title": "Waku Lightpush Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + 
"color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 4469 + }, + "id": 138, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_requests_total{service = \"/vac/waku/lightpush/2.0.0-beta1\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}} . {{state}}", + "range": true, + "refId": "A" + } + ], + "title": "Lightpush request rates", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 4469 + }, + "id": 
140, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{service=\"/vac/waku/lightpush/2.0.0-beta1\", direction=\"in\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Lightpush protocol traffic (in)", + "type": "timeseries" + } + ], + "title": "Lightpush", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 151, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 4482 + }, + "id": 153, + "options": { + "legend": { 
+ "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_requests_total{service = \"/vac/waku/peer-exchange/2.0.0-alpha1\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}} . {{state}}", + "range": true, + "refId": "A" + } + ], + "title": "Peer Exchange request rates", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 4482 + }, + "id": 154, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": 
false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_px_peers_sent_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Peer Exchange number of sent peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 4494 + }, + "id": 156, + "options": { + "displayMode": "basic", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(instance, type) (waku_px_errors_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "legendFormat": "{{instance}} - {{type}}", + "range": true, + "refId": "A" + } + ], + "title": "Peer Exchange errors total", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + 
"axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 4494 + }, + "id": 155, + "options": { + "legend": { + "calcs": [ + "max", + "mean" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true, + "sortBy": "Max", + "sortDesc": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_px_errors_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[$__rate_interval])", + "legendFormat": "{{instance}} - {{type}}", + "range": true, + "refId": "A" + } + ], + "title": "Peer Exchange errors", + "type": "timeseries" + } + ], + "title": "Peer Exchange", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 15, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "number of swap peers", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 4507 + }, + "id": 13, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_swap_peers_count{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku Swap Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "swap account state for each peer", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 4507 + }, + "id": 18, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum(waku_peer_swap_account_balance_bucket{le=\"250.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"200.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "250", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum(waku_peer_swap_account_balance_bucket{le=\"200.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"150.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "200", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum(waku_peer_swap_account_balance_bucket{le=\"150.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"100.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "150", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + 
"expr": "sum(waku_peer_swap_account_balance_bucket{le=\"100.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"50.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "100", + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum(waku_peer_swap_account_balance_bucket{le=\"50.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"0.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "50", + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum(waku_peer_swap_account_balance_bucket{le=\"0.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"-50.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "0", + "refId": "F" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum(waku_peer_swap_account_balance_bucket{le=\"-50.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"-100.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "-50", + "refId": "G" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum(waku_peer_swap_account_balance_bucket{le=\"-100.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"-150.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "-100", + "refId": "H" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": 
"sum(waku_peer_swap_account_balance_bucket{le=\"-150.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} - ignoring(le) waku_peer_swap_account_balance_bucket{le=\"-200.0\", instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "-150", + "refId": "I" + } + ], + "title": "Waku Swap Account State", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 4507 + }, + "id": 42, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_swap_errors{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "refId": "A" + } + ], + "title": "Waku Swap Errors", + "type": "timeseries" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4514 + }, + "id": 103, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(increase(waku_node_messages_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1m]))/60", + "format": "heatmap", + "instant": false, + "legendFormat": "{{fleet}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Messages/second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 
0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4514 + }, + "id": 102, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_histogram_message_size_sum{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}/waku_histogram_message_size_count{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "heatmap", + "instant": false, + "legendFormat": "{{fleet}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Average msg size (kBytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 4522 + }, + "id": 101, + "options": { + "displayMode": "gradient", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + 
"maxVizHeight": 300, + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "builder", + "exemplar": false, + "expr": "rate(waku_histogram_message_size_bucket{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1h])/scalar(rate(waku_histogram_message_size_count{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1h]))*100", + "format": "heatmap", + "instant": false, + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Message distribution %/kBytes (Last Hour)", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4530 + }, + "id": 105, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "titleSize": 15, + "valueSize": 15 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_connected_peers{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "heatmap", + "instant": false, + "legendFormat": "Direction:{{direction}} {{protocol}}", + 
"range": true, + "refId": "A" + } + ], + "title": "Connected Peers per Protocol", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4530 + }, + "id": 104, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "titleSize": 15, + "valueSize": 15 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_streams_peers{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "heatmap", + "instant": false, + "legendFormat": "Direction:{{direction}} {{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "Connected Streams per Protocol", + "type": "stat" + } + ], + "title": "Swap", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 33 + }, + "id": 107, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decgbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4539 + }, + "id": 109, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "builder", + "expr": "netdata_disk_space_GiB_average{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\",dimension=\"used\", family=\"/data\"}", + "legendFormat": "{{ instance }}", + "range": true, + "refId": "A" + } + ], + "title": "Disk space used", + "type": "timeseries" + } + ], + "title": "Disk", + "type": "row" + } + ], + "preload": false, + "refresh": "30s", + "schemaVersion": 40, + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": ".*", + "value": ".*" + }, + "includeAll": false, + "label": "Hostname regex", + "name": "host", + "options": [ + { + "selected": true, + "text": ".*", + "value": ".*" + }, + { + "selected": false, + "text": "node-.*", + "value": "node-.*" + }, + { + "selected": false, + "text": "boot-.*", + "value": "boot-.*" + }, + { + "selected": false, + "text": "store-.*", + "value": "store-.*" + } + ], + "query": ".*,node-.*,boot-.*,store-.*", + "type": "custom" + }, + { + "current": { + "text": [ + "waku.test" + ], + "value": [ + "waku.test" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + 
"definition": "label_values(libp2p_peers, fleet)", + "includeAll": false, + "label": "Fleet name", + "multi": true, + "name": "fleet", + "options": [], + "query": { + "query": "label_values(libp2p_peers, fleet)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "/waku|status|shards/", + "type": "query" + }, + { + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "definition": "label_values(libp2p_peers, datacenter)", + "includeAll": true, + "label": "Data Center", + "multi": true, + "name": "dc", + "options": [], + "query": { + "query": "label_values(libp2p_peers, datacenter)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "Nim-Waku V2", + "uid": "qrp_ZCTGz", + "version": 181, + "weekStart": "" +} \ No newline at end of file diff --git a/third-party/nwaku/metrics/waku-network-monitor-dashboard.json b/third-party/nwaku/metrics/waku-network-monitor-dashboard.json new file mode 100644 index 0000000..0480e66 --- /dev/null +++ b/third-party/nwaku/metrics/waku-network-monitor-dashboard.json @@ -0,0 +1,7419 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 1, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 154, + "panels": [], + "title": 
"Node - Network & Connectivity", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "dateTimeAsIso" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 1 + }, + "id": 10, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "process_start_time_seconds{job=\"nwaku\"}*1000", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Start Times (UTC)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 1 + }, + "id": 2, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "exemplar": false, + "expr": 
"waku_version{instance=\"nwaku:8003\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "version" + ] + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 1 + }, + "id": 33, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "routing_table_nodes", + "legendFormat": "{{label_name}}", + "range": true, + "refId": "A" + } + ], + "title": "Discv5 (Nodes)", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Time", + "{__name__=\"routing_table_nodes\", instance=\"nwaku:8003\", job=\"nwaku\"}" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 250 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 1 + }, + 
"id": 25, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "libp2p_peers", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Connected Peers", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 12, + "y": 1 + }, + "id": 28, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "libp2p_pubsub_topics", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Number Pubsub Topics", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { 
+ "h": 4, + "w": 3, + "x": 15, + "y": 1 + }, + "id": 32, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "routing_table_nodes{state=\"seen\"}", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Discv5 (Seen Nodes)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 18, + "y": 1 + }, + "id": 22, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "libp2p_autonat_reachability_confidence", + "legendFormat": "{{reachability}}", + "range": true, + "refId": "A" + } + ], + "title": "Reachability", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "max" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 14, + "x": 0, + "y": 5 + }, + "id": 44, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "waku_connected_peers", + "legendFormat": "{{direction}}_{{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "Connected Peers (Direction/Protocol)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] 
+ }, + "gridPos": { + "h": 10, + "w": 7, + "x": 14, + "y": 5 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "libp2p_peers", + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A" + } + ], + "title": "Connected Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 7, + "x": 0, + "y": 15 + }, + "id": 155, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_discv5_discovered_per_shard", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Discv5 Discovered Peers Per Shard", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + 
"overrides": [] + }, + "gridPos": { + "h": 9, + "w": 7, + "x": 7, + "y": 15 + }, + "id": 156, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_connected_peers_per_shard", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Connected Peers Per Shard", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 7, + "x": 14, + "y": 15 + }, + "id": 165, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_connected_peers_per_agent", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "{{agent}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Connected Peers Per Agent", + "type": "piechart" + }, + { + "collapsed": false, + "gridPos": 
{ + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 45, + "panels": [], + "title": "Node - Relay Traffic ", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 25 + }, + "id": 43, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_histogram_message_size_sum/waku_histogram_message_size_count", + "format": "heatmap", + "instant": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Average msg size (kBytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 8, + "y": 25 + }, + "id": 41, + "options": { + "displayMode": "gradient", + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "valueMode": "color" + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "rate(waku_histogram_message_size_bucket[1h])/scalar(rate(waku_histogram_message_size_count[1h]))*100", + "format": "heatmap", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Message distrubution %/kBytes (Last Hour)", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 17, + "y": 25 + }, + "id": 38, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + 
"expr": "rate(waku_histogram_message_size_sum[1h])/rate(waku_histogram_message_size_count[1h])", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Average Msg Size (Last Hour)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 17, + "y": 28 + }, + "id": 42, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.75, rate(waku_histogram_message_size_bucket[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "75% Percentile (Last hour)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 4, + "x": 17, + "y": 31 + }, + "id": 39, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false 
+ }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, rate(waku_histogram_message_size_bucket[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "99% Percentile (Last Hour)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binbps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 34 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(libp2p_network_bytes_total{direction=\"in\"}[$__rate_interval])*8", + "legendFormat": "traffic_{{direction}}", + "range": 
true, + "refId": "A" + } + ], + "title": "libp2p traffic (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binbps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 8, + "y": 34 + }, + "id": 29, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(libp2p_network_bytes_total{direction=\"out\"}[$__rate_interval])*8", + "legendFormat": "traffic_{{direction}}", + "range": true, + "refId": "A" + } + ], + "title": "libp2p traffic (out)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + 
"value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 4, + "x": 17, + "y": 34 + }, + "id": 36, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "waku_peer_store_size", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Peer Store Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 22, + "w": 4, + "x": 17, + "y": 39 + }, + "id": 149, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + 
"sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_requests_total[$__rate_interval])", + "legendFormat": "{{proto}}/{{state}}", + "range": true, + "refId": "A" + } + ], + "title": "Protocol request rates", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binbps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 43 + }, + "id": 147, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "rate(waku_relay_network_bytes_total{direction=\"in\"}[$__rate_interval])*8", + "legendFormat": "{{topic}}/{{type}}", + "range": true, + "refId": "A" + } + ], + 
"title": "Relay traffic per shard (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binbps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 8, + "y": 43 + }, + "id": 148, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_relay_network_bytes_total{direction=\"out\"}[$__rate_interval])*8", + "legendFormat": "{{topic}}", + "range": true, + "refId": "A" + } + ], + "title": "Relay traffic per shard (out)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 8, + "x": 0, + "y": 52 + }, + "id": 150, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{direction=\"in\"}[$__rate_interval])", + "legendFormat": "{{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Non relay protocol traffic (in)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": 
"linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 8, + "y": 52 + }, + "id": 151, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "rate(waku_service_network_bytes_total{direction=\"out\"}[$__rate_interval])", + "legendFormat": "{{service}}", + "range": true, + "refId": "A" + } + ], + "title": "Non relay protocol traffic (out)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 0, + "y": 61 + }, + "id": 166, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_relay_total_msg_bytes_per_shard", + "fullMetaSearch": false, + "includeNullMetadata": true, + 
"legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Total Message Bytes Per Shard", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 7, + "y": 61 + }, + "id": 157, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_relay_max_msg_bytes_per_shard", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Max Message Bytes Per Shard", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 14, + "y": 61 + }, + "id": 159, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": 
"12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_relay_avg_msg_bytes_per_shard", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Avg Message Bytes Per Shard", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 0, + "y": 71 + }, + "id": 164, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_archive_messages_per_shard", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Archive Message Per Shard", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 7, + "y": 71 + }, + "id": 162, + "options": { + "legend": { + "displayMode": "list", + 
"placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_relay_fleet_store_msg_size_bytes", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Total Messages Size On Store Fleet", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 14, + "y": 71 + }, + "id": 163, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_relay_fleet_store_msg_count", + "fullMetaSearch": false, + "includeNullMetadata": true, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Total Message Count On Store Fleet", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 0, + "y": 81 + }, + "id": 20, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "nim_gc_heap_instance_occupied_bytes{}", + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A" + } + ], + "title": "Heap allocation", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 7, + "y": 81 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "nim_gc_mem_bytes{}", + "hide": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A" + } + ], + "title": "Nim Memory Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "postgres", + "uid": "e5d2e0c2-371d-4178-ac71-edc122fb459c" + }, + "description": "Messages in local database per app name, as extracted from the content topic.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [ + { + "options": { + "/waku/2/rs/1/0": { + "index": 0, + "text": "0" + }, + "/waku/2/rs/1/1": { + "index": 1, + "text": "1" + }, + "/waku/2/rs/1/2": { + "index": 2, + "text": "2" + }, + "/waku/2/rs/1/3": { + "index": 3, + "text": "3" + }, + "/waku/2/rs/1/4": { + "index": 4, + "text": "4" + }, + "/waku/2/rs/1/5": { + "index": 5, + "text": "5" + }, + "/waku/2/rs/1/6": { + "index": 6, + "text": "6" + }, + "/waku/2/rs/1/7": { + "index": 7, + "text": "7" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + 
"steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "string" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Number of Messages (sum)" + }, + "properties": [ + { + "id": "unit", + "value": "none" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Total Payload Size (sum)" + }, + "properties": [ + { + "id": "unit", + "value": "decbytes" + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 14, + "y": 81 + }, + "id": 144, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "frameIndex": 1, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "e5d2e0c2-371d-4178-ac71-edc122fb459c" + }, + "editorMode": "code", + "format": "table", + "hide": false, + "rawQuery": true, + "rawSql": "SELECT REGEXP_REPLACE(contenttopic,'^\\/(.+)\\/(\\d+)\\/(.+)\\/(.+)$','\\1') as \"App name\", COUNT(id), pg_column_size(payload)\nFROM messages\nGROUP BY contenttopic, payload", + "refId": "A", + "sql": { + "columns": [ + { + "parameters": [ + { + "name": "pubsubtopic", + "type": "functionParameter" + } + ], + "type": "function" + } + ], + "groupBy": [ + { + "property": { + "name": "pubsubtopic", + "type": "string" + }, + "type": "groupBy" + } + ], + "limit": 50 + }, + "table": "messages" + } + ], + "title": "Stored Message by Content Topic App Name", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": {}, + "renameByName": { + "contenttopic": "App name", + "count": "Number of Messages", + "pg_column_size": "Total Payload Size" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "App name": { + "aggregations": [ + "uniqueValues" + ], + "operation": "groupby" + }, + "Number of Messages": { + "aggregations": [ + "sum" + ], + 
"operation": "aggregate" + }, + "Total Payload Size": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + }, + "pg_column_size": { + "aggregations": [ + "sum" + ], + "operation": "aggregate" + } + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "desc": true, + "field": "Number of Messages (sum)" + } + ] + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 7, + "x": 0, + "y": 91 + }, + "id": 135, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_membership_insertion_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": 
true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Membership Insertion (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 7, + "x": 7, + "y": 91 + }, + "id": 134, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_membership_credentials_import_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Credentials Import (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 7, + "x": 14, + "y": 91 + }, + "id": 128, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_number_registered_memberships", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Registered Memberships", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 
0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 7, + "x": 0, + "y": 97 + }, + "id": 127, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_proof_generation_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Proof Generation (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": 
"auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 7, + "x": 7, + "y": 97 + }, + "id": 126, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_proof_verification_duration_seconds", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Proof Verification (seconds)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": 
"red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 7, + "x": 14, + "y": 97 + }, + "id": 153, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_remaining_proofs_per_epoch", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Remaining RLN Proofs per epoch", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 10, + "x": 0, + "y": 103 + }, + "id": 152, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + 
"hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_total_generated_proofs", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Total Generated RLN Proofs", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 11, + "x": 10, + "y": 103 + }, + "id": 136, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": 
"waku_rln_proof_verification_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Total Verified RLN Proof", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 0, + "y": 109 + }, + "id": 137, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_valid_messages_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "shard-{{shard}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Valid Messages ( Total )", + "type": 
"timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 7, + "y": 109 + }, + "id": 133, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_invalid_messages_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "type-{{type}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Invalid Messages ( Total )", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": 
"text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 7, + "x": 14, + "y": 109 + }, + "id": 130, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "waku_rln_spam_messages_total_total", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "{{__name__}}", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "RLN Spam Messages ( Total )", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 116 + }, + "id": 46, + "panels": [], + "title": "Node - Store & DB", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of messages currently stored in the database", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + 
"axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 9, + "x": 0, + "y": 117 + }, + "id": 146, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "exemplar": false, + "expr": "pg_tb_messages_count{}", + "instant": false, + "interval": "", + "legendFormat": "messages", + "range": true, + "refId": "A" + } + ], + "title": "Unique stored messages (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Source: server_version_num", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 7, + "x": 9, + "y": 117 + }, + "id": 11, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + 
"textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "max(pg_settings_server_version_num)", + "legendFormat": "", + "refId": "A" + } + ], + "title": "PostgreSQL Version", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Clients executing Statements.\n\nSource: pg_stat_activity", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 16, + "y": 117 + }, + "id": 23, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum(pg_stat_activity_count{state=\"active\",instance=\"$Instance\"})", + "refId": "A" + } + ], + "title": "Active clients (Postgres)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Size of all databases in $Instance.\n\nSource: pg_database_size()", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 7, + "x": 9, + "y": 120 + }, + "id": 37, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + 
"targets": [ + { + "expr": "sum(pg_database_size_bytes{instance=\"$Instance\"})", + "refId": "A" + } + ], + "title": "Total database size (Postgres)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "dateTimeAsIso" + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 16, + "y": 120 + }, + "id": 125, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "pg_postmaster_start_time_seconds*1000", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Postgres start time", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Max Replication lag behind master in seconds\n\nOnly available on a standby system.\n\nSource: pg_last_xact_replay_timestamp\n\nUse: pg_stat_replication for Details.", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 7, + "x": 9, + "y": 123 + }, + "id": 84, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + 
"values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "max(pg_replication_lag{instance=\"$Instance\"})", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Max Replication Lag (Postgres)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Source: pg_stat_statements.total_time / pg_stat_statements.calls", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 16, + "y": 123 + }, + "id": 102, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum((delta(pg_stat_statements_total_time_seconds{instance=\"$Instance\"}[$Interval])))/sum((delta(pg_stat_statements_calls{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Average query runtime (Postgres)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "View: pg_stat_activity", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", 
+ "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": 
"gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 9, + "x": 0, + "y": 126 + }, + "id": 121, + "interval": "$Interval", + "links": [ + { + "targetBlank": true, + "title": "PostgreSQL Documentation", + "url": "https://www.postgresql.org/docs/current/monitoring-stats.html" + } + ], + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum by (datname) (pg_stat_activity_count{instance=\"$Instance\"})", + "legendFormat": "{{datname}}", + "refId": "A" + } + ], + "title": "Connections by database (stacked) (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Transactions committed + roolback per minute\n\nSource: pg_stat_database,xact_commit + xact_rollback", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + 
{ + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 7, + "x": 9, + "y": 126 + }, + "id": 14, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum((rate(pg_stat_database_xact_commit{instance=\"$Instance\"}[$Interval])))+sum((rate(pg_stat_database_xact_rollback{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Transaction rate (Postgres)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Statements executed per Minute.\n\nSource: pg_stat_statements.calls", + "fieldConfig": { + "defaults": { + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 6, + "x": 16, + "y": 126 + }, + "id": 93, + "maxDataPoints": 100, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum((rate(pg_stat_statements_calls{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Query rate (Postgres)", + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Transaction committed vs rollbacked", + 
"fieldConfig": { + "defaults": { + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-red", + "value": 0 + }, + { + "color": "#EAB839", + "value": 0.75 + }, + { + "color": "semi-dark-green", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 3, + "x": 9, + "y": 129 + }, + "id": 15, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum(pg_stat_database_xact_commit{instance=\"$Instance\"})/(sum(pg_stat_database_xact_commit{instance=\"$Instance\"}) + sum(pg_stat_database_xact_rollback{instance=\"$Instance\"}))", + "refId": "A" + } + ], + "title": "Commit Ratio (Postgres)", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "Percentage of max_connections used", + "fieldConfig": { + "defaults": { + "decimals": 0, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-green", + "value": 0 + }, + { + "color": "semi-dark-yellow", + "value": 0.75 + }, + { + "color": "semi-dark-red", + "value": 0.9 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 3, + "x": 12, + "y": 129 + }, + "id": 9, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + 
"pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum(pg_stat_database_numbackends)/max(pg_settings_max_connections)", + "refId": "A" + } + ], + "title": "Connections used (Postgres)", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "Shared buffer hits vs reads from disc", + "fieldConfig": { + "defaults": { + "decimals": 2, + "mappings": [ + { + "id": 0, + "op": "=", + "text": "N/A", + "type": 1, + "value": "null" + } + ], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "semi-dark-red", + "value": 0 + }, + { + "color": "semi-dark-yellow", + "value": 80 + }, + { + "color": "semi-dark-green", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 3, + "x": 15, + "y": 129 + }, + "id": 16, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum(pg_stat_database_blks_hit{instance=~\"$Instance\"})/(sum(pg_stat_database_blks_hit{instance=~\"$Instance\"})+sum(pg_stat_database_blks_read{instance=~\"$Instance\"}))*100", + "refId": "A" + } + ], + "title": "Shared Buffer Hits (Postgres)", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 18, + "y": 129 + }, + "id": 12, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "(increase(waku_node_messages_total[1m]))/60", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Messages/second", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "View: pg_stat_activity", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "bars", + "fillOpacity": 100, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + 
"unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + 
} + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + 
"matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 137 + }, + "id": 24, + "interval": "$Interval", + "links": [ + { + "targetBlank": true, + "title": "PostgreSQL Documentation", + "url": "https://www.postgresql.org/docs/current/monitoring-stats.html" + } + ], + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum by (state) (pg_stat_activity_count{instance=\"$Instance\"})", + "legendFormat": "{{state}}", + "refId": "A" + } + ], + "title": "Connections by state (stacked) (Postgres)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "bars", + "fillOpacity": 51, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 6, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + 
"stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 137 + }, + "id": 142, + "options": { + "legend": { + "calcs": [ + "last", + "max" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "pg_stat_user_tables_n_live_tup{datname=\"postgres\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Live", + "range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "pg_stat_user_tables_n_dead_tup", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Dead", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Estimated number of rows (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Source: pg_stat_database", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 145 + }, + "id": 27, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum((rate(pg_stat_database_tup_inserted{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Inserts", + "refId": "A" + }, + { + "expr": "sum((rate(pg_stat_database_tup_updated{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Updates", + "refId": "B" + }, + { + "expr": "sum((rate(pg_stat_database_tup_deleted{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Deletes", + "refId": "C" + } + ], + "title": "Tuples inserts/updates/deletes (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "1 Minute rate of transactions committed or rollback.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": 
false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 146 + }, + "id": 122, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum ((rate(pg_stat_database_xact_commit[$Interval])))", + "interval": "", + "legendFormat": "committed", + "refId": "A" + }, + { + "expr": "sum ((rate(pg_stat_database_xact_rollback[$Interval])))", + "hide": false, + "interval": "", + "legendFormat": "rollback", + "refId": "B" + } + ], + "title": "Transactions (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Source: pg_stat_database\n\n* tup_fetched: rows needed to satisfy queries\n* tup_returned: rows read/scanned", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + 
"spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 154 + }, + "id": 111, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum((rate(pg_stat_database_tup_fetched{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Fetched", + "refId": "A" + }, + { + "expr": "sum((rate(pg_stat_database_tup_returned{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "Returned", + "refId": "B" + } + ], + "title": "Tuples fetched/returned (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "* blk_read_time: Time spent reading data file blocks by backends in this database, in milliseconds\n* blk_write_time: Time spent writing data file blocks by backends in this database, in milliseconds\n\ntrack_io_timings needs to be activated", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + 
}, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 155 + }, + "id": 26, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum ((rate(pg_stat_database_blk_read_time{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "blk_read_time", + "refId": "A" + }, + { + "expr": "sum ((rate(pg_stat_database_blk_write_time{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "blk_write_time", + "refId": "B" + } + ], + "title": "I/O Read/Write time (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Should be 0 \n\nSource: pg_stat_database\n\nWith log_lock_waits turned on, deadlocks will be logged to the PostgreSQL Logfiles.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": 
"A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, 
+ "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + } + ] + }, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 163 + }, + "id": 30, + "links": [ + { + "title": "PostgreSQL Locking", + "url": "https://www.postgresql.org/docs/12/explicit-locking.html" + } + ], + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum by (datname) ((rate(pg_stat_database_deadlocks{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "{{datname}}", + "refId": "A" + } + ], + "title": "Deadlocks by database (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Source: pg_locks", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + 
"tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 164 + }, + "id": 123, + "links": [ + { + "title": "PostgreSQL Lock Modes", + "url": "https://www.postgresql.org/docs/12/explicit-locking.html#LOCKING-TABLES" + } + ], + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum by (mode) (pg_locks_count{instance=\"$Instance\"})", + "legendFormat": "{{mode}}", + "refId": "A" + } + ], + "title": "Locks by state (Postgres)", + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Should be 0. If temporary files are created, it can indicate insufficient work_mem. 
With log_temp_files the creation of temporary files are logged to the PostgreSQL Logfiles.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + 
} + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsZero", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + }, + { + "matcher": { + "id": "byValue", + "options": { + "op": "gte", + "reducer": "allIsNull", + "value": 0 + } + }, + "properties": [ + { + "id": "custom.hideFrom", + "value": { + "legend": true, + "tooltip": true, + "viz": false + } + } + ] + } + ] + }, + "gridPos": { + "h": 5, + "w": 12, + "x": 12, + "y": 168 + }, + "id": 31, + "links": [ + { + "title": "PostgreSQL Resources", + "url": "https://www.postgresql.org/docs/current/runtime-config-resource.html" + } + ], + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + 
"pluginVersion": "12.1.1", + "targets": [ + { + "expr": "sum by (datname) ((rate(pg_stat_database_temp_files{instance=\"$Instance\"}[$Interval])))", + "interval": "", + "legendFormat": "{{datname}}", + "refId": "A" + } + ], + "title": "Temporary files by database (Postgres)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of messages currently stored in the database", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 12, + "y": 173 + }, + "id": 141, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "code", + "expr": "pg_tb_stats_messages{}", + "instant": false, + "legendFormat": "{{ pubsubtopic }}", + "range": true, + "refId": "A" + } + ], + "title": "# messages per shard", + "type": 
"timeseries" + }, + { + "datasource": "Prometheus", + "description": "Lag behind master in seconds.\n\nOnly available on a standby System.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 174 + }, + "id": 120, + "interval": "1m", + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean", + "lastNotNull", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.1.1", + "targets": [ + { + "expr": "max(pg_replication_lag{instance=\"$Instance\"})", + "instant": false, + "intervalFactor": 1, + "legendFormat": "lag ", + "refId": "A" + } + ], + "title": "Replication lag (Postgres)", + "type": "timeseries" + } + ], + "preload": false, + "schemaVersion": 41, + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "postgres-exporter:9187", + "value": "postgres-exporter:9187" + }, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "definition": 
"label_values({job=\"postgres-exporter\"}, instance)", + "includeAll": false, + "name": "Instance", + "options": [], + "query": "label_values({job=\"postgres-exporter\"}, instance)", + "refresh": 1, + "regex": "", + "type": "query" + }, + { + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "definition": "label_values(datname)", + "includeAll": true, + "multi": true, + "name": "Database", + "options": [], + "query": "label_values(datname)", + "refresh": 1, + "regex": "/^(?!template*|postgres).*$/", + "sort": 1, + "type": "query" + }, + { + "auto": true, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "10m", + "value": "10m" + }, + "name": "Interval", + "options": [ + { + "selected": false, + "text": "auto", + "value": "$__auto_interval_Interval" + }, + { + "selected": false, + "text": "30sec", + "value": "30sec" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": true, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "30sec,1m,10m,30m,1h,6h,12h,1d", + "refresh": 2, + "type": "interval" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "nwaku-monitoring", + "uid": "yns_4vFVk", + "version": 1 +} \ No newline at end of file diff --git a/third-party/nwaku/metrics/waku-network-monitor-discovery.json b/third-party/nwaku/metrics/waku-network-monitor-discovery.json new file mode 100644 index 0000000..7716c58 --- /dev/null +++ 
b/third-party/nwaku/metrics/waku-network-monitor-discovery.json @@ -0,0 +1,789 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "description": "", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 3, + "links": [], + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "networkmonitor_peer_count{connected=\"false\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Unconnected", + 
"range": true, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "networkmonitor_peer_count{connected=\"true\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Connected", + "range": true, + "refId": "B", + "useBackend": false + } + ], + "title": "Discovered Peers", + "transparent": true, + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "description": "Ratio of peers supporting each protocol", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 3, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_protocol{protocols=\"/vac/waku/filter-subscribe/2.0.0-beta1\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Filter", + "range": false, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_protocol{protocols=\"/vac/waku/lightpush/2.0.0-beta1\"}", + "fullMetaSearch": false, + "hide": 
false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Light Push", + "range": false, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_protocol{protocols=\"/vac/waku/peer-exchange/2.0.0-alpha1\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Peer Exchange", + "range": false, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_protocol{protocols=\"/vac/waku/relay/2.0.0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Relay", + "range": false, + "refId": "D", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_protocol{protocols=\"/vac/waku/store-query/3.0.0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Store v3", + "range": false, + "refId": "E", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_protocol{protocols=\"/vac/waku/store/2.0.0-beta4\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Store v2", + "range": false, + "refId": "F", + "useBackend": false + } + ], + "title": "Network Protocol Support", + "transparent": true, + "type": "piechart" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 1, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "sum" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "discovery_session_lru_cache_hits_total", + "format": "time_series", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Hits", + "range": false, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "discovery_session_lru_cache_misses_total", + "format": "time_series", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Miss", + "range": false, + "refId": "B", + "useBackend": false + } + ], + "title": "Discovery Session LRU Cache", + "transparent": true, + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": 
"none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "routing_table_nodes{state!=\"seen\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Total", + "range": true, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "expr": "routing_table_nodes{state=\"seen\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": false, + "legendFormat": "Has connected", + "range": true, + "refId": "A", + "useBackend": false + } + ], + "title": "Routing Table", + "transparent": true, + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 
5, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_cluster_as_per_enr{cluster=\"1\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Cluster 1", + "range": false, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_cluster_as_per_enr{cluster=\"16\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Cluster 16 (Status)", + "range": false, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_cluster_as_per_enr{cluster=\"0\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Cluster 0", + "range": false, + "refId": "C", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_cluster_as_per_enr{cluster!~\"0|16|1\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Others", + "range": false, + "refId": "D", + "useBackend": false + } + ], + "title": "Clusters as per ENR", + 
"transparent": true, + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "description": "Check all the ENRs in the network.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 6, + "options": { + "displayLabels": [ + "value" + ], + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_enr{capability=\"Filter\"}", + "fullMetaSearch": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Filter", + "range": false, + "refId": "A", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_enr{capability=\"Lightpush\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Light Push", + "range": false, + "refId": "B", + "useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_enr{capability=\"Relay\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Relay", + "range": false, + "refId": "C", + 
"useBackend": false + }, + { + "datasource": { + "type": "prometheus", + "uid": "bdspz2dtm1clce" + }, + "disableTextWrap": false, + "editorMode": "builder", + "exemplar": false, + "expr": "networkmonitor_peer_type_as_per_enr{capability=\"Store\"}", + "fullMetaSearch": false, + "hide": false, + "includeNullMetadata": true, + "instant": true, + "legendFormat": "Store", + "range": false, + "refId": "D", + "useBackend": false + } + ], + "title": "Capabilities as per ENR", + "transparent": true, + "type": "piechart" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Network Monitor Discovery", + "uid": "adsq0j7yxz2f4e", + "version": 9, + "weekStart": "" +} \ No newline at end of file diff --git a/third-party/nwaku/metrics/waku-rln-relay-fleet-dashboard.json b/third-party/nwaku/metrics/waku-rln-relay-fleet-dashboard.json new file mode 100644 index 0000000..96f8a3f --- /dev/null +++ b/third-party/nwaku/metrics/waku-rln-relay-fleet-dashboard.json @@ -0,0 +1,1151 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 46, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { 
+ "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_membership_insertion_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Membership Insertion Duration ", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + 
"color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_messages_total_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Total Messages Count", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": 
"waku_rln_proof_verification_total_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Total Proofs Verified Count", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_rln_proof_verification_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku RLN Relay Proof Verification Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" 
+ }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_spam_messages_total_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Spam Messages Count", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + 
"group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_relay_mounting_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Mounting Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + 
"tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_membership_credentials_import_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Credentials Import Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 25 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_valid_messages_total_sum{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Valid Messages Count", + "type": "timeseries" + }, + { + 
"datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 33 + }, + "id": 17, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_proof_generation_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Proof Generation Duration", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 33 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_valid_messages_total_bucket{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{instance}}:{{le}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Valid Messages By Root Window", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + 
} + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 41 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_registration_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Registration Duration", + "type": "timeseries" + } + ], + "refresh": false, + "schemaVersion": 37, + "style": "dark", + "tags": [], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": [ + "waku.test" + ], + "value": [ + "waku.test" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "definition": "label_values(libp2p_peers, fleet)", + "hide": 0, + "includeAll": false, + "label": "Fleet name", + "multi": true, + "name": "fleet", + "options": [], + "query": { + "query": "label_values(libp2p_peers, fleet)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "/waku|status/", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": true, + "text": ".*", + "value": ".*" + }, + "hide": 0, + "includeAll": false, + "label": "Hostname regex", + "multi": false, + "name": "host", + "options": [ + { + "selected": true, + "text": ".*", + "value": ".*" + } + ], + "query": ".*", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + }, + { + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "definition": "label_values(libp2p_peers, datacenter)", + "hide": 0, + "includeAll": true, + "label": "Data Center", + "multi": true, + "name": "dc", + 
"options": [], + "query": { + "query": "label_values(libp2p_peers, datacenter)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Waku RLN Relay Testnet 3", + "uid": "U6YIdDSVz", + "version": 1, + "weekStart": "" +} diff --git a/third-party/nwaku/metrics/waku-rln-relay-single-node-dashboard.json b/third-party/nwaku/metrics/waku-rln-relay-single-node-dashboard.json new file mode 100644 index 0000000..df76c6b --- /dev/null +++ b/third-party/nwaku/metrics/waku-rln-relay-single-node-dashboard.json @@ -0,0 +1,983 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 47, + "links": [], + "liveNow": false, + "panels": [ + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, 
+ "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_membership_insertion_duration_seconds{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Membership Insertion Duration ", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_messages_total_total{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Total Messages Count", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 9 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_proof_verification_total_total{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Total Proofs Verified Count", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 9 + }, + "id": 8, + "options": { + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_relay_mounting_duration_seconds{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Mounting Duration", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 17 + }, + "id": 10, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_spam_messages_total_total{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Spam Messages Count", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, 
+ "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 17 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_valid_messages_total_sum{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Valid Messages Count", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 25 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + 
"mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_membership_credentials_import_duration_seconds{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Credentials Import Duration", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 25 + }, + "id": 15, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_valid_messages_total_bucket{}", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": ":{{le}}", + "refId": "A" + } + ], + "title": "Waku RLN Relay Valid Messages By Root Window", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 33 + }, + "id": 17, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_proof_generation_duration_seconds{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Proof Generation Duration", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 33 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + 
"placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_registration_duration_seconds{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Registration Duration", + "type": "timeseries" + }, + { + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 33 + }, + "id": 20, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_rln_proof_verification_duration_seconds{}", + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Waku RLN Relay Proof Verification Duration", + "type": "timeseries" + } + ], + "refresh": false, + "schemaVersion": 35, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Waku RLN Relay - Single Node", + "uid": "JCJmf8S4k", + 
"version": 2, + "weekStart": "" +} diff --git a/third-party/nwaku/metrics/waku-single-node-dashboard.json b/third-party/nwaku/metrics/waku-single-node-dashboard.json new file mode 100644 index 0000000..9a055c9 --- /dev/null +++ b/third-party/nwaku/metrics/waku-single-node-dashboard.json @@ -0,0 +1,5754 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Metrics for Waku nodes written in Nim", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 58, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 56, + "panels": [], + "title": "At a glance", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "yellow", + "value": 1 + }, + { + "color": "green", + "value": 2 + }, + { + "color": "#EAB839", + "value": 120 + }, + { + "color": "red", + "value": 149 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 4, + "x": 0, + "y": 1 + }, + "id": 52, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": 
"code", + "exemplar": true, + "expr": "libp2p_pubsub_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{fleet}}: {{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Libp2p PubSub Peers", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "blue", + "value": null + } + ] + }, + "unit": "dateTimeAsIso" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 7, + "x": 4, + "y": 1 + }, + "id": 46, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "titleSize": 18, + "valueSize": 20 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "process_start_time_seconds{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"} * 1000", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Node start times (UTC)", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 8, + "x": 11, + "y": 1 + }, + "id": 58, + "options": { + "displayMode": "lcd", + "legend": { + "calcs": [], + 
"displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "maxVizHeight": 300, + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "sizing": "auto", + "text": {}, + "valueMode": "color" + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "avg by (instance)(netdata_cpu_cpu_percentage_average{dimension=\"user\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "CPU Usage", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "source" + }, + "properties": [ + { + "id": "custom.width", + "value": 122 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.width", + "value": 181 + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 5, + "x": 19, + "y": 1 + }, + "id": 85, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": 
"waku_version{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Version", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "version", + "source", + "Time" + ] + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 11 + }, + "id": 81, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "libp2p_autonat_reachability_confidence{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", reachability=\"Reachable\"}", + "legendFormat": "{{instance}}:{{reachability}}", + "range": true, + "refId": 
"A" + } + ], + "title": "Node is Reachable (Experimental)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 11 + }, + "id": 82, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "libp2p_autonat_reachability_confidence{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", reachability=\"NotReachable\"}", + "legendFormat": "{{instance}}:{{reachability}}", + "range": true, + "refId": "A" + } + ], + "title": "Node is NotReachable (Experimental)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + 
"axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 78, + "interval": "15s", + "maxDataPoints": 1000, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(libp2p_network_bytes_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", direction=\"in\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{instance}}:{{direction}}", + "range": true, + "refId": "A" + } + ], + "title": "Inbound Traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, 
+ "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "binBps" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 79, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "rate(libp2p_network_bytes_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\", direction=\"out\"}[$__rate_interval])", + "interval": "", + "legendFormat": "{{instance}}:{{direction}}", + "range": true, + "refId": "A" + } + ], + "title": "Outbound Traffic", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 
1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 29 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "(increase(waku_node_messages_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1m]))", + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Messages (1m rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 9, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": 
"absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 54, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (type)(increase(waku_peers_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval]))", + "interval": "", + "legendFormat": "peer {{type}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (type)(increase(waku_store_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "store {{type}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum by (type)(increase(waku_node_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval]))", + "hide": false, + "interval": "", + "legendFormat": "node {{type}}", + "range": true, + "refId": "C" + } + ], + "title": "Waku Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + 
"fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 38 + }, + "id": 66, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "count(count by (contentTopic)(waku_node_messages_total))", + "interval": "", + "legendFormat": "content topics", + "refId": "A" + } + ], + "title": "Total Content Topics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 5, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "stepBefore", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": 
"none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 38 + }, + "id": 68, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_version{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}:{{version}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku version", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 46 + }, + "id": 17, + "panels": [], + "title": "General", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + 
"value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 47 + }, + "id": 48, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_node_filters{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku Node Filters", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 47 + }, + "id": 50, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + 
"pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_node_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "refId": "A" + } + ], + "title": "Waku Node Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 53 + }, + "id": 60, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_topics {instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "Topics: {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + 
"uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_subscriptions_total {instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Subscriptions: {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_unsubscriptions_total {instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Unsubscriptions: {{instance}}", + "refId": "C" + } + ], + "title": "Pubsub Topics", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 53 + }, + "id": 8, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": 
[ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(libp2p_pubsub_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "LibP2P PubSub Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 50, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 59 + }, + "id": 2, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(libp2p_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "LibP2P Peers", + "type": 
"timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 59 + }, + "id": 83, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_peer_store_size{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Peer Store Size", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + 
"axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 65 + }, + "id": 3, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (type)(libp2p_open_streams{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{type}}", + "refId": "A" + } + ], + "title": "LibP2P Open Streams", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + 
"pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 65 + }, + "id": 9, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_validation_success_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "success {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_validation_failure_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "failure {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_validation_ignore_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "ignore {{instance}}", + "refId": "C" + } + ], + "title": "LibP2P Validations", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": 
false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 71 + }, + "id": 6, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(process_open_fds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Open File Descriptors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "short" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 71 + }, + "id": 7, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_total_dial_attempts_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "time_series", + "hide": false, + "interval": "", + "legendFormat": "Attempts: {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_failed_dials_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Failed: {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_successful_dials_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Successful: {{instance}}", + "refId": "C" + } + ], + "title": "LibP2P Dials", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": 
"palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 4, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 77 + }, + "id": 44, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_mem_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "Nim total memory: {{instance}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_mem_occupied_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "Nim occupied memory: {{instance}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_heap_instance_occupied_summed_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + 
"interval": "", + "legendFormat": "Nim total heap: {{instance}}", + "refId": "C" + } + ], + "title": "Nim Memory Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 77 + }, + "id": 10, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "max" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "avg by (instance)(netdata_cpu_cpu_percentage_average{dimension=\"user\", instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "hide": false, + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "CPU Usage", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 83 + }, + "id": 64, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "nim_gc_heap_instance_occupied_bytes{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}} {{type_name}}", + "refId": "A" + } + ], + "title": "Heap allocation", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + 
"gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 85 + }, + "id": 4, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(process_virtual_memory_bytes{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Virtual Memory", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + 
"stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "links": [], + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "decbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 91 + }, + "id": 5, + "options": { + "alertThreshold": true, + "legend": { + "calcs": [ + "mean" + ], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "expr": "sum by (instance)(process_resident_memory_bytes{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"})", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Resident Memory", + "type": "timeseries" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 97 + }, + "id": 72, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + 
"overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 122 + }, + "id": 70, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "increase(waku_bridge_transfers_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[10m])", + "interval": "", + "legendFormat": "{{fleet}} : {{type}}", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "increase(envelopes_valid_total{instance=~\"bridge*[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[10m])", + "hide": false, + "interval": "", + "legendFormat": "{{fleet}} : v1_envelopes", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "increase(waku_node_messages_total{instance=~\"bridge*[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[10m])", + "hide": false, + "interval": "", + "legendFormat": "{{fleet}} : v2_messages", + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "increase(envelopes_dropped_total{instance=~\"bridge*[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[10m])", + "hide": false, + "interval": "", + "legendFormat": "{{fleet}} : v1_envelopes_dropped ({{reason}})", + "refId": "D" + } + ], + "title": "Bridge (10m rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": 
{ + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 122 + }, + "id": 74, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "connected_peers{instance=~\"bridge*[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "v1_connected_peers", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "libp2p_pubsub_peers{instance=~\"bridge*[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "v2_connected_peers", + "refId": "B" + } + ], + "title": "Connected Peers", + "type": "timeseries" + } + ], + "title": "Bridge", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 98 + }, + "id": 34, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + 
"lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 99 + }, + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_store_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku Store Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 2, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 99 + }, + "id": 38, + "options": 
{ + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_store_messages{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_archive_messages{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "legendFormat": "{{type}}: {{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Waku Archive Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 105 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": 
"none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "increase(waku_store_queries_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1m])", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Store Queries (1m rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 105 + }, + "id": 40, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (type)(increase(waku_archive_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "{{type}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": 
"P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum by (type)(increase(waku_store_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[1m]))", + "hide": false, + "interval": "", + "legendFormat": "{{type}}", + "range": true, + "refId": "B" + } + ], + "title": "Waku Archive Errors (1m rate)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 111 + }, + "id": 77, + "maxDataPoints": 60, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "RdYlGn", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "sum(increase(waku_archive_query_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "legendFormat": "{{le}}", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": 
"sum(increase(waku_store_query_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": true, + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Archive Query Duration", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 111 + }, + "id": 75, + "maxDataPoints": 60, + "options": { + "calculate": false, + "calculation": {}, + "cellGap": 2, + "cellValues": {}, + "color": { + "exponent": 0.5, + "fill": "#b4ff00", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "RdYlGn", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "showValue": "never", + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "sum(increase(waku_archive_insert_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": false, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": 
"sum(increase(waku_store_insert_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "hide": true, + "instant": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{le}}", + "refId": "A" + } + ], + "title": "Waku Archive Insert Duration", + "type": "heatmap" + } + ], + "title": "Store/Archive", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 99 + }, + "id": 87, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 135 + }, + "id": 93, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_filter_requests_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter 
Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 135 + }, + "id": 89, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_filter_subscriptions{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Subscriptions", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": 
"linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 135 + }, + "id": 91, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_filter_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 143 + }, + "id": 95, + "options": { + "calculate": false, + "cellGap": 2, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "RdYlGn", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + "show": true, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": 
{ + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "sum(increase(waku_filter_request_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "interval": "", + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Request Duration", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 143 + }, + "id": 97, + "options": { + "calculate": false, + "cellGap": 2, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "RdYlGn", + "steps": 128 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": false + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + "show": true, + "yHistogram": false + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "sum(increase(waku_filter_handle_message_duration_seconds_bucket{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Waku Filter Handle Message Duration", + "type": "heatmap" + } + ], + "title": "Filter", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 100 + }, + "id": 20, + "panels": [ + { + "datasource": { + "type": 
"prometheus", + "uid": "P6693426190CB2316" + }, + "description": "Waku Filter Peers", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 149 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_filter_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku Filter Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + 
"lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 149 + }, + "id": 26, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_filter_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{type}}: {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_legacy_filter_errors{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Waku Filter Errors", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + 
"mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 157 + }, + "id": 99, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_filter_messages{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_legacy_filter_messages{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "legendFormat": "{{type}} : {{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Waku Filter Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 3, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 
80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 157 + }, + "id": 24, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": true, + "expr": "waku_filter_subscribers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_legacy_filter_subscribers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "hide": false, + "legendFormat": "{{instance}}", + "range": true, + "refId": "B" + } + ], + "title": "Waku Filter Subscribers", + "type": "timeseries" + } + ], + "title": "Filter (Legacy)", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 101 + }, + "id": 28, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } 
+ ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 129 + }, + "id": 30, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_lightpush_peers{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + "interval": "", + "legendFormat": "{{instance}}", + "refId": "A" + } + ], + "title": "Waku Lightpush Peers", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 129 + }, + "id": 32, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "exemplar": true, + "expr": "waku_lightpush_errors_total{instance=~\"[[host]].([[dc:pipe]]).*.([[fleet:pipe]])\"}", + 
"interval": "", + "legendFormat": "{{type}}: {[instance}}", + "refId": "A" + } + ], + "title": "Waku Lightpush Errors", + "type": "timeseries" + } + ], + "title": "Lightpush", + "type": "row" + }, + { + "collapsed": true, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 102 + }, + "id": 15, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 122 + }, + "id": 103, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "(increase(waku_node_messages_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1m]))/60", + "format": "heatmap", + "instant": false, + "legendFormat": "{{fleet}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Messages/second", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + 
"defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "deckbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 122 + }, + "id": 102, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_histogram_message_size_sum{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}/waku_histogram_message_size_count{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "heatmap", + "instant": false, + "legendFormat": "{{fleet}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Average msg size (kBytes)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 130 + }, + "id": 101, + "options": { + "displayMode": 
"gradient", + "minVizHeight": 10, + "minVizWidth": 0, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "rate(waku_histogram_message_size_bucket{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1h])/scalar(rate(waku_histogram_message_size_count{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}[1h]))*100", + "format": "heatmap", + "instant": false, + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Message distrubution %/kBytes (Last Hour)", + "type": "bargauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 138 + }, + "id": 105, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": { + "titleSize": 15, + "valueSize": 50 + }, + "textMode": "auto" + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_connected_peers{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "heatmap", + "instant": false, + "legendFormat": "Direction:{{direction}} {{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "Connected Peers per Protocol", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, 
+ "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 138 + }, + "id": 104, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": { + "titleSize": 15, + "valueSize": 50 + }, + "textMode": "auto" + }, + "pluginVersion": "9.2.5", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "exemplar": false, + "expr": "waku_streams_peers{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "format": "heatmap", + "instant": false, + "legendFormat": "Direction:{{direction}} {{protocol}}", + "range": true, + "refId": "A" + } + ], + "title": "Connected Streams per Protocol", + "type": "stat" + } + ], + "title": "Messages", + "type": "row" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 103 + }, + "id": 112, + "panels": [], + "title": "RLN", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + 
"thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 104 + }, + "id": 107, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_invalid_messages_total_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "legendFormat": "{{type}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Invalid Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 104 + }, + "id": 110, + "options": { + "legend": { + 
"calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_valid_messages_total_count{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "legendFormat": "{{__name__}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Valid RLN Messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 112 + }, + "id": 109, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": 
"waku_rln_spam_messages_total_total{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "legendFormat": "{{__name__}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Spam messages", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 112 + }, + "id": 108, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_proof_verification_duration_seconds{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "legendFormat": "{{__name__}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "Proof Verification (s)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 120 + }, + "id": 113, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.5.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "editorMode": "code", + "expr": "waku_rln_number_registered_memberships{instance=~\"[[host]].([[dc:pipe]]).([[fleet:pipe]])\"}", + "legendFormat": "{{__name__}}_{{datacenter}}", + "range": true, + "refId": "A" + } + ], + "title": "RLN Memberships", + "type": "timeseries" + } + ], + "preload": false, + "refresh": "30s", + "schemaVersion": 40, + "tags": [], + "templating": { + "list": [ + { + "current": { + "text": "node-.*", + "value": "node-.*" + }, + "includeAll": false, + "label": "Hostname regex", + "name": "host", + "options": [ + { + "selected": true, + "text": "node-.*", + "value": "node-.*" + } + ], + "query": "node-.*", + "type": "custom" + }, + { + "current": { + "text": [ + 
"status.prod" + ], + "value": [ + "status.prod" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "definition": "label_values(libp2p_peers, fleet)", + "includeAll": false, + "label": "Fleet name", + "multi": true, + "name": "fleet", + "options": [], + "query": { + "query": "label_values(libp2p_peers, fleet)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "/waku|status|shards/", + "type": "query" + }, + { + "current": { + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "P6693426190CB2316" + }, + "definition": "label_values(libp2p_peers, datacenter)", + "includeAll": true, + "label": "Data Center", + "multi": true, + "name": "dc", + "options": [], + "query": { + "query": "label_values(libp2p_peers, datacenter)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-10d", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + "timezone": "browser", + "title": "Nim-Waku Single Node", + "uid": "TgQ8wf8Vz", + "version": 7, + "weekStart": "" +} \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store/00001_addMessageTable.up.sql b/third-party/nwaku/migrations/message_store/00001_addMessageTable.up.sql new file mode 100644 index 0000000..75d8d7e --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00001_addMessageTable.up.sql @@ -0,0 +1,8 @@ +CREATE TABLE IF NOT EXISTS Message( + id BLOB PRIMARY KEY, + timestamp INTEGER NOT NULL, + contentTopic BLOB NOT NULL, + pubsubTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL + ) WITHOUT ROWID; \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store/00002_addSenderTimeStamp.up.sql b/third-party/nwaku/migrations/message_store/00002_addSenderTimeStamp.up.sql new file mode 100644 
index 0000000..ce263d1 --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00002_addSenderTimeStamp.up.sql @@ -0,0 +1,29 @@ +CREATE TABLE IF NOT EXISTS Message_backup ( + id BLOB PRIMARY KEY, + timestamp INTEGER NOT NULL, + contentTopic BLOB NOT NULL, + pubsubTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL + ) WITHOUT ROWID; + +INSERT INTO Message_backup SELECT id, timestamp, contentTopic, pubsubTopic, payload, version FROM Message; + +DROP TABLE Message; + +CREATE TABLE IF NOT EXISTS Message( + id BLOB PRIMARY KEY, + receiverTimestamp REAL NOT NULL, + contentTopic BLOB NOT NULL, + pubsubTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL, + senderTimestamp REAL NOT NULL + ) WITHOUT ROWID; + + +INSERT INTO Message (id, receiverTimestamp, contentTopic, pubsubTopic, payload, version, senderTimestamp) + SELECT id, timestamp, contentTopic, pubsubTopic, payload, version, 0 + FROM Message_backup; + +DROP TABLE Message_backup; \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store/00003_convertTimestampsToInt64.up.sql b/third-party/nwaku/migrations/message_store/00003_convertTimestampsToInt64.up.sql new file mode 100644 index 0000000..89e0250 --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00003_convertTimestampsToInt64.up.sql @@ -0,0 +1,29 @@ +CREATE TABLE IF NOT EXISTS Message_backup ( + id BLOB PRIMARY KEY, + receiverTimestamp REAL NOT NULL, + contentTopic BLOB NOT NULL, + pubsubTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL, + senderTimestamp REAL NOT NULL + ) WITHOUT ROWID; + +INSERT INTO Message_backup SELECT id, receiverTimestamp, contentTopic, pubsubTopic, payload, version, senderTimestamp FROM Message; + +DROP TABLE Message; + +CREATE TABLE IF NOT EXISTS Message( + id BLOB PRIMARY KEY, + receiverTimestamp INTEGER NOT NULL, + contentTopic BLOB NOT NULL, + pubsubTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL, + senderTimestamp INTEGER NOT NULL + ) 
WITHOUT ROWID; + +INSERT INTO Message (id, receiverTimestamp, contentTopic, pubsubTopic, payload, version, senderTimestamp) + SELECT id, CAST(receiverTimestamp*1000000000 AS INTEGER), contentTopic, pubsubTopic, payload, version, CAST(senderTimestamp*1000000000 AS INTEGER) + FROM Message_backup; + +DROP TABLE Message_backup; \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store/00004_extendPrimaryKey.up.sql b/third-party/nwaku/migrations/message_store/00004_extendPrimaryKey.up.sql new file mode 100644 index 0000000..88b191f --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00004_extendPrimaryKey.up.sql @@ -0,0 +1,18 @@ +ALTER TABLE Message RENAME TO Message_backup; + +CREATE TABLE IF NOT EXISTS Message( + id BLOB, + receiverTimestamp INTEGER NOT NULL, + contentTopic BLOB NOT NULL, + pubsubTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL, + senderTimestamp INTEGER NOT NULL, + CONSTRAINT messageIndex PRIMARY KEY (senderTimestamp, id, pubsubTopic) + ) WITHOUT ROWID; + +INSERT INTO Message + SELECT * + FROM Message_backup; + +DROP TABLE Message_backup; \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store/00005_updateIndex.up.sql b/third-party/nwaku/migrations/message_store/00005_updateIndex.up.sql new file mode 100644 index 0000000..ec039e1 --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00005_updateIndex.up.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS i_msg ON Message (contentTopic, pubsubTopic, senderTimestamp, id); diff --git a/third-party/nwaku/migrations/message_store/00006_renameColumn.up.sql b/third-party/nwaku/migrations/message_store/00006_renameColumn.up.sql new file mode 100644 index 0000000..360218d --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00006_renameColumn.up.sql @@ -0,0 +1,11 @@ +ALTER TABLE message RENAME COLUMN receiverTimestamp TO storedAt; + + +DROP INDEX IF EXISTS i_msg; + +CREATE INDEX IF NOT EXISTS i_query ON message 
(contentTopic, pubsubTopic, storedAt, id); + + +DROP INDEX IF EXISTS i_rt; + +CREATE INDEX IF NOT EXISTS i_ts ON message (storedAt); diff --git a/third-party/nwaku/migrations/message_store/00007_updatePrimaryKey.up.sql b/third-party/nwaku/migrations/message_store/00007_updatePrimaryKey.up.sql new file mode 100644 index 0000000..f1936c8 --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00007_updatePrimaryKey.up.sql @@ -0,0 +1,18 @@ +ALTER TABLE message RENAME TO message_backup; + +CREATE TABLE IF NOT EXISTS message( + pubsubTopic BLOB NOT NULL, + contentTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL, + timestamp INTEGER NOT NULL, + id BLOB, + storedAt INTEGER NOT NULL, + CONSTRAINT messageIndex PRIMARY KEY (storedAt, id, pubsubTopic) +) WITHOUT ROWID; + +INSERT OR IGNORE INTO message(pubsubTopic, contentTopic, payload, version, timestamp, id, storedAt) + SELECT pubsubTopic, contentTopic, payload, version, senderTimestamp, id, storedAt + FROM message_backup; + +DROP TABLE message_backup; \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store/00008_updatePrimaryKey_add_col.up.sql b/third-party/nwaku/migrations/message_store/00008_updatePrimaryKey_add_col.up.sql new file mode 100644 index 0000000..f933d08 --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00008_updatePrimaryKey_add_col.up.sql @@ -0,0 +1,28 @@ +ALTER TABLE message RENAME TO message_backup; + +CREATE TABLE IF NOT EXISTS message ( + pubsubTopic BLOB NOT NULL, + contentTopic BLOB NOT NULL, + payload BLOB, + version INTEGER NOT NULL, + timestamp INTEGER NOT NULL, + id BLOB, + messageHash BLOB, -- Newly added, this will be populated with a counter value + storedAt INTEGER NOT NULL, + CONSTRAINT messageIndex PRIMARY KEY (messageHash) +) WITHOUT ROWID; + + +INSERT INTO message(pubsubTopic, contentTopic, payload, version, timestamp, id, messageHash, storedAt) +SELECT + mb.pubsubTopic, + mb.contentTopic, + mb.payload, + mb.version, + 
mb.timestamp, + mb.id, + randomblob(32), -- to populate 32-byte random blob + mb.storedAt +FROM message_backup AS mb; + +DROP TABLE message_backup; \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store/00009_addMetaColumn.up.sql b/third-party/nwaku/migrations/message_store/00009_addMetaColumn.up.sql new file mode 100644 index 0000000..f8fc2e4 --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00009_addMetaColumn.up.sql @@ -0,0 +1 @@ +ALTER TABLE message ADD COLUMN meta BLOB; diff --git a/third-party/nwaku/migrations/message_store/00010_dropStoredAt.up.sql b/third-party/nwaku/migrations/message_store/00010_dropStoredAt.up.sql new file mode 100644 index 0000000..4ef60db --- /dev/null +++ b/third-party/nwaku/migrations/message_store/00010_dropStoredAt.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE message DROP COLUMN timestamp; + +ALTER TABLE message RENAME COLUMN storedAt TO timestamp; \ No newline at end of file diff --git a/third-party/nwaku/migrations/message_store_postgres/content_script_version_1.nim b/third-party/nwaku/migrations/message_store_postgres/content_script_version_1.nim new file mode 100644 index 0000000..18133bd --- /dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/content_script_version_1.nim @@ -0,0 +1,21 @@ +const ContentScriptVersion_1* = + """ +CREATE TABLE IF NOT EXISTS messages ( + pubsubTopic VARCHAR NOT NULL, + contentTopic VARCHAR NOT NULL, + payload VARCHAR, + version INTEGER NOT NULL, + timestamp BIGINT NOT NULL, + id VARCHAR NOT NULL, + messageHash VARCHAR NOT NULL, + storedAt BIGINT NOT NULL, + CONSTRAINT messageIndex PRIMARY KEY (messageHash) +); + +CREATE TABLE iF NOT EXISTS version ( + version INTEGER NOT NULL +); + +INSERT INTO version (version) VALUES(1); + +""" diff --git a/third-party/nwaku/migrations/message_store_postgres/content_script_version_2.nim b/third-party/nwaku/migrations/message_store_postgres/content_script_version_2.nim new file mode 100644 index 0000000..8c3656e --- 
/dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/content_script_version_2.nim @@ -0,0 +1,70 @@ +const ContentScriptVersion_2* = + """ +ALTER TABLE IF EXISTS messages_backup RENAME TO messages; +ALTER TABLE messages RENAME TO messages_backup; +ALTER TABLE messages_backup DROP CONSTRAINT messageIndex; + +CREATE TABLE IF NOT EXISTS messages ( + pubsubTopic VARCHAR NOT NULL, + contentTopic VARCHAR NOT NULL, + payload VARCHAR, + version INTEGER NOT NULL, + timestamp BIGINT NOT NULL, + id VARCHAR NOT NULL, + messageHash VARCHAR NOT NULL, + storedAt BIGINT NOT NULL, + CONSTRAINT messageIndex PRIMARY KEY (messageHash, storedAt) + ) PARTITION BY RANGE (storedAt); + +DO $$ +DECLARE + min_storedAt numeric; + max_storedAt numeric; + min_storedAtSeconds integer = 0; + max_storedAtSeconds integer = 0; + partition_name TEXT; + create_partition_stmt TEXT; +BEGIN + SELECT MIN(storedAt) into min_storedAt + FROM messages_backup; + + SELECT MAX(storedAt) into max_storedAt + FROM messages_backup; + + min_storedAtSeconds := min_storedAt / 1000000000; + max_storedAtSeconds := max_storedAt / 1000000000; + + partition_name := 'messages_' || min_storedAtSeconds || '_' || max_storedAtSeconds; + create_partition_stmt := 'CREATE TABLE ' || partition_name || + ' PARTITION OF messages FOR VALUES FROM (' || + min_storedAt || ') TO (' || (max_storedAt + 1) || ')'; + IF min_storedAtSeconds > 0 AND max_storedAtSeconds > 0 THEN + EXECUTE create_partition_stmt USING partition_name, min_storedAt, max_storedAt; + END IF; +END $$; + +INSERT INTO messages ( + pubsubTopic, + contentTopic, + payload, + version, + timestamp, + id, + messageHash, + storedAt + ) + SELECT pubsubTopic, + contentTopic, + payload, + version, + timestamp, + id, + messageHash, + storedAt + FROM messages_backup; + +DROP TABLE messages_backup; + +UPDATE version SET version = 2 WHERE version = 1; + +""" diff --git a/third-party/nwaku/migrations/message_store_postgres/content_script_version_3.nim 
b/third-party/nwaku/migrations/message_store_postgres/content_script_version_3.nim new file mode 100644 index 0000000..2938087 --- /dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/content_script_version_3.nim @@ -0,0 +1,8 @@ +const ContentScriptVersion_3* = + """ +CREATE INDEX IF NOT EXISTS i_query ON messages + (contentTopic, pubsubTopic, storedAt, id); + +UPDATE version SET version = 3 WHERE version = 2; + +""" diff --git a/third-party/nwaku/migrations/message_store_postgres/content_script_version_4.nim b/third-party/nwaku/migrations/message_store_postgres/content_script_version_4.nim new file mode 100644 index 0000000..50ee269 --- /dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/content_script_version_4.nim @@ -0,0 +1,9 @@ +const ContentScriptVersion_4* = + """ +ALTER TABLE messages ADD meta VARCHAR default null; + +CREATE INDEX IF NOT EXISTS i_query ON messages (contentTopic, pubsubTopic, storedAt, id); + +UPDATE version SET version = 4 WHERE version = 3; + +""" diff --git a/third-party/nwaku/migrations/message_store_postgres/content_script_version_5.nim b/third-party/nwaku/migrations/message_store_postgres/content_script_version_5.nim new file mode 100644 index 0000000..a59b2da --- /dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/content_script_version_5.nim @@ -0,0 +1,6 @@ +const ContentScriptVersion_5* = + """ +CREATE INDEX IF NOT EXISTS i_query_storedAt ON messages (storedAt, id); + +UPDATE version SET version = 5 WHERE version = 4; +""" diff --git a/third-party/nwaku/migrations/message_store_postgres/content_script_version_6.nim b/third-party/nwaku/migrations/message_store_postgres/content_script_version_6.nim new file mode 100644 index 0000000..126ec6d --- /dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/content_script_version_6.nim @@ -0,0 +1,12 @@ +const ContentScriptVersion_6* = + """ +-- we can drop the timestamp column because this data is also kept in the storedAt column 
+ALTER TABLE messages DROP COLUMN timestamp; + +-- from now on we are only interested in the message timestamp +ALTER TABLE messages RENAME COLUMN storedAt TO timestamp; + +-- Update to new version +UPDATE version SET version = 6 WHERE version = 5; + +""" diff --git a/third-party/nwaku/migrations/message_store_postgres/content_script_version_7.nim b/third-party/nwaku/migrations/message_store_postgres/content_script_version_7.nim new file mode 100644 index 0000000..01d7ad8 --- /dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/content_script_version_7.nim @@ -0,0 +1,30 @@ +const ContentScriptVersion_7* = + """ + +-- Create lookup table +CREATE TABLE IF NOT EXISTS messages_lookup ( + timestamp BIGINT NOT NULL, + messageHash VARCHAR NOT NULL + ); + +-- Put data into lookup table +INSERT INTO messages_lookup (messageHash, timestamp) SELECT messageHash, timestamp from messages; + +ALTER TABLE messages_lookup ADD CONSTRAINT messageIndexLookupTable PRIMARY KEY (messageHash, timestamp); + +-- Create indexes +CREATE INDEX IF NOT EXISTS idx_messages_messagehash ON messages (messagehash); +CREATE INDEX IF NOT EXISTS idx_messages_timestamp ON messages (timestamp); +CREATE INDEX IF NOT EXISTS idx_messages_lookup_messagehash ON messages_lookup (messagehash); +CREATE INDEX IF NOT EXISTS idx_messages_lookup_timestamp ON messages_lookup (timestamp); + +DROP INDEX IF EXISTS i_query_storedat; +DROP INDEX IF EXISTS i_query; + +CREATE INDEX IF NOT EXISTS idx_query_pubsubtopic ON messages (pubsubTopic); +CREATE INDEX IF NOT EXISTS idx_query_contenttopic ON messages (contentTopic); + +-- Update to new version +UPDATE version SET version = 7 WHERE version = 6; + +""" diff --git a/third-party/nwaku/migrations/message_store_postgres/pg_migration_manager.nim b/third-party/nwaku/migrations/message_store_postgres/pg_migration_manager.nim new file mode 100644 index 0000000..051ac9e --- /dev/null +++ b/third-party/nwaku/migrations/message_store_postgres/pg_migration_manager.nim 
@@ -0,0 +1,30 @@ +import + content_script_version_1, content_script_version_2, content_script_version_3, + content_script_version_4, content_script_version_5, content_script_version_6, + content_script_version_7 + +type MigrationScript* = object + version*: int + scriptContent*: string + +proc init*(T: type MigrationScript, targetVersion: int, scriptContent: string): T = + return MigrationScript(version: targetVersion, scriptContent: scriptContent) + +const PgMigrationScripts* = + @[ + MigrationScript(version: 1, scriptContent: ContentScriptVersion_1), + MigrationScript(version: 2, scriptContent: ContentScriptVersion_2), + MigrationScript(version: 3, scriptContent: ContentScriptVersion_3), + MigrationScript(version: 4, scriptContent: ContentScriptVersion_4), + MigrationScript(version: 5, scriptContent: ContentScriptVersion_5), + MigrationScript(version: 6, scriptContent: ContentScriptVersion_6), + MigrationScript(version: 7, scriptContent: ContentScriptVersion_7), + ] + +proc getMigrationScripts*(currentVersion: int64, targetVersion: int64): seq[string] = + var ret = newSeq[string]() + var v = currentVersion + while v < targetVersion: + ret.add(PgMigrationScripts[v].scriptContent) + v.inc() + return ret diff --git a/third-party/nwaku/migrations/peer_store/00001_addPeerTable.up.sql b/third-party/nwaku/migrations/peer_store/00001_addPeerTable.up.sql new file mode 100644 index 0000000..ec242f6 --- /dev/null +++ b/third-party/nwaku/migrations/peer_store/00001_addPeerTable.up.sql @@ -0,0 +1,6 @@ +CREATE TABLE IF NOT EXISTS Peer ( + peerId BLOB PRIMARY KEY, + storedInfo BLOB, + connectedness INTEGER, + disconnectTime INTEGER + ) WITHOUT ROWID; \ No newline at end of file diff --git a/third-party/nwaku/migrations/sent_msgs/00001_addNotDeliveredMessagesTable.up.sql b/third-party/nwaku/migrations/sent_msgs/00001_addNotDeliveredMessagesTable.up.sql new file mode 100644 index 0000000..2c0a13b --- /dev/null +++ 
b/third-party/nwaku/migrations/sent_msgs/00001_addNotDeliveredMessagesTable.up.sql @@ -0,0 +1,9 @@ +CREATE TABLE IF NOT EXISTS NotDeliveredMessages( + messageHash BLOB PRIMARY KEY, + timestamp INTEGER NOT NULL, + contentTopic BLOB NOT NULL, + pubsubTopic BLOB NOT NULL, + payload BLOB, + meta BLOB, + version INTEGER NOT NULL + ); \ No newline at end of file diff --git a/third-party/nwaku/nix/README.md b/third-party/nwaku/nix/README.md new file mode 100644 index 0000000..e928b79 --- /dev/null +++ b/third-party/nwaku/nix/README.md @@ -0,0 +1,35 @@ +# Usage + +## Shell + +A development shell can be started using: +```sh +nix develop +``` + +## Building + +To build nwaku you can use: +```sh +nix build '.?submodules=1#default' +``` +The `?submodules=1` part should eventually not be necessary. +For more details see: +https://github.com/NixOS/nix/issues/4423 + +It can be also done without even cloning the repo: +```sh +nix build 'git+https://github.com/waku-org/nwaku?submodules=1#' +``` + +## Running + +```sh +nix run 'git+https://github.com/waku-org/nwaku?submodules=1#' +``` + +## Testing + +```sh +nix flake check ".?submodules=1#" +``` diff --git a/third-party/nwaku/nix/atlas.nix b/third-party/nwaku/nix/atlas.nix new file mode 100644 index 0000000..43336e0 --- /dev/null +++ b/third-party/nwaku/nix/atlas.nix @@ -0,0 +1,12 @@ +{ pkgs ? import <nixpkgs> { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "atlas"; + rev = tools.findKeyValue "^ +AtlasStableCommit = \"([a-f0-9]+)\"$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. 
+ hash = "sha256-G1TZdgbRPSgxXZ3VsBP2+XFCLHXVb3an65MuQx67o/k="; +} \ No newline at end of file diff --git a/third-party/nwaku/nix/checksums.nix b/third-party/nwaku/nix/checksums.nix new file mode 100644 index 0000000..d79345d --- /dev/null +++ b/third-party/nwaku/nix/checksums.nix @@ -0,0 +1,12 @@ +{ pkgs ? import <nixpkgs> { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "checksums"; + rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ="; +} diff --git a/third-party/nwaku/nix/csources.nix b/third-party/nwaku/nix/csources.nix new file mode 100644 index 0000000..5aa90fd --- /dev/null +++ b/third-party/nwaku/nix/csources.nix @@ -0,0 +1,12 @@ +{ pkgs ? import <nixpkgs> { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/config/build_config.txt; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "csources_v2"; + rev = tools.findKeyValue "^nim_csourcesHash=([a-f0-9]+)$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-UCLtoxOcGYjBdvHx7A47x6FjLMi6VZqpSs65MN7fpBs="; +} \ No newline at end of file diff --git a/third-party/nwaku/nix/default.nix b/third-party/nwaku/nix/default.nix new file mode 100644 index 0000000..29eec84 --- /dev/null +++ b/third-party/nwaku/nix/default.nix @@ -0,0 +1,116 @@ +{ + config ? {}, + pkgs ? import <nixpkgs> { }, + src ? ../., + targets ? ["libwaku-android-arm64"], + verbosity ? 2, + useSystemNim ? true, + quickAndDirty ? true, + stableSystems ? [ + "x86_64-linux" "aarch64-linux" + ], + androidArch, + abidir, + zerokitPkg, +}: + +assert pkgs.lib.assertMsg ((src.submodules or true) == true) + "Unable to build without submodules. 
Append '?submodules=1#' to the URI."; + +let + inherit (pkgs) stdenv lib writeScriptBin callPackage; + + revision = lib.substring 0 8 (src.rev or "dirty"); + +in stdenv.mkDerivation rec { + + pname = "nwaku"; + + version = "1.0.0-${revision}"; + + inherit src; + + buildInputs = with pkgs; [ + openssl + gmp + zip + ]; + + # Dependencies that should only exist in the build environment. + nativeBuildInputs = let + # Fix for Nim compiler calling 'git rev-parse' and 'lsb_release'. + fakeGit = writeScriptBin "git" "echo ${version}"; + # Fix for the zerokit package that is built with cargo/rustup/cross. + fakeCargo = writeScriptBin "cargo" "echo ${version}"; + # Fix for the zerokit package that is built with cargo/rustup/cross. + fakeRustup = writeScriptBin "rustup" "echo ${version}"; + # Fix for the zerokit package that is built with cargo/rustup/cross. + fakeCross = writeScriptBin "cross" "echo ${version}"; + in + with pkgs; [ + cmake + which + lsb-release + zerokitPkg + nim-unwrapped-2_0 + fakeGit + fakeCargo + fakeRustup + fakeCross + ]; + + # Environment variables required for Android builds + ANDROID_SDK_ROOT="${pkgs.androidPkgs.sdk}"; + ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}"; + NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}"; + XDG_CACHE_HOME = "/tmp"; + androidManifest = ""; + + makeFlags = targets ++ [ + "V=${toString verbosity}" + "QUICK_AND_DIRTY_COMPILER=${if quickAndDirty then "1" else "0"}" + "QUICK_AND_DIRTY_NIMBLE=${if quickAndDirty then "1" else "0"}" + "USE_SYSTEM_NIM=${if useSystemNim then "1" else "0"}" + ]; + + configurePhase = '' + patchShebangs . 
vendor/nimbus-build-system > /dev/null + make nimbus-build-system-paths + make nimbus-build-system-nimble-dir + ''; + + preBuild = '' + ln -s waku.nimble waku.nims + pushd vendor/nimbus-build-system/vendor/Nim + mkdir dist + cp -r ${callPackage ./nimble.nix {}} dist/nimble + chmod 777 -R dist/nimble + mkdir -p dist/nimble/dist + cp -r ${callPackage ./checksums.nix {}} dist/checksums # need both + cp -r ${callPackage ./checksums.nix {}} dist/nimble/dist/checksums + cp -r ${callPackage ./atlas.nix {}} dist/atlas + chmod 777 -R dist/atlas + mkdir dist/atlas/dist + cp -r ${callPackage ./sat.nix {}} dist/nimble/dist/sat + cp -r ${callPackage ./sat.nix {}} dist/atlas/dist/sat + cp -r ${callPackage ./csources.nix {}} csources_v2 + chmod 777 -R dist/nimble csources_v2 + popd + mkdir -p vendor/zerokit/target/${androidArch}/release + cp ${zerokitPkg}/librln.so vendor/zerokit/target/${androidArch}/release/ + ''; + + installPhase = '' + mkdir -p $out/jni + cp -r ./build/android/${abidir}/* $out/jni/ + echo '${androidManifest}' > $out/jni/AndroidManifest.xml + cd $out && zip -r libwaku.aar * + ''; + + meta = with pkgs.lib; { + description = "NWaku derivation to build libwaku for mobile targets using Android NDK and Rust."; + homepage = "https://github.com/status-im/nwaku"; + license = licenses.mit; + platforms = stableSystems; + }; +} diff --git a/third-party/nwaku/nix/nimble.nix b/third-party/nwaku/nix/nimble.nix new file mode 100644 index 0000000..5bd7b0f --- /dev/null +++ b/third-party/nwaku/nix/nimble.nix @@ -0,0 +1,12 @@ +{ pkgs ? import <nixpkgs> { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "nimble"; + rev = tools.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. 
+ hash = "sha256-MVHf19UbOWk8Zba2scj06PxdYYOJA6OXrVyDQ9Ku6Us="; +} \ No newline at end of file diff --git a/third-party/nwaku/nix/pkgs/android-sdk/compose.nix b/third-party/nwaku/nix/pkgs/android-sdk/compose.nix new file mode 100644 index 0000000..c73aaee --- /dev/null +++ b/third-party/nwaku/nix/pkgs/android-sdk/compose.nix @@ -0,0 +1,26 @@ +# +# This Nix expression centralizes the configuration +# for the Android development environment. +# + +{ androidenv, lib, stdenv }: + +assert lib.assertMsg (stdenv.system != "aarch64-darwin") + "aarch64-darwin not supported for Android SDK. Use: NIXPKGS_SYSTEM_OVERRIDE=x86_64-darwin"; + +# The "android-sdk-license" license is accepted +# by setting android_sdk.accept_license = true. +androidenv.composeAndroidPackages { + cmdLineToolsVersion = "9.0"; + toolsVersion = "26.1.1"; + platformToolsVersion = "33.0.3"; + buildToolsVersions = [ "34.0.0" ]; + platformVersions = [ "34" ]; + cmakeVersions = [ "3.22.1" ]; + ndkVersion = "25.2.9519653"; + includeNDK = true; + includeExtras = [ + "extras;android;m2repository" + "extras;google;m2repository" + ]; +} diff --git a/third-party/nwaku/nix/pkgs/android-sdk/default.nix b/third-party/nwaku/nix/pkgs/android-sdk/default.nix new file mode 100644 index 0000000..f3f7952 --- /dev/null +++ b/third-party/nwaku/nix/pkgs/android-sdk/default.nix @@ -0,0 +1,14 @@ +# +# This Nix expression centralizes the configuration +# for the Android development environment. 
+# + +{ callPackage }: + +let + compose = callPackage ./compose.nix { }; + pkgs = callPackage ./pkgs.nix { inherit compose; }; + shell = callPackage ./shell.nix { androidPkgs = pkgs; }; +in { + inherit compose pkgs shell; +} diff --git a/third-party/nwaku/nix/pkgs/android-sdk/pkgs.nix b/third-party/nwaku/nix/pkgs/android-sdk/pkgs.nix new file mode 100644 index 0000000..645987b --- /dev/null +++ b/third-party/nwaku/nix/pkgs/android-sdk/pkgs.nix @@ -0,0 +1,17 @@ +{ stdenv, compose }: + +# +# This derivation simply symlinks some stuff to get +# shorter paths as libexec/android-sdk is quite the mouthful. +# With this you can just do `androidPkgs.sdk` and `androidPkgs.ndk`. +# +stdenv.mkDerivation { + name = "${compose.androidsdk.name}-mod"; + phases = [ "symlinkPhase" ]; + outputs = [ "out" "sdk" "ndk" ]; + symlinkPhase = '' + ln -s ${compose.androidsdk} $out + ln -s ${compose.androidsdk}/libexec/android-sdk $sdk + ln -s ${compose.androidsdk}/libexec/android-sdk/ndk-bundle $ndk + ''; +} diff --git a/third-party/nwaku/nix/pkgs/android-sdk/shell.nix b/third-party/nwaku/nix/pkgs/android-sdk/shell.nix new file mode 100644 index 0000000..b539776 --- /dev/null +++ b/third-party/nwaku/nix/pkgs/android-sdk/shell.nix @@ -0,0 +1,19 @@ +{ mkShell, openjdk, androidPkgs }: + +mkShell { + name = "android-sdk-shell"; + buildInputs = [ openjdk ]; + + shellHook = '' + export ANDROID_HOME="${androidPkgs.sdk}" + export ANDROID_NDK_ROOT="${androidPkgs.ndk}" + export ANDROID_SDK_ROOT="$ANDROID_HOME" + export ANDROID_NDK_HOME="${androidPkgs.ndk}" + + export PATH="$ANDROID_NDK_ROOT:$PATH" + export PATH="$ANDROID_SDK_ROOT/tools:$PATH" + export PATH="$ANDROID_SDK_ROOT/tools/bin:$PATH" + export PATH="$(echo $ANDROID_SDK_ROOT/cmdline-tools/*/bin):$PATH" + export PATH="$ANDROID_SDK_ROOT/platform-tools:$PATH" + ''; +} diff --git a/third-party/nwaku/nix/sat.nix b/third-party/nwaku/nix/sat.nix new file mode 100644 index 0000000..31f2644 --- /dev/null +++ b/third-party/nwaku/nix/sat.nix @@ -0,0 +1,12 
@@ +{ pkgs ? import { } }: + +let + tools = pkgs.callPackage ./tools.nix {}; + sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim; +in pkgs.fetchFromGitHub { + owner = "nim-lang"; + repo = "sat"; + rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\"$" sourceFile; + # WARNING: Requires manual updates when Nim compiler version changes. + hash = "sha256-JFrrSV+mehG0gP7NiQ8hYthL0cjh44HNbXfuxQNhq7c="; +} \ No newline at end of file diff --git a/third-party/nwaku/nix/shell.nix b/third-party/nwaku/nix/shell.nix new file mode 100644 index 0000000..0db73dc --- /dev/null +++ b/third-party/nwaku/nix/shell.nix @@ -0,0 +1,23 @@ +{ + pkgs ? import { }, +}: +let + optionalDarwinDeps = pkgs.lib.optionals pkgs.stdenv.isDarwin [ + pkgs.libiconv + pkgs.darwin.apple_sdk.frameworks.Security + ]; +in +pkgs.mkShell { + inputsFrom = [ + pkgs.androidShell + ] ++ optionalDarwinDeps; + + buildInputs = with pkgs; [ + git + cargo + rustup + cmake + nim-unwrapped-2_0 + ]; + +} diff --git a/third-party/nwaku/nix/tools.nix b/third-party/nwaku/nix/tools.nix new file mode 100644 index 0000000..108d386 --- /dev/null +++ b/third-party/nwaku/nix/tools.nix @@ -0,0 +1,15 @@ +{ pkgs ? import { } }: + +let + + inherit (pkgs.lib) fileContents last splitString flatten remove; + inherit (builtins) map match; +in { + findKeyValue = regex: sourceFile: + let + linesFrom = file: splitString "\n" (fileContents file); + matching = regex: lines: map (line: match regex line) lines; + extractMatch = matches: last (flatten (remove null matches)); + in + extractMatch (matching regex (linesFrom sourceFile)); +} diff --git a/third-party/nwaku/scripts/build_rln.sh b/third-party/nwaku/scripts/build_rln.sh new file mode 100755 index 0000000..cd2fa38 --- /dev/null +++ b/third-party/nwaku/scripts/build_rln.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +# This script is used to build the rln library for the current platform, or download it from the +# release page if it is available. 
+ +set -e + +# first argument is the build directory +build_dir=$1 +rln_version=$2 +output_filename=$3 + +[[ -z "${build_dir}" ]] && { echo "No build directory specified"; exit 1; } +[[ -z "${rln_version}" ]] && { echo "No rln version specified"; exit 1; } +[[ -z "${output_filename}" ]] && { echo "No output filename specified"; exit 1; } + +# Get the host triplet +host_triplet=$(rustc --version --verbose | awk '/host:/{print $2}') + +tarball="${host_triplet}" + +# use arkzkey feature for v0.7.0 +# TODO: update this script in the future when arkzkey is default +if [[ "${rln_version}" == "v0.7.0" ]]; then + tarball+="-arkzkey-rln.tar.gz" +else + tarball+="-rln.tar.gz" +fi + +# Download the prebuilt rln library if it is available +if curl --silent --fail-with-body -L \ + "https://github.com/vacp2p/zerokit/releases/download/$rln_version/$tarball" \ + -o "${tarball}"; +then + echo "Downloaded ${tarball}" + tar -xzf "${tarball}" + mv "release/librln.a" "${output_filename}" + rm -rf "${tarball}" release +else + echo "Failed to download ${tarball}" + # Build rln instead + # first, check if submodule version = version in Makefile + cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" + + detected_OS=$(uname -s) + if [[ "$detected_OS" == MINGW* || "$detected_OS" == MSYS* ]]; then + submodule_version=$(cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | sed -n 's/.*"name":"rln","version":"\([^"]*\)".*/\1/p') + else + submodule_version=$(cargo metadata --format-version=1 --no-deps --manifest-path "${build_dir}/rln/Cargo.toml" | jq -r '.packages[] | select(.name == "rln") | .version') + fi + + if [[ "v${submodule_version}" != "${rln_version}" ]]; then + echo "Submodule version (v${submodule_version}) does not match version in Makefile (${rln_version})" + echo "Please update the submodule to ${rln_version}" + exit 1 + fi + # if submodule version = version in Makefile, build rln + cargo build --release 
-p rln --manifest-path "${build_dir}/rln/Cargo.toml" --features arkzkey + cp "${build_dir}/target/release/librln.a" "${output_filename}" +fi diff --git a/third-party/nwaku/scripts/build_rln_android.sh b/third-party/nwaku/scripts/build_rln_android.sh new file mode 100755 index 0000000..93a8c47 --- /dev/null +++ b/third-party/nwaku/scripts/build_rln_android.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +nwaku_build_dir=$1 +zerokit_dir=$2 +rln_version=$3 +android_arch=$4 +abi=$5 + +[[ -z "${nwaku_build_dir}" ]] && { echo "No nwaku build directory specified"; exit 1; } +[[ -z "${zerokit_dir}" ]] && { echo "No zerokit directory specified"; exit 1; } +[[ -z "${rln_version}" ]] && { echo "No rln version specified"; exit 1; } +[[ -z "${android_arch}" ]] && { echo "No android architecture specified"; exit 1; } +[[ -z "${abi}" ]] && { echo "No abi specified"; exit 1; } + +export RUSTFLAGS="-Ccodegen-units=1" + +rustup upgrade + +cargo install cross --git https://github.com/cross-rs/cross + +output_dir=`echo ${nwaku_build_dir}/android/${abi}` +mkdir -p ${output_dir} +pushd ${zerokit_dir}/rln +cargo clean +cross rustc --release --lib --target=${android_arch} --crate-type=cdylib +cp ../target/${android_arch}/release/librln.so ${output_dir}/. +popd + diff --git a/third-party/nwaku/scripts/build_windows.sh b/third-party/nwaku/scripts/build_windows.sh new file mode 100755 index 0000000..c9d4bd8 --- /dev/null +++ b/third-party/nwaku/scripts/build_windows.sh @@ -0,0 +1,60 @@ +#!/bin/sh + +echo "- - - - - - - - - - Windows Setup Script - - - - - - - - - -" + +success_count=0 +failure_count=0 + +# Function to execute a command and check its status +execute_command() { + echo "Executing: $1" + if eval "$1"; then + echo -e "✓ Command succeeded \n" + ((success_count++)) + else + echo -e "✗ Command failed \n" + ((failure_count++)) + fi +} + +echo "1. -.-.-.-- Set PATH -.-.-.-" +export PATH="/c/msys64/usr/bin:/c/msys64/mingw64/bin:/c/msys64/usr/lib:/c/msys64/mingw64/lib:$PATH" + +echo "2. 
-.-.-.- Verify dependencies -.-.-.-" +execute_command "which gcc g++ make cmake cargo upx rustc python" + +echo "3. -.-.-.- Updating submodules -.-.-.-" +execute_command "git submodule update --init --recursive" + +echo "4. -.-.-.- Creating tmp directory -.-.-.-" +execute_command "mkdir -p tmp" + +echo "5. -.-.-.- Building Nim -.-.-.-" +cd vendor/nimbus-build-system/vendor/Nim +execute_command "./build_all.bat" +cd ../../../.. + +echo "6. -.-.-.- Building libunwind -.-.-.-" +cd vendor/nim-libbacktrace +execute_command "make all V=1 -j8" +cd ../../ + +echo "7. -.-.-.- Building miniupnpc -.-.-.- " +cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc +execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1 -j8" +cd ../../../../.. + +echo "8. -.-.-.- Building libnatpmp -.-.-.- " +cd ./vendor/nim-nat-traversal/vendor/libnatpmp-upstream +make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 -j8 +cd ../../../../ + +echo "9. -.-.-.- Building wakunode2 -.-.-.- " +execute_command "make wakunode2 LOG_LEVEL=DEBUG V=1 -j8" + +echo "10. -.-.-.- Building libwaku -.-.-.- " +execute_command "make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j8" + +echo "Windows setup completed successfully!" 
+echo "✓ Successful commands: $success_count" +echo "✗ Failed commands: $failure_count" diff --git a/third-party/nwaku/scripts/chkhealth.sh b/third-party/nwaku/scripts/chkhealth.sh new file mode 100755 index 0000000..ea87211 --- /dev/null +++ b/third-party/nwaku/scripts/chkhealth.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# optional argument to specgify the ip address +ip_address="localhost:8645" +plain_text_out=false + +# Parse command line arguments +POSITIONAL_ARGS=() + +while [[ $# -gt 0 ]]; do + case $1 in + -p|--plain) + plain_text_out=true + shift # past argument + ;; + -*|--*) + echo "Unknown option $1" + exit 1 + ;; + *) + POSITIONAL_ARGS+=("$1") # save positional arg + shift # past argument + ;; + esac +done + +set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters + +# Check if an IP address is provided as an argument +if [[ -n "$1" ]]; then + ip_address="$1" +fi + +# check if curl is available +if ! command -v curl &> /dev/null +then + echo "curl could not be found" + exit 1 +fi + +response=$(curl --connect-timeout 6 -s GET http://${ip_address}/health) + +if [[ $? -ne 0 ]]; then + echo -e "$(date +'%H:%M:%S') - Node may not be running or not reachable at http://${ip_address}\n" + exit 1 +fi + +if [[ -z "${response}" ]]; then + echo -e "$(date +'%H:%M:%S') - node health status is: unknown\n" + exit 1 +fi + +if ! command -v jq &> /dev/null || [[ "$plain_text_out" = true ]]; then + echo -e "$(date +'%H:%M:%S') - node health status is: ${response}\n" +else + echo -e "$(date +'%H:%M:%S') - node health status is:\n" + echo "${response}" | jq . 2>/dev/null + if [[ $? 
-ne 0 ]]; then + echo -e "${response}" + fi +fi diff --git a/third-party/nwaku/scripts/generate_nimble_links.sh b/third-party/nwaku/scripts/generate_nimble_links.sh new file mode 100755 index 0000000..e01e6db --- /dev/null +++ b/third-party/nwaku/scripts/generate_nimble_links.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash + +# This script is used for building Nix derivation which doesn't allow Git commands. +# It implements similar logic as $(NIMBLE_DIR) target in nimbus-build-system Makefile. + +create_nimble_link_script_path="$(pwd)/${BUILD_SYSTEM_DIR}/scripts/create_nimble_link.sh" + +process_gitmodules() { + local gitmodules_file="$1" + local gitmodules_dir=$(dirname "$gitmodules_file") + + # Extract all submodule paths from the .gitmodules file + grep "path" $gitmodules_file | awk '{print $3}' | while read submodule_path; do + # Change pwd to the submodule dir and execute script + pushd "$gitmodules_dir/$submodule_path" > /dev/null + NIMBLE_DIR=$NIMBLE_DIR PWD_CMD=$PWD_CMD EXCLUDED_NIM_PACKAGES=$EXCLUDED_NIM_PACKAGES \ + "$create_nimble_link_script_path" "$submodule_path" + popd > /dev/null + done +} + +# Create the base directory if it doesn't exist +mkdir -p "${NIMBLE_DIR}/pkgs" + +# Find all .gitmodules files and process them +for gitmodules_file in $(find . -name '.gitmodules'); do + echo "Processing .gitmodules file: $gitmodules_file" + process_gitmodules "$gitmodules_file" +done diff --git a/third-party/nwaku/scripts/git_pre_commit_format.sh b/third-party/nwaku/scripts/git_pre_commit_format.sh new file mode 100644 index 0000000..f52c365 --- /dev/null +++ b/third-party/nwaku/scripts/git_pre_commit_format.sh @@ -0,0 +1,16 @@ +#!/bin/sh + +echo "Running pre-commit hook" + +# Regexp for grep to only choose some file extensions for formatting +exts="\.\(nim\|nims\)$" + +# Build nph lazily +make build-nph || (1>&2 echo "failed to build nph. 
Pre-commit formatting will not be done."; exit 0) + +# Format staged files +git diff --cached --name-only --diff-filter=ACMR | grep "$exts" | while read file; do + echo "Formatting $file" + make nph/"$file" + git add "$file" +done diff --git a/third-party/nwaku/scripts/install_anvil.sh b/third-party/nwaku/scripts/install_anvil.sh new file mode 100755 index 0000000..1bf4bd7 --- /dev/null +++ b/third-party/nwaku/scripts/install_anvil.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# Install Anvil + +if ! command -v anvil &> /dev/null; then + BASE_DIR="${XDG_CONFIG_HOME:-$HOME}" + FOUNDRY_DIR="${FOUNDRY_DIR:-"$BASE_DIR/.foundry"}" + FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin" + + curl -L https://foundry.paradigm.xyz | bash + # Extract the source path from the download result + echo "foundryup_path: $FOUNDRY_BIN_DIR" + # run foundryup + $FOUNDRY_BIN_DIR/foundryup +fi \ No newline at end of file diff --git a/third-party/nwaku/scripts/install_pnpm.sh b/third-party/nwaku/scripts/install_pnpm.sh new file mode 100755 index 0000000..34ba47b --- /dev/null +++ b/third-party/nwaku/scripts/install_pnpm.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +# Install pnpm +if ! command -v pnpm &> /dev/null; then + echo "pnpm is not installed, installing it now..." + npm i pnpm --global +fi + diff --git a/third-party/nwaku/scripts/install_rln_tests_dependencies.sh b/third-party/nwaku/scripts/install_rln_tests_dependencies.sh new file mode 100755 index 0000000..e19e0ef --- /dev/null +++ b/third-party/nwaku/scripts/install_rln_tests_dependencies.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +# Install Anvil +./scripts/install_anvil.sh + +#Install pnpm +./scripts/install_pnpm.sh \ No newline at end of file diff --git a/third-party/nwaku/scripts/run_cov.sh b/third-party/nwaku/scripts/run_cov.sh new file mode 100755 index 0000000..a95739f --- /dev/null +++ b/third-party/nwaku/scripts/run_cov.sh @@ -0,0 +1,52 @@ +#!/bin/sh + +# Check if env.sh has been loaded, or if this file is being ran from it. 
+# Using NIMC as a proxy for this, as it's defined in the nimbus-build-system's env.sh. +if [ -z "$NIMC" ] +then + echo "[ERROR] This tool can only be ran from the Nimbus environment. Either:" + echo "- Source env.sh 'source /path/to/env.sh', and then run the script directly '/path/to/scripts/run_cov.sh'." + echo "- Run this script as a parameter to env.sh '/path/to/env.sh /path/to/scripts/run_cov.sh'." + exit 1 +fi + +# Check for lcov tool +which lcov 1>/dev/null 2>&1 +if [ $? != 0 ] +then + echo "[ERROR] You need to have lcov installed in order to generate the test coverage report." + exit 2 +fi + +SCRIPT_PATH=$(dirname "$(realpath -s "$0")") +REPO_ROOT=$(dirname $SCRIPT_PATH) +generated_not_to_break_here="$REPO_ROOT/generated_not_to_break_here" + +if [ "$1" != "-y" ] && [ -f "$generated_not_to_break_here" ] +then + echo "The file '$generated_not_to_break_here' already exists. Do you want to continue? (y/n)" + read -r response + if [ "$response" != "y" ] + then + exit 3 + fi +fi + +output_directory="$REPO_ROOT/coverage_html_report" +base_filepath="$REPO_ROOT/tests/test_all" +nim_filepath=$base_filepath.nim +info_filepath=$base_filepath.info + +# Workaround a nim bug. See https://github.com/nim-lang/Nim/issues/12376 +touch $generated_not_to_break_here + +# Generate the coverage report +nim --debugger:native --passC:--coverage --passL:--coverage --passL:librln_v0.3.4.a --passL:-lm c $nim_filepath +lcov --base-directory . --directory . --zerocounters -q +$base_filepath +lcov --base-directory . --directory . 
--include "*/waku/**" --include "*/apps/**" --exclude "*/vendor/**" -c -o $info_filepath +genhtml -o $output_directory $info_filepath + +# Cleanup +rm -rf $info_filepath $base_filepath nimcache +rm $generated_not_to_break_here diff --git a/third-party/nwaku/simulations/README.md b/third-party/nwaku/simulations/README.md new file mode 100644 index 0000000..c035fc9 --- /dev/null +++ b/third-party/nwaku/simulations/README.md @@ -0,0 +1,4 @@ +# Purpose + +This is a place where any simulation related scripts and utilities can be stored. +Checkout mixnet folder to get an idea. diff --git a/third-party/nwaku/simulations/mixnet/README.md b/third-party/nwaku/simulations/mixnet/README.md new file mode 100644 index 0000000..fcc67b6 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/README.md @@ -0,0 +1,70 @@ +# Mixnet simulation + +## Aim + +Simulate a local mixnet along with a chat app to publish using mix. +This is helpful to test any changes while development. +It includes scripts that run a `4 node` mixnet along with a lightpush service node(without mix) that can be used to test quickly. + +## Simulation Details + +Note that before running the simulation both `wakunode2` and `chat2mix` have to be built. + +```bash +cd +make wakunode2 +make chat2mix +``` + +Simulation includes scripts for: + +1. a 4 waku-node mixnet where `node1` is bootstrap node for the other 3 nodes. +2. scripts to run chat app that publishes using lightpush protocol over the mixnet + +## Usage + +Start the service node with below command, which acts as bootstrap node for all other mix nodes. + +`./run_lp_service_node.sh` + +To run the nodes for mixnet run the 4 node scripts in different terminals as below. + +`./run_mix_node1.sh` + +Look for following 2 log lines to ensure node ran successfully and has also mounted mix protocol. 
+ +```log +INF 2025-08-01 14:51:05.445+05:30 mounting mix protocol topics="waku node" tid=39996871 file=waku_node.nim:231 nodeId="(listenAddresses: @[\"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o\"], enrUri: \"enr:-NC4QKYtas8STkenlqBTJ3a1TTLzJA2DsGGbFlnxem9aSM2IXm-CSVZULdk2467bAyFnepnt8KP_QlfDzdaMXd_zqtwBgmlkgnY0gmlwhH8AAAGHbWl4LWtleaCdCc5iT3bo9gYmXtucyit96bQXcqbXhL3a-S_6j7p9LIptdWx0aWFkZHJzgIJyc4UAAgEAAIlzZWNwMjU2azGhA6RFtVJVBh0SYOoP8xrgnXSlpiFARmQkF9d8Rn4fSeiog3RjcILqYYN1ZHCCIymFd2FrdTIt\")" + +INF 2025-08-01 14:49:23.467+05:30 Node setup complete topics="wakunode main" tid=39994244 file=wakunode2.nim:104 +``` + +Once all the 4 nodes are up without any issues, run the script to start the chat application. + +`./run_chat_app.sh` + +Enter a nickname to be used. + +```bash +pubsub topic is: /waku/2/rs/2/0 +Choose a nickname >> +``` + +Once you see below log, it means the app is ready for publishing messages over the mixnet. + +```bash +Welcome, test! +Listening on + /ip4/192.168.68.64/tcp/60000/p2p/16Uiu2HAkxDGqix1ifY3wF1ZzojQWRAQEdKP75wn1LJMfoHhfHz57 +ready to publish messages now +``` + +Follow similar instructions to run second instance of chat app. +Once both the apps run successfully, send a message and check if it is received by the other app. + +You can exit the chat apps by entering `/exit` as below + +```bash +>> /exit +quitting... 
+``` diff --git a/third-party/nwaku/simulations/mixnet/config.toml b/third-party/nwaku/simulations/mixnet/config.toml new file mode 100644 index 0000000..17e9242 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/config.toml @@ -0,0 +1,25 @@ +log-level = "INFO" +relay = true +#mix = true +filter = true +store = false +lightpush = true +max-connections = 150 +peer-exchange = true +metrics-logging = false +cluster-id = 2 +discv5-discovery = true +discv5-udp-port = 9000 +discv5-enr-auto-update = true +rest = true +rest-admin = true +ports-shift = 1 +num-shards-in-network = 1 +shard = [0] +agent-string = "nwaku-mix" +nodekey = "f98e3fba96c32e8d1967d460f1b79457380e1a895f7971cecc8528abe733781a" +#mixkey = "a87db88246ec0eedda347b9b643864bee3d6933eb15ba41e6d58cb678d813258" +rendezvous = true +listen-address = "127.0.0.1" +nat = "extip:127.0.0.1" +ip-colocation-limit=0 diff --git a/third-party/nwaku/simulations/mixnet/config1.toml b/third-party/nwaku/simulations/mixnet/config1.toml new file mode 100644 index 0000000..e06a527 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/config1.toml @@ -0,0 +1,27 @@ +log-level = "INFO" +relay = true +mix = true +filter = true +store = false +lightpush = true +max-connections = 150 +peer-exchange = true +metrics-logging = false +cluster-id = 2 +discv5-discovery = true +discv5-udp-port = 9001 +discv5-enr-auto-update = true +discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +rest = true +rest-admin = true +ports-shift = 2 +num-shards-in-network = 1 +shard = [0] +agent-string = "nwaku-mix" +nodekey = "09e9d134331953357bd38bbfce8edb377f4b6308b4f3bfbe85c610497053d684" +mixkey = "c86029e02c05a7e25182974b519d0d52fcbafeca6fe191fbb64857fb05be1a53" +rendezvous = true +listen-address = "127.0.0.1" +nat = 
"extip:127.0.0.1" +ip-colocation-limit=0 +#staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"] diff --git a/third-party/nwaku/simulations/mixnet/config2.toml b/third-party/nwaku/simulations/mixnet/config2.toml new file mode 100644 index 0000000..9382260 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/config2.toml @@ -0,0 +1,27 @@ +log-level = "INFO" +relay = true +mix = true +filter = true +store = false +lightpush = true +max-connections = 150 +peer-exchange = true +metrics-logging = false +cluster-id = 2 +discv5-discovery = true +discv5-udp-port = 9002 +discv5-enr-auto-update = true +discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +rest = false +rest-admin = false +ports-shift = 3 +num-shards-in-network = 1 +shard = [0] +agent-string = "nwaku-mix" +nodekey = "ed54db994682e857d77cd6fb81be697382dc43aa5cd78e16b0ec8098549f860e" +mixkey = "b858ac16bbb551c4b2973313b1c8c8f7ea469fca03f1608d200bbf58d388ec7f" +rendezvous = true +listen-address = "127.0.0.1" +nat = "extip:127.0.0.1" +ip-colocation-limit=0 +#staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"] diff --git a/third-party/nwaku/simulations/mixnet/config3.toml 
b/third-party/nwaku/simulations/mixnet/config3.toml new file mode 100644 index 0000000..6f339df --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/config3.toml @@ -0,0 +1,27 @@ +log-level = "INFO" +relay = true +mix = true +filter = true +store = false +lightpush = true +max-connections = 150 +peer-exchange = true +metrics-logging = false +cluster-id = 2 +discv5-discovery = true +discv5-udp-port = 9003 +discv5-enr-auto-update = true +discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +rest = false +rest-admin = false +ports-shift = 4 +num-shards-in-network = 1 +shard = [0] +agent-string = "nwaku-mix" +nodekey = "42f96f29f2d6670938b0864aced65a332dcf5774103b4c44ec4d0ea4ef3c47d6" +mixkey = "d8bd379bb394b0f22dd236d63af9f1a9bc45266beffc3fbbe19e8b6575f2535b" +rendezvous = true +listen-address = "127.0.0.1" +nat = "extip:127.0.0.1" +ip-colocation-limit=0 +#staticnode = ["/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"] diff --git a/third-party/nwaku/simulations/mixnet/config4.toml b/third-party/nwaku/simulations/mixnet/config4.toml new file mode 100644 index 0000000..23115ac --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/config4.toml @@ -0,0 +1,27 @@ +log-level = "INFO" +relay = true +mix = true +filter = true +store = false +lightpush = true +max-connections = 150 +peer-exchange = true +metrics-logging = false +cluster-id = 2 +discv5-discovery = true +discv5-udp-port = 9004 +discv5-enr-auto-update = true +discv5-bootstrap-node = 
["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"] +rest = false +rest-admin = false +ports-shift = 5 +num-shards-in-network = 1 +shard = [0] +agent-string = "nwaku-mix" +nodekey = "3ce887b3c34b7a92dd2868af33941ed1dbec4893b054572cd5078da09dd923d4" +mixkey = "780fff09e51e98df574e266bf3266ec6a3a1ddfcf7da826a349a29c137009d49" +rendezvous = true +listen-address = "127.0.0.1" +nat = "extip:127.0.0.1" +ip-colocation-limit=0 +#staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF"] diff --git a/third-party/nwaku/simulations/mixnet/run_chat_mix.sh b/third-party/nwaku/simulations/mixnet/run_chat_mix.sh new file mode 100755 index 0000000..7324384 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/run_chat_mix.sh @@ -0,0 +1 @@ +../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE --mixnode="enr:-Nq4QIPd6TbOWns1TsbSq2KB6g3hIClJa8qBUWFFwbGut9OBCwTHYshi0-iv1ilTMx4FkuSJ4NtkZVx0QSrrMRTGpEsDgmlkgnY0gmlwhH8AAAGHbWl4LWtleaCSMehtpkMlApAKhPhnAEznhjKrUs2OMLHsMizXlXEMKoptdWx0aWFkZHJzigAIBMCoRD4G6mKCcnOFAAIBAACJc2VjcDI1NmsxoQN6R8gw1Pu8IwMlTap0_E7vVd1wcaFgg_VUaaeVWSZYVIN0Y3CC6mKDdWRwgiMrhXdha3UyLQ" 
--mixnode="enr:-Nq4QC6XyKXZSlJNFzTDPI118SBC2ilLqE05RR4o4OzEZxueGkYtExHtTBvmY-9pl17EXZtXvF_tIV_2g0K_fb2LmsoDgmlkgnY0gmlwhH8AAAGHbWl4LWtleaAnXNaInh8pykjlue24ANGpT0nxPTk6Ds8aB691NQbebIptdWx0aWFkZHJzigAIBMCoRD4G6mOCcnOFAAIBAACJc2VjcDI1NmsxoQPYhmrbTqylbdenVfvO2U0w6EC4A-l5lwvu3QWL7IqkO4N0Y3CC6mODdWRwgiMthXdha3UyLQ" --mixnode="enr:-Nq4QKoh8Ta8Q3zLLAkf4hyYzxpuTc-BRBGb_WYVIm6hRptKZFuIo3DNlWCpfIxJnNI5epjLWQWHFUo3dqpAoWhoXEUDgmlkgnY0gmlwhH8AAAGHbWl4LWtleaDg7VlKjVBmgb4HXo4jcjR4OI-xgkd_ekaTCaJecHb8GIptdWx0aWFkZHJzigAIBMCoRD4G6mSCcnOFAAIBAACJc2VjcDI1NmsxoQOnphVC3U5zmOCkjOI2tY0v8K5QkXSaE5xO37q3iFfKGIN0Y3CC6mSDdWRwgiMvhXdha3UyLQ" --mixnode="enr:-Nq4QN7ub3xi53eDyKKstEM2IjFo7oY5Kf4glFz45W2saWqNXPqJFruw08c9B_EIu1LoW4opwXId_4zvPmekZwYHKp8DgmlkgnY0gmlwhH8AAAGHbWl4LWtleaCP16GnwZtAPSMUUqmx6kDrHMdvRV2RjviYDnaF-e7rH4ptdWx0aWFkZHJzigAIBMCoRD4G6mWCcnOFAAIBAACJc2VjcDI1NmsxoQLJtl9kA98YgBkVElkJgl9XyyRNco78oShb1hsv6Mlbs4N0Y3CC6mWDdWRwgiMxhXdha3UyLQ" diff --git a/third-party/nwaku/simulations/mixnet/run_chat_mix1.sh b/third-party/nwaku/simulations/mixnet/run_chat_mix1.sh new file mode 100755 index 0000000..7324384 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/run_chat_mix1.sh @@ -0,0 +1 @@ +../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE --mixnode="enr:-Nq4QIPd6TbOWns1TsbSq2KB6g3hIClJa8qBUWFFwbGut9OBCwTHYshi0-iv1ilTMx4FkuSJ4NtkZVx0QSrrMRTGpEsDgmlkgnY0gmlwhH8AAAGHbWl4LWtleaCSMehtpkMlApAKhPhnAEznhjKrUs2OMLHsMizXlXEMKoptdWx0aWFkZHJzigAIBMCoRD4G6mKCcnOFAAIBAACJc2VjcDI1NmsxoQN6R8gw1Pu8IwMlTap0_E7vVd1wcaFgg_VUaaeVWSZYVIN0Y3CC6mKDdWRwgiMrhXdha3UyLQ" --mixnode="enr:-Nq4QC6XyKXZSlJNFzTDPI118SBC2ilLqE05RR4o4OzEZxueGkYtExHtTBvmY-9pl17EXZtXvF_tIV_2g0K_fb2LmsoDgmlkgnY0gmlwhH8AAAGHbWl4LWtleaAnXNaInh8pykjlue24ANGpT0nxPTk6Ds8aB691NQbebIptdWx0aWFkZHJzigAIBMCoRD4G6mOCcnOFAAIBAACJc2VjcDI1NmsxoQPYhmrbTqylbdenVfvO2U0w6EC4A-l5lwvu3QWL7IqkO4N0Y3CC6mODdWRwgiMthXdha3UyLQ" 
--mixnode="enr:-Nq4QKoh8Ta8Q3zLLAkf4hyYzxpuTc-BRBGb_WYVIm6hRptKZFuIo3DNlWCpfIxJnNI5epjLWQWHFUo3dqpAoWhoXEUDgmlkgnY0gmlwhH8AAAGHbWl4LWtleaDg7VlKjVBmgb4HXo4jcjR4OI-xgkd_ekaTCaJecHb8GIptdWx0aWFkZHJzigAIBMCoRD4G6mSCcnOFAAIBAACJc2VjcDI1NmsxoQOnphVC3U5zmOCkjOI2tY0v8K5QkXSaE5xO37q3iFfKGIN0Y3CC6mSDdWRwgiMvhXdha3UyLQ" --mixnode="enr:-Nq4QN7ub3xi53eDyKKstEM2IjFo7oY5Kf4glFz45W2saWqNXPqJFruw08c9B_EIu1LoW4opwXId_4zvPmekZwYHKp8DgmlkgnY0gmlwhH8AAAGHbWl4LWtleaCP16GnwZtAPSMUUqmx6kDrHMdvRV2RjviYDnaF-e7rH4ptdWx0aWFkZHJzigAIBMCoRD4G6mWCcnOFAAIBAACJc2VjcDI1NmsxoQLJtl9kA98YgBkVElkJgl9XyyRNco78oShb1hsv6Mlbs4N0Y3CC6mWDdWRwgiMxhXdha3UyLQ" diff --git a/third-party/nwaku/simulations/mixnet/run_lp_service_node.sh b/third-party/nwaku/simulations/mixnet/run_lp_service_node.sh new file mode 100755 index 0000000..1d00579 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/run_lp_service_node.sh @@ -0,0 +1 @@ +../../build/wakunode2 --config-file="config.toml" diff --git a/third-party/nwaku/simulations/mixnet/run_mix_node1.sh b/third-party/nwaku/simulations/mixnet/run_mix_node1.sh new file mode 100755 index 0000000..024eb3f --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/run_mix_node1.sh @@ -0,0 +1 @@ +../../build/wakunode2 --config-file="config1.toml" diff --git a/third-party/nwaku/simulations/mixnet/run_mix_node2.sh b/third-party/nwaku/simulations/mixnet/run_mix_node2.sh new file mode 100755 index 0000000..e55a9ba --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/run_mix_node2.sh @@ -0,0 +1 @@ +../../build/wakunode2 --config-file="config2.toml" diff --git a/third-party/nwaku/simulations/mixnet/run_mix_node3.sh b/third-party/nwaku/simulations/mixnet/run_mix_node3.sh new file mode 100755 index 0000000..dca8119 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/run_mix_node3.sh @@ -0,0 +1 @@ +../../build/wakunode2 --config-file="config3.toml" diff --git a/third-party/nwaku/simulations/mixnet/run_mix_node4.sh b/third-party/nwaku/simulations/mixnet/run_mix_node4.sh new file 
mode 100755 index 0000000..9cf2515 --- /dev/null +++ b/third-party/nwaku/simulations/mixnet/run_mix_node4.sh @@ -0,0 +1 @@ +../../build/wakunode2 --config-file="config4.toml" diff --git a/third-party/nwaku/tests/all_tests_common.nim b/third-party/nwaku/tests/all_tests_common.nim new file mode 100644 index 0000000..1bbfd2c --- /dev/null +++ b/third-party/nwaku/tests/all_tests_common.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./common/test_all diff --git a/third-party/nwaku/tests/all_tests_waku.nim b/third-party/nwaku/tests/all_tests_waku.nim new file mode 100644 index 0000000..3d22cd9 --- /dev/null +++ b/third-party/nwaku/tests/all_tests_waku.nim @@ -0,0 +1,106 @@ +## Waku v2 + +import ./test_waku + +# Waku core test suite +import + ./waku_core/test_namespaced_topics, + ./waku_core/test_time, + ./waku_core/test_message_digest, + ./waku_core/test_peers, + ./waku_core/test_published_address + +# Waku archive test suite +import + ./waku_archive/test_driver_queue_index, + ./waku_archive/test_driver_queue_pagination, + ./waku_archive/test_driver_queue_query, + ./waku_archive/test_driver_queue, + ./waku_archive/test_driver_sqlite_query, + ./waku_archive/test_driver_sqlite, + ./waku_archive/test_retention_policy, + ./waku_archive/test_waku_archive, + ./waku_archive/test_partition_manager, + ./waku_archive_legacy/test_driver_queue_index, + ./waku_archive_legacy/test_driver_queue_pagination, + ./waku_archive_legacy/test_driver_queue_query, + ./waku_archive_legacy/test_driver_queue, + ./waku_archive_legacy/test_driver_sqlite_query, + ./waku_archive_legacy/test_driver_sqlite, + ./waku_archive_legacy/test_waku_archive + +const os* {.strdefine.} = "" +when os == "Linux" and + # GitHub only supports container actions on Linux + # and we need to start a postgres database in a docker container + defined(postgres): + import + ./waku_archive/test_driver_postgres_query, + ./waku_archive/test_driver_postgres, + #./waku_archive_legacy/test_driver_postgres_query, + 
#./waku_archive_legacy/test_driver_postgres, + ./factory/test_node_factory, + ./wakunode_rest/test_rest_store, + ./wakunode_rest/test_all + +# Waku store test suite +import + ./waku_store/test_client, + ./waku_store/test_rpc_codec, + ./waku_store/test_waku_store, + ./waku_store/test_wakunode_store + +# Waku legacy store test suite +import + ./waku_store_legacy/test_client, + ./waku_store_legacy/test_rpc_codec, + ./waku_store_legacy/test_waku_store, + ./waku_store_legacy/test_wakunode_store + +# Waku store sync suite +import ./waku_store_sync/test_all + +when defined(waku_exp_store_resume): + # TODO: Review store resume test cases (#1282) + import ./waku_store_legacy/test_resume + +import + ./node/test_all, + ./waku_filter_v2/test_all, + ./waku_peer_exchange/test_all, + ./waku_lightpush_legacy/test_all, + ./waku_lightpush/test_all, + ./waku_relay/test_all, + ./incentivization/test_all + +import + # Waku v2 tests + ./test_wakunode, + ./test_peer_store_extended, + ./test_message_cache, + ./test_peer_manager, + ./test_peer_storage, + ./test_waku_keepalive, + ./test_waku_enr, + ./test_waku_dnsdisc, + ./test_relay_peer_exchange, + ./test_waku_noise, + ./test_waku_noise_sessions, + ./test_waku_netconfig, + ./test_waku_switch, + ./test_waku_rendezvous, + ./waku_discv5/test_waku_discv5 + +# Waku Keystore test suite +import ./test_waku_keystore_keyfile, ./test_waku_keystore + +import ./waku_rln_relay/test_all + +# Node Factory +import ./factory/test_all + +# Waku API tests +import ./api/test_all + +# Waku tools tests +import ./tools/test_all diff --git a/third-party/nwaku/tests/all_tests_wakunode2.nim b/third-party/nwaku/tests/all_tests_wakunode2.nim new file mode 100644 index 0000000..cfde01d --- /dev/null +++ b/third-party/nwaku/tests/all_tests_wakunode2.nim @@ -0,0 +1,3 @@ +## Wakunode2 + +import ./wakunode2/test_all diff --git a/third-party/nwaku/tests/api/test_all.nim b/third-party/nwaku/tests/api/test_all.nim new file mode 100644 index 0000000..99c1b3b --- /dev/null 
+++ b/third-party/nwaku/tests/api/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_entry_nodes, ./test_node_conf diff --git a/third-party/nwaku/tests/api/test_entry_nodes.nim b/third-party/nwaku/tests/api/test_entry_nodes.nim new file mode 100644 index 0000000..136a49b --- /dev/null +++ b/third-party/nwaku/tests/api/test_entry_nodes.nim @@ -0,0 +1,264 @@ +{.used.} + +import std/options, results, testutils/unittests + +import waku/api/entry_nodes + +# Since classifyEntryNode is internal, we test it indirectly through processEntryNodes behavior +# The enum is exported so we can test against it + +suite "Entry Nodes Classification": + test "Process ENRTree - standard format": + let result = processEntryNodes( + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" + ] + ) + check: + result.isOk() + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 1 + bootstrapEnrs.len == 0 + staticNodes.len == 0 + + test "Process ENRTree - case insensitive": + let result = processEntryNodes( + @[ + "ENRTREE://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" + ] + ) + check: + result.isOk() + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 1 + bootstrapEnrs.len == 0 + staticNodes.len == 0 + + test "Process ENR - standard format": + let result = processEntryNodes( + @[ + "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" + ] + ) + check: + result.isOk() + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 1 + staticNodes.len == 0 + + test "Process ENR - case insensitive": + let result = processEntryNodes( + @[ + 
"ENR:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" + ] + ) + check: + result.isOk() + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 1 + staticNodes.len == 0 + + test "Process Multiaddress - IPv4": + let result = processEntryNodes( + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + ] + ) + check: + result.isOk() + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 0 + staticNodes.len == 1 + + test "Process Multiaddress - IPv6": + let result = processEntryNodes( + @["/ip6/::1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"] + ) + check: + result.isOk() + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 0 + staticNodes.len == 1 + + test "Process Multiaddress - DNS": + let result = processEntryNodes( + @[ + "/dns4/example.com/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + ] + ) + check: + result.isOk() + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 0 + staticNodes.len == 1 + + test "Process empty string": + let result = processEntryNodes(@[""]) + check: + result.isErr() + result.error == "Entry node error: Empty entry node address" + + test "Process invalid format - HTTP URL": + let result = processEntryNodes(@["http://example.com"]) + check: + result.isErr() + result.error == + "Entry node error: Unrecognized entry node format. Must start with 'enrtree:', 'enr:', or '/'" + + test "Process invalid format - some string": + let result = processEntryNodes(@["some-string-here"]) + check: + result.isErr() + result.error == + "Entry node error: Unrecognized entry node format. 
Must start with 'enrtree:', 'enr:', or '/'" + +suite "Entry Nodes Processing": + test "Process mixed entry nodes": + let entryNodes = + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g", + ] + + let result = processEntryNodes(entryNodes) + check: + result.isOk() + + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 1 # enrtree + bootstrapEnrs.len == 1 # enr + staticNodes.len >= 1 # at least the multiaddr + enrTreeUrls[0] == entryNodes[0] # enrtree unchanged + bootstrapEnrs[0] == entryNodes[2] # enr unchanged + staticNodes[0] == entryNodes[1] # multiaddr added to static + + test "Process only ENRTree nodes": + let entryNodes = + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + "enrtree://ANOTHER_TREE@example.com", + ] + + let result = processEntryNodes(entryNodes) + check: + result.isOk() + + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 2 + bootstrapEnrs.len == 0 + staticNodes.len == 0 + enrTreeUrls == entryNodes + + test "Process only multiaddresses": + let entryNodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + "/ip4/192.168.1.1/tcp/60001/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd", + ] + + let result = processEntryNodes(entryNodes) + check: + result.isOk() + + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 0 + staticNodes.len == 2 + staticNodes == entryNodes + + test "Process only ENR nodes": + let entryNodes = + @[ + 
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g", + "enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9MBgmlkgnY0iXNlY3AyNTZrMaEDk49D8JjMSns4p1XVNBvJquOUzT4PENSJknkROspfAFGg3RjcIJ2X4N1ZHCCd2g", + ] + + let result = processEntryNodes(entryNodes) + check: + result.isOk() + + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 2 + staticNodes.len == 0 + bootstrapEnrs == entryNodes + # Note: staticNodes may or may not be populated depending on ENR parsing + + test "Process empty list": + let entryNodes: seq[string] = @[] + + let result = processEntryNodes(entryNodes) + check: + result.isOk() + + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 0 + staticNodes.len == 0 + + test "Process with invalid entry": + let entryNodes = @["enrtree://VALID@example.com", "invalid://notvalid"] + + let result = processEntryNodes(entryNodes) + check: + result.isErr() + result.error == + "Entry node error: Unrecognized entry node format. 
Must start with 'enrtree:', 'enr:', or '/'" + + test "Process different multiaddr formats": + let entryNodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + "/ip6/::1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd", + "/dns4/example.com/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYe", + "/dns/node.example.org/tcp/443/wss/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYf", + ] + + let result = processEntryNodes(entryNodes) + check: + result.isOk() + + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + enrTreeUrls.len == 0 + bootstrapEnrs.len == 0 + staticNodes.len == 4 + staticNodes == entryNodes + + test "Process with duplicate entries": + let entryNodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + ] + + let result = processEntryNodes(entryNodes) + check: + result.isOk() + + let (enrTreeUrls, bootstrapEnrs, staticNodes) = result.get() + check: + # Duplicates are not filtered out (by design - let downstream handle it) + enrTreeUrls.len == 2 + bootstrapEnrs.len == 0 + staticNodes.len == 2 diff --git a/third-party/nwaku/tests/api/test_node_conf.nim b/third-party/nwaku/tests/api/test_node_conf.nim new file mode 100644 index 0000000..c9b256d --- /dev/null +++ b/third-party/nwaku/tests/api/test_node_conf.nim @@ -0,0 +1,277 @@ +{.used.} + +import std/options, results, stint, testutils/unittests +import waku/api/api_conf, waku/factory/waku_conf, waku/factory/networks_config + +suite "LibWaku Conf - toWakuConf": + test "Minimal configuration": + ## Given + let nodeConfig = NodeConfig.init(ethRpcEndpoints = 
@["http://someaddress"]) + + ## When + let wakuConfRes = toWakuConf(nodeConfig) + + ## Then + let wakuConf = wakuConfRes.valueOr: + raiseAssert error + wakuConf.validate().isOkOr: + raiseAssert error + check: + wakuConf.clusterId == 1 + wakuConf.shardingConf.numShardsInCluster == 8 + wakuConf.staticNodes.len == 0 + + test "Core mode configuration": + ## Given + let wakuConfig = WakuConfig.init(entryNodes = @[], clusterId = 1) + + let nodeConfig = NodeConfig.init(mode = Core, wakuConfig = wakuConfig) + + ## When + let wakuConfRes = toWakuConf(nodeConfig) + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == true + wakuConf.lightPush == true + wakuConf.peerExchangeService == true + wakuConf.clusterId == 1 + + test "Auto-sharding configuration": + ## Given + let nodeConfig = NodeConfig.init( + mode = Core, + wakuConfig = WakuConfig.init( + entryNodes = @[], + staticStoreNodes = @[], + clusterId = 42, + autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16), + ), + ) + + ## When + let wakuConfRes = toWakuConf(nodeConfig) + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.clusterId == 42 + wakuConf.shardingConf.numShardsInCluster == 16 + + test "Bootstrap nodes configuration": + ## Given + let entryNodes = + @[ + "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g", + "enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9MBgmlkgnY0iXNlY3AyNTZrMaEDk49D8JjMSns4p1XVNBvJquOUzT4PENSJknkROspfAFGg3RjcIJ2X4N1ZHCCd2g", + ] + let libConf = NodeConfig.init( + mode = Core, + wakuConfig = + WakuConfig.init(entryNodes = entryNodes, staticStoreNodes = @[], clusterId = 1), + ) + + ## When + let wakuConfRes = toWakuConf(libConf) + + ## 
Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + require wakuConf.discv5Conf.isSome() + check: + wakuConf.discv5Conf.get().bootstrapNodes == entryNodes + + test "Static store nodes configuration": + ## Given + let staticStoreNodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + "/ip4/192.168.1.1/tcp/60001/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd", + ] + let nodeConf = NodeConfig.init( + wakuConfig = WakuConfig.init( + entryNodes = @[], staticStoreNodes = staticStoreNodes, clusterId = 1 + ) + ) + + ## When + let wakuConfRes = toWakuConf(nodeConf) + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.staticNodes == staticStoreNodes + + test "Message validation with max message size": + ## Given + let nodeConfig = NodeConfig.init( + wakuConfig = WakuConfig.init( + entryNodes = @[], + staticStoreNodes = @[], + clusterId = 1, + messageValidation = + MessageValidation(maxMessageSize: "100KiB", rlnConfig: none(RlnConfig)), + ) + ) + + ## When + let wakuConfRes = toWakuConf(nodeConfig) + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.maxMessageSizeBytes == 100'u64 * 1024'u64 + + test "Message validation with RLN config": + ## Given + let nodeConfig = NodeConfig.init( + wakuConfig = WakuConfig.init( + entryNodes = @[], + clusterId = 1, + messageValidation = MessageValidation( + maxMessageSize: "150 KiB", + rlnConfig: some( + RlnConfig( + contractAddress: "0x1234567890123456789012345678901234567890", + chainId: 1'u, + epochSizeSec: 600'u64, + ) + ), + ), + ), + ethRpcEndpoints = @["http://127.0.0.1:1111"], + ) + + ## When + let wakuConf = toWakuConf(nodeConfig).valueOr: + raiseAssert error + + wakuConf.validate().isOkOr: + raiseAssert error + + check: + wakuConf.maxMessageSizeBytes == 150'u64 
* 1024'u64 + + require wakuConf.rlnRelayConf.isSome() + let rlnConf = wakuConf.rlnRelayConf.get() + check: + rlnConf.dynamic == true + rlnConf.ethContractAddress == "0x1234567890123456789012345678901234567890" + rlnConf.chainId == 1'u256 + rlnConf.epochSizeSec == 600'u64 + + test "Full Core mode configuration with all fields": + ## Given + let nodeConfig = NodeConfig.init( + mode = Core, + wakuConfig = WakuConfig.init( + entryNodes = + @[ + "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" + ], + staticStoreNodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + ], + clusterId = 99, + autoShardingConfig = AutoShardingConfig(numShardsInCluster: 12), + messageValidation = MessageValidation( + maxMessageSize: "512KiB", + rlnConfig: some( + RlnConfig( + contractAddress: "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", + chainId: 5'u, # Goerli + epochSizeSec: 300'u64, + ) + ), + ), + ), + ethRpcEndpoints = @["https://127.0.0.1:8333"], + ) + + ## When + let wakuConfRes = toWakuConf(nodeConfig) + + ## Then + let wakuConf = wakuConfRes.valueOr: + raiseAssert error + wakuConf.validate().isOkOr: + raiseAssert error + + # Check basic settings + check: + wakuConf.relay == true + wakuConf.lightPush == true + wakuConf.peerExchangeService == true + wakuConf.rendezvous == true + wakuConf.clusterId == 99 + + # Check sharding + check: + wakuConf.shardingConf.numShardsInCluster == 12 + + # Check bootstrap nodes + require wakuConf.discv5Conf.isSome() + check: + wakuConf.discv5Conf.get().bootstrapNodes.len == 1 + + # Check static nodes + check: + wakuConf.staticNodes.len == 1 + wakuConf.staticNodes[0] == + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + # Check message validation + check: + wakuConf.maxMessageSizeBytes == 512'u64 * 1024'u64 + + # Check RLN config + 
require wakuConf.rlnRelayConf.isSome() + let rlnConf = wakuConf.rlnRelayConf.get() + check: + rlnConf.dynamic == true + rlnConf.ethContractAddress == "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + rlnConf.chainId == 5'u256 + rlnConf.epochSizeSec == 300'u64 + + test "NodeConfig with mixed entry nodes (integration test)": + ## Given + let entryNodes = + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + ] + + let nodeConfig = NodeConfig.init( + mode = Core, + wakuConfig = + WakuConfig.init(entryNodes = entryNodes, staticStoreNodes = @[], clusterId = 1), + ) + + ## When + let wakuConfRes = toWakuConf(nodeConfig) + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + + # Check that ENRTree went to DNS discovery + require wakuConf.dnsDiscoveryConf.isSome() + check: + wakuConf.dnsDiscoveryConf.get().enrTreeUrl == entryNodes[0] + + # Check that multiaddr went to static nodes + check: + wakuConf.staticNodes.len == 1 + wakuConf.staticNodes[0] == entryNodes[1] diff --git a/third-party/nwaku/tests/common/test_all.nim b/third-party/nwaku/tests/common/test_all.nim new file mode 100644 index 0000000..5b45150 --- /dev/null +++ b/third-party/nwaku/tests/common/test_all.nim @@ -0,0 +1,12 @@ +{.used.} + +import + ./test_base64_codec, + ./test_enr_builder, + ./test_protobuf_validation, + ./test_sqlite_migrations, + ./test_parse_size, + ./test_tokenbucket, + ./test_requestratelimiter, + ./test_ratelimit_setting, + ./test_timed_map diff --git a/third-party/nwaku/tests/common/test_base64_codec.nim b/third-party/nwaku/tests/common/test_base64_codec.nim new file mode 100644 index 0000000..1c2d04c --- /dev/null +++ b/third-party/nwaku/tests/common/test_base64_codec.nim @@ -0,0 +1,50 @@ +{.used.} + +import std/strutils, results, stew/byteutils, testutils/unittests +import waku/common/base64 + 
+suite "Waku Common - stew base64 wrapper": + const TestData = + @[ + # Test vectors from RFC 4648 + # See: https://datatracker.ietf.org/doc/html/rfc4648#section-10 + ("", Base64String("")), + ("f", Base64String("Zg==")), + ("fo", Base64String("Zm8=")), + ("foo", Base64String("Zm9v")), + ("foob", Base64String("Zm9vYg==")), + ("fooba", Base64String("Zm9vYmE=")), + ("foobar", Base64String("Zm9vYmFy")), + + # Custom test vectors + ("\x01", Base64String("AQ==")), + ("\x13", Base64String("Ew==")), + ("\x01\x02\x03\x04", Base64String("AQIDBA==")), + ] + + for (plaintext, encoded) in TestData: + test "encode into base64 (" & escape(plaintext) & " -> \"" & string(encoded) & "\")": + ## Given + let data = plaintext + + ## When + let encodedData = base64.encode(data) + + ## Then + check: + encodedData == encoded + + test "decode from base64 (\"" & string(encoded) & "\" -> " & escape(plaintext) & ")": + ## Given + let data = encoded + + ## When + let decodedRes = base64.decode(data) + + ## Then + check: + decodedRes.isOk() + + let decoded = decodedRes.tryGet() + check: + decoded == toBytes(plaintext) diff --git a/third-party/nwaku/tests/common/test_enr_builder.nim b/third-party/nwaku/tests/common/test_enr_builder.nim new file mode 100644 index 0000000..0cf7bcb --- /dev/null +++ b/third-party/nwaku/tests/common/test_enr_builder.nim @@ -0,0 +1,126 @@ +{.used.} + +import std/[options, net], results, testutils/unittests +import waku/common/enr, ../testlib/wakucore + +suite "nim-eth ENR - builder and typed record": + test "Non-supported private key (ECDSA)": + ## Given + let privateKey = generateEcdsaKey() + + ## Then + expect Defect: + discard EnrBuilder.init(privateKey) + + test "Supported private key (Secp256k1)": + let + seqNum = 1u64 + privateKey = generateSecp256k1Key() + + let expectedPubKey = privateKey.getPublicKey().get().getRawBytes().get() + + ## When + var builder = EnrBuilder.init(privateKey, seqNum) + let enrRes = builder.build() + + ## Then + check enrRes.isOk() + 
+ let record = enrRes.tryGet().toTyped().get() + + let id = record.id + check: + id == some(RecordId.V4) + + let publicKey = record.secp256k1 + check: + publicKey.isSome() + @(publicKey.get()) == expectedPubKey + +suite "nim-eth ENR - Ext: IP address and TCP/UDP ports": + test "EIP-778 test vector": + ## Given + # Test vector from EIP-778 + # See: https://eips.ethereum.org/EIPS/eip-778#test-vectors + let expectedEnr = + "-IS4QHCYrYZbAKWCBRlAy5zzaDZXJBGkcnh4MHcBFZntXNFrdvJjX04j" & + "RzjzCBOonrkTfj499SZuOh8R33Ls8RRcy5wBgmlkgnY0gmlwhH8AAAGJ" & + "c2VjcDI1NmsxoQPKY0yuDUmstAHYpMa2_oxVtw0RW_QAdpzBQA8yWM0x" & "OIN1ZHCCdl8" + + let + seqNum = 1u64 + privateKey = ethSecp256k1Key( + "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291" + ) + + enrIpAddr = parseIpAddress("127.0.0.1") + enrUdpPort = Port(30303) + + ## When + var builder = EnrBuilder.init(privateKey, seqNum) + builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), udpPort = some(enrUdpPort)) + + let enrRes = builder.build() + + ## Then + check enrRes.isOk() + + let record = enrRes.tryGet().toBase64() + check: + record == expectedEnr + + test "IPv4 and TCP port": + let + seqNum = 1u64 + privateKey = generateSecp256k1Key() + + enrIpAddr = parseIpAddress("127.0.0.1") + enrTcpPort = Port(30301) + + let expectedPubKey = privateKey.getPublicKey().get().getRawBytes().get() + + ## When + var builder = EnrBuilder.init(privateKey, seqNum) + builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), tcpPort = some(enrTcpPort)) + + let enrRes = builder.build() + + ## Then + check enrRes.isOk() + + let record = enrRes.tryGet().toTyped().get() + check: + @(record.secp256k1.get()) == expectedPubKey + record.ip == some(enrIpAddr.address_v4) + record.tcp == some(enrTcpPort.uint16) + record.udp == none(uint16) + record.ip6 == none(array[16, byte]) + + test "IPv6 and UDP port": + let + seqNum = 1u64 + privateKey = generateSecp256k1Key() + + enrIpAddr = parseIpAddress("::1") + enrUdpPort = Port(30301) + + let 
expectedPubKey = privateKey.getPublicKey().get().getRawBytes().get() + + ## When + var builder = EnrBuilder.init(privateKey, seqNum) + builder.withIpAddressAndPorts(ipAddr = some(enrIpAddr), udpPort = some(enrUdpPort)) + + let enrRes = builder.build() + + ## Then + check enrRes.isOk() + + let record = enrRes.tryGet().toTyped().get() + check: + @(record.secp256k1.get()) == expectedPubKey + record.ip == none(array[4, byte]) + record.tcp == none(uint16) + record.ip6 == some(enrIpAddr.address_v6) + record.tcp6 == none(uint16) + record.udp6 == some(enrUdpPort.uint16) + record.udp == some(enrUdpPort.uint16) diff --git a/third-party/nwaku/tests/common/test_parse_size.nim b/third-party/nwaku/tests/common/test_parse_size.nim new file mode 100644 index 0000000..009cb96 --- /dev/null +++ b/third-party/nwaku/tests/common/test_parse_size.nim @@ -0,0 +1,104 @@ +{.used.} + +import testutils/unittests, results +import waku/common/utils/parse_size_units + +suite "Size serialization test": + test "parse normal sizes": + var sizeInBytesRes = parseMsgSize("15 KiB") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 15360 + + sizeInBytesRes = parseMsgSize(" 1048576 B") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1048576 + + sizeInBytesRes = parseMsgSize("150 B") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 150 + + sizeInBytesRes = parseMsgSize("150 b") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 150 + + sizeInBytesRes = parseMsgSize("150b") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 150 + + sizeInBytesRes = parseMsgSize("1024kib") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1048576 + + sizeInBytesRes = parseMsgSize("1024KiB") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1048576 + + sizeInBytesRes = 
parseMsgSize("1024KB") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1024000 + + sizeInBytesRes = parseMsgSize("1024kb") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1024000 + + sizeInBytesRes = parseMsgSize("1.5 kib") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1536 + + sizeInBytesRes = parseMsgSize("1,5 kb") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1500 + + sizeInBytesRes = parseMsgSize("0,5 kb") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 500 + + sizeInBytesRes = parseMsgSize("1.5 kb") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1500 + + sizeInBytesRes = parseMsgSize("0.5 kb") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 500 + + sizeInBytesRes = parseMsgSize(" 1.5 KB") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 1500 + + sizeInBytesRes = parseMsgSize(" 0.5 kb") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == 500 + + sizeInBytesRes = parseMsgSize(" 1024 kib") + assert sizeInBytesRes.isOk(), sizeInBytesRes.error + check sizeInBytesRes.get() == uint64(1024 * 1024) + + test "parse wrong sizes": + var sizeInBytesRes = parseMsgSize("150K") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + sizeInBytesRes = parseMsgSize("150 iB") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + sizeInBytesRes = parseMsgSize("150 ib") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + sizeInBytesRes = parseMsgSize("150 MB") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + ## notice that we don't allow MB units explicitly. 
If someone want to set 1MiB, the + ## s/he should use 1024 KiB + sizeInBytesRes = parseMsgSize("150 MiB") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + sizeInBytesRes = parseMsgSize("150MiB") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + sizeInBytesRes = parseMsgSize("150K") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + sizeInBytesRes = parseMsgSize("150 K") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" + + sizeInBytesRes = parseMsgSize("15..0 KiB") + assert sizeInBytesRes.isErr(), "The size should be considered incorrect" diff --git a/third-party/nwaku/tests/common/test_protobuf_validation.nim b/third-party/nwaku/tests/common/test_protobuf_validation.nim new file mode 100644 index 0000000..30254d3 --- /dev/null +++ b/third-party/nwaku/tests/common/test_protobuf_validation.nim @@ -0,0 +1,96 @@ +{.used.} + +import testutils/unittests +import waku/common/protobuf + +## Fixtures + +const MaxTestRpcFieldLen = 5 + +type TestRpc = object + testField*: string + +proc init(T: type TestRpc, field: string): T = + T(testField: field) + +proc encode(rpc: TestRpc): ProtoBuffer = + var pb = initProtoBuffer() + pb.write3(1, rpc.testField) + pb.finish3() + pb + +proc encodeWithBadFieldId(rpc: TestRpc): ProtoBuffer = + var pb = initProtoBuffer() + pb.write3(666, rpc.testField) + pb.finish3() + pb + +proc decode(T: type TestRpc, buf: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buf) + + var field: string + if not ?pb.getField(1, field): + return err(ProtobufError.missingRequiredField("test_field")) + if field.len > MaxTestRpcFieldLen: + return err(ProtobufError.invalidLengthField("test_field")) + + ok(TestRpc.init(field)) + +## Tests + +suite "Waku Common - libp2p minprotobuf wrapper": + test "serialize and deserialize - valid length field": + ## Given + let field = "12345" + + let rpc = TestRpc.init(field) + + ## When + let encodedRpc = 
rpc.encode() + let decodedRpcRes = TestRpc.decode(encodedRpc.buffer) + + ## Then + check: + decodedRpcRes.isOk() + + let decodedRpc = decodedRpcRes.tryGet() + check: + decodedRpc.testField == field + + test "serialize and deserialize - missing required field": + ## Given + let field = "12345" + + let rpc = TestRpc.init(field) + + ## When + let encodedRpc = rpc.encodeWithBadFieldId() + let decodedRpcRes = TestRpc.decode(encodedRpc.buffer) + + ## Then + check: + decodedRpcRes.isErr() + + let error = decodedRpcRes.tryError() + check: + error.kind == ProtobufErrorKind.MissingRequiredField + error.field == "test_field" + + test "serialize and deserialize - invalid length field": + ## Given + let field = "123456" # field.len = MaxTestRpcFieldLen + 1 + + let rpc = TestRpc.init(field) + + ## When + let encodedRpc = rpc.encode() + let decodedRpcRes = TestRpc.decode(encodedRpc.buffer) + + ## Then + check: + decodedRpcRes.isErr() + + let error = decodedRpcRes.tryError() + check: + error.kind == ProtobufErrorKind.InvalidLengthField + error.field == "test_field" diff --git a/third-party/nwaku/tests/common/test_ratelimit_setting.nim b/third-party/nwaku/tests/common/test_ratelimit_setting.nim new file mode 100644 index 0000000..97d69e0 --- /dev/null +++ b/third-party/nwaku/tests/common/test_ratelimit_setting.nim @@ -0,0 +1,165 @@ +# Chronos Test Suite +# (c) Copyright 2022-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.used.} + +import testutils/unittests +import chronos, libp2p/stream/connection +import std/[options, tables] + +import ../../waku/common/rate_limit/request_limiter +import ../../waku/common/rate_limit/timed_map + +let proto = "ProtocolDescriptor" + +let conn1 = Connection(peerId: PeerId.random().tryGet()) +let conn2 = Connection(peerId: PeerId.random().tryGet()) +let conn3 = Connection(peerId: PeerId.random().tryGet()) + +suite "RateLimitSetting": + 
test "Parse rate limit setting - ok": + let test1 = "10/2m" + let test2 = " store : 10 /1h" + let test2a = "storev2 : 10 /1h" + let test2b = "storeV3: 12 /1s" + let test3 = "LIGHTPUSH: 10/ 1m" + let test4 = "px:10/2 s " + let test5 = "filter:42/66ms" + + let expU = UnlimitedRateLimit + let exp1: RateLimitSetting = (10, 2.minutes) + let exp2: RateLimitSetting = (10, 1.hours) + let exp2a: RateLimitSetting = (10, 1.hours) + let exp2b: RateLimitSetting = (12, 1.seconds) + let exp3: RateLimitSetting = (10, 1.minutes) + let exp4: RateLimitSetting = (10, 2.seconds) + let exp5: RateLimitSetting = (42, 66.milliseconds) + + let res1 = ProtocolRateLimitSettings.parse(@[test1]) + let res2 = ProtocolRateLimitSettings.parse(@[test2]) + let res2a = ProtocolRateLimitSettings.parse(@[test2a]) + let res2b = ProtocolRateLimitSettings.parse(@[test2b]) + let res3 = ProtocolRateLimitSettings.parse(@[test3]) + let res4 = ProtocolRateLimitSettings.parse(@[test4]) + let res5 = ProtocolRateLimitSettings.parse(@[test5]) + + check: + res1.isOk() + res1.get() == {GLOBAL: exp1, FILTER: FilterDefaultPerPeerRateLimit}.toTable() + res2.isOk() + res2.get() == + { + GLOBAL: expU, + FILTER: FilterDefaultPerPeerRateLimit, + STOREV2: exp2, + STOREV3: exp2, + }.toTable() + res2a.isOk() + res2a.get() == + {GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV2: exp2a}.toTable() + res2b.isOk() + res2b.get() == + {GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV3: exp2b}.toTable() + res3.isOk() + res3.get() == + {GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, LIGHTPUSH: exp3}.toTable() + res4.isOk() + res4.get() == + {GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, PEEREXCHG: exp4}.toTable() + res5.isOk() + res5.get() == {GLOBAL: expU, FILTER: exp5}.toTable() + + test "Parse rate limit setting - err": + let test1 = "10/2d" + let test2 = " stre : 10 /1h" + let test2a = "storev2 10 /1h" + let test2b = "storev3: 12 1s" + let test3 = "somethingelse: 10/ 1m" + let test4 = ":px:10/2 s 
" + let test5 = "filter:p42/66ms" + + let res1 = ProtocolRateLimitSettings.parse(@[test1]) + let res2 = ProtocolRateLimitSettings.parse(@[test2]) + let res2a = ProtocolRateLimitSettings.parse(@[test2a]) + let res2b = ProtocolRateLimitSettings.parse(@[test2b]) + let res3 = ProtocolRateLimitSettings.parse(@[test3]) + let res4 = ProtocolRateLimitSettings.parse(@[test4]) + let res5 = ProtocolRateLimitSettings.parse(@[test5]) + + check: + res1.isErr() + res2.isErr() + res2a.isErr() + res2b.isErr() + res3.isErr() + res4.isErr() + res5.isErr() + + test "Parse rate limit setting - complex": + let expU = UnlimitedRateLimit + + let test1 = @["lightpush:2/2ms", "10/2m", " store: 3/3s", " storev2:12/12s"] + let exp1 = { + GLOBAL: (10, 2.minutes), + FILTER: FilterDefaultPerPeerRateLimit, + LIGHTPUSH: (2, 2.milliseconds), + STOREV3: (3, 3.seconds), + STOREV2: (12, 12.seconds), + }.toTable() + + let res1 = ProtocolRateLimitSettings.parse(test1) + + check: + res1.isOk() + res1.get() == exp1 + res1.get().getSetting(PEEREXCHG) == (10, 2.minutes) + res1.get().getSetting(STOREV2) == (12, 12.seconds) + res1.get().getSetting(STOREV3) == (3, 3.seconds) + res1.get().getSetting(LIGHTPUSH) == (2, 2.milliseconds) + + let test2 = @["lightpush:2/2ms", " store: 3/3s", "px:10/10h", "filter:4/42ms"] + let exp2 = { + GLOBAL: expU, + LIGHTPUSH: (2, 2.milliseconds), + STOREV3: (3, 3.seconds), + STOREV2: (3, 3.seconds), + FILTER: (4, 42.milliseconds), + PEEREXCHG: (10, 10.hours), + }.toTable() + + let res2 = ProtocolRateLimitSettings.parse(test2) + + check: + res2.isOk() + res2.get() == exp2 + + let test3 = + @["storev2:1/1s", "store:3/3s", "storev3:4/42ms", "storev3:5/5s", "storev3:6/6s"] + let exp3 = { + GLOBAL: expU, + FILTER: FilterDefaultPerPeerRateLimit, + STOREV3: (6, 6.seconds), + STOREV2: (1, 1.seconds), + }.toTable() + + let res3 = ProtocolRateLimitSettings.parse(test3) + + check: + res3.isOk() + res3.get() == exp3 + res3.get().getSetting(LIGHTPUSH) == expU + + let test4 = newSeq[string](0) 
+ let exp4 = {GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit}.toTable() + + let res4 = ProtocolRateLimitSettings.parse(test4) + + check: + res4.isOk() + res4.get() == exp4 + res3.get().getSetting(LIGHTPUSH) == expU diff --git a/third-party/nwaku/tests/common/test_requestratelimiter.nim b/third-party/nwaku/tests/common/test_requestratelimiter.nim new file mode 100644 index 0000000..be910b3 --- /dev/null +++ b/third-party/nwaku/tests/common/test_requestratelimiter.nim @@ -0,0 +1,98 @@ +# Chronos Test Suite +# (c) Copyright 2022-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.used.} + +import testutils/unittests +import chronos, libp2p/stream/connection +import std/options + +import ../../waku/common/rate_limit/request_limiter +import ../../waku/common/rate_limit/timed_map + +let proto = "ProtocolDescriptor" + +let conn1 = Connection(peerId: PeerId.random().tryGet()) +let conn2 = Connection(peerId: PeerId.random().tryGet()) +let conn3 = Connection(peerId: PeerId.random().tryGet()) + +suite "RequestRateLimiter": + test "RequestRateLimiter Allow up to main bucket": + # keep limits low for easier calculation of ratios + let rateLimit: RateLimitSetting = (4, 2.minutes) + var limiter = newRequestRateLimiter(some(rateLimit)) + # per peer tokens will be 6 / 4min + # as ratio is 2 in this case but max tokens are main tokens*ratio . 
0.75 + # notice meanwhile we have 8 global tokens over 2 period (4 mins) in sum + # See: waku/common/rate_limit/request_limiter.nim #func calcPeriodRatio + + let now = Moment.now() + # with first use we register the peer also and start its timer + check limiter.checkUsage(proto, conn2, now) == true + for i in 0 ..< 3: + check limiter.checkUsage(proto, conn1, now) == true + + check limiter.checkUsage(proto, conn2, now + 3.minutes) == true + for i in 0 ..< 3: + check limiter.checkUsage(proto, conn1, now + 3.minutes) == true + + # conn1 reached the 75% of the main bucket over 2 periods of time + check limiter.checkUsage(proto, conn1, now + 3.minutes) == false + + # conn2 has not used its tokens while we have 1 more tokens left in the main bucket + check limiter.checkUsage(proto, conn2, now + 3.minutes) == true + + test "RequestRateLimiter Restrict overusing peer": + # keep limits low for easier calculation of ratios + let rateLimit: RateLimitSetting = (10, 2.minutes) + var limiter = newRequestRateLimiter(some(rateLimit)) + # per peer tokens will be 15 / 4min + # as ratio is 2 in this case but max tokens are main tokens*ratio . 
0.75 + # notice meanwhile we have 20 tokens over 2 period (4 mins) in sum + # See: waku/common/rate_limit/request_limiter.nim #func calcPeriodRatio + + let now = Moment.now() + # with first use we register the peer also and start its timer + for i in 0 ..< 10: + check limiter.checkUsage(proto, conn1, now) == true + + # run out of main tokens but still used one more token from the peer's bucket + check limiter.checkUsage(proto, conn1, now) == false + + for i in 0 ..< 4: + check limiter.checkUsage(proto, conn1, now + 3.minutes) == true + + # conn1 reached the 75% of the main bucket over 2 periods of time + check limiter.checkUsage(proto, conn1, now + 3.minutes) == false + + check limiter.checkUsage(proto, conn2, now + 3.minutes) == true + check limiter.checkUsage(proto, conn2, now + 3.minutes) == true + check limiter.checkUsage(proto, conn3, now + 3.minutes) == true + check limiter.checkUsage(proto, conn2, now + 3.minutes) == true + check limiter.checkUsage(proto, conn3, now + 3.minutes) == true + + # conn1 gets replenished as the ratio was 2 giving twice as long replenish period than the main bucket + # see waku/common/rate_limit/request_limiter.nim #func calcPeriodRatio and calcPeerTokenSetting + check limiter.checkUsage(proto, conn1, now + 4.minutes) == true + # requests of other peers can also go + check limiter.checkUsage(proto, conn2, now + 4100.milliseconds) == true + check limiter.checkUsage(proto, conn3, now + 5.minutes) == true + + test "RequestRateLimiter lowest possible volume": + # keep limits low for easier calculation of ratios + let rateLimit: RateLimitSetting = (1, 1.seconds) + var limiter = newRequestRateLimiter(some(rateLimit)) + + let now = Moment.now() + # with first use we register the peer also and start its timer + check limiter.checkUsage(proto, conn1, now + 500.milliseconds) == true + + # run out of main tokens but still used one more token from the peer's bucket + check limiter.checkUsage(proto, conn1, now + 800.milliseconds) == false + 
check limiter.checkUsage(proto, conn1, now + 1499.milliseconds) == false + check limiter.checkUsage(proto, conn1, now + 1501.milliseconds) == true diff --git a/third-party/nwaku/tests/common/test_sqlite_migrations.nim b/third-party/nwaku/tests/common/test_sqlite_migrations.nim new file mode 100644 index 0000000..9e67fb9 --- /dev/null +++ b/third-party/nwaku/tests/common/test_sqlite_migrations.nim @@ -0,0 +1,117 @@ +{.used.} + +import std/[strutils, os], results, testutils/unittests +import waku/common/databases/db_sqlite {.all.}, ../waku_archive/archive_utils + +template sourceDir(): string = + currentSourcePath.rsplit(DirSep, 1)[0] + +suite "SQLite - migrations": + test "set and get user version": + ## Given + let database = newSqliteDatabase() + + ## When + let setRes = database.setUserVersion(5) + let getRes = database.getUserVersion() + + ## Then + check: + setRes.isOk() + getRes.isOk() + + let version = getRes.tryGet() + check: + version == 5 + + ## Cleanup + database.close() + + test "filter and order migration script file paths": + ## Given + let paths = + @[ + sourceDir / "00001_valid.up.sql", + sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL", + sourceDir / "00007_unorderedValid.up.sql", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / "00666_noMigrationScript.bmp", + sourceDir / "00X00_invalidVersion.down.sql", + sourceDir / "00008_notWithinVersionRange.up.sql", + ] + + let + lowerVersion = 0 + highVersion = 7 + + ## When + var migrationSciptPaths: seq[string] + migrationSciptPaths = + filterMigrationScripts(paths, lowerVersion, highVersion, direction = "up") + migrationSciptPaths = sortMigrationScripts(migrationSciptPaths) + + ## Then + check: + migrationSciptPaths == + @[ + sourceDir / "00001_valid.up.sql", + sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / "00003_validRepeated.up.sql", + sourceDir / 
"00007_unorderedValid.up.sql", + ] + + test "break migration scripts into queries": + ## Given + let statement1 = + """CREATE TABLE contacts1 ( + contact_id INTEGER PRIMARY KEY, + first_name TEXT NOT NULL, + last_name TEXT NOT NULL, + email TEXT NOT NULL UNIQUE, + phone TEXT NOT NULL UNIQUE + );""" + let statement2 = + """CREATE TABLE contacts2 ( + contact_id INTEGER PRIMARY KEY, + first_name TEXT NOT NULL, + last_name TEXT NOT NULL, + email TEXT NOT NULL UNIQUE, + phone TEXT NOT NULL UNIQUE + );""" + let script = statement1 & statement2 + + ## When + let statements = script.breakIntoStatements() + + ## Then + check: + statements == @[statement1, statement2] + + test "break statements script into queries - empty statements": + ## Given + let statement1 = + """CREATE TABLE contacts1 ( + contact_id INTEGER PRIMARY KEY, + first_name TEXT NOT NULL, + last_name TEXT NOT NULL, + email TEXT NOT NULL UNIQUE, + phone TEXT NOT NULL UNIQUE + );""" + let statement2 = + """CREATE TABLE contacts2 ( + contact_id INTEGER PRIMARY KEY, + first_name TEXT NOT NULL, + last_name TEXT NOT NULL, + email TEXT NOT NULL UNIQUE, + phone TEXT NOT NULL UNIQUE + );""" + let script = statement1 & "; ;" & statement2 + + ## When + let statements = script.breakIntoStatements() + + ## Then + check: + statements == @[statement1, statement2] diff --git a/third-party/nwaku/tests/common/test_timed_map.nim b/third-party/nwaku/tests/common/test_timed_map.nim new file mode 100644 index 0000000..3b063d9 --- /dev/null +++ b/third-party/nwaku/tests/common/test_timed_map.nim @@ -0,0 +1,60 @@ +{.used.} + +import unittest2 +import chronos/timer +import ../../waku/common/rate_limit/timed_map + +suite "TimedMap": + test "put/get": + var cache = TimedMap[int, string].init(5.seconds) + + let now = Moment.now() + check: + cache.mgetOrPut(1, "1", now) == "1" + cache.mgetOrPut(1, "1", now + 1.seconds) == "1" + cache.mgetOrPut(2, "2", now + 4.seconds) == "2" + + check: + 1 in cache + 2 in cache + + check: + 
cache.mgetOrPut(3, "3", now + 6.seconds) == "3" + # expires 1 + + check: + 1 notin cache + 2 in cache + 3 in cache + + cache.addedAt(2) == now + 4.seconds + + check: + cache.mgetOrPut(2, "modified2", now + 8.seconds) == "2" # refreshes 2 + cache.mgetOrPut(4, "4", now + 12.seconds) == "4" # expires 3 + + check: + 2 in cache + 3 notin cache + 4 in cache + + check: + cache.remove(4).isSome() + 4 notin cache + + check: + cache.mgetOrPut(100, "100", now + 100.seconds) == "100" # expires everything + 100 in cache + 2 notin cache + + test "enough items to force cache heap storage growth": + var cache = TimedMap[int, string].init(5.seconds) + + let now = Moment.now() + for i in 101 .. 100000: + check: + cache.mgetOrPut(i, $i, now) == $i + + for i in 101 .. 100000: + check: + i in cache diff --git a/third-party/nwaku/tests/common/test_tokenbucket.nim b/third-party/nwaku/tests/common/test_tokenbucket.nim new file mode 100644 index 0000000..5bc1a05 --- /dev/null +++ b/third-party/nwaku/tests/common/test_tokenbucket.nim @@ -0,0 +1,69 @@ +# Chronos Test Suite +# (c) Copyright 2022-Present +# Status Research & Development GmbH +# +# Licensed under either of +# Apache License, version 2.0, (LICENSE-APACHEv2) +# MIT license (LICENSE-MIT) + +{.used.} + +import testutils/unittests +import chronos +import ../../waku/common/rate_limit/token_bucket + +suite "Token Bucket": + test "TokenBucket Sync test - strict": + var bucket = TokenBucket.newStrict(1000, 1.milliseconds) + let + start = Moment.now() + fullTime = start + 1.milliseconds + check: + bucket.tryConsume(800, start) == true + bucket.tryConsume(200, start) == true + # Out of budget + bucket.tryConsume(100, start) == false + bucket.tryConsume(800, fullTime) == true + bucket.tryConsume(200, fullTime) == true + # Out of budget + bucket.tryConsume(100, fullTime) == false + + test "TokenBucket Sync test - compensating": + var bucket = TokenBucket.new(1000, 1.milliseconds) + let + start = Moment.now() + fullTime = start + 
1.milliseconds + check: + bucket.tryConsume(800, start) == true + bucket.tryConsume(200, start) == true + # Out of budget + bucket.tryConsume(100, start) == false + bucket.tryConsume(800, fullTime) == true + bucket.tryConsume(200, fullTime) == true + # Due not using the bucket for a full period the compensation will satisfy this request + bucket.tryConsume(100, fullTime) == true + + test "TokenBucket Max compensation": + var bucket = TokenBucket.new(1000, 1.minutes) + var reqTime = Moment.now() + + check bucket.tryConsume(1000, reqTime) + check bucket.tryConsume(1, reqTime) == false + reqTime += 1.minutes + check bucket.tryConsume(500, reqTime) == true + reqTime += 1.minutes + check bucket.tryConsume(1000, reqTime) == true + reqTime += 10.seconds + # max compensation is 25% so try to consume 250 more + check bucket.tryConsume(250, reqTime) == true + reqTime += 49.seconds + # out of budget within the same period + check bucket.tryConsume(1, reqTime) == false + + test "TokenBucket Short replenish": + var bucket = TokenBucket.new(15000, 1.milliseconds) + let start = Moment.now() + check bucket.tryConsume(15000, start) + check bucket.tryConsume(1, start) == false + + check bucket.tryConsume(15000, start + 1.milliseconds) == true diff --git a/third-party/nwaku/tests/factory/test_all.nim b/third-party/nwaku/tests/factory/test_all.nim new file mode 100644 index 0000000..9166103 --- /dev/null +++ b/third-party/nwaku/tests/factory/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_waku_conf, ./test_node_factory diff --git a/third-party/nwaku/tests/factory/test_node_factory.nim b/third-party/nwaku/tests/factory/test_node_factory.nim new file mode 100644 index 0000000..f30e079 --- /dev/null +++ b/third-party/nwaku/tests/factory/test_node_factory.nim @@ -0,0 +1,72 @@ +{.used.} + +import testutils/unittests, chronos, libp2p/protocols/connectivity/relay/relay + +import + ../testlib/wakunode, + waku/waku_node, + waku/factory/node_factory, + 
waku/factory/conf_builder/conf_builder, + waku/factory/conf_builder/web_socket_conf_builder + +suite "Node Factory": + asynctest "Set up a node based on default configurations": + let conf = defaultTestWakuConf() + + let node = (await setupNode(conf, relay = Relay.new())).valueOr: + raiseAssert error + + check: + not node.isNil() + node.wakuArchive.isNil() + node.wakuStore.isNil() + node.wakuFilter.isNil() + not node.wakuStoreClient.isNil() + not node.wakuRendezvous.isNil() + + asynctest "Set up a node with Store enabled": + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.storeServiceConf.withEnabled(true) + confBuilder.storeServiceConf.withDbUrl("sqlite://store.sqlite3") + let conf = confBuilder.build().value + + let node = (await setupNode(conf, relay = Relay.new())).valueOr: + raiseAssert error + + check: + not node.isNil() + not node.wakuStore.isNil() + not node.wakuArchive.isNil() + +asynctest "Set up a node with Filter enabled": + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.filterServiceConf.withEnabled(true) + let conf = confBuilder.build().value + + let node = (await setupNode(conf, relay = Relay.new())).valueOr: + raiseAssert error + + check: + not node.isNil() + not node.wakuFilter.isNil() + echo "TEST END" + +asynctest "Start a node based on default test configuration": + let conf = defaultTestWakuConf() + + let node = (await setupNode(conf, relay = Relay.new())).valueOr: + raiseAssert error + + assert not node.isNil(), "Node can't be nil" + + let startRes = catch: + (await startNode(node, conf)) + + assert not startRes.isErr(), "Exception starting node" + assert startRes.get().isOk(), "Error starting node " & startRes.get().error + + check: + node.started == true + + ## Cleanup + await node.stop() diff --git a/third-party/nwaku/tests/factory/test_waku_conf.nim b/third-party/nwaku/tests/factory/test_waku_conf.nim new file mode 100644 index 0000000..3d3fec2 --- /dev/null +++ 
b/third-party/nwaku/tests/factory/test_waku_conf.nim @@ -0,0 +1,296 @@ +{.used.} + +import + libp2p/crypto/[crypto, secp], + libp2p/multiaddress, + nimcrypto/utils, + std/[options, sequtils], + results, + testutils/unittests +import + waku/factory/waku_conf, + waku/factory/conf_builder/conf_builder, + waku/factory/networks_config, + waku/common/utils/parse_size_units + +suite "Waku Conf - build with cluster conf": + test "Cluster Conf is passed and relay is enabled": + ## Setup + let networkConf = NetworkConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + builder.discv5Conf.withUdpPort(9000) + builder.withRelayServiceRatio("50:50") + # Mount all shards in network + let expectedShards = toSeq[0.uint16 .. 7.uint16] + + ## Given + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) + builder.withRelay(true) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes + + if networkConf.rlnRelay: + assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled" + + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress.string == + networkConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic + check rlnRelayConf.chainId == networkConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == 
networkConf.rlnRelayUserMessageLimit + + test "Cluster Conf is passed, but relay is disabled": + ## Setup + let networkConf = NetworkConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + builder.withRelayServiceRatio("50:50") + builder.discv5Conf.withUdpPort(9000) + # Mount all shards in network + let expectedShards = toSeq[0.uint16 .. 7.uint16] + + ## Given + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) + builder.withRelay(false) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes + + assert conf.rlnRelayConf.isNone + + test "Cluster Conf is passed, but rln relay is disabled": + ## Setup + let networkConf = NetworkConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + + let # Mount all shards in network + expectedShards = toSeq[0.uint16 .. 
7.uint16] + + ## Given + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) + builder.rlnRelayConf.withEnabled(false) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes + assert conf.rlnRelayConf.isNone + + test "Cluster Conf is passed and valid shards are specified": + ## Setup + let networkConf = NetworkConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + let shards = @[2.uint16, 3.uint16] + + ## Given + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) + builder.withSubscribeShards(shards) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == shards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes + + test "Cluster Conf is passed and invalid shards are specified": + ## Setup + let networkConf = NetworkConf.TheWakuNetworkConf() + var builder = 
WakuConfBuilder.init() + let shards = @[2.uint16, 10.uint16] + + ## Given + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + builder.withNetworkConf(networkConf) + builder.withSubscribeShards(shards) + + ## When + let resConf = builder.build() + + ## Then + assert resConf.isErr(), "Invalid shard was accepted" + + test "Cluster Conf is passed and RLN contract is **not** overridden": + ## Setup + let networkConf = NetworkConf.TheWakuNetworkConf() + var builder = WakuConfBuilder.init() + builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"]) + + # Mount all shards in network + let expectedShards = toSeq[0.uint16 .. 7.uint16] + let contractAddress = "0x0123456789ABCDEF" + + ## Given + builder.rlnRelayConf.withEthContractAddress(contractAddress) + builder.withNetworkConf(networkConf) + builder.withRelay(true) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check conf.clusterId == networkConf.clusterId + check conf.shardingConf.kind == networkConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + networkConf.shardingConf.numShardsInCluster + check conf.subscribeShards == expectedShards + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(networkConf.maxMessageSize)) + check conf.discv5Conf.isSome == networkConf.discv5Discovery + check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes + + if networkConf.rlnRelay: + assert conf.rlnRelayConf.isSome + + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress.string == + networkConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic + check rlnRelayConf.chainId == networkConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == 
networkConf.rlnRelayUserMessageLimit + +suite "Waku Conf - node key": + test "Node key is generated": + ## Setup + var builder = WakuConfBuilder.init() + builder.withClusterId(1) + + ## Given + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + let pubkey = getPublicKey(conf.nodeKey) + assert pubkey.isOk() + + test "Passed node key is used": + ## Setup + let nodeKeyStr = + "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff" + let nodeKey = block: + let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet() + crypto.PrivateKey(scheme: Secp256k1, skkey: key) + var builder = WakuConfBuilder.init() + builder.withClusterId(1) + + ## Given + builder.withNodeKey(nodeKey) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + assert utils.toHex(conf.nodeKey.getRawBytes().get()) == + utils.toHex(nodeKey.getRawBytes().get()), + "Passed node key isn't in config:" & $nodeKey & $conf.nodeKey + +suite "Waku Conf - extMultiaddrs": + test "Valid multiaddresses are passed and accepted": + ## Setup + var builder = WakuConfBuilder.init() + builder.withClusterId(1) + + ## Given + let multiaddrs = + @["/ip4/127.0.0.1/udp/9090/quic", "/ip6/::1/tcp/3217", "/dns4/foo.com/tcp/80"] + builder.withExtMultiAddrs(multiaddrs) + + ## When + let resConf = builder.build() + assert resConf.isOk(), $resConf.error + let conf = resConf.get() + + ## Then + let resValidate = conf.validate() + assert resValidate.isOk(), $resValidate.error + check multiaddrs.len == conf.endpointConf.extMultiAddrs.len + let resMultiaddrs = conf.endpointConf.extMultiAddrs.map( + proc(m: MultiAddress): string = + $m + ) + for m in multiaddrs: + check m in resMultiaddrs + +suite "Waku Conf Builder - 
rate limits": + test "Valid rate limit passed via string": + ## Setup + var builder = RateLimitConfBuilder.init() + + ## Given + let rateLimitsStr = @["lightpush:2/2ms", "10/2m", "store: 3/3s"] + builder.withRateLimits(rateLimitsStr) + + ## When + let res = builder.build() + + ## Then + assert res.isOk(), $res.error diff --git a/third-party/nwaku/tests/incentivization/test_all.nim b/third-party/nwaku/tests/incentivization/test_all.nim new file mode 100644 index 0000000..dc488c4 --- /dev/null +++ b/third-party/nwaku/tests/incentivization/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_rpc_codec, ./test_poc_eligibility, ./test_poc_reputation diff --git a/third-party/nwaku/tests/incentivization/test_poc_eligibility.nim b/third-party/nwaku/tests/incentivization/test_poc_eligibility.nim new file mode 100644 index 0000000..be90188 --- /dev/null +++ b/third-party/nwaku/tests/incentivization/test_poc_eligibility.nim @@ -0,0 +1,207 @@ +{.used.} + +import std/options, testutils/unittests, chronos, web3, stint, tests/testlib/testasync + +import + waku/node/peer_manager, + waku/incentivization/[rpc, eligibility_manager], + ../waku_rln_relay/[utils_onchain, utils] + +const TxHashNonExisting = + TxHash.fromHex("0x0000000000000000000000000000000000000000000000000000000000000000") + +# Anvil RPC URL +const EthClient = "ws://127.0.0.1:8540" + +const TxValueExpectedWei = 1000.u256 + +## Storage.sol contract from https://remix.ethereum.org/ +## Compiled with Solidity compiler version "0.8.26+commit.8a97fa7a" + +const ExampleStorageContractBytecode = + 
"6080604052348015600e575f80fd5b506101438061001c5f395ff3fe608060405234801561000f575f80fd5b5060043610610034575f3560e01c80632e64cec1146100385780636057361d14610056575b5f80fd5b610040610072565b60405161004d919061009b565b60405180910390f35b610070600480360381019061006b91906100e2565b61007a565b005b5f8054905090565b805f8190555050565b5f819050919050565b61009581610083565b82525050565b5f6020820190506100ae5f83018461008c565b92915050565b5f80fd5b6100c181610083565b81146100cb575f80fd5b50565b5f813590506100dc816100b8565b92915050565b5f602082840312156100f7576100f66100b4565b5b5f610104848285016100ce565b9150509291505056fea26469706673582212209a0dd35336aff1eb3eeb11db76aa60a1427a12c1b92f945ea8c8d1dfa337cf2264736f6c634300081a0033" + +contract(ExampleStorageContract): + proc number(): UInt256 {.view.} + proc store(num: UInt256) + proc retrieve(): UInt256 {.view.} + +#[ +// SPDX-License-Identifier: GPL-3.0 + +pragma solidity >=0.8.2 <0.9.0; + +/** + * @title Storage + * @dev Store & retrieve value in a variable + * @custom:dev-run-script ./scripts/deploy_with_ethers.ts + */ +contract Storage { + + uint256 number; + + /** + * @dev Store value in variable + * @param num value to store + */ + function store(uint256 num) public { + number = num; + } + + /** + * @dev Return value + * @return value of 'number' + */ + function retrieve() public view returns (uint256){ + return number; + } +} +]# + +proc setup( + manager: EligibilityManager +): Future[(TxHash, TxHash, TxHash, TxHash, TxHash, Address, Address)] {.async.} = + ## Populate the local chain (connected to via manager) + ## with txs required for eligibility testing. + ## + ## 1. Depoly a dummy contract that has a publicly callable function. + ## (While doing so, we confirm a contract creation tx.) + ## 2. 
Confirm these transactions: + ## - a contract call tx (eligibility test must fail) + ## - a simple transfer with the wrong receiver (must fail) + ## - a simple transfer with the wrong amount (must fail) + ## - a simple transfer with the right receiver and amount (must pass) + + let web3 = manager.web3 + + let accounts = await web3.provider.eth_accounts() + web3.defaultAccount = accounts[0] + let sender = web3.defaultAccount + let receiverExpected = accounts[1] + let receiverNotExpected = accounts[2] + + let txValueEthExpected = TxValueExpectedWei + let txValueEthNotExpected = txValueEthExpected + 1 + + # wrong receiver, wrong amount + let txHashWrongReceiverRightAmount = + await web3.sendEthTransfer(sender, receiverNotExpected, txValueEthExpected) + + # right receiver, wrong amount + let txHashRightReceiverWrongAmount = + await web3.sendEthTransfer(sender, receiverExpected, txValueEthNotExpected) + + # right receiver, right amount + let txHashRightReceiverRightAmount = + await web3.sendEthTransfer(sender, receiverExpected, txValueEthExpected) + + let receipt = await web3.deployContract(ExampleStorageContractBytecode) + let txHashContractCreation = receipt.transactionHash + let exampleStorageContractAddress = receipt.contractAddress.get() + let exampleStorageContract = + web3.contractSender(ExampleStorageContract, exampleStorageContractAddress) + + let txHashContractCall = await exampleStorageContract.store(1.u256).send() + + return ( + txHashWrongReceiverRightAmount, txHashRightReceiverWrongAmount, + txHashRightReceiverRightAmount, txHashContractCreation, txHashContractCall, + receiverExpected, receiverNotExpected, + ) + +suite "Waku Incentivization PoC Eligibility Proofs": + ## Tests for service incentivization PoC. + ## In a client-server interaction, a client submits an eligibility proof to the server. + ## The server provides the service if and only if the proof is valid. + ## In PoC, a txid serves as eligibility proof. 
+ ## The txid reflects the confirmed payment from the client to the server. + ## The request is eligible if the tx is confirmed and pays the correct amount to the correct address. + ## The tx must also be of a "simple transfer" type (not a contract creation, not a contract call). + ## See spec: https://github.com/waku-org/specs/blob/master/standards/core/incentivization.md + + ## Start Anvil + let runAnvil {.used.} = runAnvil() + + var txHashWrongReceiverRightAmount, txHashRightReceiverWrongAmount, + txHashRightReceiverRightAmount, txHashContractCreation, txHashContractCall: TxHash + + var receiverExpected, receiverNotExpected: Address + + var manager {.threadvar.}: EligibilityManager + + asyncSetup: + manager = await EligibilityManager.init(EthClient) + + ( + txHashWrongReceiverRightAmount, txHashRightReceiverWrongAmount, + txHashRightReceiverRightAmount, txHashContractCreation, txHashContractCall, + receiverExpected, receiverNotExpected, + ) = await manager.setup() + + asyncTeardown: + await manager.close() + + asyncTest "incentivization PoC: non-existent tx is not eligible": + ## Test that an unconfirmed tx is not eligible. + + let eligibilityProof = + EligibilityProof(proofOfPayment: some(@(TxHashNonExisting.bytes()))) + let isEligible = await manager.isEligibleTxId( + eligibilityProof, receiverExpected, TxValueExpectedWei + ) + check: + isEligible.isErr() + + asyncTest "incentivization PoC: contract creation tx is not eligible": + ## Test that a contract creation tx is not eligible. + + let eligibilityProof = + EligibilityProof(proofOfPayment: some(@(txHashContractCreation.bytes()))) + let isEligible = await manager.isEligibleTxId( + eligibilityProof, receiverExpected, TxValueExpectedWei + ) + check: + isEligible.isErr() + + asyncTest "incentivization PoC: contract call tx is not eligible": + ## Test that a contract call tx is not eligible. + ## This assumes a payment in native currency (ETH), not a token. 
+ + let eligibilityProof = + EligibilityProof(proofOfPayment: some(@(txHashContractCall.bytes()))) + let isEligible = await manager.isEligibleTxId( + eligibilityProof, receiverExpected, TxValueExpectedWei + ) + check: + isEligible.isErr() + + asyncTest "incentivization PoC: simple transfer tx is eligible": + ## Test that a simple transfer tx is eligible (if necessary conditions hold). + + let eligibilityProof = + EligibilityProof(proofOfPayment: some(@(txHashRightReceiverRightAmount.bytes()))) + let isEligible = await manager.isEligibleTxId( + eligibilityProof, receiverExpected, TxValueExpectedWei + ) + + assert isEligible.isOk(), isEligible.error + + asyncTest "incentivization PoC: double-spend tx is not eligible": + ## Test that the same tx submitted twice is not eligible the second time + + let eligibilityProof = + EligibilityProof(proofOfPayment: some(@(txHashRightReceiverRightAmount.bytes()))) + + let isEligibleOnce = await manager.isEligibleTxId( + eligibilityProof, receiverExpected, TxValueExpectedWei + ) + + let isEligibleTwice = await manager.isEligibleTxId( + eligibilityProof, receiverExpected, TxValueExpectedWei + ) + + assert isEligibleOnce.isOk() + assert isEligibleTwice.isErr(), isEligibleTwice.error + + # Stop Anvil daemon + stopAnvil(runAnvil) diff --git a/third-party/nwaku/tests/incentivization/test_poc_reputation.nim b/third-party/nwaku/tests/incentivization/test_poc_reputation.nim new file mode 100644 index 0000000..0547b97 --- /dev/null +++ b/third-party/nwaku/tests/incentivization/test_poc_reputation.nim @@ -0,0 +1,43 @@ +import std/options, testutils/unittests, chronos, web3 + +import waku/incentivization/reputation_manager, waku/waku_lightpush_legacy/rpc + +suite "Waku Incentivization PoC Reputation": + var manager {.threadvar.}: ReputationManager + + setup: + manager = ReputationManager.init() + + test "incentivization PoC: reputation: reputation table is empty after initialization": + check manager.reputationOf.len == 0 + + test 
"incentivization PoC: reputation: set and get reputation": + manager.setReputation("peer1", some(true)) # Encodes GoodRep + check manager.getReputation("peer1") == some(true) + + test "incentivization PoC: reputation: evaluate PushResponse valid": + let validLightpushResponse = + PushResponse(isSuccess: true, info: some("Everything is OK")) + # We expect evaluateResponse to return GoodResponse if isSuccess is true + check evaluateResponse(validLightpushResponse) == GoodResponse + + test "incentivization PoC: reputation: evaluate PushResponse invalid": + let invalidLightpushResponse = PushResponse(isSuccess: false, info: none(string)) + check evaluateResponse(invalidLightpushResponse) == BadResponse + + test "incentivization PoC: reputation: updateReputationFromResponse valid": + let peerId = "peerWithValidResponse" + let validResp = PushResponse(isSuccess: true, info: some("All good")) + manager.updateReputationFromResponse(peerId, validResp) + check manager.getReputation(peerId) == some(true) + + test "incentivization PoC: reputation: updateReputationFromResponse invalid": + let peerId = "peerWithInvalidResponse" + let invalidResp = PushResponse(isSuccess: false, info: none(string)) + manager.updateReputationFromResponse(peerId, invalidResp) + check manager.getReputation(peerId) == some(false) + + test "incentivization PoC: reputation: default is None": + let unknownPeerId = "unknown_peer" + # The peer is not in the table yet + check manager.getReputation(unknownPeerId) == none(bool) diff --git a/third-party/nwaku/tests/incentivization/test_rpc_codec.nim b/third-party/nwaku/tests/incentivization/test_rpc_codec.nim new file mode 100644 index 0000000..30befd8 --- /dev/null +++ b/third-party/nwaku/tests/incentivization/test_rpc_codec.nim @@ -0,0 +1,22 @@ +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto, web3 + +import waku/incentivization/[rpc, rpc_codec, common] + +suite "Waku Incentivization Eligibility Codec": + asyncTest "encode eligibility 
proof from txid": + let txHash = TxHash.fromHex( + "0x0000000000000000000000000000000000000000000000000000000000000000" + ) + let txHashAsBytes = @(txHash.bytes()) + let eligibilityProof = EligibilityProof(proofOfPayment: some(txHashAsBytes)) + let encoded = encode(eligibilityProof) + let decoded = EligibilityProof.decode(encoded.buffer).get() + check: + eligibilityProof == decoded + + asyncTest "encode eligibility status": + let eligibilityStatus = init(EligibilityStatus, true) + let encoded = encode(eligibilityStatus) + let decoded = EligibilityStatus.decode(encoded.buffer).get() + check: + eligibilityStatus == decoded diff --git a/third-party/nwaku/tests/nim.cfg b/third-party/nwaku/tests/nim.cfg new file mode 100644 index 0000000..e09f88d --- /dev/null +++ b/third-party/nwaku/tests/nim.cfg @@ -0,0 +1,3 @@ +-d:chronicles_line_numbers +-d:discv5_protocol_id=d5waku +path = "../" diff --git a/third-party/nwaku/tests/node/peer_manager/peer_store/test_migrations.nim b/third-party/nwaku/tests/node/peer_manager/peer_store/test_migrations.nim new file mode 100644 index 0000000..a20d065 --- /dev/null +++ b/third-party/nwaku/tests/node/peer_manager/peer_store/test_migrations.nim @@ -0,0 +1,56 @@ +import std/[options], stew/results, testutils/unittests + +import + waku/node/peer_manager/peer_store/migrations, + ../../waku_archive/archive_utils, + ../../testlib/[simple_mock] + +import std/[tables, strutils, os], stew/results, chronicles + +import waku/common/databases/db_sqlite, waku/common/databases/common + +suite "Migrations": + test "migrate ok": + # Given the db_sqlite.migrate function returns ok + let backup = db_sqlite.migrate + mock(db_sqlite.migrate): + proc mockedMigrate( + db: SqliteDatabase, targetVersion: int64, migrationsScriptsDir: string + ): DatabaseResult[void] = + ok() + + mockedMigrate + + # When we call the migrate function + let migrationResult = migrations.migrate(newSqliteDatabase(), 1) + + # Then we expect the result to be ok + check: + 
migrationResult == DatabaseResult[void].ok() + + # Cleanup + mock(db_sqlite.migrate): + backup + + test "migrate error": + # Given the db_sqlite.migrate function returns an error + let backup = db_sqlite.migrate + mock(db_sqlite.migrate): + proc mockedMigrate( + db: SqliteDatabase, targetVersion: int64, migrationsScriptsDir: string + ): DatabaseResult[void] = + err("mock error") + + mockedMigrate + + # When we call the migrate function + let migrationResult = migrations.migrate(newSqliteDatabase(), 1) + + # Then we expect the result to be an error + check: + migrationResult == + DatabaseResult[void].err("failed to execute migration scripts: mock error") + + # Cleanup + mock(db_sqlite.migrate): + backup diff --git a/third-party/nwaku/tests/node/peer_manager/peer_store/test_peer_storage.nim b/third-party/nwaku/tests/node/peer_manager/peer_store/test_peer_storage.nim new file mode 100644 index 0000000..c8a4791 --- /dev/null +++ b/third-party/nwaku/tests/node/peer_manager/peer_store/test_peer_storage.nim @@ -0,0 +1,21 @@ +import stew/results, testutils/unittests + +import waku/node/peer_manager/peer_store/peer_storage, waku/waku_core/peers + +suite "PeerStorage": + var peerStorage {.threadvar.}: PeerStorage + + setup: + peerStorage = PeerStorage() + + suite "put": + test "unimplemented": + check: + peerStorage.put(nil) == PeerStorageResult[void].err("Unimplemented") + + suite "getAll": + test "unimplemented": + let emptyClosure = proc(remotePeerInfo: RemotePeerInfo) = + discard + check: + peerStorage.getAll(emptyClosure) == PeerStorageResult[void].err("Unimplemented") diff --git a/third-party/nwaku/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim b/third-party/nwaku/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim new file mode 100644 index 0000000..c0e25ec --- /dev/null +++ b/third-party/nwaku/tests/node/peer_manager/peer_store/test_waku_peer_storage.nim @@ -0,0 +1,108 @@ +import + std/[nativesockets, options, sequtils], + 
testutils/unittests, + libp2p/[multiaddress, peerid], + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr, + nimcrypto/utils + +import waku/waku_core/peers, waku/node/peer_manager/peer_store/waku_peer_storage + +proc `==`(a, b: RemotePeerInfo): bool = + let comparisons = + @[ + a.peerId == b.peerId, + a.addrs == b.addrs, + a.enr == b.enr, + a.protocols == b.protocols, + a.agent == b.agent, + a.protoVersion == b.protoVersion, + a.publicKey == b.publicKey, + a.connectedness == b.connectedness, + a.disconnectTime == b.disconnectTime, + a.origin == b.origin, + a.direction == b.direction, + a.lastFailedConn == b.lastFailedConn, + a.numberFailedConn == b.numberFailedConn, + ] + + allIt(comparisons, it == true) + +suite "Protobuf Serialisation": + let + privateKeyStr = + "08031279307702010104203E5B1FE9712E6C314942A750BD67485DE3C1EFE85B1BFB520AE8F9AE3DFA4A4CA00A06082A8648CE3D030107A14403420004DE3D300FA36AE0E8F5D530899D83ABAB44ABF3161F162A4BC901D8E6ECDA020E8B6D5F8DA30525E71D6851510C098E5C47C646A597FB4DCEC034E9F77C409E62" + publicKeyStr = + "0803125b3059301306072a8648ce3d020106082a8648ce3d03010703420004de3d300fa36ae0e8f5d530899d83abab44abf3161f162a4bc901d8e6ecda020e8b6d5f8da30525e71d6851510c098e5c47c646a597fb4dcec034e9f77c409e62" + + var remotePeerInfo {.threadvar.}: RemotePeerInfo + + setup: + let + port = Port(8080) + ipAddress = IpAddress(family: IPv4, address_v4: [192, 168, 0, 1]) + multiAddress: MultiAddress = + MultiAddress.init(ipAddress, IpTransportProtocol.tcpProtocol, port) + encodedPeerIdStr = "16Uiu2HAmFccGe5iezmyRDQZuLPRP7FqpqXLjnocmMRk18pmTZs2j" + + var peerId: PeerID + assert init(peerId, encodedPeerIdStr) + + let + publicKey = + crypto.PublicKey.init(utils.fromHex(publicKeyStr)).expect("public key") + privateKey = + crypto.PrivateKey.init(utils.fromHex(privateKeyStr)).expect("private key") + + remotePeerInfo = RemotePeerInfo.init(peerId, @[multiAddress]) + remotePeerInfo.publicKey = publicKey + + suite "encode": + test "simple": + # Given the 
expected bytes representation of a valid RemotePeerInfo + let expectedBuffer: seq[byte] = + @[ + 10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145, + 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74, + 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8, + 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134, + 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245, + 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1, + 216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81, + 81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247, + 124, 64, 158, 98, 40, 0, 48, 0, + ] + + # When converting a valid RemotePeerInfo to a ProtoBuffer + let encodedRemotePeerInfo = encode(remotePeerInfo).get() + + # Then the encoded RemotePeerInfo should be equal to the expected bytes + check: + encodedRemotePeerInfo.buffer == expectedBuffer + encodedRemotePeerInfo.offset == 152 + encodedRemotePeerInfo.length == 0 + encodedRemotePeerInfo.maxSize == 4194304 + + suite "decode": + test "simple": + # Given the bytes representation of a valid RemotePeerInfo + let buffer: seq[byte] = + @[ + 10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145, + 217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74, + 141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8, + 3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134, + 72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245, + 213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1, + 216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81, + 81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247, + 124, 64, 158, 98, 40, 0, 48, 0, + ] + + # When converting a valid buffer to RemotePeerInfo + let 
decodedRemotePeerInfo = RemotePeerInfo.decode(buffer).get() + + # Then the decoded RemotePeerInfo should be equal to the original RemotePeerInfo + check: + decodedRemotePeerInfo == remotePeerInfo diff --git a/third-party/nwaku/tests/node/peer_manager/peer_store/utils.nim b/third-party/nwaku/tests/node/peer_manager/peer_store/utils.nim new file mode 100644 index 0000000..891c5fd --- /dev/null +++ b/third-party/nwaku/tests/node/peer_manager/peer_store/utils.nim @@ -0,0 +1,9 @@ +import std/options, results + +import + waku/node/peer_manager/[waku_peer_store, peer_store/waku_peer_storage], + ../../../waku_archive/archive_utils + +proc newTestWakuPeerStorage*(path: Option[string] = string.none()): WakuPeerStorage = + let db = newSqliteDatabase(path) + WakuPeerStorage.new(db).value() diff --git a/third-party/nwaku/tests/node/peer_manager/test_peer_manager.nim b/third-party/nwaku/tests/node/peer_manager/test_peer_manager.nim new file mode 100644 index 0000000..7697e0a --- /dev/null +++ b/third-party/nwaku/tests/node/peer_manager/test_peer_manager.nim @@ -0,0 +1,137 @@ +import chronicles, std/[options, tables, strutils], chronos, testutils/unittests + +import + waku/node/waku_node, + waku/waku_core, + ../../waku_lightpush/[lightpush_utils], + ../../testlib/[wakucore, wakunode, futures, testasync], + waku/node/peer_manager/peer_manager + +suite "Peer Manager": + suite "onPeerMetadata": + var + listenPort {.threadvar.}: Port + listenAddress {.threadvar.}: IpAddress + serverKey {.threadvar.}: PrivateKey + clientKey {.threadvar.}: PrivateKey + clusterId {.threadvar.}: uint64 + + asyncSetup: + listenPort = Port(0) + listenAddress = parseIpAddress("0.0.0.0") + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + clusterId = 1 + + asyncTest "light client is not disconnected": + # Given two nodes with the same shardId + let + server = newTestWakuNode( + serverKey, listenAddress, listenPort, clusterId = clusterId, shards = @[0] + ) + client = newTestWakuNode( + 
clientKey, listenAddress, listenPort, clusterId = clusterId, shards = @[1] + ) + + # And both mount metadata and filter + discard + client.mountMetadata(0, @[1'u16]) # clusterId irrelevant, overridden by topic + discard + server.mountMetadata(0, @[0'u16]) # clusterId irrelevant, overridden by topic + await client.mountFilterClient() + await server.mountFilter() + + # And both nodes are started + await allFutures(server.start(), client.start()) + await sleepAsync(FUTURE_TIMEOUT) + + # And the nodes are connected + let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + await client.connectToNodes(@[serverRemotePeerInfo]) + await sleepAsync(FUTURE_TIMEOUT) + + # When making an operation that triggers onPeerMetadata + discard await client.filterSubscribe( + some("/waku/2/rs/0/0"), "waku/lightpush/1", serverRemotePeerInfo + ) + await sleepAsync(FUTURE_TIMEOUT) + + check: + server.switch.isConnected(client.switch.peerInfo.toRemotePeerInfo().peerId) + client.switch.isConnected(server.switch.peerInfo.toRemotePeerInfo().peerId) + + asyncTest "relay with same shardId is not disconnected": + # Given two nodes with the same shardId + let + server = newTestWakuNode( + serverKey, listenAddress, listenPort, clusterId = clusterId, shards = @[0] + ) + client = newTestWakuNode( + clientKey, listenAddress, listenPort, clusterId = clusterId, shards = @[1] + ) + + # And both mount metadata and relay + discard + client.mountMetadata(0, @[1'u16]) # clusterId irrelevant, overridden by topic + discard + server.mountMetadata(0, @[0'u16]) # clusterId irrelevant, overridden by topic + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # And both nodes are started + await allFutures(server.start(), client.start()) + await sleepAsync(FUTURE_TIMEOUT) + + # And the nodes are connected + let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + await 
client.connectToNodes(@[serverRemotePeerInfo]) + await sleepAsync(FUTURE_TIMEOUT) + + # When making an operation that triggers onPeerMetadata + client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr: + assert false, "Failed to subscribe to relay" + await sleepAsync(FUTURE_TIMEOUT) + + check: + server.switch.isConnected(client.switch.peerInfo.toRemotePeerInfo().peerId) + client.switch.isConnected(server.switch.peerInfo.toRemotePeerInfo().peerId) + + asyncTest "relay with different shardId is disconnected": + # Given two nodes with different shardIds + let + server = newTestWakuNode( + serverKey, listenAddress, listenPort, clusterId = clusterId, shards = @[0] + ) + client = newTestWakuNode( + clientKey, listenAddress, listenPort, clusterId = clusterId, shards = @[1] + ) + + # And both mount metadata and relay + discard + client.mountMetadata(0, @[1'u16]) # clusterId irrelevant, overridden by topic + discard + server.mountMetadata(0, @[0'u16]) # clusterId irrelevant, overridden by topic + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # And both nodes are started + await allFutures(server.start(), client.start()) + await sleepAsync(FUTURE_TIMEOUT) + + # And the nodes are connected + let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + await client.connectToNodes(@[serverRemotePeerInfo]) + await sleepAsync(FUTURE_TIMEOUT) + + # When making an operation that triggers onPeerMetadata + client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr: + assert false, "Failed to subscribe to relay" + await sleepAsync(FUTURE_TIMEOUT) + + check: + not server.switch.isConnected(client.switch.peerInfo.toRemotePeerInfo().peerId) + not client.switch.isConnected(server.switch.peerInfo.toRemotePeerInfo().peerId) diff --git a/third-party/nwaku/tests/node/test_all.nim b/third-party/nwaku/tests/node/test_all.nim new file 
mode 100644 index 0000000..f6e7507 --- /dev/null +++ b/third-party/nwaku/tests/node/test_all.nim @@ -0,0 +1,10 @@ +{.used.} + +import + ./test_wakunode_filter, + ./test_wakunode_legacy_lightpush, + ./test_wakunode_lightpush, + ./test_wakunode_peer_exchange, + ./test_wakunode_store, + ./test_wakunode_legacy_store, + ./test_wakunode_peer_manager diff --git a/third-party/nwaku/tests/node/test_wakunode_filter.nim b/third-party/nwaku/tests/node/test_wakunode_filter.nim new file mode 100644 index 0000000..abf555b --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_filter.nim @@ -0,0 +1,838 @@ +{.used.} + +import + std/[options, tables, sequtils, strutils, sets], + testutils/unittests, + chronos, + chronicles, + libp2p/[peerstore, crypto/crypto] + +import + waku/[ + waku_core, + node/peer_manager, + node/waku_node, + waku_filter_v2, + waku_filter_v2/client, + waku_filter_v2/subscriptions, + waku_filter_v2/rpc, + ], + ../testlib/[common, wakucore, wakunode, testasync, futures, testutils], + ../waku_filter_v2/waku_filter_utils + +proc generateRequestId(rng: ref HmacDrbgContext): string = + var bytes: array[10, byte] + hmacDrbgGenerate(rng[], bytes) + return toHex(bytes) + +proc createRequest( + filterSubscribeType: FilterSubscribeType, + pubsubTopic = none(PubsubTopic), + contentTopics = newSeq[ContentTopic](), +): FilterSubscribeRequest = + let requestId = generateRequestId(rng) + + return FilterSubscribeRequest( + requestId: requestId, + filterSubscribeType: filterSubscribeType, + pubsubTopic: pubsubTopic, + contentTopics: contentTopics, + ) + +suite "Waku Filter - End to End": + var client {.threadvar.}: WakuNode + var clientPeerId {.threadvar.}: PeerId + var clientClone {.threadvar.}: WakuNode + var server {.threadvar.}: WakuNode + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + var pushHandlerFuture 
{.threadvar.}: Future[(string, WakuMessage)] + var messagePushHandler {.threadvar.}: FilterPushHandler + var clientKey {.threadvar.}: PrivateKey + var serverKey {.threadvar.}: PrivateKey + + asyncSetup: + pushHandlerFuture = newFuture[(string, WakuMessage)]() + messagePushHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + pushHandlerFuture.complete((pubsubTopic, message)) + + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[DefaultContentTopic] + + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode( + serverKey, parseIpAddress("0.0.0.0"), Port(23450), maxConnections = 300 + ) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(23451)) + clientClone = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(23451)) + # Used for testing client restarts + + await allFutures(server.start(), client.start()) + + await server.mountFilter() + await client.mountFilterClient() + + client.wakuFilterClient.registerPushHandler(messagePushHandler) + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + clientPeerId = client.peerInfo.toRemotePeerInfo().peerId + + # Prepare the clone but do not start it + await clientClone.mountFilterClient() + clientClone.wakuFilterClient.registerPushHandler(messagePushHandler) + + asyncTeardown: + await allFutures(client.stop(), clientClone.stop(), server.stop()) + + asyncTest "Client Node receives Push from Server Node, via Filter": + # When a client node subscribes to a filter node + let subscribeResponse = await client.filterSubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + + # Then the subscription is successful + check: + subscribeResponse.isOk() + server.wakuFilter.subscriptions.subscribedPeerCount() == 1 + server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When sending a message to the subscribed content topic + let msg1 
= fakeWakuMessage(contentTopic = contentTopic) + await server.filterHandleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + require await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # When unsubscribing from the subscription + let unsubscribeResponse = await client.filterUnsubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + + # Then the unsubscription is successful + check: + unsubscribeResponse.isOk() + server.wakuFilter.subscriptions.subscribedPeerCount() == 0 + + # When sending a message to the previously subscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await server.filterHandleMessage(pubsubTopic, msg2) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "Client Node can't receive Push from Server Node, via Relay": + # Given the server node has Relay enabled + (await server.mountRelay()).isOkOr: + assert false, "error mounting relay: " & $error + + # And valid filter subscription + let subscribeResponse = await client.filterSubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + require: + subscribeResponse.isOk() + server.wakuFilter.subscriptions.subscribedPeerCount() == 1 + + # When a server node gets a Relay message + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + discard await server.publish(some(pubsubTopic), msg1) + + # Then the message is not sent to the client's filter push handler + check (not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT)) + + asyncTest "Client Node can't subscribe to Server Node without Filter": + # Given a server node with Relay without Filter + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, 
parseIpAddress("0.0.0.0"), Port(0)) + + await server.start() + (await server.mountRelay()).isOkOr: + assert false, "error mounting relay: " & $error + + let serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + # When a client node subscribes to the server node + let subscribeResponse = await client.filterSubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + + # Then the subscription is successful + check (not subscribeResponse.isOk()) + + xasyncTest "Filter Client Node can receive messages after subscribing and restarting, via Filter": + ## connect both switches + await client.switch.connect( + server.switch.peerInfo.peerId, server.switch.peerInfo.listenAddrs + ) + + # Given a valid filter subscription + var subscribeResponse = await client.filterSubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + require: + subscribeResponse.isOk() + server.wakuFilter.subscriptions.subscribedPeerCount() == 1 + + # And the client node reboots + await client.stop() + ## This line above causes the test to fail. I think ConnManager + ## is not prepare for restarts and maybe we don't need that restart feature. 
+ + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(23451)) + await client.start() # Mimic restart by starting the clone + + # pushHandlerFuture = newFuture[(string, WakuMessage)]() + await client.mountFilterClient() + client.wakuFilterClient.registerPushHandler(messagePushHandler) + + ## connect both switches + await client.switch.connect( + server.switch.peerInfo.peerId, server.switch.peerInfo.listenAddrs + ) + + # Given a valid filter subscription + subscribeResponse = await client.filterSubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + require: + subscribeResponse.isOk() + server.wakuFilter.subscriptions.subscribedPeerCount() == 1 + + # When a message is sent to the subscribed content topic, via Filter; without refreshing the subscription + let msg = fakeWakuMessage(contentTopic = contentTopic) + await server.filterHandleMessage(pubsubTopic, msg) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic, pushedMsg) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic == pubsubTopic + pushedMsg == msg + + asyncTest "Filter Client Node can't receive messages after subscribing and restarting, via Relay": + (await server.mountRelay()).isOkOr: + assert false, "error mounting relay: " & $error + + # Given a valid filter subscription + let subscribeResponse = await client.filterSubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + require: + subscribeResponse.isOk() + server.wakuFilter.subscriptions.subscribedPeerCount() == 1 + + # And the client node reboots + await client.stop() + await clientClone.start() # Mimic restart by starting the clone + + # When a message is sent to the subscribed content topic, via Relay + let msg = fakeWakuMessage(contentTopic = contentTopic) + discard await server.publish(some(pubsubTopic), msg) + + # Then the message is not sent to the client's filter push handler + check (not await 
pushHandlerFuture.withTimeout(FUTURE_TIMEOUT)) + + # Given the client refreshes the subscription + let subscribeResponse2 = await clientClone.filterSubscribe( + some(pubsubTopic), contentTopicSeq, serverRemotePeerInfo + ) + check: + subscribeResponse2.isOk() + server.wakuFilter.subscriptions.subscribedPeerCount() == 1 + + # When a message is sent to the subscribed content topic, via Relay + pushHandlerFuture = newPushHandlerFuture() + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + discard await server.publish(some(pubsubTopic), msg2) + + # Then the message is not sent to the client's filter push handler + check (not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT)) + + asyncTest "ping subscriber": + # Given + let + wakuFilter = server.wakuFilter + clientPeerId = client.switch.peerInfo.peerId + serverPeerId = server.switch.peerInfo.peerId + pingRequest = + createRequest(filterSubscribeType = FilterSubscribeType.SUBSCRIBER_PING) + filterSubscribeRequest = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + + ## connect both switches + await client.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + + # When + let response1 = await wakuFilter.handleSubscribeRequest(clientPeerId, pingRequest) + + # Then + check: + response1.requestId == pingRequest.requestId + response1.statusCode == FilterSubscribeErrorKind.NOT_FOUND.uint32 + response1.statusDesc.get().contains("peer has no subscriptions") + + # When + let + response2 = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterSubscribeRequest) + response3 = await wakuFilter.handleSubscribeRequest(clientPeerId, pingRequest) + + # Then + check: + response2.requestId == filterSubscribeRequest.requestId + response2.statusCode == 200 + response2.statusDesc.get() == "OK" + response3.requestId == pingRequest.requestId + response3.statusCode == 200 + response3.statusDesc.get() == "OK" + 
+ asyncTest "simple subscribe and unsubscribe request": + # Given + let + wakuFilter = server.wakuFilter + clientPeerId = client.switch.peerInfo.peerId + serverPeerId = server.switch.peerInfo.peerId + filterSubscribeRequest = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + filterUnsubscribeRequest = createRequest( + filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE, + pubsubTopic = filterSubscribeRequest.pubsubTopic, + contentTopics = filterSubscribeRequest.contentTopics, + ) + + ## connect both switches + await client.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + + # When + let response = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterSubscribeRequest) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.peersSubscribed[clientPeerId].criteriaCount == 1 + response.requestId == filterSubscribeRequest.requestId + response.statusCode == 200 + response.statusDesc.get() == "OK" + + # When + let response2 = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterUnsubscribeRequest) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + # peerId is removed from subscriptions + response2.requestId == filterUnsubscribeRequest.requestId + response2.statusCode == 200 + response2.statusDesc.get() == "OK" + + asyncTest "simple subscribe and unsubscribe all for multiple content topics": + # Given + let + wakuFilter = server.wakuFilter + clientPeerId = client.switch.peerInfo.peerId + serverPeerId = server.switch.peerInfo.peerId + nonDefaultContentTopic = ContentTopic("/waku/2/non-default-waku/proto") + filterSubscribeRequest = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic, nonDefaultContentTopic], + ) + filterUnsubscribeAllRequest = + 
createRequest(filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE_ALL) + + ## connect both switches + await client.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + + # When + let response = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterSubscribeRequest) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.peersSubscribed[clientPeerId].criteriaCount == 2 + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + filterSubscribeRequest.contentTopics, + ) + response.requestId == filterSubscribeRequest.requestId + response.statusCode == 200 + response.statusDesc.get() == "OK" + + # When + let response2 = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterUnsubscribeAllRequest) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + # peerId is removed from subscriptions + response2.requestId == filterUnsubscribeAllRequest.requestId + response2.statusCode == 200 + response2.statusDesc.get() == "OK" + + asyncTest "subscribe and unsubscribe to multiple content topics": + # Given + let + wakuFilter = server.wakuFilter + clientPeerId = client.switch.peerInfo.peerId + serverPeerId = server.switch.peerInfo.peerId + nonDefaultContentTopic = ContentTopic("/waku/2/non-default-waku/proto") + filterSubscribeRequest1 = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + filterSubscribeRequest2 = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = filterSubscribeRequest1.pubsubTopic, + contentTopics = @[nonDefaultContentTopic], + ) + filterUnsubscribeRequest1 = createRequest( + filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE, + pubsubTopic = filterSubscribeRequest1.pubsubTopic, + contentTopics = filterSubscribeRequest1.contentTopics, + ) + filterUnsubscribeRequest2 = createRequest( + filterSubscribeType = 
FilterSubscribeType.UNSUBSCRIBE, + pubsubTopic = filterSubscribeRequest2.pubsubTopic, + contentTopics = filterSubscribeRequest2.contentTopics, + ) + + ## connect both switches + await client.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + + # When + let response1 = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterSubscribeRequest1) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.peersSubscribed[clientPeerId].criteriaCount == 1 + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + filterSubscribeRequest1.contentTopics, + ) + response1.requestId == filterSubscribeRequest1.requestId + response1.statusCode == 200 + response1.statusDesc.get() == "OK" + + # When + let response2 = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterSubscribeRequest2) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.peersSubscribed[clientPeerId].criteriaCount == 2 + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + filterSubscribeRequest1.contentTopics & filterSubscribeRequest2.contentTopics, + ) + response2.requestId == filterSubscribeRequest2.requestId + response2.statusCode == 200 + response2.statusDesc.get() == "OK" + + # When + let response3 = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterUnsubscribeRequest1) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.peersSubscribed[clientPeerId].criteriaCount == 1 + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + filterSubscribeRequest2.contentTopics, + ) + response3.requestId == filterUnsubscribeRequest1.requestId + response3.statusCode == 200 + response3.statusDesc.get() == "OK" + + # When + let response4 = + await wakuFilter.handleSubscribeRequest(clientPeerId, filterUnsubscribeRequest2) + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() 
== 0 + # peerId is removed from subscriptions + response4.requestId == filterUnsubscribeRequest2.requestId + response4.statusCode == 200 + response4.statusDesc.get() == "OK" + + asyncTest "subscribe errors": + ## Tests most common error paths while subscribing + + # Given + let + wakuFilter = server.wakuFilter + clientPeerId = client.switch.peerInfo.peerId + serverPeerId = server.switch.peerInfo.peerId + peerManager = server.peerManager + + ## connect both switches + await client.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + + ## Incomplete filter criteria + + # When + let + reqNoPubsubTopic = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = none(PubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + reqNoContentTopics = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[], + ) + response1 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqNoPubsubTopic) + response2 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqNoContentTopics) + + # Then + check: + response1.requestId == reqNoPubsubTopic.requestId + response2.requestId == reqNoContentTopics.requestId + response1.statusCode == FilterSubscribeErrorKind.BAD_REQUEST.uint32 + response2.statusCode == FilterSubscribeErrorKind.BAD_REQUEST.uint32 + response1.statusDesc.get().contains( + "pubsubTopic and contentTopics must be specified" + ) + response2.statusDesc.get().contains( + "pubsubTopic and contentTopics must be specified" + ) + + ## Max content topics per request exceeded + + # When + let + contentTopics = toSeq(1 .. 
MaxContentTopicsPerRequest + 1).mapIt( + ContentTopic("/waku/2/content-$#/proto" % [$it]) + ) + reqTooManyContentTopics = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = contentTopics, + ) + response3 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqTooManyContentTopics) + + # Then + check: + response3.requestId == reqTooManyContentTopics.requestId + response3.statusCode == FilterSubscribeErrorKind.BAD_REQUEST.uint32 + response3.statusDesc.get().contains("exceeds maximum content topics") + + ## Max filter criteria exceeded + + # When + let filterCriteria = toSeq(1 .. MaxFilterCriteriaPerPeer).mapIt( + (DefaultPubsubTopic, ContentTopic("/waku/2/content-$#/proto" % [$it])) + ) + + discard await wakuFilter.subscriptions.addSubscription( + clientPeerId, filterCriteria.toHashSet() + ) + + let + reqTooManyFilterCriteria = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + response4 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqTooManyFilterCriteria) + + # Then + check: + response4.requestId == reqTooManyFilterCriteria.requestId + response4.statusCode == FilterSubscribeErrorKind.SERVICE_UNAVAILABLE.uint32 + response4.statusDesc.get().contains( + "peer has reached maximum number of filter criteria" + ) + + ## Max subscriptions exceeded + + # When + await wakuFilter.subscriptions.removePeer(clientPeerId) + wakuFilter.subscriptions.cleanUp() + + var peers = newSeq[WakuNode](MaxFilterPeers) + + for index in 0 ..< MaxFilterPeers: + peers[index] = newTestWakuNode( + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(23551 + index) + ) + + await peers[index].start() + await peers[index].mountFilterClient() + + ## connect switches + debug "establish connection", peerId = peers[index].peerInfo.peerId + + await server.switch.connect( + 
peers[index].switch.peerInfo.peerId, peers[index].switch.peerInfo.listenAddrs + ) + + debug "adding subscription" + + ( + await wakuFilter.subscriptions.addSubscription( + peers[index].switch.peerInfo.peerId, + @[(DefaultPubsubTopic, DefaultContentTopic)].toHashSet(), + ) + ).isOkOr: + assert false, $error + + let + reqTooManySubscriptions = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + response5 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqTooManySubscriptions) + + # Then + check: + response5.requestId == reqTooManySubscriptions.requestId + response5.statusCode == FilterSubscribeErrorKind.SERVICE_UNAVAILABLE.uint32 + response5.statusDesc.get().contains( + "node has reached maximum number of subscriptions" + ) + + ## stop the peers + for index in 0 ..< MaxFilterPeers: + await peers[index].stop() + + asyncTest "unsubscribe errors": + ## Tests most common error paths while unsubscribing + + # Given + let + wakuFilter = server.wakuFilter + clientPeerId = client.switch.peerInfo.peerId + serverPeerId = server.switch.peerInfo.peerId + + ## connect both switches + await client.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + + ## Incomplete filter criteria + + # When + let + reqNoPubsubTopic = createRequest( + filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE, + pubsubTopic = none(PubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + reqNoContentTopics = createRequest( + filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[], + ) + response1 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqNoPubsubTopic) + response2 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqNoContentTopics) + + # Then + check: + response1.requestId == reqNoPubsubTopic.requestId + response2.requestId == reqNoContentTopics.requestId + response1.statusCode 
== FilterSubscribeErrorKind.BAD_REQUEST.uint32 + response2.statusCode == FilterSubscribeErrorKind.BAD_REQUEST.uint32 + response1.statusDesc.get().contains( + "pubsubTopic and contentTopics must be specified" + ) + response2.statusDesc.get().contains( + "pubsubTopic and contentTopics must be specified" + ) + + ## Max content topics per request exceeded + + # When + let + contentTopics = toSeq(1 .. MaxContentTopicsPerRequest + 1).mapIt( + ContentTopic("/waku/2/content-$#/proto" % [$it]) + ) + reqTooManyContentTopics = createRequest( + filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = contentTopics, + ) + response3 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqTooManyContentTopics) + + # Then + check: + response3.requestId == reqTooManyContentTopics.requestId + response3.statusCode == FilterSubscribeErrorKind.BAD_REQUEST.uint32 + response3.statusDesc.get().contains("exceeds maximum content topics") + + ## Subscription not found - unsubscribe + + # When + let + reqSubscriptionNotFound = createRequest( + filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + response4 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqSubscriptionNotFound) + + # Then + check: + response4.requestId == reqSubscriptionNotFound.requestId + response4.statusCode == FilterSubscribeErrorKind.NOT_FOUND.uint32 + response4.statusDesc.get().contains("peer has no subscriptions") + + ## Subscription not found - unsubscribe all + + # When + let + reqUnsubscribeAll = + createRequest(filterSubscribeType = FilterSubscribeType.UNSUBSCRIBE_ALL) + response5 = + await wakuFilter.handleSubscribeRequest(clientPeerId, reqUnsubscribeAll) + + # Then + check: + response5.requestId == reqUnsubscribeAll.requestId + response5.statusCode == FilterSubscribeErrorKind.NOT_FOUND.uint32 + response5.statusDesc.get().contains("peer has no 
subscriptions") + + suite "Waku Filter - subscription maintenance": + asyncTest "simple maintenance": + # Given + let + wakuFilter = server.wakuFilter + clientPeerId = client.switch.peerInfo.peerId + serverPeerId = server.switch.peerInfo.peerId + peerManager = server.peerManager + + let + client1 = newTestWakuNode( + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(23552) + ) + client2 = newTestWakuNode( + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(23553) + ) + client3 = newTestWakuNode( + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(23554) + ) + filterSubscribeRequest = createRequest( + filterSubscribeType = FilterSubscribeType.SUBSCRIBE, + pubsubTopic = some(DefaultPubsubTopic), + contentTopics = @[DefaultContentTopic], + ) + + ## connect both switches + await client1.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + await client2.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + await client3.switch.connect(serverPeerId, server.switch.peerInfo.listenAddrs) + + await client1.start() + await client2.start() + await client3.start() + + defer: + await client1.stop() + await client2.stop() + await client3.stop() + + await client1.mountFilterClient() + await client2.mountFilterClient() + await client3.mountFilterClient() + + # When + server.switch.peerStore[ProtoBook][client1.switch.peerInfo.peerId] = + @[WakuFilterPushCodec] + server.switch.peerStore[ProtoBook][client2.switch.peerInfo.peerId] = + @[WakuFilterPushCodec] + server.switch.peerStore[ProtoBook][client3.switch.peerInfo.peerId] = + @[WakuFilterPushCodec] + + check: + ( + await wakuFilter.handleSubscribeRequest( + client1.switch.peerInfo.peerId, filterSubscribeRequest + ) + ).statusCode == 200 + + ( + await wakuFilter.handleSubscribeRequest( + client2.switch.peerInfo.peerId, filterSubscribeRequest + ) + ).statusCode == 200 + + ( + await wakuFilter.handleSubscribeRequest( + client3.switch.peerInfo.peerId, filterSubscribeRequest + ) + ).statusCode 
== 200 + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 3 + wakuFilter.subscriptions.isSubscribed(client1.switch.peerInfo.peerId) + wakuFilter.subscriptions.isSubscribed(client2.switch.peerInfo.peerId) + wakuFilter.subscriptions.isSubscribed(client1.switch.peerInfo.peerId) + + # When + # Maintenance loop should leave all peers in peer store intact + await wakuFilter.maintainSubscriptions() + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 3 + wakuFilter.subscriptions.isSubscribed(client1.switch.peerInfo.peerId) + wakuFilter.subscriptions.isSubscribed(client2.switch.peerInfo.peerId) + wakuFilter.subscriptions.isSubscribed(client1.switch.peerInfo.peerId) + + # When + # Remove peerId1 and peerId3 from peer store + server.switch.peerStore.del(client1.switch.peerInfo.peerId) + server.switch.peerStore.del(client3.switch.peerInfo.peerId) + await wakuFilter.maintainSubscriptions() + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(client2.switch.peerInfo.peerId) + + # When + # Remove peerId2 from peer store + server.switch.peerStore.del(client2.switch.peerInfo.peerId) + await wakuFilter.maintainSubscriptions() + + # Then + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 diff --git a/third-party/nwaku/tests/node/test_wakunode_legacy_lightpush.nim b/third-party/nwaku/tests/node/test_wakunode_legacy_lightpush.nim new file mode 100644 index 0000000..b769abb --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_legacy_lightpush.nim @@ -0,0 +1,250 @@ +{.used.} + +import + std/[options, tempfiles, net, osproc], + testutils/unittests, + chronos, + std/strformat, + libp2p/crypto/crypto + +import + waku/[ + waku_core, + node/peer_manager, + node/waku_node, + waku_lightpush_legacy, + waku_lightpush_legacy/common, + waku_lightpush_legacy/protocol_metrics, + waku_rln_relay, + ], + ../testlib/[wakucore, wakunode, testasync, futures, testutils], + 
../resources/payloads, + ../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain] + +suite "Waku Legacy Lightpush - End To End": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(server.start(), client.start()) + await server.start() + + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await server.mountLegacyLightpush() # without rln-relay + client.mountLegacyLightpushClient() + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await server.stop() + + suite "Assessment of Message Relaying Mechanisms": + asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node": + # Given a light lightpush client + let lightpushClient = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + lightpushClient.mountLegacyLightpushClient() + + # When the client publishes a message + let publishResponse = await lightpushClient.legacyLightpushPublish( + some(pubsubTopic), message, serverRemotePeerInfo + ) + + if not publishResponse.isOk(): + echo "Publish failed: ", publishResponse.error() + + # Then the 
message is not relayed but not due to RLN + assert publishResponse.isErr(), "We expect an error response" + + assert (publishResponse.error == protocol_metrics.notPublishedAnyPeer), + "incorrect error response" + + suite "Waku LightPush Validation Tests": + asyncTest "Validate message size exceeds limit": + let msgOverLimit = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize + 64 * 1024), + ) + + # When the client publishes an over-limit message + let publishResponse = await client.legacyLightpushPublish( + some(pubsubTopic), msgOverLimit, serverRemotePeerInfo + ) + + check: + publishResponse.isErr() + publishResponse.error == + fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes" + +suite "RLN Proofs as a Lightpush Service": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + anvilProc {.threadvar.}: Process + manager {.threadvar.}: OnchainGroupManager + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + anvilProc = runAnvil() + manager = waitFor setupOnchainGroupManager() + + # mount rln-relay + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await allFutures(server.start(), client.start()) + await server.start() + + (await 
server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await server.mountRlnRelay(wakuRlnConfig) + await server.mountLegacyLightPush() + client.mountLegacyLightPushClient() + + let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials(manager1.rlnInstance) + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + debug "Updated root for node1", rootUpdated1 + + if rootUpdated1: + let proofResult = waitFor manager1.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager1.merkleProofCache = proofResult.get() + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await server.stop() + stopAnvil(anvilProc) + + suite "Lightpush attaching RLN proofs": + asyncTest "Message is published when RLN enabled": + # Given a light lightpush client + let lightpushClient = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + lightpushClient.mountLegacyLightPushClient() + + # When the client publishes a message + let publishResponse = await lightpushClient.legacyLightpushPublish( + some(pubsubTopic), message, serverRemotePeerInfo + ) + + if not publishResponse.isOk(): + echo "Publish failed: ", publishResponse.error() + + # Then the message is not relayed but not due to RLN + assert publishResponse.isErr(), "We expect an error response" + check publishResponse.error == protocol_metrics.notPublishedAnyPeer + +suite "Waku Legacy Lightpush message delivery": + asyncTest "Legacy lightpush message flow succeed": + ## Setup + let + lightNodeKey = generateSecp256k1Key() + lightNode = 
newTestWakuNode(lightNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + bridgeNodeKey = generateSecp256k1Key() + bridgeNode = newTestWakuNode(bridgeNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + destNodeKey = generateSecp256k1Key() + destNode = newTestWakuNode(destNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) + + (await destNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await bridgeNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await bridgeNode.mountLegacyLightPush() + lightNode.mountLegacyLightPushClient() + + discard await lightNode.peerManager.dialPeer( + bridgeNode.peerInfo.toRemotePeerInfo(), WakuLegacyLightPushCodec + ) + await sleepAsync(100.milliseconds) + await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()]) + + ## Given + const CustomPubsubTopic = "/waku/2/rs/0/1" + let message = fakeWakuMessage() + var completionFutRelay = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == CustomPubsubTopic + msg == message + completionFutRelay.complete(true) + + destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic:" & $error + + # Wait for subscription to take effect + await sleepAsync(100.millis) + + ## When + let res = await lightNode.legacyLightpushPublish(some(CustomPubsubTopic), message) + assert res.isOk(), $res.error + + ## Then + check await completionFutRelay.withTimeout(5.seconds) + + ## Cleanup + await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) diff --git a/third-party/nwaku/tests/node/test_wakunode_legacy_store.nim b/third-party/nwaku/tests/node/test_wakunode_legacy_store.nim new file mode 100644 index 0000000..beed3c1 --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_legacy_store.nim @@ -0,0 +1,1069 @@ +{.used.} + +import 
std/options, testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + common/paging, + node/waku_node, + node/peer_manager, + waku_core, + waku_store_legacy, + waku_archive_legacy, + ], + ../waku_store_legacy/store_utils, + ../waku_archive_legacy/archive_utils, + ../testlib/[wakucore, wakunode, testasync, testutils] + +suite "Waku Store - End to End - Sorted Archive": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var archiveMessages {.threadvar.}: seq[WakuMessage] + var historyQuery {.threadvar.}: HistoryQuery + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var archiveDriver {.threadvar.}: ArchiveDriver + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + var clientPeerId {.threadvar.}: PeerId + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + let timeOrigin = now() + archiveMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.Forward, + pageSize: 5, + ) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + 
archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages) + let mountArchiveResult = server.mountLegacyArchive(archiveDriver) + assert mountArchiveResult.isOk() + + await server.mountLegacyStore() + client.mountLegacyStoreClient() + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + clientPeerId = client.peerInfo.toRemotePeerInfo().peerId + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + suite "Message Pagination": + asyncTest "Forward Pagination": + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + var otherHistoryQuery = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let otherQueryResponse = + await client.query(otherHistoryQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + otherQueryResponse.get().messages == archiveMessages[5 ..< 10] + + asyncTest "Backward Pagination": + # Given the history query is backward + historyQuery.direction = PagingDirection.BACKWARD + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[5 ..< 10] + + # Given the next query + var nextHistoryQuery = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.BACKWARD, + pageSize: 5, + ) + + # When making the next history query + let otherQueryResponse = + await client.query(nextHistoryQuery, serverRemotePeerInfo) + + # Then the response 
contains the messages + check: + otherQueryResponse.get().messages == archiveMessages[0 ..< 5] + + suite "Pagination with Differente Page Sizes": + asyncTest "Pagination with Small Page Size": + # Given the first query (1/5) + historyQuery.pageSize = 2 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 2] + + # Given the next query (2/5) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[2 ..< 4] + + # Given the next query (3/5) + let historyQuery3 = HistoryQuery( + cursor: queryResponse2.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse3.get().messages == archiveMessages[4 ..< 6] + + # Given the next query (4/5) + let historyQuery4 = HistoryQuery( + cursor: queryResponse3.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse4 = await client.query(historyQuery4, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse4.get().messages == archiveMessages[6 ..< 8] + + # Given the next query (5/5) + let historyQuery5 = HistoryQuery( + cursor: queryResponse4.get().cursor, + 
pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 2, + ) + + # When making the next history query + let queryResponse5 = await client.query(historyQuery5, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse5.get().messages == archiveMessages[8 ..< 10] + + asyncTest "Pagination with Large Page Size": + # Given the first query (1/2) + historyQuery.pageSize = 8 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 8] + + # Given the next query (2/2) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 8, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[8 ..< 10] + + asyncTest "Pagination with Excessive Page Size": + # Given the first query (1/1) + historyQuery.pageSize = 100 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 10] + + asyncTest "Pagination with Mixed Page Size": + # Given the first query (1/3) + historyQuery.pageSize = 2 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 2] + + # Given the next query (2/3) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + 
contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 4, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[2 ..< 6] + + # Given the next query (3/3) + let historyQuery3 = HistoryQuery( + cursor: queryResponse2.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 6, + ) + + # When making the next history query + let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse3.get().messages == archiveMessages[6 ..< 10] + + asyncTest "Pagination with Zero Page Size (Behaves as DefaultPageSize)": + # Given a message list of size higher than the default page size + let currentStoreLen = uint((await archiveDriver.getMessagesCount()).get()) + assert archive.DefaultPageSize > currentStoreLen, + "This test requires a store with more than (DefaultPageSize) messages" + let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 + + let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp + var extraMessages: seq[WakuMessage] = @[] + for i in 0 ..< missingMessagesAmount: + let + timestampOffset = 10 * int(i + 1) + # + 1 to avoid collision with existing messages + message: WakuMessage = + fakeWakuMessage(@[byte i], ts = ts(timestampOffset, lastMessageTimestamp)) + extraMessages.add(message) + discard archiveDriver.put(pubsubTopic, extraMessages) + + let totalMessages = archiveMessages & extraMessages + + # Given the a query with zero page size (1/2) + historyQuery.pageSize = 0 + + # When making a history query + let queryResponse1 = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the archive.DefaultPageSize messages + check: + 
queryResponse1.get().messages == totalMessages[0 ..< archive.DefaultPageSize] + + # Given the next query (2/2) + let historyQuery2 = HistoryQuery( + cursor: queryResponse1.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 0, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the remaining messages + check: + queryResponse2.get().messages == + totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5] + + asyncTest "Pagination with Default Page Size": + # Given a message list of size higher than the default page size + let currentStoreLen = uint((await archiveDriver.getMessagesCount()).get()) + assert archive.DefaultPageSize > currentStoreLen, + "This test requires a store with more than (DefaultPageSize) messages" + let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 + + let lastMessageTimestamp = archiveMessages[archiveMessages.len - 1].timestamp + var extraMessages: seq[WakuMessage] = @[] + for i in 0 ..< missingMessagesAmount: + let + timestampOffset = 10 * int(i + 1) + # + 1 to avoid collision with existing messages + message: WakuMessage = + fakeWakuMessage(@[byte i], ts = ts(timestampOffset, lastMessageTimestamp)) + extraMessages.add(message) + discard archiveDriver.put(pubsubTopic, extraMessages) + + let totalMessages = archiveMessages & extraMessages + + # Given a query with default page size (1/2) + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + ) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == totalMessages[0 ..< archive.DefaultPageSize] + + # Given the next query (2/2) + let historyQuery2 = 
HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == + totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5] + + suite "Pagination with Different Cursors": + asyncTest "Starting Cursor": + # Given a cursor pointing to the first message + let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[0]) + historyQuery.cursor = some(cursor) + historyQuery.pageSize = 1 + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the message + check: + queryResponse.get().messages == archiveMessages[1 ..< 2] + + asyncTest "Middle Cursor": + # Given a cursor pointing to the middle message1 + let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[5]) + historyQuery.cursor = some(cursor) + historyQuery.pageSize = 1 + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the message + check: + queryResponse.get().messages == archiveMessages[6 ..< 7] + + asyncTest "Ending Cursor": + # Given a cursor pointing to the last message + let cursor = computeHistoryCursor(pubsubTopic, archiveMessages[9]) + historyQuery.cursor = some(cursor) + historyQuery.pageSize = 1 + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + suite "Message Sorting": + asyncTest "Cursor Reusability Across Nodes": + # Given a different server node with the same archive + let + otherArchiveDriverWithMessages = + 
newArchiveDriverWithMessages(pubsubTopic, archiveMessages) + otherServerKey = generateSecp256k1Key() + otherServer = + newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountOtherArchiveResult = + otherServer.mountLegacyArchive(otherArchiveDriverWithMessages) + assert mountOtherArchiveResult.isOk() + + await otherServer.mountLegacyStore() + + await otherServer.start() + let otherServerRemotePeerInfo = otherServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the first server node + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the cursor from the first query + let cursor = queryResponse.get().cursor + + # When making a history query to the second server node + let otherHistoryQuery = HistoryQuery( + cursor: cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + let otherQueryResponse = + await client.query(otherHistoryQuery, otherServerRemotePeerInfo) + + # Then the response contains the remaining messages + check: + otherQueryResponse.get().messages == archiveMessages[5 ..< 10] + + # Cleanup + await otherServer.stop() + +suite "Waku Store - End to End - Unsorted Archive": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var historyQuery {.threadvar.}: HistoryQuery + var unsortedArchiveMessages {.threadvar.}: seq[WakuMessage] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: 
PagingDirection.FORWARD, + pageSize: 5, + ) + + let timeOrigin = now() + unsortedArchiveMessages = + @[ # SortIndex (by timestamp and digest) + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), # 1 + fakeWakuMessage(@[byte 03], ts = ts(00, timeOrigin)), # 2 + fakeWakuMessage(@[byte 08], ts = ts(00, timeOrigin)), # 0 + fakeWakuMessage(@[byte 07], ts = ts(10, timeOrigin)), # 4 + fakeWakuMessage(@[byte 02], ts = ts(10, timeOrigin)), # 3 + fakeWakuMessage(@[byte 09], ts = ts(10, timeOrigin)), # 5 + fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)), # 6 + fakeWakuMessage(@[byte 01], ts = ts(20, timeOrigin)), # 9 + fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)), # 7 + fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)), # 8 + ] + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + let + unsortedArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, unsortedArchiveMessages) + mountUnsortedArchiveResult = + server.mountLegacyArchive(unsortedArchiveDriverWithMessages) + + assert mountUnsortedArchiveResult.isOk() + + await server.mountLegacyStore() + client.mountLegacyStoreClient() + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + asyncTest "Basic (Timestamp and Digest) Sorting Validation": + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + unsortedArchiveMessages[2], + unsortedArchiveMessages[0], + unsortedArchiveMessages[1], + unsortedArchiveMessages[4], + unsortedArchiveMessages[3], + ] + + # Given the next query + var historyQuery2 = HistoryQuery( + cursor: 
queryResponse.get().cursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == + @[ + unsortedArchiveMessages[5], + unsortedArchiveMessages[6], + unsortedArchiveMessages[8], + unsortedArchiveMessages[9], + unsortedArchiveMessages[7], + ] + + asyncTest "Backward pagination with Ascending Sorting": + # Given a history query with backward pagination + let cursor = computeHistoryCursor(pubsubTopic, unsortedArchiveMessages[4]) + historyQuery.direction = PagingDirection.BACKWARD + historyQuery.cursor = some(cursor) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + unsortedArchiveMessages[2], + unsortedArchiveMessages[0], + unsortedArchiveMessages[1], + ] + + asyncTest "Forward Pagination with Ascending Sorting": + # Given a history query with forward pagination + let cursor = computeHistoryCursor(pubsubTopic, unsortedArchiveMessages[4]) + historyQuery.direction = PagingDirection.FORWARD + historyQuery.cursor = some(cursor) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + unsortedArchiveMessages[3], + unsortedArchiveMessages[5], + unsortedArchiveMessages[6], + unsortedArchiveMessages[8], + unsortedArchiveMessages[9], + ] + +suite "Waku Store - End to End - Archive with Multiple Topics": + var pubsubTopic {.threadvar.}: PubsubTopic + var pubsubTopicB {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicB {.threadvar.}: ContentTopic + var 
contentTopicC {.threadvar.}: ContentTopic + var contentTopicSpecials {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var historyQuery {.threadvar.}: HistoryQuery + var originTs {.threadvar.}: proc(offset: int): Timestamp {.gcsafe, raises: [].} + var archiveMessages {.threadvar.}: seq[WakuMessage] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + pubsubTopicB = "topicB" + contentTopic = DefaultContentTopic + contentTopicB = "topicB" + contentTopicC = "topicC" + contentTopicSpecials = "!@#$%^&*()_+" + contentTopicSeq = + @[contentTopic, contentTopicB, contentTopicC, contentTopicSpecials] + + historyQuery = HistoryQuery( + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + let timeOrigin = now() + originTs = proc(offset = 0): Timestamp {.gcsafe, raises: [].} = + ts(offset, timeOrigin) + + archiveMessages = + @[ + fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic), + fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 02], ts = originTs(20), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 03], ts = originTs(30), contentTopic = contentTopic), + fakeWakuMessage(@[byte 04], ts = originTs(40), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 05], ts = originTs(50), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 06], ts = originTs(60), contentTopic = contentTopic), + fakeWakuMessage(@[byte 07], ts = originTs(70), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC), + fakeWakuMessage( + @[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials + ), + ] + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = 
newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + let archiveDriver = newSqliteArchiveDriver() + .put(pubsubTopic, archiveMessages[0 ..< 6]) + .put(pubsubTopicB, archiveMessages[6 ..< 10]) + let mountSortedArchiveResult = server.mountLegacyArchive(archiveDriver) + + assert mountSortedArchiveResult.isOk() + + await server.mountLegacyStore() + client.mountLegacyStoreClient() + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + suite "Validation of Content Filtering": + asyncTest "Basic Content Filtering": + # Given a history query with content filtering + historyQuery.contentTopics = @[contentTopic] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == @[archiveMessages[0], archiveMessages[3]] + + asyncTest "Multiple Content Filters": + # Given a history query with multiple content filtering + historyQuery.contentTopics = @[contentTopic, contentTopicB] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[0], + archiveMessages[1], + archiveMessages[3], + archiveMessages[4], + ] + + asyncTest "Empty Content Filtering": + # Given a history query with empty content filtering + historyQuery.contentTopics = @[] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + let historyQuery2 = HistoryQuery( + cursor: queryResponse.get().cursor, + 
pubsubTopic: none(PubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[5 ..< 10] + + asyncTest "Non-Existent Content Topic": + # Given a history query with non-existent content filtering + historyQuery.contentTopics = @["non-existent-topic"] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + asyncTest "Special Characters in Content Filtering": + # Given a history query with special characters in content filtering + historyQuery.pubsubTopic = some(pubsubTopicB) + historyQuery.contentTopics = @["!@#$%^&*()_+"] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages == @[archiveMessages[9]] + + asyncTest "PubsubTopic Specified": + # Given a history query with pubsub topic specified + historyQuery.pubsubTopic = some(pubsubTopicB) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[6], + archiveMessages[7], + archiveMessages[8], + archiveMessages[9], + ] + + asyncTest "PubsubTopic Left Empty": + # Given a history query with pubsub topic left empty + historyQuery.pubsubTopic = none(PubsubTopic) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next 
query + let historyQuery2 = HistoryQuery( + cursor: queryResponse.get().cursor, + pubsubTopic: none(PubsubTopic), + contentTopics: contentTopicSeq, + direction: PagingDirection.FORWARD, + pageSize: 5, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[5 ..< 10] + + suite "Validation of Time-based Filtering": + asyncTest "Basic Time Filtering": + # Given a history query with start and end time + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[archiveMessages[2], archiveMessages[3], archiveMessages[4]] + + asyncTest "Only Start Time Specified": + # Given a history query with only start time + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = none(Timestamp) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + archiveMessages[5], + ] + + asyncTest "Only End Time Specified": + # Given a history query with only end time + historyQuery.startTime = none(Timestamp) + historyQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages == + @[ + archiveMessages[0], + archiveMessages[1], + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + ] + + asyncTest "Invalid Time Range": + # Given a history query with invalid time range + 
historyQuery.startTime = some(originTs(60)) + historyQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + asyncTest "Time Filtering with Content Filtering": + # Given a history query with time and content filtering + historyQuery.startTime = some(originTs(20)) + historyQuery.endTime = some(originTs(60)) + historyQuery.contentTopics = @[contentTopicC] + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == @[archiveMessages[2], archiveMessages[5]] + + asyncTest "Messages Outside of Time Range": + # Given a history query with a valid time range which does not contain any messages + historyQuery.startTime = some(originTs(100)) + historyQuery.endTime = some(originTs(200)) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + suite "Ephemeral": + # TODO: Ephemeral value is not properly set for Sqlite + xasyncTest "Only ephemeral Messages:": + # Given an archive with only ephemeral messages + let + ephemeralMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true), + fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true), + fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true), + ] + ephemeralArchiveDriver = + newSqliteArchiveDriver().put(pubsubTopic, ephemeralMessages) + + # And a server node with the ephemeral archive + let + ephemeralServerKey = generateSecp256k1Key() + ephemeralServer = + newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountEphemeralArchiveResult = + ephemeralServer.mountLegacyArchive(ephemeralArchiveDriver) + assert 
mountEphemeralArchiveResult.isOk() + + await ephemeralServer.mountLegacyStore() + await ephemeralServer.start() + let ephemeralServerRemotePeerInfo = ephemeralServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with only ephemeral messages + let queryResponse = + await client.query(historyQuery, ephemeralServerRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + # Cleanup + await ephemeralServer.stop() + + xasyncTest "Mixed messages": + # Given an archive with both ephemeral and non-ephemeral messages + let + ephemeralMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true), + fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true), + fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true), + ] + nonEphemeralMessages = + @[ + fakeWakuMessage(@[byte 03], ts = ts(30), ephemeral = false), + fakeWakuMessage(@[byte 04], ts = ts(40), ephemeral = false), + fakeWakuMessage(@[byte 05], ts = ts(50), ephemeral = false), + ] + mixedArchiveDriver = newSqliteArchiveDriver() + .put(pubsubTopic, ephemeralMessages) + .put(pubsubTopic, nonEphemeralMessages) + + # And a server node with the mixed archive + let + mixedServerKey = generateSecp256k1Key() + mixedServer = + newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountMixedArchiveResult = mixedServer.mountLegacyArchive(mixedArchiveDriver) + assert mountMixedArchiveResult.isOk() + + await mixedServer.mountLegacyStore() + await mixedServer.start() + let mixedServerRemotePeerInfo = mixedServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with mixed messages + let queryResponse = await client.query(historyQuery, mixedServerRemotePeerInfo) + + # Then the response contains the non-ephemeral messages + check: + queryResponse.get().messages == nonEphemeralMessages + + # Cleanup + await mixedServer.stop() + + suite "Edge Case Scenarios": + asyncTest "Empty Message Store": 
+ # Given an empty archive + let emptyArchiveDriver = newSqliteArchiveDriver() + + # And a server node with the empty archive + let + emptyServerKey = generateSecp256k1Key() + emptyServer = + newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountEmptyArchiveResult = emptyServer.mountLegacyArchive(emptyArchiveDriver) + assert mountEmptyArchiveResult.isOk() + + await emptyServer.mountLegacyStore() + await emptyServer.start() + let emptyServerRemotePeerInfo = emptyServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with an empty archive + let queryResponse = await client.query(historyQuery, emptyServerRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + # Cleanup + await emptyServer.stop() + + asyncTest "Voluminous Message Store": + # Given a voluminous archive (1M+ messages) + var voluminousArchiveMessages: seq[WakuMessage] = @[] + for i in 0 ..< 100000: + let topic = "topic" & $i + voluminousArchiveMessages.add(fakeWakuMessage(@[byte i], contentTopic = topic)) + let voluminousArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, voluminousArchiveMessages) + + # And a server node with the voluminous archive + let + voluminousServerKey = generateSecp256k1Key() + voluminousServer = + newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountVoluminousArchiveResult = + voluminousServer.mountLegacyArchive(voluminousArchiveDriverWithMessages) + assert mountVoluminousArchiveResult.isOk() + + await voluminousServer.mountLegacyStore() + await voluminousServer.start() + let voluminousServerRemotePeerInfo = voluminousServer.peerInfo.toRemotePeerInfo() + + # Given the following history query + historyQuery.contentTopics = + @["topic10000", "topic30000", "topic50000", "topic70000", "topic90000"] + + # When making a history query to the server with a voluminous archive + let queryResponse = + await client.query(historyQuery, 
voluminousServerRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + voluminousArchiveMessages[10000], + voluminousArchiveMessages[30000], + voluminousArchiveMessages[50000], + voluminousArchiveMessages[70000], + voluminousArchiveMessages[90000], + ] + + # Cleanup + await voluminousServer.stop() + + asyncTest "Large contentFilters Array": + # Given a history query with the max contentFilters len, 10 + historyQuery.contentTopics = @[contentTopic] + for i in 0 ..< 9: + let topic = "topic" & $i + historyQuery.contentTopics.add(topic) + + # When making a history query + let queryResponse = await client.query(historyQuery, serverRemotePeerInfo) + + # Then the response should trigger no errors + check: + queryResponse.get().messages == @[archiveMessages[0], archiveMessages[3]] diff --git a/third-party/nwaku/tests/node/test_wakunode_lightpush.nim b/third-party/nwaku/tests/node/test_wakunode_lightpush.nim new file mode 100644 index 0000000..e4ccb60 --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_lightpush.nim @@ -0,0 +1,246 @@ +{.used.} + +import + std/[options, tempfiles, osproc], + testutils/unittests, + chronos, + std/strformat, + libp2p/crypto/crypto + +import + waku/[waku_core, node/peer_manager, node/waku_node, waku_lightpush, waku_rln_relay], + ../testlib/[wakucore, wakunode, testasync, futures], + ../resources/payloads, + ../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain] + +const PublishedToOnePeer = 1 + +suite "Waku Lightpush - End To End": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: 
PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok(PublishedToOnePeer) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(server.start(), client.start()) + await server.start() + + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await server.mountLightpush() # without rln-relay + client.mountLightpushClient() + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await server.stop() + + suite "Assessment of Message Relaying Mechanisms": + asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node": + # Given a light lightpush client + let lightpushClient = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + lightpushClient.mountLightpushClient() + + # When the client publishes a message + let publishResponse = await lightpushClient.lightpushPublish( + some(pubsubTopic), message, some(serverRemotePeerInfo) + ) + + if not publishResponse.isOk(): + echo "Publish failed: ", publishResponse.error.code + + # Then the message is not relayed but not due to RLN + assert publishResponse.isErr(), "We expect an error response" + + assert (publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY), + "incorrect error response" + + suite "Waku LightPush Validation Tests": + asyncTest "Validate message size exceeds limit": + let msgOverLimit = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize + 64 * 1024), + ) + + # When the client publishes an over-limit message + let publishResponse = await client.lightpushPublish( + some(pubsubTopic), 
msgOverLimit, some(serverRemotePeerInfo) + ) + + check: + publishResponse.isErr() + publishResponse.error.code == LightPushErrorCode.INVALID_MESSAGE + publishResponse.error.desc == + some(fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes") + +suite "RLN Proofs as a Lightpush Service": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + anvilProc {.threadvar.}: Process + manager {.threadvar.}: OnchainGroupManager + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok(PublishedToOnePeer) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + anvilProc = runAnvil() + manager = waitFor setupOnchainGroupManager() + + # mount rln-relay + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await allFutures(server.start(), client.start()) + await server.start() + + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await server.mountRlnRelay(wakuRlnConfig) + await server.mountLightPush() + client.mountLightPushClient() + + let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials(manager1.rlnInstance) + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling 
register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + debug "Updated root for node1", rootUpdated1 + + if rootUpdated1: + let proofResult = waitFor manager1.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager1.merkleProofCache = proofResult.get() + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await server.stop() + stopAnvil(anvilProc) + + suite "Lightpush attaching RLN proofs": + asyncTest "Message is published when RLN enabled": + # Given a light lightpush client + let lightpushClient = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + lightpushClient.mountLightPushClient() + + # When the client publishes a message + let publishResponse = await lightpushClient.lightpushPublish( + some(pubsubTopic), message, some(serverRemotePeerInfo) + ) + + if not publishResponse.isOk(): + echo "Publish failed: ", publishResponse.error() + + # Then the message is not relayed but not due to RLN + assert publishResponse.isErr(), "We expect an error response" + check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY + +suite "Waku Lightpush message delivery": + asyncTest "lightpush message flow succeed": + ## Setup + let + lightNodeKey = generateSecp256k1Key() + lightNode = newTestWakuNode(lightNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + bridgeNodeKey = generateSecp256k1Key() + bridgeNode = newTestWakuNode(bridgeNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + destNodeKey = generateSecp256k1Key() + destNode = newTestWakuNode(destNodeKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(destNode.start(), bridgeNode.start(), lightNode.start()) + + (await destNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await bridgeNode.mountRelay()).isOkOr: + assert 
false, "Failed to mount relay" + await bridgeNode.mountLightPush() + lightNode.mountLightPushClient() + + discard await lightNode.peerManager.dialPeer( + bridgeNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec + ) + await sleepAsync(100.milliseconds) + await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()]) + + ## Given + const CustomPubsubTopic = "/waku/2/rs/0/1" + let message = fakeWakuMessage() + + var completionFutRelay = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == CustomPubsubTopic + msg == message + completionFutRelay.complete(true) + + destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr: + assert false, "Failed to subscribe to relay" + + # Wait for subscription to take effect + await sleepAsync(100.millis) + + ## When + let res = await lightNode.lightpushPublish(some(CustomPubsubTopic), message) + assert res.isOk(), $res.error + assert res.get() == 1, "Expected to relay the message to 1 node" + + ## Then + check await completionFutRelay.withTimeout(5.seconds) + + ## Cleanup + await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) diff --git a/third-party/nwaku/tests/node/test_wakunode_peer_exchange.nim b/third-party/nwaku/tests/node/test_wakunode_peer_exchange.nim new file mode 100644 index 0000000..3075fa8 --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_peer_exchange.nim @@ -0,0 +1,293 @@ +{.used.} + +import + std/[options, sequtils], + testutils/unittests, + chronos, + chronicles, + libp2p/switch, + libp2p/peerId, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr + +import + waku/ + [waku_node, discovery/waku_discv5, waku_peer_exchange, node/peer_manager, waku_core], + ../waku_peer_exchange/utils, + ../testlib/[wakucore, wakunode, testasync] + +suite "Waku Peer Exchange": + let + bindIp: IPAddress = parseIpAddress("0.0.0.0") + bindPort: Port = Port(0) + + var node 
{.threadvar.}: WakuNode + + suite "mountPeerExchange": + asyncSetup: + node = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort) + + asyncTest "Started node mounts peer exchange": + # Given a started node without peer exchange mounted + await node.start() + check: + node.wakuPeerExchange == nil + + # When mounting peer exchange + await node.mountPeerExchange() + + # Then peer exchange is mounted + check: + node.wakuPeerExchange != nil + node.wakuPeerExchange.started == true + + # Cleanup + await node.stop() + + asyncTest "Stopped node mounts peer exchange": + # Given a stopped node without peer exchange mounted + check: + node.wakuPeerExchange == nil + + # When mounting peer exchange + await node.mountPeerExchange() + + # Then peer exchange is mounted + check: + node.wakuPeerExchange != nil + node.wakuPeerExchange.started == false + + suite "fetchPeerExchangePeers": + var node2 {.threadvar.}: WakuNode + + asyncSetup: + node = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort) + node2 = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort) + + await allFutures(node.start(), node2.start()) + + asyncTeardown: + await allFutures(node.stop(), node2.stop()) + + asyncTest "Node fetches without mounting peer exchange": + # When a node, without peer exchange mounted, fetches peers + let res = await node.fetchPeerExchangePeers(1) + + # Then no peers are fetched + check: + node.peerManager.switch.peerStore.peers.len == 0 + res.error.status_code == SERVICE_UNAVAILABLE + res.error.status_desc == some("PeerExchangeClient is not mounted") + + asyncTest "Node fetches with mounted peer exchange, but no peers": + # Given a node with peer exchange mounted + await node.mountPeerExchangeClient() + + # When a node fetches peers + let res = await node.fetchPeerExchangePeers(1) + check: + res.error.status_code == SERVICE_UNAVAILABLE + res.error.status_desc == some("peer_not_found_failure") + + # Then no peers are fetched + check 
node.peerManager.switch.peerStore.peers.len == 0 + + asyncTest "Node succesfully exchanges px peers with faked discv5": + # Given both nodes mount peer exchange + await allFutures([node.mountPeerExchangeClient(), node2.mountPeerExchange()]) + check node.peerManager.switch.peerStore.peers.len == 0 + + # Mock that we discovered a node (to avoid running discv5) + var enr = enr.Record() + assert enr.fromUri( + "enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" + ), "Failed to parse ENR" + node2.wakuPeerExchange.enrCache.add(enr) + + # Set node2 as service peer (default one) for px protocol + node.peerManager.addServicePeer( + node2.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec + ) + + # Request 1 peer from peer exchange protocol + let res = await node.fetchPeerExchangePeers(1) + check res.tryGet() == 1 + + # Check that the peer ended up in the peerstore + let rpInfo = enr.toRemotePeerInfo.get() + check: + node.peerManager.switch.peerStore.peers.anyIt(it.peerId == rpInfo.peerId) + node.peerManager.switch.peerStore.peers.anyIt(it.addrs == rpInfo.addrs) + + suite "setPeerExchangePeer": + var node2 {.threadvar.}: WakuNode + + asyncSetup: + node = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort) + node2 = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort) + + await allFutures(node.start(), node2.start()) + + asyncTeardown: + await allFutures(node.stop(), node2.stop()) + + asyncTest "peer set successfully": + # Given a node with peer exchange mounted + await node.mountPeerExchange() + let initialPeers = node.peerManager.switch.peerStore.peers.len + + # And a valid peer info + let remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo() + + # When making a request with a valid peer info + node.setPeerExchangePeer(remotePeerInfo2) + + # Then the peer is added to the peer store + check: + 
node.peerManager.switch.peerStore.peers.len == (initialPeers + 1) + + asyncTest "peer exchange not mounted": + # Given a node without peer exchange mounted + check node.wakuPeerExchange == nil + let initialPeers = node.peerManager.switch.peerStore.peers.len + + # And a valid peer info + let invalidMultiAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet() + + # When making any request with an invalid peer info + node.setPeerExchangePeer(invalidMultiAddress) + + # Then no peer is added to the peer store + check: + node.peerManager.switch.peerStore.peers.len == initialPeers + + asyncTest "peer info parse error": + # Given a node with peer exchange mounted + await node.mountPeerExchange() + let initialPeers = node.peerManager.switch.peerStore.peers.len + + # And given a peer info with an invalid peer id + var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo() + remotePeerInfo2.peerId.data.add(255.byte) + + # When making any request with an invalid peer info + node.setPeerExchangePeer("invalidpeerinfo") + + # Then no peer is added to the peer store + check: + node.peerManager.switch.peerStore.peers.len == initialPeers + +suite "Waku Peer Exchange with discv5": + asyncTest "Node successfully exchanges px peers with real discv5": + ## Given (copied from test_waku_discv5.nim) + let + # todo: px flag + flags = CapabilitiesBitfield.init( + lightpush = false, filter = false, store = false, relay = true + ) + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + + nodeKey1 = generateSecp256k1Key() + nodeTcpPort1 = Port(64010) + nodeUdpPort1 = Port(9000) + node1 = newTestWakuNode( + nodeKey1, + bindIp, + nodeTcpPort1, + some(extIp), + wakuFlags = some(flags), + discv5UdpPort = some(nodeUdpPort1), + ) + + nodeKey2 = generateSecp256k1Key() + nodeTcpPort2 = Port(64012) + nodeUdpPort2 = Port(9002) + node2 = newTestWakuNode( + nodeKey2, + bindIp, + nodeTcpPort2, + some(extIp), + wakuFlags = some(flags), + discv5UdpPort = some(nodeUdpPort2), + ) + + 
nodeKey3 = generateSecp256k1Key() + nodeTcpPort3 = Port(64014) + nodeUdpPort3 = Port(9004) + node3 = newTestWakuNode( + nodeKey3, + bindIp, + nodeTcpPort3, + some(extIp), + wakuFlags = some(flags), + discv5UdpPort = some(nodeUdpPort3), + ) + + # discv5 + let conf1 = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: bindIp, + port: nodeUdpPort1, + privateKey: keys.PrivateKey(nodeKey1.skkey), + bootstrapRecords: @[], + autoupdateRecord: true, + ) + + let disc1 = + WakuDiscoveryV5.new(node1.rng, conf1, some(node1.enr), some(node1.peerManager)) + + let conf2 = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: bindIp, + port: nodeUdpPort2, + privateKey: keys.PrivateKey(nodeKey2.skkey), + bootstrapRecords: @[disc1.protocol.getRecord()], + autoupdateRecord: true, + ) + + let disc2 = + WakuDiscoveryV5.new(node2.rng, conf2, some(node2.enr), some(node2.peerManager)) + + await allFutures(node1.start(), node2.start(), node3.start()) + let resultDisc1StartRes = await disc1.start() + assert resultDisc1StartRes.isOk(), resultDisc1StartRes.error + let resultDisc2StartRes = await disc2.start() + assert resultDisc2StartRes.isOk(), resultDisc2StartRes.error + + ## When + var attempts = 10 + while (disc1.protocol.nodesDiscovered < 1 or disc2.protocol.nodesDiscovered < 1) and + attempts > 0: + await sleepAsync(1.seconds) + attempts -= 1 + + # node2 can be connected, so will be returned by peer exchange + require ( + await node1.peerManager.connectPeer(node2.switch.peerInfo.toRemotePeerInfo()) + ) + + # Mount peer exchange + await node1.mountPeerExchange() + await node3.mountPeerExchange() + await node3.mountPeerExchangeClient() + + let dialResponse = + await node3.dialForPeerExchange(node1.switch.peerInfo.toRemotePeerInfo()) + + check dialResponse.isOk + + let + requestPeers = 1 + currentPeers = node3.peerManager.switch.peerStore.peers.len + let res = await node3.fetchPeerExchangePeers(1) + check res.tryGet() == 1 + + # Then node3 has 
received 1 peer from node1 + check: + node3.peerManager.switch.peerStore.peers.len == currentPeers + requestPeers + + await allFutures( + [node1.stop(), node2.stop(), node3.stop(), disc1.stop(), disc2.stop()] + ) diff --git a/third-party/nwaku/tests/node/test_wakunode_peer_manager.nim b/third-party/nwaku/tests/node/test_wakunode_peer_manager.nim new file mode 100644 index 0000000..0ef2b1a --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_peer_manager.nim @@ -0,0 +1,958 @@ +{.used.} + +import + os, + std/[options, tables], + testutils/unittests, + chronos, + # chronos/timer, + chronicles, + times, + libp2p/[peerstore, crypto/crypto, multiaddress] + +from times import getTime, toUnix + +import + waku/[ + waku_core, + node/peer_manager, + node/waku_node, + discovery/waku_discv5, + waku_filter_v2/common, + waku_relay/protocol, + ], + ../testlib/[wakucore, wakunode, testasync, testutils, comparisons], + ../waku_enr/utils, + ../waku_archive/archive_utils, + ../waku_discv5/utils, + ./peer_manager/peer_store/utils + +const DEFAULT_PROTOCOLS: seq[string] = + @["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"] + +let + listenIp = parseIpAddress("0.0.0.0") + listenPort = Port(0) + +suite "Peer Manager": + var + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + + var + server {.threadvar.}: WakuNode + serverPeerStore {.threadvar.}: PeerStore + client {.threadvar.}: WakuNode + clientPeerStore {.threadvar.}: PeerStore + + var + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + serverPeerId {.threadvar.}: PeerId + clientRemotePeerInfo {.threadvar.}: RemotePeerInfo + clientPeerId {.threadvar.}: PeerId + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, listenIp, Port(3000)) + serverPeerStore = server.peerManager.switch.peerStore + client = 
newTestWakuNode(clientKey, listenIp, Port(3001)) + clientPeerStore = client.peerManager.switch.peerStore + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + serverPeerId = serverRemotePeerInfo.peerId + clientRemotePeerInfo = client.switch.peerInfo.toRemotePeerInfo() + clientPeerId = clientRemotePeerInfo.peerId + + asyncTeardown: + await allFutures(server.stop(), client.stop()) + + suite "Peer Connectivity, Management, and Store": + asyncTest "Peer Connection Validation": + # When a client connects to a server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the server should have the client in its peer store + check: + clientPeerStore.peerExists(serverRemotePeerInfo.peerId) + clientPeerStore.getPeer(serverPeerId).connectedness == Connectedness.Connected + serverPeerStore.getPeer(clientPeerId).connectedness == Connectedness.Connected + + asyncTest "Graceful Handling of Non-Existent Peers": + # Given a non existent RemotePeerInfo + let + privKey = generateSecp256k1Key() + extIp = "127.0.0.1" + tcpPort = 61500u16 + udpPort = 9000u16 + nonExistentRecord = newTestEnrRecord( + privKey = privKey, extIp = extIp, tcpPort = tcpPort, udpPort = udpPort + ) + nonExistentRemotePeerInfo = nonExistentRecord.toRemotePeerInfo().value() + + # When a client connects to the non existent peer + await client.connectToNodes(@[nonExistentRemotePeerInfo]) + + # Then the client exists in the peer store but is marked as a failed connection + let parsedRemotePeerInfo = + clientPeerStore.getPeer(nonExistentRemotePeerInfo.peerId) + check: + clientPeerStore.peerExists(nonExistentRemotePeerInfo.peerId) + parsedRemotePeerInfo.connectedness == CannotConnect + parsedRemotePeerInfo.lastFailedConn <= Moment.init(getTime().toUnix, Second) + parsedRemotePeerInfo.numberFailedConn == 1 + + suite "Peer Store Pruning": + asyncTest "Capacity is not exceeded": + # Given the client's peer store has a capacity of 1 + 
clientPeerStore.setCapacity(1) + + # And the client connects to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + check: + clientPeerStore.peers().len == 1 + + # When pruning the client's store + client.peerManager.prunePeerStore() + + # Then no peers are removed + check: + clientPeerStore.peers().len == 1 + + asyncTest "Capacity is not exceeded but some peers are unhealthy": + # Given the client's peer store has a capacity of 1 + clientPeerStore.setCapacity(1) + + # And the client connects to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + check: + clientPeerStore.peers().len == 1 + + # Given the server is marked as CannotConnect + client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] = + CannotConnect + + # When pruning the client's store + client.peerManager.prunePeerStore() + + # Then no peers are removed + check: + clientPeerStore.peers().len == 1 + + asyncTest "Capacity is exceeded but all peers are healthy": + # Given the client's peer store has a capacity of 0 + clientPeerStore.setCapacity(0) + + # And the client connects to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + check: + clientPeerStore.peers().len == 1 + + # When pruning the client's store + client.peerManager.prunePeerStore() + + # Then no peers are removed + check: + clientPeerStore.peers().len == 1 + + asyncTest "Failed connections": + # Given the client's peer store has a capacity of 0 and maxFailedAttempts of 1 + clientPeerStore.setCapacity(0) + client.peerManager.maxFailedAttempts = 1 + + # And the client connects to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + check: + clientPeerStore.peers().len == 1 + + # Given the server is marked as having 1 failed connection + client.peerManager.switch.peerStore[NumberFailedConnBook].book[serverPeerId] = 1 + + # When pruning the client's store + client.peerManager.prunePeerStore() + + # Then the server is removed from the client's peer store + check: + 
clientPeerStore.peers().len == 0 + + asyncTest "Shardless": + # Given the client's peer store has a capacity of 0 + clientPeerStore.setCapacity(0) + + # And the client connects to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + check: + clientPeerStore.peers().len == 1 + + # Given the server is marked as not connected + client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] = + CannotConnect + + # When pruning the client's store + client.peerManager.prunePeerStore() + + # Then the server is removed from the client's peer store + check: + clientPeerStore.peers().len == 0 + + asyncTest "Higher than avg shard count": + # Given the client's peer store has a capacity of 0 + clientPeerStore.setCapacity(0) + + # And the server's remote peer info contains the node's ENR + serverRemotePeerInfo.enr = some(server.enr) + + # And the client connects to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + check: + clientPeerStore.peers().len == 1 + + # Given the server is marked as not connected + # (There's only one shard in the ENR so avg shards will be the same as the shard count; hence it will be purged.) 
+ client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] = + CannotConnect + + # When pruning the client's store + client.peerManager.prunePeerStore() + + # Then the server is removed from the client's peer store + check: + clientPeerStore.peers().len == 0 + + suite "Enforcing Colocation Limits": + asyncTest "Without colocation limits": + # Given two extra clients + let + client2Key = generateSecp256k1Key() + client3Key = generateSecp256k1Key() + client2 = newTestWakuNode(client2Key, listenIp, listenPort) + client3 = newTestWakuNode(client3Key, listenIp, listenPort) + + await allFutures(client2.start(), client3.start()) + + # And the server's peer manager has no colocation limit + server.peerManager.colocationLimit = 0 + + # When all clients connect to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + await client2.connectToNodes(@[serverRemotePeerInfo]) + await client3.connectToNodes(@[serverRemotePeerInfo]) + + # Then the server should have all clients in its peer store + check: + serverPeerStore.peers().len == 3 + + # Teardown + await allFutures(client2.stop(), client3.stop()) + + asyncTest "With colocation limits": + # Given two extra clients + let + client2Key = generateSecp256k1Key() + client3Key = generateSecp256k1Key() + client2 = newTestWakuNode(client2Key, listenIp, listenPort) + client3 = newTestWakuNode(client3Key, listenIp, listenPort) + + await allFutures(client2.start(), client3.start()) + + # And the server's peer manager has a colocation limit of 1 + server.peerManager.colocationLimit = 1 + + # When all clients connect to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + await client2.connectToNodes(@[serverRemotePeerInfo]) + await client3.connectToNodes(@[serverRemotePeerInfo]) + + # Then the server should have only 1 client in its peer store + check: + serverPeerStore.peers().len == 1 + + # Teardown + await allFutures(client2.stop(), client3.stop()) + + suite "In-memory Data Structure 
Verification": + asyncTest "Cannot add self": + # When trying to add self to the peer store + client.peerManager.addPeer(clientRemotePeerInfo) + + # Then the peer store should not contain the peer + check: + not clientPeerStore.peerExists(clientPeerId) + + asyncTest "Peer stored in peer store": + # When adding a peer other than self to the peer store + client.peerManager.addPeer(serverRemotePeerInfo) + + # Then the peer store should contain the peer + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore[AddressBook][serverPeerId] == serverRemotePeerInfo.addrs + + suite "Protocol-Specific Peer Handling": + asyncTest "Peer Protocol Support Verification - No waku protocols": + # When connecting to a server with no Waku protocols + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the stored protocols should be the default (libp2p) ones + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == DEFAULT_PROTOCOLS + + asyncTest "Peer Protocol Support Verification (Before Connection)": + # Given the server has mounted some Waku protocols + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await server.mountFilter() + + # When connecting to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the stored protocols should include the Waku protocols + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == + DEFAULT_PROTOCOLS & @[WakuRelayCodec, WakuFilterSubscribeCodec] + + asyncTest "Service-Specific Peer Addition": + # Given a server mounts some Waku protocols + await server.mountFilter() + + # And another server that mounts different Waku protocols + let + server2Key = generateSecp256k1Key() + server2 = newTestWakuNode(server2Key, listenIp, listenPort) + + await server2.start() + + let + server2RemotePeerInfo = server2.switch.peerInfo.toRemotePeerInfo() + server2PeerId = server2RemotePeerInfo.peerId + 
+ (await server2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # When connecting to both servers + await client.connectToNodes(@[serverRemotePeerInfo, server2RemotePeerInfo]) + + # Then the peer store should contain both peers with the correct protocols + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == + DEFAULT_PROTOCOLS & @[WakuFilterSubscribeCodec] + clientPeerStore.peerExists(server2PeerId) + clientPeerStore.getPeer(server2PeerId).protocols == + DEFAULT_PROTOCOLS & @[WakuRelayCodec] + + # Cleanup + await server2.stop() + + suite "Tracked Peer Metadata": + asyncTest "Metadata Recording": + # When adding a peer other than self to the peer store + serverRemotePeerInfo.enr = some(server.enr) + client.peerManager.addPeer(serverRemotePeerInfo) + + # Then the peer store should contain the peer + check clientPeerStore.peerExists(serverPeerId) + + # And all the peer's information should be stored + check: + clientPeerStore[AddressBook][serverPeerId] == serverRemotePeerInfo.addrs + clientPeerStore[ENRBook][serverPeerId].raw == + serverRemotePeerInfo.enr.get().raw + chainedComparison( + clientPeerStore[ProtoBook][serverPeerId], + serverRemotePeerInfo.protocols, + DEFAULT_PROTOCOLS, + ) + chainedComparison( + clientPeerStore[AgentBook][serverPeerId], # FIXME: Not assigned + serverRemotePeerInfo.agent, + "nim-libp2p/0.0.1", + ) + chainedComparison( + clientPeerStore[ProtoVersionBook][serverPeerId], # FIXME: Not assigned + serverRemotePeerInfo.protoVersion, + "ipfs/0.1.0", + ) + clientPeerStore[KeyBook][serverPeerId] == serverRemotePeerInfo.publicKey + chainedComparison( + clientPeerStore[ConnectionBook][serverPeerId], + serverRemotePeerInfo.connectedness, + NOT_CONNECTED, + ) + chainedComparison( + clientPeerStore[DisconnectBook][serverPeerId], + serverRemotePeerInfo.disconnectTime, + 0, + ) + chainedComparison( + clientPeerStore[SourceBook][serverPeerId], + serverRemotePeerInfo.origin, + 
UnknownOrigin, + ) + chainedComparison( + clientPeerStore[DirectionBook][serverPeerId], + serverRemotePeerInfo.direction, + UnknownDirection, + ) + chainedComparison( + clientPeerStore[LastFailedConnBook][serverPeerId], + serverRemotePeerInfo.lastFailedConn, + Moment.init(0, Second), + ) + chainedComparison( + clientPeerStore[NumberFailedConnBook][serverPeerId], + serverRemotePeerInfo.numberFailedConn, + 0, + ) + + xasyncTest "Metadata Accuracy": + # Given a second server + let + server2Key = generateSecp256k1Key() + server2 = newTestWakuNode(server2Key, listenIp, listenPort) + server2RemotePeerInfo = server2.switch.peerInfo.toRemotePeerInfo() + server2PeerId = server2RemotePeerInfo.peerId + + await server2.start() + + # When the client connects to both servers + await client.connectToNodes(@[serverRemotePeerInfo, server2RemotePeerInfo]) + + # Then the peer store should contain both peers with the correct metadata + check: + # Server + clientPeerStore[AddressBook][serverPeerId] == serverRemotePeerInfo.addrs + clientPeerStore[ENRBook][serverPeerId].raw == + serverRemotePeerInfo.enr.get().raw + chainedComparison( + clientPeerStore[ProtoBook][serverPeerId], + serverRemotePeerInfo.protocols, + DEFAULT_PROTOCOLS, + ) + chainedComparison( + clientPeerStore[AgentBook][serverPeerId], # FIXME: Not assigned + serverRemotePeerInfo.agent, + "nim-libp2p/0.0.1", + ) + chainedComparison( + clientPeerStore[ProtoVersionBook][serverPeerId], # FIXME: Not assigned + serverRemotePeerInfo.protoVersion, + "ipfs/0.1.0", + ) + clientPeerStore[KeyBook][serverPeerId] == serverRemotePeerInfo.publicKey + chainedComparison( + clientPeerStore[ConnectionBook][serverPeerId], + serverRemotePeerInfo.connectedness, + NOT_CONNECTED, + ) + chainedComparison( + clientPeerStore[DisconnectBook][serverPeerId], + serverRemotePeerInfo.disconnectTime, + 0, + ) + chainedComparison( + clientPeerStore[SourceBook][serverPeerId], + serverRemotePeerInfo.origin, + UnknownOrigin, + ) + chainedComparison( + 
clientPeerStore[DirectionBook][serverPeerId], + serverRemotePeerInfo.direction, + UnknownDirection, + ) + chainedComparison( + clientPeerStore[LastFailedConnBook][serverPeerId], + serverRemotePeerInfo.lastFailedConn, + Moment.init(0, Second), + ) + chainedComparison( + clientPeerStore[NumberFailedConnBook][serverPeerId], + serverRemotePeerInfo.numberFailedConn, + 0, + ) + + # Server 2 + clientPeerStore[AddressBook][server2PeerId] == server2RemotePeerInfo.addrs + clientPeerStore[ENRBook][server2PeerId].raw == + server2RemotePeerInfo.enr.get().raw + chainedComparison( + clientPeerStore[ProtoBook][server2PeerId], + server2RemotePeerInfo.protocols, + DEFAULT_PROTOCOLS, + ) + chainedComparison( + clientPeerStore[AgentBook][server2PeerId], # FIXME: Not assigned + server2RemotePeerInfo.agent, + "nim-libp2p/0.0.1", + ) + chainedComparison( + clientPeerStore[ProtoVersionBook][server2PeerId], # FIXME: Not assigned + server2RemotePeerInfo.protoVersion, + "ipfs/0.1.0", + ) + clientPeerStore[KeyBook][serverPeerId] == server2RemotePeerInfo.publicKey + chainedComparison( + clientPeerStore[ConnectionBook][server2PeerId], + server2RemotePeerInfo.connectedness, + NOT_CONNECTED, + ) + chainedComparison( + clientPeerStore[DisconnectBook][server2PeerId], + server2RemotePeerInfo.disconnectTime, + 0, + ) + chainedComparison( + clientPeerStore[SourceBook][server2PeerId], + server2RemotePeerInfo.origin, + UnknownOrigin, + ) + chainedComparison( + clientPeerStore[DirectionBook][server2PeerId], + server2RemotePeerInfo.direction, + UnknownDirection, + ) + chainedComparison( + clientPeerStore[LastFailedConnBook][server2PeerId], + server2RemotePeerInfo.lastFailedConn, + Moment.init(0, Second), + ) + chainedComparison( + clientPeerStore[NumberFailedConnBook][server2PeerId], + server2RemotePeerInfo.numberFailedConn, + 0, + ) + + suite "Peer Connectivity States": + asyncTest "State Tracking & Transition": + # Given two correctly initialised nodes, but not connected + (await 
server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # Then their connectedness should be NotConnected + check: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.NotConnected + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.NotConnected + + # When connecting the client to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then both peers' connectedness should be Connected + check: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.Connected + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.Connected + + # When stopping the switches of either of the peers + # (Running just one stop is enough to change the states in both peers, but I'll leave both calls as an example) + await server.switch.stop() + await client.switch.stop() + + # Then both peers are gracefully disconnected, and turned to CanConnect + check: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.CanConnect + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.CanConnect + + # When trying to connect those peers to a non-existent peer + # Generate an invalid multiaddress, and patching both peerInfos with it so dialing fails + let + port = Port(8080) + ipAddress = IpAddress(family: IPv4, address_v4: [192, 168, 0, 1]) + multiaddress = + MultiAddress.init(ipAddress, IpTransportProtocol.tcpProtocol, port) + serverRemotePeerInfo.addrs = @[multiaddress] + clientRemotePeerInfo.addrs = @[multiaddress] + await client.connectToNodes(@[serverRemotePeerInfo]) + await server.connectToNodes(@[clientRemotePeerInfo]) + + # Then both peers should be marked as CannotConnect + check: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.CannotConnect + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.CannotConnect + + suite "Automatic 
Reconnection": + asyncTest "Automatic Reconnection Implementation": + # Given two correctly initialised nodes, that are available for reconnection + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await client.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await client.connectToNodes(@[serverRemotePeerInfo]) + + waitActive: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.Connected and + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.Connected + + await client.disconnectNode(serverRemotePeerInfo) + + waitActive: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.CanConnect and + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.CanConnect + + # When triggering the reconnection + await client.peerManager.reconnectPeers(WakuRelayCodec) + + # Then both peers should be marked as Connected + waitActive: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.Connected and + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.Connected + + ## Now let's do the same but with backoff period + await client.disconnectNode(serverRemotePeerInfo) + + waitActive: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.CanConnect and + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.CanConnect + + # When triggering a reconnection with a backoff period + let backoffPeriod = chronos.seconds(1) + let beforeReconnect = getTime().toUnixFloat() + await client.peerManager.reconnectPeers(WakuRelayCodec, backoffPeriod) + let reconnectDurationWithBackoffPeriod = + getTime().toUnixFloat() - beforeReconnect + + # Then both peers should be marked as Connected + check: + clientPeerStore.getPeer(serverPeerId).connectedness == + Connectedness.Connected + serverPeerStore.getPeer(clientPeerId).connectedness == + Connectedness.Connected + reconnectDurationWithBackoffPeriod > 
backoffPeriod.seconds.float + +suite "Handling Connections on Different Networks": + # TODO: Implement after discv5 and peer manager's interaction is understood + proc buildNode( + tcpPort: uint16, + udpPort: uint16, + bindIp: string = "0.0.0.0", + extIp: string = "127.0.0.1", + indices: seq[uint64] = @[], + recordFlags: Option[CapabilitiesBitfield] = none(CapabilitiesBitfield), + bootstrapRecords: seq[waku_enr.Record] = @[], + ): (WakuDiscoveryV5, Record) = + let + privKey = generateSecp256k1Key() + record = newTestEnrRecord( + privKey = privKey, + extIp = extIp, + tcpPort = tcpPort, + udpPort = udpPort, + indices = indices, + flags = recordFlags, + ) + node = newTestDiscv5( + privKey = privKey, + bindIp = bindIp, + tcpPort = tcpPort, + udpPort = udpPort, + record = record, + bootstrapRecords = bootstrapRecords, + ) + + (node, record) + + asyncTest "Same cluster but different shard": + # peer 1 is on cluster x - shard a ; peer 2 is on cluster x - shard b + # todo: Implement after discv5 and peer manager's interaction is understood + discard + + xasyncTest "Different cluster but same shard": + # peer 1 is on cluster x - shard a ; peer 2 is on cluster y - shard a + # todo: Implement after discv5 and peer manager's interaction is understood + discard + + xasyncTest "Different cluster and different shard": + # peer 1 is on cluster x - shard a ; peer 2 is on cluster y - shard b + # todo: Implement after discv5 and peer manager's interaction is understood + discard + + xasyncTest "Same cluster with multiple shards (one shared)": + # peer 1 is on cluster x - shard [a,b,c] ; peer 2 is on cluster x - shard [c, d, e] + # todo: Implement after discv5 and peer manager's interaction is understood + discard + +const baseDbPath = "./peers.test.db" +proc cleanupDb() = + os.removeFile(baseDbPath) + os.removeFile(baseDbPath & "-shm") + os.removeFile(baseDbPath & "-wal") + +suite "Persistence Check": + asyncTest "PeerStorage exists": + # Cleanup previous existing db + cleanupDb() + 
+ # Given an on-disk peer db exists, with a peer in it; and two connected nodes + let + clientPeerStorage = newTestWakuPeerStorage(some(baseDbPath)) + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + client = newTestWakuNode( + clientKey, listenIp, listenPort, peerStorage = clientPeerStorage + ) + serverPeerStore = server.peerManager.switch.peerStore + clientPeerStore = client.peerManager.switch.peerStore + + await allFutures(server.start(), client.start()) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + check: + clientPeerStore.peers().len == 1 + + await allFutures(server.stop(), client.stop()) + + # When initializing a new client using the prepopulated on-disk storage + let + newClientPeerStorage = newTestWakuPeerStorage(some(baseDbPath)) + newClient = newTestWakuNode( + clientKey, listenIp, listenPort, peerStorage = newClientPeerStorage + ) + newClientPeerStore = newClient.peerManager.switch.peerStore + + await newClient.start() + + # Then the new client should have the same peer in its peer store + check: + newClientPeerStore.peers().len == 1 + + # Cleanup + await newClient.stop() + cleanupDb() + + asyncTest "PeerStorage exists but no data": + # Cleanup previous existing db + cleanupDb() + + # When creating a new server with memory storage, and a client with on-disk peer storage + let + clientPeerStorage = newTestWakuPeerStorage(some(baseDbPath)) + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + client = newTestWakuNode( + clientKey, listenIp, listenPort, peerStorage = clientPeerStorage + ) + serverPeerStore = server.peerManager.switch.peerStore + clientPeerStore = client.peerManager.switch.peerStore + + await allFutures(server.start(), client.start()) + + # Then the client's peer store should be empty + check: + clientPeerStore.peers().len == 0 + + # Cleanup + 
await allFutures(server.stop(), client.stop()) + cleanupDb() + + asyncTest "PeerStorage not exists": + # When creating a new server and client, both without peer storage + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + client = newTestWakuNode(clientKey, listenIp, listenPort) + serverPeerStore = server.peerManager.switch.peerStore + clientPeerStore = client.peerManager.switch.peerStore + + await allFutures(server.start(), client.start()) + + # Then the client's peer store should be empty + check: + clientPeerStore.peers().len == 0 + + # Cleanup + await allFutures(server.stop(), client.stop()) + +suite "Mount Order": + var + client {.threadvar.}: WakuNode + clientRemotePeerInfo {.threadvar.}: RemotePeerInfo + clientPeerStore {.threadvar.}: PeerStore + + asyncSetup: + let clientKey = generateSecp256k1Key() + + client = newTestWakuNode(clientKey, listenIp, listenPort) + clientPeerStore = client.peerManager.switch.peerStore + + await client.start() + + clientRemotePeerInfo = client.switch.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await client.stop() + + asyncTest "protocol-start-info": + # Given a server that is initiaalised in the order defined in the title + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await server.start() + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + serverPeerId = serverRemotePeerInfo.peerId + + # When connecting to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the peer store should contain the peer with the mounted protocol + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == + DEFAULT_PROTOCOLS & @[WakuRelayCodec] + + # Cleanup + await server.stop() + + asyncTest "protocol-info-start": + # Given a 
server that is initialised in the order defined in the title + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + serverPeerId = serverRemotePeerInfo.peerId + await server.start() + + # When connecting to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the peer store should contain the peer with the mounted protocol + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == + DEFAULT_PROTOCOLS & @[WakuRelayCodec] + + # Cleanup + await server.stop() + + asyncTest "start-protocol-info": + # Given a server that is initialised in the order defined in the title + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + + await server.start() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + serverPeerId = serverRemotePeerInfo.peerId + + # When connecting to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the peer store should contain the peer with the mounted protocol + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == + DEFAULT_PROTOCOLS & @[WakuRelayCodec] + + # Cleanup + await server.stop() + + asyncTest "start-info-protocol": + # Given a server that is initialised in the order defined in the title + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + + await server.start() + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + serverPeerId = serverRemotePeerInfo.peerId + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # When connecting to the server + await 
client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the peer store should contain the peer with the mounted protocol + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == + DEFAULT_PROTOCOLS & @[WakuRelayCodec] + + # Cleanup + await server.stop() + + asyncTest "info-start-protocol": + # Given a server that is initialised in the order defined in the title + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + serverPeerId = serverRemotePeerInfo.peerId + await server.start() + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # When connecting to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the peer store should contain the peer but not the mounted protocol + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == DEFAULT_PROTOCOLS + + # Cleanup + await server.stop() + + asyncTest "info-protocol-start": + # Given a server that is initialised in the order defined in the title + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, listenIp, listenPort) + + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + serverPeerId = serverRemotePeerInfo.peerId + (await server.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await server.start() + + # When connecting to the server + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Then the peer store should contain the peer but not the mounted protocol + check: + clientPeerStore.peerExists(serverPeerId) + clientPeerStore.getPeer(serverPeerId).protocols == DEFAULT_PROTOCOLS + + # Cleanup + await server.stop() diff --git a/third-party/nwaku/tests/node/test_wakunode_relay_rln.nim b/third-party/nwaku/tests/node/test_wakunode_relay_rln.nim new file mode 100644 index 
0000000..4bc74fc --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_relay_rln.nim @@ -0,0 +1,757 @@ +{.used.} + +import + std/[tempfiles, strutils, options], + stew/results, + testutils/unittests, + chronos, + libp2p/switch, + libp2p/protocols/pubsub/pubsub, + eth/keys + +from std/times import epochTime + +import + ../../../waku/[ + node/waku_node, + node/peer_manager, + waku_core, + waku_node, + common/error_handling, + waku_rln_relay, + waku_rln_relay/rln, + waku_rln_relay/protocol_types, + waku_keystore/keystore, + ], + ../waku_store/store_utils, + ../waku_archive/archive_utils, + ../testlib/[wakucore, wakunode, testasync, futures, common, assertions], + ../resources/payloads, + ../waku_rln_relay/[utils_static, utils_onchain] + +from ../../waku/waku_noise/noise_utils import randomSeqByte + +proc buildRandomIdentityCredentials(): IdentityCredential = + # We generate a random identity credential (inter-value constrains are not enforced, otherwise we need to load e.g. zerokit RLN keygen) + let + idTrapdoor = randomSeqByte(rng[], 32) + idNullifier = randomSeqByte(rng[], 32) + idSecretHash = randomSeqByte(rng[], 32) + idCommitment = randomSeqByte(rng[], 32) + + IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + +proc addMembershipCredentialsToKeystore( + credentials: IdentityCredential, + keystorePath: string, + appInfo: AppInfo, + rlnRelayEthContractAddress: string, + password: string, + membershipIndex: uint, +): KeystoreResult[void] = + let + contract = MembershipContract(chainId: "0x539", address: rlnRelayEthContractAddress) + # contract = MembershipContract(chainId: "1337", address: rlnRelayEthContractAddress) + index = MembershipIndex(membershipIndex) + membershipCredential = KeystoreMembership( + membershipContract: contract, treeIndex: index, identityCredential: credentials + ) + + addMembershipCredentials( + path = keystorePath, + membership = 
membershipCredential, + password = password, + appInfo = appInfo, + ) + +proc fatalErrorVoidHandler(errMsg: string) {.gcsafe, raises: [].} = + discard + +proc getWakuRlnConfigOnChain*( + keystorePath: string, + appInfo: AppInfo, + rlnRelayEthContractAddress: string, + password: string, + credIndex: uint, + fatalErrorHandler: Option[OnFatalErrorHandler] = none(OnFatalErrorHandler), + ethClientAddress: Option[string] = none(string), +): WakuRlnConfig = + return WakuRlnConfig( + dynamic: true, + credIndex: some(credIndex), + ethContractAddress: rlnRelayEthContractAddress, + ethClientAddress: ethClientAddress.get(EthClient), + epochSizeSec: 1, + onFatalErrorAction: fatalErrorHandler.get(fatalErrorVoidHandler), + # If these are used, initialisation fails with "failed to mount WakuRlnRelay: could not initialize the group manager: the commitment does not have a membership" + creds: some(RlnRelayCreds(path: keystorePath, password: password)), + ) + +proc setupRelayWithOnChainRln*( + node: WakuNode, shards: seq[RelayShard], wakuRlnConfig: WakuRlnConfig +) {.async.} = + await node.mountRelay(shards) + await node.mountRlnRelay(wakuRlnConfig) + +suite "Waku RlnRelay - End to End - Static": + var + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + + var + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + + var + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + clientPeerId {.threadvar.}: PeerId + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + clientPeerId = client.switch.peerInfo.toRemotePeerInfo().peerId + + asyncTeardown: + await 
allFutures(client.stop(), server.stop()) + + suite "Mount": + asyncTest "Can't mount if relay is not mounted": + # Given Relay and RLN are not mounted + check: + server.wakuRelay == nil + server.wakuRlnRelay == nil + + # When RlnRelay is mounted + let catchRes = catch: + await server.setupStaticRln(1) + + # Then Relay and RLN are not mounted,and the process fails + check: + server.wakuRelay == nil + server.wakuRlnRelay == nil + catchRes.error()[].msg == + "WakuRelay protocol is not mounted, cannot mount WakuRlnRelay" + + asyncTest "Pubsub topics subscribed before mounting RlnRelay are added to it": + # Given the node enables Relay and Rln while subscribing to a pubsub topic + await server.setupRelayWithStaticRln(1.uint, @[pubsubTopic]) + await client.setupRelayWithStaticRln(2.uint, @[pubsubTopic]) + check: + server.wakuRelay != nil + server.wakuRlnRelay != nil + client.wakuRelay != nil + client.wakuRlnRelay != nil + + # And the nodes are connected + await client.connectToNodes(@[serverRemotePeerInfo]) + + # And the node registers the completion handler + var completionFuture = subscribeCompletionHandler(server, pubsubTopic) + + # When the client sends a valid RLN message + let isCompleted1 = + await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture) + + # Then the valid RLN message is relayed + check: + isCompleted1 + completionFuture.read() + + # When the client sends an invalid RLN message + completionFuture = newBoolFuture() + let isCompleted2 = await sendRlnMessageWithInvalidProof( + client, pubsubTopic, contentTopic, completionFuture + ) + + # Then the invalid RLN message is not relayed + check: + not isCompleted2 + + asyncTest "Pubsub topics subscribed after mounting RlnRelay are added to it": + # Given the node enables Relay and Rln without subscribing to a pubsub topic + await server.setupRelayWithStaticRln(1.uint, @[]) + await client.setupRelayWithStaticRln(2.uint, @[]) + + # And the nodes are connected + await 
client.connectToNodes(@[serverRemotePeerInfo]) + + # await sleepAsync(FUTURE_TIMEOUT) + # And the node registers the completion handler + var completionFuture = subscribeCompletionHandler(server, pubsubTopic) + + await sleepAsync(FUTURE_TIMEOUT) + # When the client sends a valid RLN message + let isCompleted1 = + await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture) + + # Then the valid RLN message is relayed + check: + isCompleted1 + completionFuture.read() + + # When the client sends an invalid RLN message + completionFuture = newBoolFuture() + let isCompleted2 = await sendRlnMessageWithInvalidProof( + client, pubsubTopic, contentTopic, completionFuture + ) + + # Then the invalid RLN message is not relayed + check: + not isCompleted2 + + asyncTest "rln-relay-max-message-limit testing": + let + nodekey = generateSecp256k1Key() + node = newTestWakuNode(nodekey, parseIpAddress("0.0.0.0"), Port(0)) + + await node.mountRelay(@[DefaultRelayShard]) + + let contractAddress = await uploadRLNContract(EthClient) + let wakuRlnConfig = WakuRlnConfig( + dynamic: true, + credIndex: some(0.uint), + userMessageLimit: 111, + ethClientAddress: EthClient, + ethContractAddress: $contractAddress, + chainId: 1337, + onFatalErrorAction: proc(errStr: string) = + raiseAssert errStr + , + ) + + try: + await node.mountRlnRelay(wakuRlnConfig) + except CatchableError as e: + check e.msg == + "failed to mount WakuRlnRelay: rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract" + + suite "Analysis of Bandwith Limitations": + asyncTest "Valid Payload Sizes": + # Given the node enables Relay and Rln while subscribing to a pubsub topic + await server.setupRelayWithStaticRln(1.uint, @[pubsubTopic]) + await client.setupRelayWithStaticRln(2.uint, @[pubsubTopic]) + + # And the nodes are connected + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Register Relay Handler + var completionFut = newPushHandlerFuture() + proc relayHandler( + topic: 
PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + if topic == pubsubTopic: + completionFut.complete((topic, msg)) + + let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic) + server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic" + + await sleepAsync(FUTURE_TIMEOUT) + + # Generate Messages + let + epoch = epochTime() + payload1b = getByteSequence(1) + payload1kib = getByteSequence(1024) + overhead: uint64 = 419 + payload150kib = getByteSequence((150 * 1024) - overhead) + payload150kibPlus = getByteSequence((150 * 1024) - overhead + 1) + + var + message1b = WakuMessage(payload: @payload1b, contentTopic: contentTopic) + message1kib = WakuMessage(payload: @payload1kib, contentTopic: contentTopic) + message150kib = WakuMessage(payload: @payload150kib, contentTopic: contentTopic) + message151kibPlus = + WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic) + + doAssert( + client.wakuRlnRelay + .appendRLNProof( + message1b, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 0) + ) + .isOk() + ) + doAssert( + client.wakuRlnRelay + .appendRLNProof( + message1kib, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 1) + ) + .isOk() + ) + doAssert( + client.wakuRlnRelay + .appendRLNProof( + message150kib, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 2) + ) + .isOk() + ) + doAssert( + client.wakuRlnRelay + .appendRLNProof( + message151kibPlus, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 3) + ) + .isOk() + ) + + # When sending the 1B message + discard await client.publish(some(pubsubTopic), message1b) + discard await completionFut.withTimeout(FUTURE_TIMEOUT_LONG) + + # Then the message is relayed + check completionFut.read() == (pubsubTopic, message1b) + # When sending the 1KiB message + completionFut = newPushHandlerFuture() # Reset Future + discard await client.publish(some(pubsubTopic), message1kib) + discard await 
completionFut.withTimeout(FUTURE_TIMEOUT_LONG) + + # Then the message is relayed + check completionFut.read() == (pubsubTopic, message1kib) + + # When sending the 150KiB message + completionFut = newPushHandlerFuture() # Reset Future + discard await client.publish(some(pubsubTopic), message150kib) + discard await completionFut.withTimeout(FUTURE_TIMEOUT_LONG) + + # Then the message is relayed + check completionFut.read() == (pubsubTopic, message150kib) + + # When sending the 150KiB plus message + completionFut = newPushHandlerFuture() # Reset Future + discard await client.publish(some(pubsubTopic), message151kibPlus) + + # Then the message is not relayed + check not await completionFut.withTimeout(FUTURE_TIMEOUT_LONG) + + asyncTest "Invalid Payload Sizes": + # Given the node enables Relay and Rln while subscribing to a pubsub topic + await server.setupRelayWithStaticRln(1.uint, @[pubsubTopic]) + await client.setupRelayWithStaticRln(2.uint, @[pubsubTopic]) + + # And the nodes are connected + await client.connectToNodes(@[serverRemotePeerInfo]) + + # Register Relay Handler + var completionFut = newPushHandlerFuture() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + if topic == pubsubTopic: + completionFut.complete((topic, msg)) + + let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic) + server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr: + assert false, "Failed to subscribe to pubsub topic" + + await sleepAsync(FUTURE_TIMEOUT) + + # Generate Messages + let + epoch = epochTime() + overhead: uint64 = 419 + payload150kibPlus = getByteSequence((150 * 1024) - overhead + 1) + + var message151kibPlus = + WakuMessage(payload: @payload150kibPlus, contentTopic: contentTopic) + + doAssert( + client.wakuRlnRelay + .appendRLNProof( + message151kibPlus, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 3) + ) + .isOk() + ) + + # When sending the 150KiB plus message + completionFut = newPushHandlerFuture() # 
Reset Future + discard await client.publish(some(pubsubTopic), message151kibPlus) + + # Then the message is not relayed + check not await completionFut.withTimeout(FUTURE_TIMEOUT_LONG) + +suite "Waku RlnRelay - End to End - OnChain": + let runAnvil {.used.} = runAnvil() + + var + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + + var + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + + var + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + clientPeerId {.threadvar.}: PeerId + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + clientPeerId = client.switch.peerInfo.toRemotePeerInfo().peerId + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + suite "Smart Contract Availability and Interaction": + asyncTest "Invalid format contract": + let + # One character missing + invalidContractAddress = "0x000000000000000000000000000000000000000" + keystorePath = + genTempPath("rln_keystore", "test_wakunode_relay_rln-no_valid_contract") + appInfo = RlnAppInfo + password = "1234" + wakuRlnConfig1 = getWakuRlnConfigOnChain( + keystorePath, appInfo, invalidContractAddress, password, 0 + ) + wakuRlnConfig2 = getWakuRlnConfigOnChain( + keystorePath, appInfo, invalidContractAddress, password, 1 + ) + idCredential = buildRandomIdentityCredentials() + persistRes = addMembershipCredentialsToKeystore( + idCredential, keystorePath, appInfo, invalidContractAddress, password, 1 + ) + assertResultOk(persistRes) + + # Given the node enables Relay and Rln while subscribing to a pubsub topic + try: + await 
server.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1) + assert false, "Relay should fail mounting when using an invalid contract" + except CatchableError: + assert true + + try: + await client.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2) + assert false, "Relay should fail mounting when using an invalid contract" + except CatchableError: + assert true + + asyncTest "Unregistered contract": + # This is a very slow test due to the retries RLN does. Might take upwards of 1m-2m to finish. + let + invalidContractAddress = "0x0000000000000000000000000000000000000000" + keystorePath = + genTempPath("rln_keystore", "test_wakunode_relay_rln-no_valid_contract") + appInfo = RlnAppInfo + password = "1234" + + # Connect to the eth client + discard await newWeb3(EthClient) + + var serverErrorFuture = Future[string].new() + proc serverFatalErrorHandler(errMsg: string) {.gcsafe, closure, raises: [].} = + serverErrorFuture.complete(errMsg) + + var clientErrorFuture = Future[string].new() + proc clientFatalErrorHandler(errMsg: string) {.gcsafe, closure, raises: [].} = + clientErrorFuture.complete(errMsg) + + let + wakuRlnConfig1 = getWakuRlnConfigOnChain( + keystorePath, + appInfo, + invalidContractAddress, + password, + 0, + some(serverFatalErrorHandler), + ) + wakuRlnConfig2 = getWakuRlnConfigOnChain( + keystorePath, + appInfo, + invalidContractAddress, + password, + 1, + some(clientFatalErrorHandler), + ) + + # Given the node enable Relay and Rln while subscribing to a pubsub topic. + # The withTimeout call is a workaround for the test not to terminate with an exception. + # However, it doesn't reduce the retries against the blockchain that the mounting rln process attempts (until it accepts failure). + # Note: These retries might be an unintended library issue. 
+ discard await server + .setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1) + .withTimeout(FUTURE_TIMEOUT) + discard await client + .setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2) + .withTimeout(FUTURE_TIMEOUT) + + check: + (await serverErrorFuture.waitForResult()).get() == + "Failed to get the storage index: No response from the Web3 provider" + (await clientErrorFuture.waitForResult()).get() == + "Failed to get the storage index: No response from the Web3 provider" + + asyncTest "Valid contract": + #[ + # Notes + ## Issues + ### TreeIndex + For some reason the calls to `getWakuRlnConfigOnChain` need to be made with `treeIndex` = 0 and 1, in that order. + But the registration needs to be made with 1 and 2. + #### Solutions + Requires investigation + ### Monkeypatching + Instead of running the idCredentials monkeypatch, passing the correct membershipIndex and keystorePath and keystorePassword should work. + #### Solutions + A) Using the register callback to fetch the correct membership + B) Using two different keystores, one for each rlnconfig. If there's only one key, it will fetch it regardless of membershipIndex. + ##### A + - Register is not calling callback even though register is happening, this should happen. + - This command should be working, but it doesn't on the current HEAD of the branch, it does work on master, which suggest there's something wrong with the branch. + - nim c -r --out:build/onchain -d:chronicles_log_level=NOTICE --verbosity:0 --hints:off -d:git_version="v0.27.0-rc.0-3-gaa9c30" -d:release --passL:librln_v0.3.7.a --passL:-lm tests/waku_rln_relay/test_rln_group_manager_onchain.nim && onchain_group_test + - All modified files are tests/*, which is a bit weird. Might be interesting re-creating the branch slowly, and checking out why this is happening. 
+ ##### B + Untested + ]# + + let + onChainGroupManager = await setup() + contractAddress = onChainGroupManager.ethContractAddress + keystorePath = + genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract") + appInfo = RlnAppInfo + password = "1234" + rlnInstance = onChainGroupManager.rlnInstance + assertResultOk(createAppKeystore(keystorePath, appInfo)) + + # Generate configs before registering the credentials. Otherwise the file gets cleared up. + let + wakuRlnConfig1 = + getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 0) + wakuRlnConfig2 = + getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 1) + + # Generate credentials + let + idCredential1 = rlnInstance.membershipKeyGen().get() + idCredential2 = rlnInstance.membershipKeyGen().get() + + discard await onChainGroupManager.init() + try: + # Register credentials in the chain + waitFor onChainGroupManager.register(idCredential1) + waitFor onChainGroupManager.register(idCredential2) + except Exception: + assert false, "Failed to register credentials: " & getCurrentExceptionMsg() + + # Add credentials to keystore + let + persistRes1 = addMembershipCredentialsToKeystore( + idCredential1, keystorePath, appInfo, contractAddress, password, 0 + ) + persistRes2 = addMembershipCredentialsToKeystore( + idCredential2, keystorePath, appInfo, contractAddress, password, 1 + ) + + assertResultOk(persistRes1) + assertResultOk(persistRes2) + + await onChainGroupManager.stop() + + # Given the node enables Relay and Rln while subscribing to a pubsub topic + await server.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1) + await client.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2) + + try: + (await server.wakuRlnRelay.groupManager.startGroupSync()).isOkOr: + raiseAssert $error + (await client.wakuRlnRelay.groupManager.startGroupSync()).isOkOr: + raiseAssert $error + + # Test Hack: Monkeypatch the idCredentials into the groupManager + 
server.wakuRlnRelay.groupManager.idCredentials = some(idCredential1) + client.wakuRlnRelay.groupManager.idCredentials = some(idCredential2) + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + # And the nodes are connected + let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + await client.connectToNodes(@[serverRemotePeerInfo]) + + # And the node registers the completion handler + var completionFuture = subscribeCompletionHandler(server, pubsubTopic) + + # When the client sends a valid RLN message + let isCompleted = + await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture) + + # Then the valid RLN message is relayed + check isCompleted + assertResultOk(await completionFuture.waitForResult()) + + asyncTest "Not enough gas": + let + onChainGroupManager = await setupOnchainGroupManager(amountWei = 0.u256) + contractAddress = onChainGroupManager.ethContractAddress + keystorePath = + genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract") + appInfo = RlnAppInfo + password = "1234" + rlnInstance = onChainGroupManager.rlnInstance + assertResultOk(createAppKeystore(keystorePath, appInfo)) + + # Generate credentials + let idCredential = rlnInstance.membershipKeyGen().get() + + discard await onChainGroupManager.init() + var errorFuture = Future[string].new() + onChainGroupManager.onFatalErrorAction = proc( + errMsg: string + ) {.gcsafe, closure.} = + errorFuture.complete(errMsg) + try: + # Register credentials in the chain + waitFor onChainGroupManager.register(idCredential) + assert false, "Should have failed to register credentials given there is 0 gas" + except Exception: + assert true + + check (await errorFuture.waitForResult()).get() == + "Failed to register the member: {\"code\":-32003,\"message\":\"Insufficient funds for gas * price + value\"}" + await onChainGroupManager.stop() + + suite "RLN Relay Configuration and Parameters": + asyncTest "RLN Relay Credential 
Path": + let + onChainGroupManager = await setup() + contractAddress = onChainGroupManager.ethContractAddress + keystorePath = + genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract") + appInfo = RlnAppInfo + password = "1234" + rlnInstance = onChainGroupManager.rlnInstance + assertResultOk(createAppKeystore(keystorePath, appInfo)) + + # Generate configs before registering the credentials. Otherwise the file gets cleared up. + let + wakuRlnConfig1 = + getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 0) + wakuRlnConfig2 = + getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 1) + + # Given the node enables Relay and Rln while subscribing to a pubsub topic + await server.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1) + await client.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2) + + try: + (await server.wakuRlnRelay.groupManager.startGroupSync()).isOkOr: + raiseAssert $error + (await client.wakuRlnRelay.groupManager.startGroupSync()).isOkOr: + raiseAssert $error + + # Test Hack: Monkeypatch the idCredentials into the groupManager + echo server.wakuRlnRelay.groupManager.idCredentials + echo client.wakuRlnRelay.groupManager.idCredentials + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + # And the nodes are connected + let serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + await client.connectToNodes(@[serverRemotePeerInfo]) + + # And the node registers the completion handler + var completionFuture = subscribeCompletionHandler(server, pubsubTopic) + + # When the client attempts to send a message + try: + let isCompleted = + await sendRlnMessage(client, pubsubTopic, contentTopic, completionFuture) + assert false, "Should have failed to send a message" + except AssertionDefect as e: + # Then the message is not relayed + assert e.msg.endsWith("identity credentials are not set") + + suite "RLN Relay Resilience, Security and 
Compatibility": + asyncTest "Key Management and Integrity": + let + onChainGroupManager = await setup() + contractAddress = onChainGroupManager.ethContractAddress + keystorePath = + genTempPath("rln_keystore", "test_wakunode_relay_rln-valid_contract") + appInfo = RlnAppInfo + password = "1234" + rlnInstance = onChainGroupManager.rlnInstance + assertResultOk(createAppKeystore(keystorePath, appInfo)) + + # Generate configs before registering the credentials. Otherwise the file gets cleared up. + let + wakuRlnConfig1 = + getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 0) + wakuRlnConfig2 = + getWakuRlnConfigOnChain(keystorePath, appInfo, contractAddress, password, 1) + + # Generate credentials + let + idCredential1 = rlnInstance.membershipKeyGen().get() + idCredential2 = rlnInstance.membershipKeyGen().get() + + discard await onChainGroupManager.init() + try: + # Register credentials in the chain + waitFor onChainGroupManager.register(idCredential1) + waitFor onChainGroupManager.register(idCredential2) + except Exception: + assert false, "Failed to register credentials: " & getCurrentExceptionMsg() + + # Add credentials to keystore + let + persistRes1 = addMembershipCredentialsToKeystore( + idCredential1, keystorePath, appInfo, contractAddress, password, 0 + ) + persistRes2 = addMembershipCredentialsToKeystore( + idCredential2, keystorePath, appInfo, contractAddress, password, 1 + ) + + assertResultOk(persistRes1) + assertResultOk(persistRes2) + + # await onChainGroupManager.stop() + + let + registryContract = onChainGroupManager.registryContract.get() + storageIndex = (await registryContract.usingStorageIndex().call()) + rlnContractAddress = await registryContract.storages(storageIndex).call() + contract = onChainGroupManager.ethRpc.get().contractSender( + RlnStorage, rlnContractAddress + ) + contract2 = onChainGroupManager.rlnContract.get() + + echo "###" + echo await (contract.memberExists(idCredential1.idCommitment.toUInt256()).call()) + 
echo await (contract.memberExists(idCredential2.idCommitment.toUInt256()).call()) + echo await (contract2.memberExists(idCredential1.idCommitment.toUInt256()).call()) + echo await (contract2.memberExists(idCredential2.idCommitment.toUInt256()).call()) + echo "###" + + ################################ + ## Terminating/removing Anvil + ################################ + + # We stop Anvil daemon + stopAnvil(runAnvil) diff --git a/third-party/nwaku/tests/node/test_wakunode_sharding.nim b/third-party/nwaku/tests/node/test_wakunode_sharding.nim new file mode 100644 index 0000000..5b99689 --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_sharding.nim @@ -0,0 +1,1027 @@ +{.used.} + +import std/[options, sequtils, tempfiles], testutils/unittests, chronos, chronicles + +import + std/[sequtils, tempfiles], + stew/byteutils, + testutils/unittests, + chronos, + libp2p/switch, + libp2p/protocols/pubsub/pubsub + +import + waku/[ + waku_core/topics/pubsub_topic, + waku_core/topics/sharding, + waku_store_legacy/common, + node/waku_node, + common/paging, + waku_core, + waku_store/common, + node/peer_manager, + waku_filter_v2/client, + ], + ../waku_relay/utils, + ../waku_archive/archive_utils, + ../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils] + +import waku_relay/protocol + +const + listenIp = parseIpAddress("0.0.0.0") + listenPort = Port(0) + +suite "Sharding": + var + server {.threadvar.}: WakuNode + client {.threadvar.}: WakuNode + + asyncSetup: + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, listenIp, listenPort) + client = newTestWakuNode(clientKey, listenIp, listenPort) + + await allFutures(server.mountRelay(), client.mountRelay()) + await allFutures(server.start(), client.start()) + + asyncTeardown: + await allFutures(server.stop(), client.stop()) + + suite "Static Sharding Functionality": + asyncTest "Shard Subscription and Peer Dialing": + # Given a 
connected server and client subscribed to the same pubsub shard + let + topic = "/waku/2/rs/0/1" + serverHandler = server.subscribeCompletionHandler(topic) + clientHandler = client.subscribeCompletionHandler(topic) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message in the subscribed topic + discard await client.publish( + some(topic), + WakuMessage(payload: "message1".toBytes(), contentTopic: "contentTopic"), + ) + + # Then the server receives the message + let + serverResult1 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult1 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + assertResultOk(serverResult1) + assertResultOk(clientResult1) + + # When the server publishes a message in the subscribed topic + serverHandler.reset() + clientHandler.reset() + discard await server.publish( + some(topic), + WakuMessage(payload: "message2".toBytes(), contentTopic: "contentTopic"), + ) + + # Then the client receives the message + let + serverResult2 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult2 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + assertResultOk(serverResult2) + assertResultOk(clientResult2) + + asyncTest "Exclusion of Non-Subscribed Service Nodes": + # When a connected server and client are subscribed to different pubsub shards + let + topic1 = "/waku/2/rs/0/1" + topic2 = "/waku/2/rs/0/2" + contentTopic = "myContentTopic" + + var + serverHandler = server.subscribeCompletionHandler(topic1) + clientHandler = client.subscribeCompletionHandler(topic2) + + # await sleepAsync(FUTURE_TIMEOUT) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When a message is published in the server's subscribed topic + discard await client.publish( + some(topic1), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic), + ) + let + serverResult1 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult1 = 
await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the server receives the message but the client does not + assertResultOk(serverResult1) + check clientResult1.isErr() + + # When the server publishes a message in the client's subscribed topic + serverHandler.reset() + clientHandler.reset() + let wakuMessage2 = + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic) + discard await server.publish(some(topic2), wakuMessage2) + let + serverResult2 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult2 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the client receives the message but the server does not + check serverResult2.isErr() + assertResultOk(clientResult2) + + suite "Automatic Sharding Mechanics": + asyncTest "Content Topic-Based Shard Dialing": + # Given a connected server and client subscribed to the same content topic (with two different formats) + let + contentTopicShort = "/toychat/2/huilong/proto" + contentTopicFull = "/0/toychat/2/huilong/proto" + pubsubTopic = "/waku/2/rs/0/58355" + # Automatically generated from the contentTopic above + + let + serverHandler = server.subscribeToContentTopicWithHandler(contentTopicShort) + clientHandler = client.subscribeToContentTopicWithHandler(contentTopicFull) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message + discard await client.publish( + some(pubsubTopic), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopicShort), + ) + let + serverResult1 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult1 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the server and client receive the message + assertResultOk(serverResult1) + assertResultOk(clientResult1) + + # When the server publishes a message + serverHandler.reset() + clientHandler.reset() + discard await server.publish( + some(pubsubTopic), + WakuMessage(payload: "message2".toBytes(), 
contentTopic: contentTopicFull), + ) + let + serverResult2 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult2 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the client and server receive the message + assertResultOk(serverResult2) + assertResultOk(clientResult2) + + asyncTest "Exclusion of Irrelevant Autosharded Topics": + # Given a connected server and client subscribed to different content topics + let + contentTopic1 = "/toychat/2/huilong/proto" + shard1 = "/waku/2/rs/0/58355" + shard12 = RelayShard.parse(contentTopic1) + # Automatically generated from the contentTopic above + contentTopic2 = "/0/toychat2/2/huilong/proto" + shard2 = "/waku/2/rs/0/23286" + # Automatically generated from the contentTopic above + + let + serverHandler = server.subscribeToContentTopicWithHandler(contentTopic1) + clientHandler = client.subscribeToContentTopicWithHandler(contentTopic2) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the server publishes a message in the server's subscribed topic + discard await server.publish( + some(shard1), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic1), + ) + let + serverResult1 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult1 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the server receives the message but the client does not + assertResultOk(serverResult1) + check clientResult1.isErr() + + # When the client publishes a message in the client's subscribed topic + serverHandler.reset() + clientHandler.reset() + discard await client.publish( + some(shard2), + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic2), + ) + let + serverResult2 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult2 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the client receives the message but the server does not + assertResultOk(clientResult2) + check serverResult2.isErr() + + suite 
"Application Layer Integration": + suite "App Protocol Compatibility": + asyncTest "relay": + # Given a connected server and client subscribed to the same pubsub topic + let + pubsubTopic = "/waku/2/rs/0/1" + serverHandler = server.subscribeCompletionHandler(pubsubTopic) + clientHandler = client.subscribeCompletionHandler(pubsubTopic) + + await sleepAsync(FUTURE_TIMEOUT) + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message + discard await client.publish( + some(pubsubTopic), + WakuMessage(payload: "message1".toBytes(), contentTopic: "myContentTopic"), + ) + let + serverResult1 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult1 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the server and client receive the message + assertResultOk(serverResult1) + assertResultOk(clientResult1) + + asyncTest "filter": + # Given a connected server and client using the same pubsub topic + await client.mountFilterClient() + await server.mountFilter() + + let pushHandlerFuture = newFuture[(string, WakuMessage)]() + proc messagePushHandler( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + pushHandlerFuture.complete((pubsubTopic, message)) + + client.wakuFilterClient.registerPushHandler(messagePushHandler) + let + pubsubTopic = "/waku/2/rs/0/1" + contentTopic = "myContentTopic" + subscribeResponse = await client.filterSubscribe( + some(pubsubTopic), + @[contentTopic], + server.switch.peerInfo.toRemotePeerInfo(), + ) + + assertResultOk(subscribeResponse) + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When a peer publishes a message (the client, for testing easeness) + let msg = WakuMessage(payload: "message".toBytes(), contentTopic: contentTopic) + await server.filterHandleMessage(pubsubTopic, msg) + + # Then the client receives the message + let pushHandlerResult = await 
pushHandlerFuture.waitForResult(FUTURE_TIMEOUT) + assertResultOk(pushHandlerResult) + + asyncTest "lightpush": + # Given a connected server and client subscribed to the same pubsub topic + client.mountLegacyLightPushClient() + await server.mountLightpush() + + let + topic = "/waku/2/rs/0/1" + clientHandler = client.subscribeCompletionHandler(topic) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When a peer publishes a message (the client, for testing easeness) + let + msg = + WakuMessage(payload: "message".toBytes(), contentTopic: "myContentTopic") + lightpublishRespnse = await client.legacyLightpushPublish( + some(topic), msg, server.switch.peerInfo.toRemotePeerInfo() + ) + + # Then the client receives the message + let clientResult = await clientHandler.waitForResult(FUTURE_TIMEOUT) + assertResultOk(clientResult) + + suite "Content Topic Filtering and Routing": + asyncTest "relay (automatic sharding filtering)": + # Given a connected server and client subscribed to the same content topic (with two different formats) + let + contentTopicShort = "/toychat/2/huilong/proto" + contentTopicFull = "/0/toychat/2/huilong/proto" + pubsubTopic = "/waku/2/rs/0/58355" + serverHandler = server.subscribeToContentTopicWithHandler(contentTopicShort) + clientHandler = client.subscribeToContentTopicWithHandler(contentTopicFull) + + await sleepAsync(FUTURE_TIMEOUT) + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message + discard await client.publish( + some(pubsubTopic), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopicShort), + ) + let + serverResult1 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult1 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the server and client receive the message + assertResultOk(serverResult1) + assertResultOk(clientResult1) + + # When the server publishes a message + serverHandler.reset() + 
clientHandler.reset() + discard await server.publish( + some(pubsubTopic), + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopicFull), + ) + let + serverResult2 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult2 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the server and client receive the message + assertResultOk(serverResult2) + assertResultOk(clientResult2) + + asyncTest "filter (automatic sharding filtering)": + # Given a connected server and client using the same content topic (with two different formats) + await client.mountFilterClient() + await server.mountFilter() + + let pushHandlerFuture = newFuture[(string, WakuMessage)]() + proc messagePushHandler( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + pushHandlerFuture.complete((pubsubTopic, message)) + + client.wakuFilterClient.registerPushHandler(messagePushHandler) + let + contentTopicShort = "/toychat/2/huilong/proto" + contentTopicFull = "/0/toychat/2/huilong/proto" + pubsubTopic = "/waku/2/rs/0/58355" + subscribeResponse1 = await client.filterSubscribe( + some(pubsubTopic), + @[contentTopicShort], + server.switch.peerInfo.toRemotePeerInfo(), + ) + + assertResultOk(subscribeResponse1) + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message + let msg = + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopicShort) + await server.filterHandleMessage(pubsubTopic, msg) + + # Then the client receives the message + let pushHandlerResult = await pushHandlerFuture.waitForResult(FUTURE_TIMEOUT) + assertResultOk(pushHandlerResult) + check pushHandlerResult.get() == (pubsubTopic, msg) + + # Given the subscription is cleared and a new subscription is made + let + unsubscribeResponse = + await client.filterUnsubscribeAll(server.switch.peerInfo.toRemotePeerInfo()) + subscribeResponse2 = await client.filterSubscribe( + some(pubsubTopic), + 
@[contentTopicFull], + server.switch.peerInfo.toRemotePeerInfo(), + ) + + assertResultOk(unsubscribeResponse) + assertResultOk(subscribeResponse2) + + # When the client publishes a message + pushHandlerFuture.reset() + let msg2 = + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopicFull) + await server.filterHandleMessage(pubsubTopic, msg2) + + # Then the client receives the message + let pushHandlerResult2 = await pushHandlerFuture.waitForResult(FUTURE_TIMEOUT) + assertResultOk(pushHandlerResult2) + check pushHandlerResult2.get() == (pubsubTopic, msg2) + + asyncTest "lightpush (automatic sharding filtering)": + # Given a connected server and client using the same content topic (with two different formats) + client.mountLegacyLightPushClient() + await server.mountLightpush() + + let + contentTopicShort = "/toychat/2/huilong/proto" + contentTopicFull = "/0/toychat/2/huilong/proto" + pubsubTopic = "/waku/2/rs/0/58355" + clientHandler = client.subscribeToContentTopicWithHandler(contentTopicShort) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When a peer publishes a message (the client, for testing easeness) + let + msg = + WakuMessage(payload: "message".toBytes(), contentTopic: contentTopicFull) + lightpublishRespnse = await client.legacyLightpushPublish( + some(pubsubTopic), msg, server.switch.peerInfo.toRemotePeerInfo() + ) + + # Then the client receives the message + let clientResult = await clientHandler.waitForResult(FUTURE_TIMEOUT) + assertResultOk(clientResult) + + xasyncTest "store (automatic sharding filtering)": + # Given one archive with two sets of messages using the same content topic (with two different formats) + let + timeOrigin = now() + contentTopicShort = "/toychat/2/huilong/proto" + contentTopicFull = "/0/toychat/2/huilong/proto" + pubsubTopic = "/waku/2/rs/0/58355" + archiveMessages1 = + @[ + fakeWakuMessage( + @[byte 00], ts = ts(00, timeOrigin), contentTopic = contentTopicShort + ) + ] + 
archiveMessages2 = + @[ + fakeWakuMessage( + @[byte 01], ts = ts(10, timeOrigin), contentTopic = contentTopicFull + ) + ] + archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages1) + discard archiveDriver.put(pubsubTopic, archiveMessages2) + let mountArchiveResult = server.mountArchive(archiveDriver) + assertResultOk(mountArchiveResult) + + waitFor server.mountStore() + client.mountStoreClient() + + # Given one query for each content topic format + let + historyQuery1 = HistoryQuery( + contentTopics: @[contentTopicShort], + direction: PagingDirection.Forward, + pageSize: 3, + ) + historyQuery2 = HistoryQuery( + contentTopics: @[contentTopicFull], + direction: PagingDirection.Forward, + pageSize: 3, + ) + + # When the client queries the server for the messages + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo) + queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + assertResultOk(queryResponse1) + assertResultOk(queryResponse2) + + # Then the responses of both queries should contain all the messages + check: + queryResponse1.get().messages == archiveMessages1 & archiveMessages2 + queryResponse2.get().messages == archiveMessages1 & archiveMessages2 + + asyncTest "relay - exclusion (automatic sharding filtering)": + # Given a connected server and client subscribed to different content topics + let + contentTopic1 = "/toychat/2/huilong/proto" + pubsubTopic1 = "/waku/2/rs/0/58355" + # Automatically generated from the contentTopic above + contentTopic2 = "/0/toychat2/2/huilong/proto" + pubsubTopic2 = "/waku/2/rs/0/23286" + # Automatically generated from the contentTopic above + serverHandler = server.subscribeToContentTopicWithHandler(contentTopic1) + clientHandler = client.subscribeToContentTopicWithHandler(contentTopic2) + + await sleepAsync(FUTURE_TIMEOUT) + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + 
+ # When the client publishes a message in the client's subscribed topic + discard await client.publish( + some(pubsubTopic2), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic2), + ) + let + serverResult1 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult1 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the client receives the message but the server does not + check serverResult1.isErr() + assertResultOk(clientResult1) + + # When the server publishes a message in the server's subscribed topic + serverHandler.reset() + clientHandler.reset() + discard await server.publish( + some(pubsubTopic1), + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic1), + ) + let + serverResult2 = await serverHandler.waitForResult(FUTURE_TIMEOUT) + clientResult2 = await clientHandler.waitForResult(FUTURE_TIMEOUT) + + # Then the server receives the message but the client does not + assertResultOk(serverResult2) + check clientResult2.isErr() + + asyncTest "filter - exclusion (automatic sharding filtering)": + # Given a connected server and client using different content topics + await client.mountFilterClient() + await server.mountFilter() + + let pushHandlerFuture = newFuture[(string, WakuMessage)]() + proc messagePushHandler( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + pushHandlerFuture.complete((pubsubTopic, message)) + + client.wakuFilterClient.registerPushHandler(messagePushHandler) + let + contentTopic1 = "/toychat/2/huilong/proto" + pubsubTopic1 = "/waku/2/rs/0/58355" + # Automatically generated from the contentTopic above + contentTopic2 = "/0/toychat2/2/huilong/proto" + pubsubTopic2 = "/waku/2/rs/0/23286" + # Automatically generated from the contentTopic above + subscribeResponse1 = await client.filterSubscribe( + some(pubsubTopic1), + @[contentTopic1], + server.switch.peerInfo.toRemotePeerInfo(), + ) + + assertResultOk(subscribeResponse1) + await 
client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the server publishes a message in the server's subscribed topic + let msg = + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic2) + await server.filterHandleMessage(pubsubTopic2, msg) + + # Then the client does not receive the message + let pushHandlerResult = await pushHandlerFuture.waitForResult(FUTURE_TIMEOUT) + check pushHandlerResult.isErr() + + asyncTest "lightpush - exclusion (automatic sharding filtering)": + # Given a connected server and client using different content topics + client.mountLegacyLightPushClient() + await server.mountLightpush() + + let + contentTopic1 = "/toychat/2/huilong/proto" + pubsubTopic1 = "/waku/2/rs/0/58355" + # Automatically generated from the contentTopic above + contentTopic2 = "/0/toychat2/2/huilong/proto" + pubsubTopic2 = "/waku/2/rs/0/23286" + # Automatically generated from the contentTopic above + clientHandler = client.subscribeToContentTopicWithHandler(contentTopic1) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When a peer publishes a message in the server's subscribed topic (the client, for testing easeness) + let + msg = WakuMessage(payload: "message".toBytes(), contentTopic: contentTopic2) + lightpublishRespnse = await client.legacyLightpushPublish( + some(pubsubTopic2), msg, server.switch.peerInfo.toRemotePeerInfo() + ) + + # Then the client does not receive the message + let clientResult = await clientHandler.waitForResult(FUTURE_TIMEOUT) + check clientResult.isErr() + + asyncTest "store - exclusion (automatic sharding filtering)": + # Given one archive with two sets of messages using different content topics + let + timeOrigin = now() + contentTopic1 = "/toychat/2/huilong/proto" + pubsubTopic1 = "/waku/2/rs/0/58355" + # Automatically generated from the contentTopic above + contentTopic2 = "/0/toychat2/2/huilong/proto" + pubsubTopic2 = "/waku/2/rs/0/23286" + # Automatically 
generated from the contentTopic above + archiveMessages1 = + @[ + fakeWakuMessage( + @[byte 00], ts = ts(00, timeOrigin), contentTopic = contentTopic1 + ) + ] + archiveMessages2 = + @[ + fakeWakuMessage( + @[byte 01], ts = ts(10, timeOrigin), contentTopic = contentTopic2 + ) + ] + archiveDriver = newArchiveDriverWithMessages(pubsubTopic1, archiveMessages1) + discard archiveDriver.put(pubsubTopic2, archiveMessages2) + let mountArchiveResult = server.mountArchive(archiveDriver) + assertResultOk(mountArchiveResult) + + waitFor server.mountStore() + client.mountStoreClient() + + # Given one query for each content topic + let + historyQuery1 = HistoryQuery( + contentTopics: @[contentTopic1], + direction: PagingDirection.Forward, + pageSize: 2, + ) + historyQuery2 = HistoryQuery( + contentTopics: @[contentTopic2], + direction: PagingDirection.Forward, + pageSize: 2, + ) + + # When the client queries the server for the messages + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo) + queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + assertResultOk(queryResponse1) + assertResultOk(queryResponse2) + + # Then each response should contain only the messages of the corresponding content topic + check: + queryResponse1.get().messages == archiveMessages1 + queryResponse2.get().messages == archiveMessages2 + + suite "Specific Tests": + asyncTest "Configure Node with Multiple PubSub Topics": + # Given a connected server and client subscribed to multiple pubsub topics + let + contentTopic = "myContentTopic" + topic1 = "/waku/2/rs/0/1" + topic2 = "/waku/2/rs/0/2" + serverHandler1 = server.subscribeCompletionHandler(topic1) + serverHandler2 = server.subscribeCompletionHandler(topic2) + clientHandler1 = client.subscribeCompletionHandler(topic1) + clientHandler2 = client.subscribeCompletionHandler(topic2) + + await 
client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message in the topic1 + discard await client.publish( + some(topic1), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic), + ) + + # Then the server and client receive the message in topic1's handlers, but not in topic2's + assertResultOk(await serverHandler1.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await clientHandler1.waitForResult(FUTURE_TIMEOUT)) + check: + (await serverHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + + # When the client publishes a message in the topic2 + serverHandler1.reset() + serverHandler2.reset() + clientHandler1.reset() + clientHandler2.reset() + discard await client.publish( + some(topic2), + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic), + ) + + # Then the server and client receive the message in topic2's handlers, but not in topic1's + assertResultOk(await serverHandler2.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await clientHandler2.waitForResult(FUTURE_TIMEOUT)) + check: + (await serverHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + + asyncTest "Configure Node with Multiple Content Topics": + # Given a connected server and client subscribed to multiple content topics + let + contentTopic1 = "/toychat/2/huilong/proto" + pubsubTopic1 = "/waku/2/rs/0/58355" + # Automatically generated from the contentTopic above + contentTopic2 = "/0/toychat2/2/huilong/proto" + pubsubTopic2 = "/waku/2/rs/0/23286" + # Automatically generated from the contentTopic above + serverHandler1 = server.subscribeToContentTopicWithHandler(contentTopic1) + serverHandler2 = server.subscribeToContentTopicWithHandler(contentTopic2) + clientHandler1 = client.subscribeToContentTopicWithHandler(contentTopic1) + clientHandler2 = client.subscribeToContentTopicWithHandler(contentTopic2) + 
+ await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message in contentTopic1 + discard await client.publish( + some(pubsubTopic1), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic1), + ) + + # Then the server and client receive the message in contentTopic1's handlers, but not in contentTopic2's + assertResultOk(await serverHandler1.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await clientHandler1.waitForResult(FUTURE_TIMEOUT)) + check: + (await serverHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + + # When the client publishes a message in contentTopic2 + serverHandler1.reset() + serverHandler2.reset() + clientHandler1.reset() + clientHandler2.reset() + discard await client.publish( + some(pubsubTopic2), + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic2), + ) + + # Then the server and client receive the message in contentTopic2's handlers, but not in contentTopic1's + assertResultOk(await serverHandler2.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await clientHandler2.waitForResult(FUTURE_TIMEOUT)) + check: + (await serverHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + + asyncTest "Configure Node combining Multiple Pubsub and Content Topics": + # Given a connected server and client subscribed to multiple pubsub topics and content topics + let + contentTopic = "myContentTopic" + pubsubTopic1 = "/waku/2/rs/0/1" + pubsubTopic2 = "/waku/2/rs/0/2" + serverHandler1 = server.subscribeCompletionHandler(pubsubTopic1) + clientHandler1 = client.subscribeCompletionHandler(pubsubTopic1) + serverHandler2 = server.subscribeCompletionHandler(pubsubTopic2) + clientHandler2 = client.subscribeCompletionHandler(pubsubTopic2) + contentTopic3 = "/toychat/2/huilong/proto" + pubsubTopic3 = "/waku/2/rs/0/58355" + # Automatically generated from the 
contentTopic above + contentTopic4 = "/0/toychat2/2/huilong/proto" + pubsubTopic4 = "/waku/2/rs/0/23286" + # Automatically generated from the contentTopic above + serverHandler3 = server.subscribeToContentTopicWithHandler(contentTopic3) + clientHandler3 = client.subscribeToContentTopicWithHandler(contentTopic3) + serverHandler4 = server.subscribeToContentTopicWithHandler(contentTopic4) + clientHandler4 = client.subscribeToContentTopicWithHandler(contentTopic4) + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message in the topic1 + discard await client.publish( + some(pubsubTopic1), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic), + ) + + # Then the server and client receive the message in topic1's handlers, but not in topic234's + assertResultOk(await serverHandler1.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await clientHandler1.waitForResult(FUTURE_TIMEOUT)) + check: + (await serverHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler3.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler3.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler4.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler4.waitForResult(FUTURE_TIMEOUT)).isErr() + + # When the client publishes a message in the topic2 + serverHandler1.reset() + clientHandler1.reset() + serverHandler2.reset() + clientHandler2.reset() + serverHandler3.reset() + clientHandler3.reset() + serverHandler4.reset() + clientHandler4.reset() + discard await client.publish( + some(pubsubTopic2), + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic), + ) + + # Then the server and client receive the message in topic2's handlers, but not in topic134's + assertResultOk(await serverHandler2.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await clientHandler2.waitForResult(FUTURE_TIMEOUT)) + check: + (await 
serverHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler3.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler3.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler4.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler4.waitForResult(FUTURE_TIMEOUT)).isErr() + + # When the client publishes a message in the topic3 + serverHandler1.reset() + clientHandler1.reset() + serverHandler2.reset() + clientHandler2.reset() + serverHandler3.reset() + clientHandler3.reset() + serverHandler4.reset() + clientHandler4.reset() + discard await client.publish( + some(pubsubTopic3), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic3), + ) + + # Then the server and client receive the message in topic3's handlers, but not in topic124's + assertResultOk(await serverHandler3.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await clientHandler3.waitForResult(FUTURE_TIMEOUT)) + check: + (await serverHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler4.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler4.waitForResult(FUTURE_TIMEOUT)).isErr() + + # When the client publishes a message in the topic4 + serverHandler1.reset() + clientHandler1.reset() + serverHandler2.reset() + clientHandler2.reset() + serverHandler3.reset() + clientHandler3.reset() + serverHandler4.reset() + clientHandler4.reset() + discard await client.publish( + some(pubsubTopic4), + WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic4), + ) + + # Then the server and client receive the message in topic4's handlers, but not in topic123's + assertResultOk(await serverHandler4.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await 
clientHandler4.waitForResult(FUTURE_TIMEOUT)) + check: + (await serverHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler3.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler3.waitForResult(FUTURE_TIMEOUT)).isErr() + + asyncTest "Protocol with Unconfigured PubSub Topic Fails": + # Given a + let + contentTopic = "myContentTopic" + topic = "/waku/2/rs/0/1" + # Using a different topic to simulate "unconfigured" pubsub topic + # but to have a handler (and be able to assert the test) + serverHandler = server.subscribeCompletionHandler("/waku/2/rs/0/0") + clientHandler = client.subscribeCompletionHandler("/waku/2/rs/0/0") + + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message in the topic + discard await client.publish( + some(topic), + WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic), + ) + + # Then the server and client don't receive the message + check: + (await serverHandler.waitForResult(FUTURE_TIMEOUT)).isErr() + (await clientHandler.waitForResult(FUTURE_TIMEOUT)).isErr() + + asyncTest "Waku LightPush Sharding (Static Sharding)": + # Given a connected server and client using two different pubsub topics + client.mountLegacyLightPushClient() + await server.mountLightpush() + + # Given a connected server and client subscribed to multiple pubsub topics + let + contentTopic = "myContentTopic" + topic1 = "/waku/2/rs/0/1" + topic2 = "/waku/2/rs/0/2" + serverHandler1 = server.subscribeCompletionHandler(topic1) + serverHandler2 = server.subscribeCompletionHandler(topic2) + clientHandler1 = client.subscribeCompletionHandler(topic1) + clientHandler2 = client.subscribeCompletionHandler(topic2) + + await sleepAsync(FUTURE_TIMEOUT) + + await 
client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When a peer publishes a message (the client, for testing easeness) in topic1 + let + msg1 = WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic) + lightpublishRespnse = await client.legacyLightpushPublish( + some(topic1), msg1, server.switch.peerInfo.toRemotePeerInfo() + ) + + # Then the server and client receive the message in topic1's handlers, but not in topic2's + assertResultOk(await clientHandler1.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await serverHandler1.waitForResult(FUTURE_TIMEOUT)) + check: + (await clientHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler2.waitForResult(FUTURE_TIMEOUT)).isErr() + + # When a peer publishes a message (the client, for testing easeness) in topic2 + serverHandler1.reset() + serverHandler2.reset() + clientHandler1.reset() + clientHandler2.reset() + let + msg2 = WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic) + lightpublishResponse2 = await client.legacyLightpushPublish( + some(topic2), msg2, server.switch.peerInfo.toRemotePeerInfo() + ) + + # Then the server and client receive the message in topic2's handlers, but not in topic1's + assertResultOk(await clientHandler2.waitForResult(FUTURE_TIMEOUT)) + assertResultOk(await serverHandler2.waitForResult(FUTURE_TIMEOUT)) + check: + (await clientHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + (await serverHandler1.waitForResult(FUTURE_TIMEOUT)).isErr() + + asyncTest "Waku Filter Sharding (Static Sharding)": + # Given a connected server and client using two different pubsub topics + await client.mountFilterClient() + await server.mountFilter() + + let + contentTopic = "myContentTopic" + topic1 = "/waku/2/rs/0/1" + topic2 = "/waku/2/rs/0/2" + + let + pushHandlerFuture1 = newFuture[(string, WakuMessage)]() + pushHandlerFuture2 = newFuture[(string, WakuMessage)]() + + proc messagePushHandler1( + pubsubTopic: PubsubTopic, message: WakuMessage + 
): Future[void] {.async, closure, gcsafe.} = + if topic1 == pubsubTopic: + pushHandlerFuture1.complete((pubsubTopic, message)) + + proc messagePushHandler2( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + if topic2 == pubsubTopic: + pushHandlerFuture2.complete((pubsubTopic, message)) + + client.wakuFilterClient.registerPushHandler(messagePushHandler1) + client.wakuFilterClient.registerPushHandler(messagePushHandler2) + + let + subscribeResponse1 = await client.filterSubscribe( + some(topic1), @[contentTopic], server.switch.peerInfo.toRemotePeerInfo() + ) + subscribeResponse2 = await client.filterSubscribe( + some(topic2), @[contentTopic], server.switch.peerInfo.toRemotePeerInfo() + ) + + assertResultOk(subscribeResponse1) + assertResultOk(subscribeResponse2) + await client.connectToNodes(@[server.switch.peerInfo.toRemotePeerInfo()]) + + # When the client publishes a message in topic1 + let msg = WakuMessage(payload: "message1".toBytes(), contentTopic: contentTopic) + await server.filterHandleMessage(topic1, msg) + + # Then the client receives the message in topic1's handler, but not in topic2's + let pushHandlerResult = await pushHandlerFuture1.waitForResult(FUTURE_TIMEOUT) + assertResultOk(pushHandlerResult) + check: + pushHandlerResult.get() == (topic1, msg) + (await pushHandlerFuture2.waitForResult(FUTURE_TIMEOUT)).isErr() + + # Given the futures are reset + pushHandlerFuture1.reset() + pushHandlerFuture2.reset() + + # When the client publishes a message in topic2 + let msg2 = WakuMessage(payload: "message2".toBytes(), contentTopic: contentTopic) + await server.filterHandleMessage(topic2, msg2) + + # Then the client receives the message in topic2's handler, but not in topic1's + let pushHandlerResult2 = await pushHandlerFuture2.waitForResult(FUTURE_TIMEOUT) + assertResultOk(pushHandlerResult2) + check: + pushHandlerResult2.get() == (topic2, msg2) + (await pushHandlerFuture1.waitForResult(FUTURE_TIMEOUT)).isErr() 
+ + asyncTest "Waku Store Sharding (Static Sharding)": + # Given one archive with two sets of messages using two different pubsub topics + let + timeOrigin = now() + topic1 = "/waku/2/rs/0/1" + topic2 = "/waku/2/rs/0/2" + archiveMessages1 = @[fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin))] + archiveMessages2 = @[fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin))] + archiveDriver = newArchiveDriverWithMessages(topic1, archiveMessages1) + discard archiveDriver.put(topic2, archiveMessages2) + let mountArchiveResult = server.mountArchive(archiveDriver) + assertResultOk(mountArchiveResult) + + waitFor server.mountStore() + client.mountStoreClient() + + # Given one query for each pubsub topic + let + historyQuery1 = HistoryQuery( + pubsubTopic: some(topic1), direction: PagingDirection.Forward, pageSize: 2 + ) + historyQuery2 = HistoryQuery( + pubsubTopic: some(topic2), direction: PagingDirection.Forward, pageSize: 2 + ) + + # When the client queries the server for the messages + let + serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo() + queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo) + queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + assertResultOk(queryResponse1) + assertResultOk(queryResponse2) + + # Then each response should contain only the messages of the corresponding pubsub topic + check: + queryResponse1.get().messages == archiveMessages1[0 ..< 1] + queryResponse2.get().messages == archiveMessages2[0 ..< 1] diff --git a/third-party/nwaku/tests/node/test_wakunode_store.nim b/third-party/nwaku/tests/node/test_wakunode_store.nim new file mode 100644 index 0000000..00dbfb7 --- /dev/null +++ b/third-party/nwaku/tests/node/test_wakunode_store.nim @@ -0,0 +1,1338 @@ +{.used.} + +import std/[options, sequtils, sets], testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + common/paging, + node/waku_node, + node/peer_manager, + waku_core, + waku_core/message/digest, + waku_store, + 
waku_archive, + ], + ../waku_store/store_utils, + ../waku_archive/archive_utils, + ../testlib/[wakucore, wakunode, testasync, testutils] + +suite "Waku Store - End to End - Sorted Archive": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var archiveMessages {.threadvar.}: seq[WakuMessageKeyValue] + var storeQuery {.threadvar.}: StoreQueryRequest + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var archiveDriver {.threadvar.}: ArchiveDriver + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + var clientPeerId {.threadvar.}: PeerId + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + let timeOrigin = now() + let messages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + archiveMessages = messages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), + message: some(it), + pubsubTopic: some(pubsubTopic), + ) + ) + + storeQuery = StoreQueryRequest( + includeData: true, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.Forward, + paginationLimit: some(uint64(5)), + ) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, 
parseIpAddress("0.0.0.0"), Port(0)) + + archiveDriver = newArchiveDriverWithMessages(pubsubTopic, messages) + let mountArchiveResult = server.mountArchive(archiveDriver) + assert mountArchiveResult.isOk() + + await server.mountStore() + client.mountStoreClient() + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + clientPeerId = client.peerInfo.toRemotePeerInfo().peerId + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + suite "Message Pagination": + asyncTest "Forward Pagination": + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + var otherHistoryQuery = StoreQueryRequest( + includeData: true, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationCursor: queryResponse.get().paginationCursor, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + # When making the next history query + let otherQueryResponse = + await client.query(otherHistoryQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + otherQueryResponse.get().messages == archiveMessages[5 ..< 10] + + asyncTest "Backward Pagination": + # Given the history query is backward + storeQuery.paginationForward = PagingDirection.BACKWARD + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[5 ..< 10] + + # Given the next query + var nextHistoryQuery = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.BACKWARD, + paginationLimit: 
some(uint64(5)), + ) + + # When making the next history query + let otherQueryResponse = + await client.query(nextHistoryQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + otherQueryResponse.get().messages == archiveMessages[0 ..< 5] + + suite "Pagination with Differente Page Sizes": + asyncTest "Pagination with Small Page Size": + # Given the first query (1/5) + storeQuery.paginationLimit = some(uint64(2)) + + # When making a history query + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 2] + + # Given the next query (2/5) + let historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse1.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[2 ..< 4] + + # Given the next query (3/5) + let historyQuery3 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse2.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), + ) + + # When making the next history query + let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse3.get().messages == archiveMessages[4 ..< 6] + + # Given the next query (4/5) + let historyQuery4 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse3.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: 
PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), + ) + + # When making the next history query + let queryResponse4 = await client.query(historyQuery4, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse4.get().messages == archiveMessages[6 ..< 8] + + # Given the next query (5/5) + let historyQuery5 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse4.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(2)), + ) + + # When making the next history query + let queryResponse5 = await client.query(historyQuery5, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse5.get().messages == archiveMessages[8 ..< 10] + + asyncTest "Pagination with Large Page Size": + # Given the first query (1/2) + storeQuery.paginationLimit = some(uint64(8)) + + # When making a history query + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 8] + + # Given the next query (2/2) + let historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse1.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(8)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[8 ..< 10] + + asyncTest "Pagination with Excessive Page Size": + # Given the first query (1/1) + storeQuery.paginationLimit = some(uint64(100)) + + # When making a history query + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the 
response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 10] + + asyncTest "Pagination with Mixed Page Size": + # Given the first query (1/3) + storeQuery.paginationLimit = some(uint64(2)) + + # When making a history query + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse1.get().messages == archiveMessages[0 ..< 2] + + # Given the next query (2/3) + let historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse1.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(4)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[2 ..< 6] + + # Given the next query (3/3) + let historyQuery3 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse2.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(6)), + ) + + # When making the next history query + let queryResponse3 = await client.query(historyQuery3, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse3.get().messages == archiveMessages[6 ..< 10] + + asyncTest "Pagination with Zero Page Size (Behaves as DefaultPageSize)": + # Given a message list of size higher than the default page size + let currentStoreLen = uint((await archiveDriver.getMessagesCount()).get()) + assert archive.DefaultPageSize > currentStoreLen, + "This test requires a store with more than (DefaultPageSize) messages" + let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 + + let lastMessageTimestamp = + 
archiveMessages[archiveMessages.len - 1].message.get().timestamp + var extraMessages: seq[WakuMessage] = @[] + for i in 0 ..< missingMessagesAmount: + let + timestampOffset = 10 * int(i + 1) + # + 1 to avoid collision with existing messages + message: WakuMessage = + fakeWakuMessage(@[byte i], ts = ts(timestampOffset, lastMessageTimestamp)) + extraMessages.add(message) + discard archiveDriver.put(pubsubTopic, extraMessages) + + let totalMessages = + archiveMessages & + extraMessages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), + message: some(it), + pubsubTopic: some(pubsubTopic), + ) + ) + + # Given the a query with zero page size (1/2) + storeQuery.paginationLimit = none(uint64) + + # When making a history query + let queryResponse1 = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the archive.DefaultPageSize messages + check: + queryResponse1.get().messages == totalMessages[0 ..< archive.DefaultPageSize] + + # Given the next query (2/2) + let historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse1.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: none(uint64), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the remaining messages + check: + queryResponse2.get().messages == + totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5] + + asyncTest "Pagination with Default Page Size": + # Given a message list of size higher than the default page size + let currentStoreLen = uint((await archiveDriver.getMessagesCount()).get()) + assert archive.DefaultPageSize > currentStoreLen, + "This test requires a store with more than (DefaultPageSize) messages" + let missingMessagesAmount = archive.DefaultPageSize - currentStoreLen + 5 + + let 
lastMessageTimestamp = + archiveMessages[archiveMessages.len - 1].message.get().timestamp + var extraMessages: seq[WakuMessage] = @[] + for i in 0 ..< missingMessagesAmount: + let + timestampOffset = 10 * int(i + 1) + # + 1 to avoid collision with existing messages + message: WakuMessage = + fakeWakuMessage(@[byte i], ts = ts(timestampOffset, lastMessageTimestamp)) + extraMessages.add(message) + discard archiveDriver.put(pubsubTopic, extraMessages) + + let totalMessages = + archiveMessages & + extraMessages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), + message: some(it), + pubsubTopic: some(pubsubTopic), + ) + ) + + # Given a query with default page size (1/2) + storeQuery = StoreQueryRequest( + includeData: true, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + ) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == totalMessages[0 ..< archive.DefaultPageSize] + + # Given the next query (2/2) + let historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == + totalMessages[archive.DefaultPageSize ..< archive.DefaultPageSize + 5] + + suite "Pagination with Different Cursors": + asyncTest "Starting Cursor": + # Given a paginationCursor pointing to the first message + let paginationCursor = archiveMessages[0].messageHash + storeQuery.paginationCursor = some(paginationCursor) + storeQuery.paginationLimit = some(uint64(1)) + + # When making a history 
query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the message + check: + queryResponse.get().messages == archiveMessages[1 ..< 2] + + asyncTest "Middle Cursor": + # Given a paginationCursor pointing to the middle message1 + let paginationCursor = archiveMessages[5].messageHash + storeQuery.paginationCursor = some(paginationCursor) + storeQuery.paginationLimit = some(uint64(1)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the message + check: + queryResponse.get().messages == archiveMessages[6 ..< 7] + + asyncTest "Ending Cursor": + # Given a paginationCursor pointing to the last message + let paginationCursor = archiveMessages[9].messageHash + storeQuery.paginationCursor = some(paginationCursor) + storeQuery.paginationLimit = some(uint64(1)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + suite "Message Sorting": + asyncTest "Cursor Reusability Across Nodes": + # Given a different server node with the same archive + let + otherArchiveDriverWithMessages = newArchiveDriverWithMessages( + pubsubTopic, archiveMessages.mapIt(it.message.get()) + ) + otherServerKey = generateSecp256k1Key() + otherServer = + newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountOtherArchiveResult = + otherServer.mountArchive(otherArchiveDriverWithMessages) + assert mountOtherArchiveResult.isOk() + + await otherServer.mountStore() + + await otherServer.start() + let otherServerRemotePeerInfo = otherServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the first server node + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == 
archiveMessages[0 ..< 5] + + # Given the paginationCursor from the first query + let paginationCursor = queryResponse.get().paginationCursor + + # When making a history query to the second server node + let otherHistoryQuery = StoreQueryRequest( + includeData: true, + paginationCursor: paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + let otherQueryResponse = + await client.query(otherHistoryQuery, otherServerRemotePeerInfo) + + # Then the response contains the remaining messages + check: + otherQueryResponse.get().messages == archiveMessages[5 ..< 10] + + # Cleanup + await otherServer.stop() + +suite "Waku Store - End to End - Unsorted Archive": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var storeQuery {.threadvar.}: StoreQueryRequest + var unsortedArchiveMessages {.threadvar.}: seq[WakuMessageKeyValue] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + storeQuery = StoreQueryRequest( + includeData: true, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + let timeOrigin = now() + let messages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = 
ts(20, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)), + ] + unsortedArchiveMessages = messages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), + message: some(it), + pubsubTopic: some(pubsubTopic), + ) + ) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + let + unsortedArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, messages) + mountUnsortedArchiveResult = + server.mountArchive(unsortedArchiveDriverWithMessages) + + assert mountUnsortedArchiveResult.isOk() + + await server.mountStore() + client.mountStoreClient() + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + asyncTest "Basic (Timestamp and Hash) Sorting Validation": + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Check the ordering + check: + queryResponse.get().messages.len == 5 + + queryResponse.get().messages[0].message.get().timestamp == + queryResponse.get().messages[1].message.get().timestamp + + queryResponse.get().messages[1].message.get().timestamp == + queryResponse.get().messages[2].message.get().timestamp + + queryResponse.get().messages[2].message.get().timestamp < + queryResponse.get().messages[3].message.get().timestamp + + queryResponse.get().messages[3].message.get().timestamp == + queryResponse.get().messages[4].message.get().timestamp + + toHex(queryResponse.get().messages[0].messageHash) < + toHex(queryResponse.get().messages[1].messageHash) + + toHex(queryResponse.get().messages[1].messageHash) < + toHex(queryResponse.get().messages[2].messageHash) + + 
toHex(queryResponse.get().messages[3].messageHash) < + toHex(queryResponse.get().messages[4].messageHash) + + # Given the next query + var historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Check the ordering + check: + queryResponse2.get().messages[0].message.get().timestamp < + queryResponse2.get().messages[1].message.get().timestamp + + queryResponse2.get().messages[1].message.get().timestamp == + queryResponse2.get().messages[2].message.get().timestamp + + queryResponse2.get().messages[2].message.get().timestamp == + queryResponse2.get().messages[3].message.get().timestamp + + queryResponse2.get().messages[3].message.get().timestamp == + queryResponse2.get().messages[4].message.get().timestamp + + toHex(queryResponse2.get().messages[1].messageHash) < + toHex(queryResponse2.get().messages[2].messageHash) + + toHex(queryResponse2.get().messages[2].messageHash) < + toHex(queryResponse2.get().messages[3].messageHash) + + toHex(queryResponse2.get().messages[3].messageHash) < + toHex(queryResponse2.get().messages[4].messageHash) + + asyncTest "Backward pagination with Ascending Sorting": + # Given a history query with backward pagination + + # Pick the right cursor based on the ordering + var cursor = unsortedArchiveMessages[3].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[4].messageHash): + cursor = unsortedArchiveMessages[4].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[5].messageHash): + cursor = unsortedArchiveMessages[5].messageHash + + storeQuery.paginationForward = PagingDirection.BACKWARD + storeQuery.paginationCursor = some(cursor) + + # When making a history query + let queryResponse = 
await client.query(storeQuery, serverRemotePeerInfo) + + # Then check the response ordering + check: + queryResponse.get().messages.len == 3 + + queryResponse.get().messages[0].message.get().timestamp == + queryResponse.get().messages[1].message.get().timestamp + + queryResponse.get().messages[1].message.get().timestamp == + queryResponse.get().messages[2].message.get().timestamp + + toHex(queryResponse.get().messages[0].messageHash) < + toHex(queryResponse.get().messages[1].messageHash) + + toHex(queryResponse.get().messages[1].messageHash) < + toHex(queryResponse.get().messages[2].messageHash) + + asyncTest "Forward Pagination with Ascending Sorting": + # Given a history query with forward pagination + + # Pick the right cursor based on the ordering + var cursor = unsortedArchiveMessages[3].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[4].messageHash): + cursor = unsortedArchiveMessages[4].messageHash + if toHex(cursor) > toHex(unsortedArchiveMessages[5].messageHash): + cursor = unsortedArchiveMessages[5].messageHash + + storeQuery.paginationForward = PagingDirection.FORWARD + storeQuery.paginationCursor = some(cursor) + storeQuery.paginationLimit = some(uint64(6)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then check the response ordering + check: + queryResponse.get().messages.len == 6 + + queryResponse.get().messages[0].message.get().timestamp == + queryResponse.get().messages[1].message.get().timestamp + + queryResponse.get().messages[1].message.get().timestamp < + queryResponse.get().messages[2].message.get().timestamp + + queryResponse.get().messages[2].message.get().timestamp == + queryResponse.get().messages[3].message.get().timestamp + + queryResponse.get().messages[3].message.get().timestamp == + queryResponse.get().messages[4].message.get().timestamp + + queryResponse.get().messages[4].message.get().timestamp == + 
queryResponse.get().messages[5].message.get().timestamp + + toHex(queryResponse.get().messages[0].messageHash) < + toHex(queryResponse.get().messages[1].messageHash) + + toHex(queryResponse.get().messages[2].messageHash) < + toHex(queryResponse.get().messages[3].messageHash) + + toHex(queryResponse.get().messages[3].messageHash) < + toHex(queryResponse.get().messages[4].messageHash) + + toHex(queryResponse.get().messages[4].messageHash) < + toHex(queryResponse.get().messages[5].messageHash) + +suite "Waku Store - End to End - Unsorted Archive without provided Timestamp": + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var storeQuery {.threadvar.}: StoreQueryRequest + var unsortedArchiveMessages {.threadvar.}: seq[WakuMessageKeyValue] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + storeQuery = StoreQueryRequest( + includeData: true, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + let messages = + @[ # Not providing explicit timestamp means it will be set in "arrive" order + fakeWakuMessage(@[byte 09]), + fakeWakuMessage(@[byte 07]), + fakeWakuMessage(@[byte 05]), + fakeWakuMessage(@[byte 03]), + fakeWakuMessage(@[byte 01]), + fakeWakuMessage(@[byte 00]), + fakeWakuMessage(@[byte 02]), + fakeWakuMessage(@[byte 04]), + fakeWakuMessage(@[byte 06]), + fakeWakuMessage(@[byte 08]), + ] + unsortedArchiveMessages = messages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), + message: some(it), + pubsubTopic: some(pubsubTopic), + ) + ) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + 
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + let + unsortedArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, messages) + mountUnsortedArchiveResult = + server.mountArchive(unsortedArchiveDriverWithMessages) + + assert mountUnsortedArchiveResult.isOk() + + await server.mountStore() + client.mountStoreClient() + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + asyncTest "Sorting using receiverTime": + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + check: + queryResponse.get().messages.len == 5 + + queryResponse.get().messages[0].message.get().timestamp <= + queryResponse.get().messages[1].message.get().timestamp + + queryResponse.get().messages[1].message.get().timestamp <= + queryResponse.get().messages[2].message.get().timestamp + + queryResponse.get().messages[2].message.get().timestamp <= + queryResponse.get().messages[3].message.get().timestamp + + queryResponse.get().messages[3].message.get().timestamp <= + queryResponse.get().messages[4].message.get().timestamp + + # Given the next query + var historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse.get().paginationCursor, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Timestamps are quite random in this case. + # Those are the only assumptions we can make in ALL cases. 
+ let setA = toHashSet(queryResponse.get().messages) + let setB = toHashSet(queryResponse2.get().messages) + let setC = intersection(setA, setB) + + check: + setC.len == 0 + + queryResponse2.get().messages.len == 5 + + queryResponse2.get().messages[0].message.get().timestamp <= + queryResponse2.get().messages[1].message.get().timestamp + + queryResponse2.get().messages[1].message.get().timestamp <= + queryResponse2.get().messages[2].message.get().timestamp + + queryResponse2.get().messages[2].message.get().timestamp <= + queryResponse2.get().messages[3].message.get().timestamp + + queryResponse2.get().messages[3].message.get().timestamp <= + queryResponse2.get().messages[4].message.get().timestamp + +suite "Waku Store - End to End - Archive with Multiple Topics": + var pubsubTopic {.threadvar.}: PubsubTopic + var pubsubTopicB {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicB {.threadvar.}: ContentTopic + var contentTopicC {.threadvar.}: ContentTopic + var contentTopicSpecials {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + var storeQuery {.threadvar.}: StoreQueryRequest + var originTs {.threadvar.}: proc(offset: int): Timestamp {.gcsafe, raises: [].} + var archiveMessages {.threadvar.}: seq[WakuMessageKeyValue] + + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + pubsubTopic = DefaultPubsubTopic + pubsubTopicB = "topicB" + contentTopic = DefaultContentTopic + contentTopicB = "topicB" + contentTopicC = "topicC" + contentTopicSpecials = "!@#$%^&*()_+" + contentTopicSeq = + @[contentTopic, contentTopicB, contentTopicC, contentTopicSpecials] + + storeQuery = StoreQueryRequest( + includeData: true, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + let timeOrigin = now() + originTs = 
proc(offset = 0): Timestamp {.gcsafe, raises: [].} = + ts(offset, timeOrigin) + + let messages = + @[ + fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic), + fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 02], ts = originTs(20), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 03], ts = originTs(30), contentTopic = contentTopic), + fakeWakuMessage(@[byte 04], ts = originTs(40), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 05], ts = originTs(50), contentTopic = contentTopicC), + fakeWakuMessage(@[byte 06], ts = originTs(60), contentTopic = contentTopic), + fakeWakuMessage(@[byte 07], ts = originTs(70), contentTopic = contentTopicB), + fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC), + fakeWakuMessage( + @[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials + ), + ] + + archiveMessages = messages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), + message: some(it), + pubsubTopic: some(pubsubTopic), + ) + ) + + for i in 6 ..< 10: + archiveMessages[i].messagehash = + computeMessageHash(pubsubTopicB, archiveMessages[i].message.get()) + + archiveMessages[i].pubsubTopic = some(pubsubTopicB) + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + let archiveDriver = newSqliteArchiveDriver().put(pubsubTopic, messages[0 ..< 6]).put( + pubsubTopicB, messages[6 ..< 10] + ) + let mountUnsortedArchiveResult = server.mountArchive(archiveDriver) + + assert mountUnsortedArchiveResult.isOk() + + await server.mountStore() + client.mountStoreClient() + + await allFutures(server.start(), client.start()) + + serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + 
suite "Validation of Content Filtering": + asyncTest "Basic Content Filtering": + # Given a history query with content filtering + storeQuery.contentTopics = @[contentTopic] + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == @[archiveMessages[0], archiveMessages[3]] + + asyncTest "Multiple Content Filters": + # Given a history query with multiple content filtering + storeQuery.contentTopics = @[contentTopic, contentTopicB] + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[0], + archiveMessages[1], + archiveMessages[3], + archiveMessages[4], + ] + + asyncTest "Empty Content Filtering": + # Given a history query with empty content filtering + storeQuery.contentTopics = @[] + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + let historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse.get().paginationCursor, + pubsubTopic: none(PubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[5 ..< 10] + + asyncTest "Non-Existent Content Topic": + # Given a history query with non-existent content filtering + storeQuery.contentTopics = @["non-existent-topic"] + + # When making a history query + let queryResponse = await 
client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + asyncTest "Special Characters in Content Filtering": + # Given a history query with special characters in content filtering + storeQuery.pubsubTopic = some(pubsubTopicB) + storeQuery.contentTopics = @["!@#$%^&*()_+"] + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages == @[archiveMessages[9]] + + asyncTest "PubsubTopic Specified": + # Given a history query with pubsub topic specified + storeQuery.pubsubTopic = some(pubsubTopicB) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[6], + archiveMessages[7], + archiveMessages[8], + archiveMessages[9], + ] + + asyncTest "PubsubTopic Left Empty": + # Given a history query with pubsub topic left empty + storeQuery.pubsubTopic = none(PubsubTopic) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == archiveMessages[0 ..< 5] + + # Given the next query + let historyQuery2 = StoreQueryRequest( + includeData: true, + paginationCursor: queryResponse.get().paginationCursor, + pubsubTopic: none(PubsubTopic), + contentTopics: contentTopicSeq, + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(5)), + ) + + # When making the next history query + let queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse2.get().messages == archiveMessages[5 ..< 10] + + suite "Validation of Time-based Filtering": + asyncTest "Basic Time Filtering": 
+ # Given a history query with start and end time + storeQuery.startTime = some(originTs(20)) + storeQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[archiveMessages[2], archiveMessages[3], archiveMessages[4]] + + asyncTest "Only Start Time Specified": + # Given a history query with only start time + storeQuery.startTime = some(originTs(20)) + storeQuery.endTime = none(Timestamp) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + archiveMessages[5], + ] + + asyncTest "Only End Time Specified": + # Given a history query with only end time + storeQuery.startTime = none(Timestamp) + storeQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages == + @[ + archiveMessages[0], + archiveMessages[1], + archiveMessages[2], + archiveMessages[3], + archiveMessages[4], + ] + + asyncTest "Invalid Time Range": + # Given a history query with invalid time range + storeQuery.startTime = some(originTs(60)) + storeQuery.endTime = some(originTs(40)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + asyncTest "Time Filtering with Content Filtering": + # Given a history query with time and content filtering + storeQuery.startTime = some(originTs(20)) + storeQuery.endTime = some(originTs(60)) + storeQuery.contentTopics = @[contentTopicC] + + # When making a history 
query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == @[archiveMessages[2], archiveMessages[5]] + + asyncTest "Messages Outside of Time Range": + # Given a history query with a valid time range which does not contain any messages + storeQuery.startTime = some(originTs(100)) + storeQuery.endTime = some(originTs(200)) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + suite "Ephemeral": + # TODO: Ephemeral value is not properly set for Sqlite + xasyncTest "Only ephemeral Messages:": + # Given an archive with only ephemeral messages + let + ephemeralMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true), + fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true), + fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true), + ] + ephemeralArchiveDriver = + newSqliteArchiveDriver().put(pubsubTopic, ephemeralMessages) + + # And a server node with the ephemeral archive + let + ephemeralServerKey = generateSecp256k1Key() + ephemeralServer = + newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountEphemeralArchiveResult = + ephemeralServer.mountArchive(ephemeralArchiveDriver) + assert mountEphemeralArchiveResult.isOk() + + await ephemeralServer.mountStore() + await ephemeralServer.start() + let ephemeralServerRemotePeerInfo = ephemeralServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with only ephemeral messages + let queryResponse = await client.query(storeQuery, ephemeralServerRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + # Cleanup + await ephemeralServer.stop() + + xasyncTest "Mixed messages": + # Given an archive with both ephemeral and non-ephemeral 
messages + let + ephemeralMessages = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true), + fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true), + fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true), + ] + nonEphemeralMessages = + @[ + fakeWakuMessage(@[byte 03], ts = ts(30), ephemeral = false), + fakeWakuMessage(@[byte 04], ts = ts(40), ephemeral = false), + fakeWakuMessage(@[byte 05], ts = ts(50), ephemeral = false), + ] + mixedArchiveDriver = newSqliteArchiveDriver() + .put(pubsubTopic, ephemeralMessages) + .put(pubsubTopic, nonEphemeralMessages) + + # And a server node with the mixed archive + let + mixedServerKey = generateSecp256k1Key() + mixedServer = + newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountMixedArchiveResult = mixedServer.mountArchive(mixedArchiveDriver) + assert mountMixedArchiveResult.isOk() + + await mixedServer.mountStore() + await mixedServer.start() + let mixedServerRemotePeerInfo = mixedServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with mixed messages + let queryResponse = await client.query(storeQuery, mixedServerRemotePeerInfo) + + # Then the response contains the non-ephemeral messages + check: + queryResponse.get().messages == nonEphemeralMessages + + # Cleanup + await mixedServer.stop() + + suite "Edge Case Scenarios": + asyncTest "Empty Message Store": + # Given an empty archive + let emptyArchiveDriver = newSqliteArchiveDriver() + + # And a server node with the empty archive + let + emptyServerKey = generateSecp256k1Key() + emptyServer = + newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountEmptyArchiveResult = emptyServer.mountArchive(emptyArchiveDriver) + assert mountEmptyArchiveResult.isOk() + + await emptyServer.mountStore() + await emptyServer.start() + let emptyServerRemotePeerInfo = emptyServer.peerInfo.toRemotePeerInfo() + + # When making a history query to the server with an empty archive + let queryResponse = 
await client.query(storeQuery, emptyServerRemotePeerInfo) + + # Then the response contains no messages + check: + queryResponse.get().messages.len == 0 + + # Cleanup + await emptyServer.stop() + + asyncTest "Voluminous Message Store": + # Given a voluminous archive (1M+ messages) + var messages: seq[WakuMessage] = @[] + for i in 0 ..< 100000: + let topic = "topic" & $i + messages.add(fakeWakuMessage(@[byte i], contentTopic = topic)) + + let voluminousArchiveMessages = messages.mapIt( + WakuMessageKeyValue( + messageHash: computeMessageHash(pubsubTopic, it), + message: some(it), + pubsubTopic: some(pubsubTopic), + ) + ) + + let voluminousArchiveDriverWithMessages = + newArchiveDriverWithMessages(pubsubTopic, messages) + + # And a server node with the voluminous archive + let + voluminousServerKey = generateSecp256k1Key() + voluminousServer = + newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0)) + mountVoluminousArchiveResult = + voluminousServer.mountArchive(voluminousArchiveDriverWithMessages) + assert mountVoluminousArchiveResult.isOk() + + await voluminousServer.mountStore() + await voluminousServer.start() + let voluminousServerRemotePeerInfo = voluminousServer.peerInfo.toRemotePeerInfo() + + # Given the following history query + storeQuery.contentTopics = + @["topic10000", "topic30000", "topic50000", "topic70000", "topic90000"] + + # When making a history query to the server with a voluminous archive + let queryResponse = await client.query(storeQuery, voluminousServerRemotePeerInfo) + + # Then the response contains the messages + check: + queryResponse.get().messages == + @[ + voluminousArchiveMessages[10000], + voluminousArchiveMessages[30000], + voluminousArchiveMessages[50000], + voluminousArchiveMessages[70000], + voluminousArchiveMessages[90000], + ] + + # Cleanup + await voluminousServer.stop() + + asyncTest "Large contentFilters Array": + # Given a history query with the max contentFilters len, 10 + storeQuery.contentTopics = 
@[contentTopic] + for i in 0 ..< 9: + let topic = "topic" & $i + storeQuery.contentTopics.add(topic) + + # When making a history query + let queryResponse = await client.query(storeQuery, serverRemotePeerInfo) + + # Then the response should trigger no errors + check: + queryResponse.get().messages == @[archiveMessages[0], archiveMessages[3]] diff --git a/third-party/nwaku/tests/node/utils.nim b/third-party/nwaku/tests/node/utils.nim new file mode 100644 index 0000000..61c6c4a --- /dev/null +++ b/third-party/nwaku/tests/node/utils.nim @@ -0,0 +1,7 @@ +import std/options, results +import + waku/[node/peer_manager, node/waku_node, waku_enr/sharding, common/enr/typed_record], + ../testlib/[wakucore] + +proc relayShards*(node: WakuNode): RelayShards = + return node.enr.toTyped().get().relayShardingIndicesList().get() diff --git a/third-party/nwaku/tests/postgres-docker-compose.yml b/third-party/nwaku/tests/postgres-docker-compose.yml new file mode 100644 index 0000000..396a142 --- /dev/null +++ b/third-party/nwaku/tests/postgres-docker-compose.yml @@ -0,0 +1,10 @@ +version: "3.8" + +services: + db: + image: postgres:15.4-alpine3.18 + restart: always + environment: + POSTGRES_PASSWORD: test123 + ports: + - "5432:5432" diff --git a/third-party/nwaku/tests/resources/content_topics.nim b/third-party/nwaku/tests/resources/content_topics.nim new file mode 100644 index 0000000..9090d55 --- /dev/null +++ b/third-party/nwaku/tests/resources/content_topics.nim @@ -0,0 +1,12 @@ +proc getContentTopic*( + applicationName: string, + applicationVersion: int, + contentTopicName: string, + encoding: string, +): string = + return "/$applicationName/$applicationVersion/$contentTopicName/$enconding" + +const + CURRENT* = getContentTopic("application", 1, "content-topic", "proto") + TESTNET* = getContentTopic("toychat", 2, "huilong", "proto") + PLAIN* = "test" diff --git a/third-party/nwaku/tests/resources/payloads.nim b/third-party/nwaku/tests/resources/payloads.nim new file mode 100644 
index 0000000..723bf78 --- /dev/null +++ b/third-party/nwaku/tests/resources/payloads.nim @@ -0,0 +1,57 @@ +import std/json + +const + ALPHABETIC* = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + ALPHANUMERIC* = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + ALPHANUMERIC_SPECIAL* = + "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_+-=[]{}|;':\\\",./<>?`~" + EMOJI* = + "😀 😃 😄 😁 😆 😅 🤣 😂 🙂 🙃 😉 😊 😇 🥰 😍 🤩 😘 😗 😚 😙" + CODE* = "def main():\n\tprint('Hello, world!')" + QUERY* = + """ + SELECT + u.id, + u.name, + u.email, + u.created_at, + u.updated_at, + ( + SELECT + COUNT(*) + FROM + posts p + WHERE + p.user_id = u.id + ) AS post_count + FROM + users u + WHERE + u.id = 1 + """ + TEXT_SMALL* = "Lorem ipsum dolor sit amet, consectetur adipiscing elit." + TEXT_LARGE* = + """ + Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras gravida vulputate semper. Proin + eleifend varius cursus. Morbi lacinia posuere quam sit amet pretium. Sed non metus fermentum, + venenatis nisl id, vestibulum eros. Quisque non lorem sit amet lectus faucibus elementum eu + sit amet odio. Mauris tortor justo, malesuada quis volutpat vitae, tristique at nisl. Proin + eleifend eu arcu ac sodales. In efficitur ipsum urna, ut viverra turpis sodales ut. Phasellus + nec tortor eu urna suscipit euismod eget vel ligula. Phasellus vestibulum sollicitudin tellus, + ac sodales tellus tempor id. Curabitur sed congue velit. 
+ """ + +proc getSampleJsonDictionary*(): JsonNode = + %*{ + "shapes": [{"type": "circle", "radius": 10}, {"type": "square", "side": 10}], + "colours": ["red", "green", "blue"], + } + +proc getSampleJsonList*(): JsonNode = + %*[{"type": "cat", "name": "Salem"}, {"type": "dog", "name": "Oberon"}] + +proc getByteSequence*(bytesNumber: uint64): seq[byte] = + result = newSeq[byte](bytesNumber) + for i in 0 ..< bytesNumber: + result[i] = cast[byte](i mod 256) + return result diff --git a/third-party/nwaku/tests/resources/pubsub_topics.nim b/third-party/nwaku/tests/resources/pubsub_topics.nim new file mode 100644 index 0000000..07ac741 --- /dev/null +++ b/third-party/nwaku/tests/resources/pubsub_topics.nim @@ -0,0 +1,13 @@ +import std/strformat + +proc getPubsubTopic*(pubsubTopicName: string): string = + return fmt"/waku/2/{pubsubTopicName}" + +const + CURRENT* = getPubsubTopic("test") + CURRENT_NESTED* = getPubsubTopic("test/nested") + SHARDING* = getPubsubTopic("waku-9_shard-0") + PLAIN* = "test" + LEGACY* = "/waku/1/test" + LEGACY_NESTED* = "/waku/1/test/nested" + LEGACY_ENCODING* = "/waku/1/test/proto" diff --git a/third-party/nwaku/tests/test_all.nim b/third-party/nwaku/tests/test_all.nim new file mode 100644 index 0000000..a72caba --- /dev/null +++ b/third-party/nwaku/tests/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./all_tests_common, ./all_tests_waku, ./all_tests_wakunode2 diff --git a/third-party/nwaku/tests/test_helpers.nim b/third-party/nwaku/tests/test_helpers.nim new file mode 100644 index 0000000..bd1d837 --- /dev/null +++ b/third-party/nwaku/tests/test_helpers.nim @@ -0,0 +1,46 @@ +import chronos, bearssl/rand, eth/[keys, p2p] + +import libp2p/crypto/crypto + +var nextPort = 30303 + +proc localAddress*(port: int): Address = + let port = Port(port) + result = Address(udpPort: port, tcpPort: port, ip: parseIpAddress("127.0.0.1")) + +proc setupTestNode*( + rng: ref HmacDrbgContext, capabilities: varargs[ProtocolInfo, `protocolInfo`] +): EthereumNode = + 
let + keys1 = keys.KeyPair.random(rng[]) + address = localAddress(nextPort) + result = newEthereumNode( + keys1, + address, + NetworkId(1), + addAllCapabilities = false, + bindUdpPort = address.udpPort, # Assume same as external + bindTcpPort = address.tcpPort, # Assume same as external + rng = rng, + ) + nextPort.inc + for capability in capabilities: + result.addCapability capability + +# Copied from here: https://github.com/status-im/nim-libp2p/blob/d522537b19a532bc4af94fcd146f779c1f23bad0/tests/helpers.nim#L28 +type RngWrap = object + rng: ref rand.HmacDrbgContext + +var rngVar: RngWrap + +proc getRng(): ref rand.HmacDrbgContext = + # TODO if `rngVar` is a threadvar like it should be, there are random and + # spurious compile failures on mac - this is not gcsafe but for the + # purpose of the tests, it's ok as long as we only use a single thread + {.gcsafe.}: + if rngVar.rng.isNil: + rngVar.rng = crypto.newRng() + rngVar.rng + +template rng*(): ref rand.HmacDrbgContext = + getRng() diff --git a/third-party/nwaku/tests/test_message_cache.nim b/third-party/nwaku/tests/test_message_cache.nim new file mode 100644 index 0000000..cd2e882 --- /dev/null +++ b/third-party/nwaku/tests/test_message_cache.nim @@ -0,0 +1,249 @@ +{.used.} + +import std/[sets, random], results, stew/byteutils, testutils/unittests +import waku/waku_core, waku/waku_api/message_cache, ./testlib/wakucore + +randomize() + +suite "MessageCache": + setup: + ## Given + let capacity = 3 + let testPubsubTopic = DefaultPubsubTopic + let testContentTopic = DefaultContentTopic + let cache = MessageCache.init(capacity) + + test "subscribe to topic": + ## When + cache.pubsubSubscribe(testPubsubTopic) + cache.pubsubSubscribe(testPubsubTopic) + + # idempotence of subscribe is also tested + cache.contentSubscribe(testContentTopic) + cache.contentSubscribe(testContentTopic) + + ## Then + check: + cache.isPubsubSubscribed(testPubsubTopic) + cache.isContentSubscribed(testContentTopic) + cache.pubsubTopicCount() == 
1 + cache.contentTopicCount() == 1 + + test "unsubscribe from topic": + # Init cache content + cache.pubsubSubscribe(testPubsubTopic) + cache.contentSubscribe(testContentTopic) + + cache.pubsubSubscribe("AnotherPubsubTopic") + cache.contentSubscribe("AnotherContentTopic") + + ## When + cache.pubsubUnsubscribe(testPubsubTopic) + cache.contentUnsubscribe(testContentTopic) + + ## Then + check: + not cache.isPubsubSubscribed(testPubsubTopic) + not cache.isContentSubscribed(testContentTopic) + cache.pubsubTopicCount() == 1 + cache.contentTopicCount() == 1 + + test "get messages of a subscribed topic": + ## Given + let testMessage = fakeWakuMessage() + + # Init cache content + cache.pubsubSubscribe(testPubsubTopic) + cache.addMessage(testPubsubTopic, testMessage) + + ## When + let res = cache.getMessages(testPubsubTopic) + + ## Then + check: + res.isOk() + res.get() == @[testMessage] + + test "get messages with clean flag shoud clear the messages cache": + ## Given + let testMessage = fakeWakuMessage() + + # Init cache content + cache.pubsubSubscribe(testPubsubTopic) + cache.addMessage(testPubsubTopic, testMessage) + + ## When + var res = cache.getMessages(testPubsubTopic, clear = true) + require(res.isOk()) + + res = cache.getMessages(testPubsubTopic) + + ## Then + check: + res.isOk() + res.get().len == 0 + + test "get messages of a non-subscribed topic": + ## When + cache.pubsubSubscribe(PubsubTopic("dummyPubsub")) + let res = cache.getMessages(testPubsubTopic) + + ## Then + check: + res.isErr() + res.error() == "not subscribed to this pubsub topic" + + test "add messages to subscribed topic": + ## Given + let testMessage = fakeWakuMessage() + + cache.pubsubSubscribe(testPubsubTopic) + + ## When + cache.addMessage(testPubsubTopic, testMessage) + + ## Then + let messages = cache.getMessages(testPubsubTopic).tryGet() + check: + messages == @[testMessage] + + test "add messages to non-subscribed topic": + ## Given + let testMessage = fakeWakuMessage() + + ## When + 
cache.addMessage(testPubsubTopic, testMessage) + + ## Then + let res = cache.getMessages(testPubsubTopic) + check: + res.isErr() + res.error() == "not subscribed to any pubsub topics" + + test "add messages beyond the capacity": + ## Given + var testMessages = @[fakeWakuMessage(toBytes("MSG-1"))] + + # Prevent duplicate messages timestamp + for i in 0 ..< 5: + var msg = fakeWakuMessage(toBytes("MSG-1")) + + while msg.timestamp <= testMessages[i].timestamp: + msg = fakeWakuMessage(toBytes("MSG-1")) + + testMessages.add(msg) + + cache.pubsubSubscribe(testPubsubTopic) + + ## When + for msg in testMessages: + cache.addMessage(testPubsubTopic, msg) + + ## Then + let messages = cache.getMessages(testPubsubTopic).tryGet() + let messageSet = toHashSet(messages) + + let testSet = toHashSet(testMessages) + + check: + messageSet.len == capacity + messageSet < testSet + testMessages[0] notin messages + + test "get messages on pubsub via content topics": + cache.pubsubSubscribe(testPubsubTopic) + + let fakeMessage = fakeWakuMessage() + + cache.addMessage(testPubsubTopic, fakeMessage) + + let getRes = cache.getAutoMessages(DefaultContentTopic) + + check: + getRes.isOk + getRes.get() == @[fakeMessage] + + test "add same message twice": + cache.pubsubSubscribe(testPubsubTopic) + + let fakeMessage = fakeWakuMessage() + + cache.addMessage(testPubsubTopic, fakeMessage) + cache.addMessage(testPubsubTopic, fakeMessage) + + check: + cache.messagesCount() == 1 + + test "unsubscribing remove messages": + let topic0 = "PubsubTopic0" + let topic1 = "PubsubTopic1" + let topic2 = "PubsubTopic2" + + let fakeMessage0 = fakeWakuMessage(toBytes("MSG-0")) + let fakeMessage1 = fakeWakuMessage(toBytes("MSG-1")) + let fakeMessage2 = fakeWakuMessage(toBytes("MSG-2")) + + cache.pubsubSubscribe(topic0) + cache.pubsubSubscribe(topic1) + cache.pubsubSubscribe(topic2) + cache.contentSubscribe("ContentTopic0") + + cache.addMessage(topic0, fakeMessage0) + cache.addMessage(topic1, fakeMessage1) + 
cache.addMessage(topic2, fakeMessage2) + + cache.pubsubUnsubscribe(topic0) + + # at this point, fakeMessage0 is only ref by DefaultContentTopic + + let res = cache.getAutoMessages(DefaultContentTopic) + + check: + res.isOk() + res.get().len == 3 + cache.isPubsubSubscribed(topic0) == false + cache.isPubsubSubscribed(topic1) == true + cache.isPubsubSubscribed(topic2) == true + + cache.contentUnsubscribe(DefaultContentTopic) + + # msg0 was delete because no refs + + check: + cache.messagesCount() == 2 + + test "fuzzing": + let testContentTopic1 = "contentTopic1" + let testContentTopic2 = "contentTopic2" + + let cache = MessageCache.init(50) + + cache.contentSubscribe(testContentTopic1) + cache.contentSubscribe(testContentTopic2) + + for _ in 0 .. 10000: + let numb = rand(1.0) + + if numb > 0.4: + let topic = if rand(1.0) > 0.5: testContentTopic1 else: testContentTopic2 + + let testMessage = fakeWakuMessage(contentTopic = topic) + + cache.addMessage(DefaultPubsubTopic, testMessage) + elif numb > 0.1: + let topic = if rand(1.0) > 0.5: testContentTopic1 else: testContentTopic2 + + let clear = rand(1.0) > 0.5 + discard cache.getAutoMessages(topic, clear) + elif numb > 0.05: + if rand(1.0) > 0.5: + cache.pubsubUnsubscribe(DefaultPubsubTopic) + else: + cache.pubsubSubscribe(DefaultPubsubTopic) + else: + let topic = if rand(1.0) > 0.5: testContentTopic1 else: testContentTopic2 + + if rand(1.0) > 0.5: + cache.contentUnsubscribe(topic) + else: + cache.contentSubscribe(topic) diff --git a/third-party/nwaku/tests/test_peer_manager.nim b/third-party/nwaku/tests/test_peer_manager.nim new file mode 100644 index 0000000..1369f3f --- /dev/null +++ b/third-party/nwaku/tests/test_peer_manager.nim @@ -0,0 +1,1204 @@ +{.used.} + +import + std/[sequtils, times, sugar, net], + testutils/unittests, + chronos, + json_rpc/rpcserver, + json_rpc/rpcclient, + eth/keys, + eth/common/eth_types, + libp2p/[builders, switch, multiaddress], + libp2p/protobuf/minprotobuf, + libp2p/stream/[bufferstream, 
connection], + libp2p/crypto/crypto, + libp2p/protocols/pubsub/pubsub, + libp2p/protocols/pubsub/rpc/message, + libp2p/peerid +import + waku/[ + common/databases/db_sqlite, + node/peer_manager/peer_manager, + node/peer_manager/peer_store/waku_peer_storage, + waku_node, + waku_core, + waku_enr/capabilities, + waku_relay/protocol, + waku_filter_v2/common, + waku_store/common, + waku_peer_exchange, + waku_metadata, + ], + ./testlib/common, + ./testlib/testutils, + ./testlib/wakucore, + ./testlib/wakunode + +procSuite "Peer Manager": + asyncTest "connectPeer() works": + # Create 2 nodes + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + await allFutures(nodes.mapIt(it.start())) + + let connOk = + await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo()) + await sleepAsync(chronos.milliseconds(500)) + + check: + connOk == true + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[1].peerInfo.peerId + ) + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == + Connectedness.Connected + + asyncTest "dialPeer() works": + # Create 2 nodes + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + await allFutures(nodes.mapIt(it.mountFilter())) + + # Dial node2 from node1 + let conn = await nodes[0].peerManager.dialPeer( + nodes[1].peerInfo.toRemotePeerInfo(), WakuFilterSubscribeCodec + ) + await sleepAsync(chronos.milliseconds(500)) + + # Check connection + check: + conn.isSome() + conn.get.activity + conn.get.peerId == nodes[1].peerInfo.peerId + + # Check that node2 is being managed in node1 + check: + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[1].peerInfo.peerId + ) + + # Check connectedness + check: + 
nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == + Connectedness.Connected + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "dialPeer() fails gracefully": + # Create 2 nodes and start them + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + + let nonExistentPeerRes = parsePeerInfo( + "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE" + ) + require nonExistentPeerRes.isOk() + + let nonExistentPeer = nonExistentPeerRes.value + + # Dial non-existent peer from node1 + let conn1 = await nodes[0].peerManager.dialPeer(nonExistentPeer, WakuStoreCodec) + check: + conn1.isNone() + + # Dial peer not supporting given protocol + let conn2 = await nodes[0].peerManager.dialPeer( + nodes[1].peerInfo.toRemotePeerInfo(), WakuStoreCodec + ) + check: + conn2.isNone() + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Adding, selecting and filtering peers work": + let + node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + # Create filter peer + filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet() + filterPeer = PeerInfo.new(generateEcdsaKey(), @[filterLoc]) + # Create store peer + storeLoc = MultiAddress.init("/ip4/127.0.0.3/tcp/4").tryGet() + storePeer = PeerInfo.new(generateEcdsaKey(), @[storeLoc]) + + await node.start() + + node.peerManager.addServicePeer(storePeer.toRemotePeerInfo(), WakuStoreCodec) + node.peerManager.addServicePeer( + filterPeer.toRemotePeerInfo(), WakuFilterSubscribeCodec + ) + + # Check peers were successfully added to peer manager + check: + node.peerManager.switch.peerStore.peers().len == 2 + node.peerManager.switch.peerStore.peers(WakuFilterSubscribeCodec).allIt( + it.peerId == filterPeer.peerId and it.addrs.contains(filterLoc) and + 
it.protocols.contains(WakuFilterSubscribeCodec) + ) + node.peerManager.switch.peerStore.peers(WakuStoreCodec).allIt( + it.peerId == storePeer.peerId and it.addrs.contains(storeLoc) and + it.protocols.contains(WakuStoreCodec) + ) + + await node.stop() + + asyncTest "Peer manager keeps track of connections": + # Create 2 nodes + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + + # Test default connectedness for new peers + nodes[0].peerManager.addPeer(nodes[1].peerInfo.toRemotePeerInfo()) + check: + # No information about node2's connectedness + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == + NotConnected + + # Failed connection + let nonExistentPeerRes = parsePeerInfo( + "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE" + ) + require: + nonExistentPeerRes.isOk() + + let nonExistentPeer = nonExistentPeerRes.value + require: + (await nodes[0].peerManager.connectPeer(nonExistentPeer)) == false + await sleepAsync(chronos.milliseconds(500)) + + check: + # Cannot connect to node2 + nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) == + CannotConnect + + # Successful connection + require: + (await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo())) == + true + await sleepAsync(chronos.milliseconds(500)) + + check: + # Currently connected to node2 + nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == + Connected + + # Stop node. Gracefully disconnect from all peers. + await nodes[0].stop() + check: + # Not currently connected to node2, but had recent, successful connection. 
+ nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) == + CanConnect + + await nodes[1].stop() + + asyncTest "Peer manager updates failed peers correctly": + # Create 2 nodes + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + + let nonExistentPeerRes = parsePeerInfo( + "/ip4/0.0.0.0/tcp/1000/p2p/16Uiu2HAmQSMNExfUYUqfuXWkD5DaNZnMYnigRxFKbk3tcEFQeQeE" + ) + require nonExistentPeerRes.isOk() + + let nonExistentPeer = nonExistentPeerRes.value + + nodes[0].peerManager.addPeer(nonExistentPeer) + + # Set a low backoff to speed up test: 2, 4, 8, 16 + nodes[0].peerManager.initialBackoffInSec = 2 + nodes[0].peerManager.backoffFactor = 2 + + # try to connect to peer that doesnt exist + let conn1Ok = await nodes[0].peerManager.connectPeer(nonExistentPeer) + check: + # Cannot connect to node2 + nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) == + CannotConnect + nodes[0].peerManager.switch.peerStore[ConnectionBook][nonExistentPeer.peerId] == + CannotConnect + nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][ + nonExistentPeer.peerId + ] == 1 + + # Connection attempt failed + conn1Ok == false + + # Right after failing there is a backoff period + nodes[0].peerManager.canBeConnected(nonExistentPeer.peerId) == false + + # We wait the first backoff period + await sleepAsync(chronos.milliseconds(2100)) + + # And backoff period is over + check: + nodes[0].peerManager.canBeConnected(nodes[1].peerInfo.peerId) == true + + # After a successful connection, the number of failed connections is reset + + nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][ + nodes[1].peerInfo.peerId + ] = 4 + let conn2Ok = + await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo()) + check: + conn2Ok == true + 
nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][ + nodes[1].peerInfo.peerId + ] == 0 + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Peer manager can use persistent storage and survive restarts": + let + database = SqliteDatabase.new(":memory:")[] + storage = WakuPeerStorage.new(database)[] + node1 = newTestWakuNode( + generateSecp256k1Key(), getPrimaryIPAddr(), Port(44048), peerStorage = storage + ) + node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(34023)) + + node1.mountMetadata(0, @[0'u16]).expect("Mounted Waku Metadata") + node2.mountMetadata(0, @[0'u16]).expect("Mounted Waku Metadata") + + await node1.start() + await node2.start() + + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + let peerInfo2 = node2.switch.peerInfo + var remotePeerInfo2 = peerInfo2.toRemotePeerInfo() + remotePeerInfo2.enr = some(node2.enr) + + let is12Connected = await node1.peerManager.connectPeer(remotePeerInfo2) + assert is12Connected == true, "Node 1 and 2 not connected" + + check: + node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] == + remotePeerInfo2.addrs + + # wait for the peer store update + await sleepAsync(chronos.milliseconds(500)) + + check: + # Currently connected to node2 + node1.peerManager.switch.peerStore.peers().len == 1 + node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected + + # Simulate restart by initialising a new node using the same storage + let node3 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("127.0.0.1"), + Port(56037), + peerStorage = storage, + ) + + node3.mountMetadata(0, @[0'u16]).expect("Mounted Waku Metadata") + + await node3.start() + + check: + # Node2 has been loaded after "restart", but we have not yet reconnected + 
node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected + + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node3.peerManager.connectToRelayPeers() + + await sleepAsync(chronos.milliseconds(500)) + + check: + # Reconnected to node2 after "restart" + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected + + await allFutures([node1.stop(), node2.stop(), node3.stop()]) + + asyncTest "Sharded peer manager can use persistent storage and survive restarts": + let + database = SqliteDatabase.new(":memory:")[] + storage = WakuPeerStorage.new(database)[] + node1 = newTestWakuNode( + generateSecp256k1Key(), getPrimaryIPAddr(), Port(44048), peerStorage = storage + ) + node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(34023)) + + node1.mountMetadata(0, @[0'u16]).expect("Mounted Waku Metadata") + node2.mountMetadata(0, @[0'u16]).expect("Mounted Waku Metadata") + + await node1.start() + await node2.start() + + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + let peerInfo2 = node2.switch.peerInfo + var remotePeerInfo2 = peerInfo2.toRemotePeerInfo() + remotePeerInfo2.enr = some(node2.enr) + + let is12Connected = await node1.peerManager.connectPeer(remotePeerInfo2) + assert is12Connected == true, "Node 1 and 2 not connected" + + check: + node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] == + remotePeerInfo2.addrs + + # wait for the peer store update + await sleepAsync(chronos.milliseconds(500)) + + check: + # Currently connected to node2 + 
node1.peerManager.switch.peerStore.peers().len == 1 + node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected + + # Simulate restart by initialising a new node using the same storage + let node3 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("127.0.0.1"), + Port(56037), + peerStorage = storage, + ) + + node3.mountMetadata(0, @[0'u16]).expect("Mounted Waku Metadata") + + await node3.start() + + check: + # Node2 has been loaded after "restart", but we have not yet reconnected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected + + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node3.peerManager.manageRelayPeers() + + await sleepAsync(chronos.milliseconds(500)) + + check: + # Reconnected to node2 after "restart" + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected + + await allFutures([node1.stop(), node2.stop(), node3.stop()]) + + asyncTest "Peer manager drops conections to peers on different networks": + let + port = Port(0) + # different network + node1 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + port, + clusterId = 3, + subscribeShards = @[uint16(0)], + ) + + # same network + node2 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + port, + clusterId = 4, + subscribeShards = @[uint16(0)], + ) + node3 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + port, + clusterId = 4, + subscribeShards = @[uint16(0)], + ) + + node1.mountMetadata(3, @[0'u16]).expect("Mounted Waku Metadata") + 
node2.mountMetadata(4, @[0'u16]).expect("Mounted Waku Metadata") + node3.mountMetadata(4, @[0'u16]).expect("Mounted Waku Metadata") + + # Start nodes + await allFutures([node1.start(), node2.start(), node3.start()]) + + # 1->2 (fails) + let conn1 = await node1.peerManager.dialPeer( + node2.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec + ) + + # 1->3 (fails) + let conn2 = await node1.peerManager.dialPeer( + node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec + ) + + # 2->3 (succeeds) + let conn3 = await node2.peerManager.dialPeer( + node3.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec + ) + + check: + conn1.isNone or conn1.get().isClosed + conn2.isNone or conn2.get().isClosed + conn3.isSome and not conn3.get().isClosed + + # TODO: nwaku/issues/1377 + xasyncTest "Peer manager support multiple protocol IDs when reconnecting to peers": + let + database = SqliteDatabase.new(":memory:")[] + storage = WakuPeerStorage.new(database)[] + node1 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + peerStorage = storage, + ) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + peerInfo2 = node2.switch.peerInfo + betaCodec = "/vac/waku/relay/2.0.0-beta2" + stableCodec = "/vac/waku/relay/2.0.0" + + await node1.start() + await node2.start() + + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + node1.wakuRelay.codec = betaCodec + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + node2.wakuRelay.codec = betaCodec + + require: + (await node1.peerManager.connectPeer(peerInfo2.toRemotePeerInfo())) == true + check: + # Currently connected to node2 + node1.peerManager.switch.peerStore.peers().len == 1 + node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node1.peerManager.switch.peerStore.peers().anyIt( + it.protocols.contains(node2.wakuRelay.codec) + ) + 
node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected + + # Simulate restart by initialising a new node using the same storage + let node3 = newTestWakuNode( + generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), peerStorage = storage + ) + + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + node3.wakuRelay.codec = stableCodec + check: + # Node 2 and 3 have differing codecs + node2.wakuRelay.codec == betaCodec + node3.wakuRelay.codec == stableCodec + # Node2 has been loaded after "restart", but we have not yet reconnected + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec)) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected + + await node3.start() # This should trigger a reconnect + + check: + # Reconnected to node2 after "restart" + node3.peerManager.switch.peerStore.peers().len == 1 + node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId) + node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec)) + node3.peerManager.switch.peerStore.peers().anyIt( + it.protocols.contains(stableCodec) + ) + node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected + + await allFutures([node1.stop(), node2.stop(), node3.stop()]) + + asyncTest "Peer manager connects to all peers supporting a given protocol": + # Create 4 nodes + let nodes = toSeq(0 ..< 4).mapIt( + newTestWakuNode( + nodeKey = generateSecp256k1Key(), + bindIp = parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wakuFlags = some(CapabilitiesBitfield.init(@[Relay])), + ) + ) + + # Start them + discard nodes.mapIt(it.mountMetadata(0, @[0'u16])) + await allFutures(nodes.mapIt(it.mountRelay())) + await allFutures(nodes.mapIt(it.start())) + + # Get all peer infos + let peerInfos = collect: + 
for i in 0 .. nodes.high: + let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo() + peerInfo.enr = some(nodes[i].enr) + peerInfo + + # Add all peers (but self) to node 0 + nodes[0].peerManager.addPeer(peerInfos[1]) + nodes[0].peerManager.addPeer(peerInfos[2]) + nodes[0].peerManager.addPeer(peerInfos[3]) + + # Connect to relay peers + await nodes[0].peerManager.connectToRelayPeers() + + # wait for the connections to settle + await sleepAsync(chronos.milliseconds(500)) + + check: + # Peerstore track all three peers + nodes[0].peerManager.switch.peerStore.peers().len == 3 + + # All peer ids are correct + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[1].switch.peerInfo.peerId + ) + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[2].switch.peerInfo.peerId + ) + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[3].switch.peerInfo.peerId + ) + + # All peers support the relay protocol + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + + # All peers are connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[1].switch.peerInfo.peerId + ] == Connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[2].switch.peerInfo.peerId + ] == Connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[3].switch.peerInfo.peerId + ] == Connected + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Sharded peer manager connects to all peers supporting a given protocol": + # Create 4 nodes + let nodes = toSeq(0 ..< 4).mapIt( + newTestWakuNode( + nodeKey = generateSecp256k1Key(), + bindIp = parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wakuFlags = 
some(CapabilitiesBitfield.init(@[Relay])), + ) + ) + + # Start them + discard nodes.mapIt(it.mountMetadata(0, @[0'u16])) + await allFutures(nodes.mapIt(it.mountRelay())) + await allFutures(nodes.mapIt(it.start())) + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.millis) + + let topic = "/waku/2/rs/0/0" + for node in nodes: + node.wakuRelay.subscribe(topic, simpleHandler) + + # Get all peer infos + let peerInfos = collect: + for i in 0 .. nodes.high: + let peerInfo = nodes[i].switch.peerInfo.toRemotePeerInfo() + peerInfo.enr = some(nodes[i].enr) + peerInfo + + # Add all peers (but self) to node 0 + nodes[0].peerManager.addPeer(peerInfos[1]) + nodes[0].peerManager.addPeer(peerInfos[2]) + nodes[0].peerManager.addPeer(peerInfos[3]) + + # Connect to relay peers + await nodes[0].peerManager.manageRelayPeers() + + # wait for the connections to settle + await sleepAsync(chronos.milliseconds(500)) + + check: + # Peerstore track all three peers + nodes[0].peerManager.switch.peerStore.peers().len == 3 + + # All peer ids are correct + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[1].switch.peerInfo.peerId + ) + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[2].switch.peerInfo.peerId + ) + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[3].switch.peerInfo.peerId + ) + + # All peers support the relay protocol + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + + # All peers are connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[1].switch.peerInfo.peerId + ] == Connected + 
nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[2].switch.peerInfo.peerId + ] == Connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[3].switch.peerInfo.peerId + ] == Connected + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Peer store keeps track of incoming connections": + # Create 4 nodes + let nodes = toSeq(0 ..< 4).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Start them + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + + # Get all peer infos + let peerInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo()) + + # all nodes connect to peer 0 + require: + (await nodes[1].peerManager.connectPeer(peerInfos[0])) == true + (await nodes[2].peerManager.connectPeer(peerInfos[0])) == true + (await nodes[3].peerManager.connectPeer(peerInfos[0])) == true + + await sleepAsync(chronos.milliseconds(500)) + + check: + # Peerstore track all three peers + nodes[0].peerManager.switch.peerStore.peers().len == 3 + + # Inbound/Outbound number of peers match + nodes[0].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 3 + nodes[0].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 0 + nodes[1].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0 + nodes[1].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1 + nodes[2].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0 + nodes[2].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1 + nodes[3].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0 + nodes[3].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1 + + # All peer ids are correct + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[1].switch.peerInfo.peerId + ) + nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[2].switch.peerInfo.peerId + ) + 
nodes[0].peerManager.switch.peerStore.peers().anyIt( + it.peerId == nodes[3].switch.peerInfo.peerId + ) + + # All peers support the relay protocol + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains( + WakuRelayCodec + ) + + # All peers are connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[1].switch.peerInfo.peerId + ] == Connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[2].switch.peerInfo.peerId + ] == Connected + nodes[0].peerManager.switch.peerStore[ConnectionBook][ + nodes[3].switch.peerInfo.peerId + ] == Connected + + # All peers are Inbound in peer 0 + nodes[0].peerManager.switch.peerStore[DirectionBook][ + nodes[1].switch.peerInfo.peerId + ] == Inbound + nodes[0].peerManager.switch.peerStore[DirectionBook][ + nodes[2].switch.peerInfo.peerId + ] == Inbound + nodes[0].peerManager.switch.peerStore[DirectionBook][ + nodes[3].switch.peerInfo.peerId + ] == Inbound + + # All peers have an Outbound connection with peer 0 + nodes[1].peerManager.switch.peerStore[DirectionBook][ + nodes[0].switch.peerInfo.peerId + ] == Outbound + nodes[2].peerManager.switch.peerStore[DirectionBook][ + nodes[0].switch.peerInfo.peerId + ] == Outbound + nodes[3].peerManager.switch.peerStore[DirectionBook][ + nodes[0].switch.peerInfo.peerId + ] == Outbound + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Peer store addServicePeer() stores service peers": + # Valid peer id missing the last digit + let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D" + + let + node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + peers = toSeq(1 .. 
4) + .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it)) + .filterIt(it.isOk()) + .mapIt(it.value) + + require: + peers.len == 4 + + # service peers + node.peerManager.addServicePeer(peers[0], WakuStoreCodec) + node.peerManager.addServicePeer(peers[1], WakuLegacyLightPushCodec) + node.peerManager.addServicePeer(peers[2], WakuPeerExchangeCodec) + + # relay peers (should not be added) + node.peerManager.addServicePeer(peers[3], WakuRelayCodec) + + # all peers are stored in the peerstore + check: + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[0].peerId) + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[1].peerId) + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[2].peerId) + + # but the relay peer is not + node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[3].peerId) == + false + + # all service peers are added to its service slot + check: + node.peerManager.serviceSlots[WakuStoreCodec].peerId == peers[0].peerId + node.peerManager.serviceSlots[WakuLegacyLightPushCodec].peerId == peers[1].peerId + node.peerManager.serviceSlots[WakuPeerExchangeCodec].peerId == peers[2].peerId + + # but the relay peer is not + node.peerManager.serviceSlots.hasKey(WakuRelayCodec) == false + + asyncTest "connectedPeers() returns expected number of connections per protocol": + # Create 4 nodes + let nodes = toSeq(0 ..< 4).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Start them with relay + filter + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + await allFutures(nodes.mapIt(it.mountFilter())) + + let pInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo()) + + # create some connections/streams + check: + # some relay connections + (await nodes[0].peerManager.connectPeer(pInfos[1])) == true + (await nodes[0].peerManager.connectPeer(pInfos[2])) == true + (await 
nodes[1].peerManager.connectPeer(pInfos[2])) == true + + (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() == + true + (await nodes[0].peerManager.dialPeer(pInfos[2], WakuFilterSubscribeCodec)).isSome() == + true + + # isolated dial creates a relay conn under the hood (libp2p behaviour) + (await nodes[2].peerManager.dialPeer(pInfos[3], WakuFilterSubscribeCodec)).isSome() == + true + + # assert physical connections + check: + nodes[0].peerManager.connectedPeers(WakuRelayCodec)[0].len == 0 + nodes[0].peerManager.connectedPeers(WakuRelayCodec)[1].len == 2 + + nodes[0].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 0 + nodes[0].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 2 + + nodes[1].peerManager.connectedPeers(WakuRelayCodec)[0].len == 1 + nodes[1].peerManager.connectedPeers(WakuRelayCodec)[1].len == 1 + + nodes[1].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 1 + nodes[1].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 0 + + nodes[2].peerManager.connectedPeers(WakuRelayCodec)[0].len == 2 + nodes[2].peerManager.connectedPeers(WakuRelayCodec)[1].len == 1 + + nodes[2].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 1 + nodes[2].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 1 + + nodes[3].peerManager.connectedPeers(WakuRelayCodec)[0].len == 1 + nodes[3].peerManager.connectedPeers(WakuRelayCodec)[1].len == 0 + + nodes[3].peerManager.connectedPeers(WakuFilterSubscribeCodec)[0].len == 1 + nodes[3].peerManager.connectedPeers(WakuFilterSubscribeCodec)[1].len == 0 + + asyncTest "getNumStreams() returns expected number of connections per protocol": + # Create 2 nodes + let nodes = toSeq(0 ..< 2).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Start them with relay + filter + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + await 
allFutures(nodes.mapIt(it.mountFilter())) + + let pInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo()) + + require: + # multiple streams are multiplexed over a single connection. + # note that a relay connection is created under the hood when dialing a peer (libp2p behaviour) + (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() == + true + (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() == + true + (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() == + true + (await nodes[0].peerManager.dialPeer(pInfos[1], WakuFilterSubscribeCodec)).isSome() == + true + + check: + nodes[0].peerManager.getNumStreams(WakuRelayCodec) == (1, 1) + nodes[0].peerManager.getNumStreams(WakuFilterSubscribeCodec) == (0, 4) + + nodes[1].peerManager.getNumStreams(WakuRelayCodec) == (1, 1) + nodes[1].peerManager.getNumStreams(WakuFilterSubscribeCodec) == (4, 0) + + test "selectPeer() returns the correct peer": + # Valid peer id missing the last digit + let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D" + + # Create peer manager + let pm = PeerManager.new( + switch = SwitchBuilder.new().withRng(rng).withMplex().withNoise().build(), + storage = nil, + ) + + # Create 3 peer infos + let peers = toSeq(1 .. 
3) + .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it)) + .filterIt(it.isOk()) + .mapIt(it.value) + require: + peers.len == 3 + + # Add a peer[0] to the peerstore + pm.switch.peerStore[AddressBook][peers[0].peerId] = peers[0].addrs + pm.switch.peerStore[ProtoBook][peers[0].peerId] = + @[WakuRelayCodec, WakuStoreCodec, WakuFilterSubscribeCodec] + + # When no service peers, we get one from the peerstore + let selectedPeer1 = pm.selectPeer(WakuStoreCodec) + check: + selectedPeer1.isSome() == true + selectedPeer1.get().peerId == peers[0].peerId + + # Same for other protocol + let selectedPeer2 = pm.selectPeer(WakuFilterSubscribeCodec) + check: + selectedPeer2.isSome() == true + selectedPeer2.get().peerId == peers[0].peerId + + # And return none if we dont have any peer for that protocol + let selectedPeer3 = pm.selectPeer(WakuLegacyLightPushCodec) + check: + selectedPeer3.isSome() == false + + # Now we add service peers for different protocols peer[1..3] + pm.addServicePeer(peers[1], WakuStoreCodec) + pm.addServicePeer(peers[2], WakuLegacyLightPushCodec) + + # We no longer get one from the peerstore. Slots are being used instead. 
+ let selectedPeer4 = pm.selectPeer(WakuStoreCodec) + check: + selectedPeer4.isSome() == true + selectedPeer4.get().peerId == peers[1].peerId + + let selectedPeer5 = pm.selectPeer(WakuLegacyLightPushCodec) + check: + selectedPeer5.isSome() == true + selectedPeer5.get().peerId == peers[2].peerId + + test "peer manager cant have more max connections than peerstore size": + # Peerstore size can't be smaller than max connections + let peerStoreSize = 20 + let maxConnections = 25 + + expect(Defect): + let pm = PeerManager.new( + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(peerStoreSize) + .withMaxConnections(maxConnections) + .build(), + storage = nil, + ) + + test "prunePeerStore() correctly removes peers to match max quota": + # Create peer manager + let pm = PeerManager.new( + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(25) + .withMaxConnections(20) + .build(), + maxFailedAttempts = 1, + storage = nil, + ) + + # Create 30 peers and add them to the peerstore + let peers = toSeq(1 .. 
30) + .mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/0/p2p/" & $PeerId.random().get())) + .filterIt(it.isOk()) + .mapIt(it.value) + for p in peers: + pm.addPeer(p) + + # Check that we have 30 peers in the peerstore + check: + pm.switch.peerStore.peers.len == 30 + + # fake that some peers failed to connected + pm.switch.peerStore[NumberFailedConnBook][peers[0].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[1].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[2].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[3].peerId] = 2 + pm.switch.peerStore[NumberFailedConnBook][peers[4].peerId] = 2 + + # fake that some peers are connected + pm.switch.peerStore[ConnectionBook][peers[5].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[8].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[15].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[18].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[24].peerId] = Connected + pm.switch.peerStore[ConnectionBook][peers[29].peerId] = Connected + + # Prune the peerstore (current=30, target=25) + pm.prunePeerStore() + + check: + # ensure peerstore was pruned + pm.switch.peerStore.peers.len == 25 + + # ensure connected peers were not pruned + pm.switch.peerStore.peers.anyIt(it.peerId == peers[5].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[8].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[15].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[18].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[24].peerId) + pm.switch.peerStore.peers.anyIt(it.peerId == peers[29].peerId) + + # ensure peers that failed were the first to be pruned + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[0].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[1].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[2].peerId) + not pm.switch.peerStore.peers.anyIt(it.peerId == peers[3].peerId) + not 
pm.switch.peerStore.peers.anyIt(it.peerId == peers[4].peerId) + + asyncTest "canBeConnected() returns correct value": + let pm = PeerManager.new( + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(25) + .withMaxConnections(20) + .build(), + initialBackoffInSec = 1, + # with InitialBackoffInSec = 1 backoffs are: 1, 2, 4, 8secs. + backoffFactor = 2, + maxFailedAttempts = 10, + storage = nil, + ) + var p1: PeerId + require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW" & "1") + + # new peer with no errors can be connected + check: + pm.canBeConnected(p1) == true + + # peer with ONE error that just failed + pm.switch.peerStore[NumberFailedConnBook][p1] = 1 + pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second) + # we cant connect right now + check: + pm.canBeConnected(p1) == false + + # but we can after the first backoff of 1 seconds + await sleepAsync(chronos.milliseconds(1200)) + check: + pm.canBeConnected(p1) == true + + # peer with TWO errors, we can connect until 2 seconds have passed + pm.switch.peerStore[NumberFailedConnBook][p1] = 2 + pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second) + + # cant be connected after 1 second + await sleepAsync(chronos.milliseconds(1000)) + check: + pm.canBeConnected(p1) == false + + # can be connected after 2 seconds + await sleepAsync(chronos.milliseconds(1200)) + check: + pm.canBeConnected(p1) == true + + # can't be connected if failed attempts are equal to maxFailedAttempts + pm.maxFailedAttempts = 2 + check: + pm.canBeConnected(p1) == false + + test "peer manager must fail if max backoff is over a week": + # Should result in overflow exception + expect(Defect): + let pm = PeerManager.new( + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(25) + .withMaxConnections(20) + .build(), + maxFailedAttempts = 150, + storage = nil, + ) + + # Should result in backoff > 1 
week + expect(Defect): + let pm = PeerManager.new( + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(25) + .withMaxConnections(20) + .build(), + maxFailedAttempts = 10, + storage = nil, + ) + + let pm = PeerManager.new( + switch = SwitchBuilder + .new() + .withRng(rng) + .withMplex() + .withNoise() + .withPeerStore(25) + .withMaxConnections(20) + .build(), + maxFailedAttempts = 5, + storage = nil, + ) + + asyncTest "colocationLimit is enforced by pruneConnsByIp()": + # Create 5 nodes + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Start them with relay + filter + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + + let pInfos = nodes.mapIt(it.switch.peerInfo.toRemotePeerInfo()) + + # force max 1 conn per ip + nodes[0].peerManager.colocationLimit = 1 + + # 2 in connections + discard await nodes[1].peerManager.connectPeer(pInfos[0]) + discard await nodes[2].peerManager.connectPeer(pInfos[0]) + await sleepAsync(chronos.milliseconds(500)) + + # but one is pruned + check nodes[0].peerManager.switch.connManager.getConnections().len == 1 + + # 2 out connections + discard await nodes[0].peerManager.connectPeer(pInfos[3]) + discard await nodes[0].peerManager.connectPeer(pInfos[4]) + await sleepAsync(chronos.milliseconds(500)) + + # they are also prunned + check nodes[0].peerManager.switch.connManager.getConnections().len == 1 + + # we should have 4 peers (2in/2out) but due to collocation limit + # they are pruned to max 1 + check: + nodes[0].peerManager.ipTable["127.0.0.1"].len == 1 + nodes[0].peerManager.switch.connManager.getConnections().len == 1 + nodes[0].peerManager.switch.peerStore.peers().len == 1 + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Retrieve peer that mounted peer exchange": + let + node1 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55048)) + node2 = 
newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55023)) + + await allFutures(node1.start(), node2.start()) + await allFutures(node1.mountRelay(), node2.mountRelay()) + await allFutures(node1.mountPeerExchange(), node2.mountPeerExchange()) + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var r = node1.peerManager.selectPeer(WakuRelayCodec) + assert r.isSome(), "could not retrieve peer mounting WakuRelayCodec" + + r = node1.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" diff --git a/third-party/nwaku/tests/test_peer_storage.nim b/third-party/nwaku/tests/test_peer_storage.nim new file mode 100644 index 0000000..6cb48d7 --- /dev/null +++ b/third-party/nwaku/tests/test_peer_storage.nim @@ -0,0 +1,97 @@ +{.used.} + +import std/options, testutils/unittests, eth/p2p/discoveryv5/enr, libp2p/crypto/crypto +import + waku/[ + common/databases/db_sqlite, + node/peer_manager/peer_manager, + node/peer_manager/peer_store/waku_peer_storage, + waku_enr, + ], + ./testlib/wakucore + +suite "Peer Storage": + test "Store, replace and retrieve from persistent peer storage": + let + database = SqliteDatabase.new(":memory:").tryGet() + storage = WakuPeerStorage.new(database)[] + + # Test Peer + peerLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet() + peerKey = generateSecp256k1Key() + peer = PeerInfo.new(peerKey, @[peerLoc]) + peerProto = "/waku/2/default-waku/codec" + connectedness = Connectedness.CanConnect + disconn = 999999 + pubsubTopics = @["/waku/2/rs/2/0", "/waku/2/rs/2/1"] + + # Create ENR + var enrBuilder = EnrBuilder.init(peerKey) + enrBuilder.withShardedTopics(pubsubTopics).expect("Valid topics") + let record = enrBuilder.build().expect("Valid record") + + let stored = RemotePeerInfo( + peerId: peer.peerId, + addrs: @[peerLoc], + enr: some(record), + protocols: @[peerProto], + publicKey: peerKey.getPublicKey().tryGet(), + connectedness: 
connectedness, + disconnectTime: disconn, + ) + + defer: + storage.close() + + # Test insert and retrieve + + require storage.put(stored).isOk + + var responseCount = 0 + + # Fetched variable from callback + var resStoredInfo: RemotePeerInfo + + proc data(storedInfo: RemotePeerInfo) = + responseCount += 1 + resStoredInfo = storedInfo + + let res = storage.getAll(data) + + check: + res.isErr == false + responseCount == 1 + resStoredInfo.peerId == peer.peerId + resStoredInfo.addrs == @[peerLoc] + resStoredInfo.protocols == @[peerProto] + resStoredInfo.publicKey == peerKey.getPublicKey().tryGet() + resStoredInfo.connectedness == connectedness + resStoredInfo.disconnectTime == disconn + + assert resStoredInfo.enr.isSome(), "The ENR info wasn't properly stored" + check: + resStoredInfo.enr.get() == record + + # Test replace and retrieve (update an existing entry) + stored.connectedness = CannotConnect + stored.disconnectTime = disconn + 10 + stored.enr = none(Record) + require storage.put(stored).isOk + + responseCount = 0 + proc replacedData(storedInfo: RemotePeerInfo) = + responseCount += 1 + resStoredInfo = storedInfo + + let repRes = storage.getAll(replacedData) + + check: + repRes.isErr == false + responseCount == 1 + resStoredInfo.peerId == peer.peerId + resStoredInfo.addrs == @[peerLoc] + resStoredInfo.protocols == @[peerProto] + resStoredInfo.publicKey == peerKey.getPublicKey().tryGet() + resStoredInfo.connectedness == Connectedness.CannotConnect + resStoredInfo.disconnectTime == disconn + 10 + resStoredInfo.enr.isNone() diff --git a/third-party/nwaku/tests/test_peer_store_extended.nim b/third-party/nwaku/tests/test_peer_store_extended.nim new file mode 100644 index 0000000..16926c7 --- /dev/null +++ b/third-party/nwaku/tests/test_peer_store_extended.nim @@ -0,0 +1,355 @@ +{.used.} + +import + std/[sequtils, times], + chronos, + libp2p/crypto/crypto, + libp2p/peerid, + libp2p/peerstore, + libp2p/multiaddress, + testutils/unittests +import + waku/ + 
[node/peer_manager/peer_manager, node/peer_manager/waku_peer_store, waku_core/peers], + ./testlib/wakucore + +suite "Extended nim-libp2p Peer Store": + # Valid peerId missing the last digit. Useful for creating new peerIds + # basePeerId & "1" + # basePeerId & "2" + let basePeerId = "QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW" + + setup: + # Setup a nim-libp2p peerstore with some peers + let peerStore = PeerStore.new(nil, capacity = 50) + var p1, p2, p3, p4, p5, p6: PeerId + + # create five peers basePeerId + [1-5] + require p1.init(basePeerId & "1") + require p2.init(basePeerId & "2") + require p3.init(basePeerId & "3") + require p4.init(basePeerId & "4") + require p5.init(basePeerId & "5") + + # peer6 is not part of the peerstore + require p6.init(basePeerId & "6") + + # Peer1: Connected + peerStore.addPeer( + RemotePeerInfo.init( + peerId = p1, + addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/1").tryGet()], + protocols = @["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"], + publicKey = generateEcdsaKeyPair().pubkey, + agent = "nwaku", + protoVersion = "protoVersion1", + connectedness = Connected, + disconnectTime = 0, + origin = Discv5, + direction = Inbound, + lastFailedConn = Moment.init(1001, Second), + numberFailedConn = 1, + ) + ) + + # Peer2: Connected + peerStore.addPeer( + RemotePeerInfo.init( + peerId = p2, + addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/2").tryGet()], + protocols = @["/vac/waku/relay/2.0.0", "/vac/waku/store/2.0.0"], + publicKey = generateEcdsaKeyPair().pubkey, + agent = "nwaku", + protoVersion = "protoVersion2", + connectedness = Connected, + disconnectTime = 0, + origin = Discv5, + direction = Inbound, + lastFailedConn = Moment.init(1002, Second), + numberFailedConn = 2, + ) + ) + + # Peer3: Connected + peerStore.addPeer( + RemotePeerInfo.init( + peerId = p3, + addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/3").tryGet()], + protocols = @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"], + publicKey = 
generateEcdsaKeyPair().pubkey, + agent = "gowaku", + protoVersion = "protoVersion3", + connectedness = Connected, + disconnectTime = 0, + origin = Discv5, + direction = Inbound, + lastFailedConn = Moment.init(1003, Second), + numberFailedConn = 3, + ) + ) + + # Peer4: Added but never connected + peerStore.addPeer( + RemotePeerInfo.init( + peerId = p4, + addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/4").tryGet()], + protocols = @[], + publicKey = generateEcdsaKeyPair().pubkey, + agent = "", + protoVersion = "", + connectedness = NotConnected, + disconnectTime = 0, + origin = Discv5, + direction = Inbound, + lastFailedConn = Moment.init(1004, Second), + numberFailedConn = 4, + ) + ) + + # Peer5: Connected + peerStore.addPeer( + RemotePeerInfo.init( + peerId = p5, + addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/5").tryGet()], + protocols = @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"], + publicKey = generateEcdsaKeyPair().pubkey, + agent = "gowaku", + protoVersion = "protoVersion5", + connectedness = CanConnect, + disconnectTime = 1000, + origin = Discv5, + direction = Outbound, + lastFailedConn = Moment.init(1005, Second), + numberFailedConn = 5, + ) + ) + + test "get() returns the correct StoredInfo for a given PeerId": + # When + let peer1 = peerStore.getPeer(p1) + let peer6 = peerStore.getPeer(p6) + + # Then + check: + # regression on nim-libp2p fields + peer1.peerId == p1 + peer1.addrs == @[MultiAddress.init("/ip4/127.0.0.1/tcp/1").tryGet()] + peer1.protocols == @["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"] + peer1.agent == "nwaku" + peer1.protoVersion == "protoVersion1" + + # our extended fields + peer1.connectedness == Connected + peer1.disconnectTime == 0 + peer1.origin == Discv5 + peer1.numberFailedConn == 1 + peer1.lastFailedConn == Moment.init(1001, Second) + + check: + # fields are empty, not part of the peerstore + peer6.peerId == p6 + peer6.addrs.len == 0 + peer6.protocols.len == 0 + peer6.agent == default(string) + 
peer6.protoVersion == default(string) + peer6.connectedness == default(Connectedness) + peer6.disconnectTime == default(int) + peer6.origin == default(PeerOrigin) + peer6.numberFailedConn == default(int) + peer6.lastFailedConn == default(Moment) + + test "peers() returns all StoredInfo of the PeerStore": + # When + let allPeers = peerStore.peers() + + # Then + check: + allPeers.len == 5 + allPeers.anyIt(it.peerId == p1) + allPeers.anyIt(it.peerId == p2) + allPeers.anyIt(it.peerId == p3) + allPeers.anyIt(it.peerId == p4) + allPeers.anyIt(it.peerId == p5) + + let p3 = allPeers.filterIt(it.peerId == p3)[0] + + check: + # regression on nim-libp2p fields + p3.addrs == @[MultiAddress.init("/ip4/127.0.0.1/tcp/3").tryGet()] + p3.protocols == @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"] + p3.agent == "gowaku" + p3.protoVersion == "protoVersion3" + + # our extended fields + p3.connectedness == Connected + p3.disconnectTime == 0 + p3.origin == Discv5 + p3.numberFailedConn == 3 + p3.lastFailedConn == Moment.init(1003, Second) + + test "peers() returns all StoredInfo matching a specific protocol": + # When + let storePeers = peerStore.peers("/vac/waku/store/2.0.0") + let lpPeers = peerStore.peers("/vac/waku/lightpush/2.0.0") + + # Then + check: + # Only p1 and p2 support that protocol + storePeers.len == 2 + storePeers.anyIt(it.peerId == p1) + storePeers.anyIt(it.peerId == p2) + + check: + # Only p3 supports that protocol + lpPeers.len == 1 + lpPeers.anyIt(it.peerId == p3) + lpPeers[0].protocols == + @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"] + + test "peers() returns all StoredInfo matching a given protocolMatcher": + # When + let pMatcherStorePeers = peerStore.peers(protocolMatcher("/vac/waku/store/2.0.0")) + let pMatcherSwapPeers = peerStore.peers(protocolMatcher("/vac/waku/swap/2.0.0")) + + # Then + check: + # peers: 1,2,3,5 match /vac/waku/store/2.0.0/xxx + pMatcherStorePeers.len == 4 + pMatcherStorePeers.anyIt(it.peerId == p1) + 
pMatcherStorePeers.anyIt(it.peerId == p2) + pMatcherStorePeers.anyIt(it.peerId == p3) + pMatcherStorePeers.anyIt(it.peerId == p5) + + check: + pMatcherStorePeers.filterIt(it.peerId == p1)[0].protocols == + @["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"] + pMatcherStorePeers.filterIt(it.peerId == p2)[0].protocols == + @["/vac/waku/relay/2.0.0", "/vac/waku/store/2.0.0"] + pMatcherStorePeers.filterIt(it.peerId == p3)[0].protocols == + @["/vac/waku/lightpush/2.0.0", "/vac/waku/store/2.0.0-beta1"] + pMatcherStorePeers.filterIt(it.peerId == p5)[0].protocols == + @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"] + + check: + pMatcherSwapPeers.len == 1 + pMatcherSwapPeers.anyIt(it.peerId == p5) + pMatcherSwapPeers[0].protocols == + @["/vac/waku/swap/2.0.0", "/vac/waku/store/2.0.0-beta2"] + + test "toRemotePeerInfo() converts a StoredInfo to a RemotePeerInfo": + # Given + let peer1 = peerStore.getPeer(p1) + + # Then + check: + peer1.peerId == p1 + peer1.addrs == @[MultiAddress.init("/ip4/127.0.0.1/tcp/1").tryGet()] + peer1.protocols == @["/vac/waku/relay/2.0.0-beta1", "/vac/waku/store/2.0.0"] + + test "connectedness() returns the connection status of a given PeerId": + check: + # peers tracked in the peerstore + peerStore.connectedness(p1) == Connected + peerStore.connectedness(p2) == Connected + peerStore.connectedness(p3) == Connected + peerStore.connectedness(p4) == NotConnected + peerStore.connectedness(p5) == CanConnect + + # peer not tracked in the peerstore + peerStore.connectedness(p6) == NotConnected + + test "hasPeer() returns true if the peer supports a given protocol": + check: + peerStore.hasPeer(p1, "/vac/waku/relay/2.0.0-beta1") + peerStore.hasPeer(p1, "/vac/waku/store/2.0.0") + not peerStore.hasPeer(p1, "it-does-not-contain-this-protocol") + + peerStore.hasPeer(p2, "/vac/waku/relay/2.0.0") + peerStore.hasPeer(p2, "/vac/waku/store/2.0.0") + + peerStore.hasPeer(p3, "/vac/waku/lightpush/2.0.0") + peerStore.hasPeer(p3, 
"/vac/waku/store/2.0.0-beta1") + + # we have no knowledge of p4 supported protocols + not peerStore.hasPeer(p4, "/vac/waku/lightpush/2.0.0") + + peerStore.hasPeer(p5, "/vac/waku/swap/2.0.0") + peerStore.hasPeer(p5, "/vac/waku/store/2.0.0-beta2") + not peerStore.hasPeer(p5, "another-protocol-not-contained") + + # peer 6 is not in the PeerStore + not peerStore.hasPeer(p6, "/vac/waku/lightpush/2.0.0") + + test "hasPeers() returns true if any peer in the PeerStore supports a given protocol": + # Match specific protocols + check: + peerStore.hasPeers("/vac/waku/relay/2.0.0-beta1") + peerStore.hasPeers("/vac/waku/store/2.0.0") + peerStore.hasPeers("/vac/waku/lightpush/2.0.0") + not peerStore.hasPeers("/vac/waku/does-not-exist/2.0.0") + + # Match protocolMatcher protocols + check: + peerStore.hasPeers(protocolMatcher("/vac/waku/store/2.0.0")) + not peerStore.hasPeers(protocolMatcher("/vac/waku/does-not-exist/2.0.0")) + + test "getPeersByDirection()": + # When + let inPeers = peerStore.getPeersByDirection(Inbound) + let outPeers = peerStore.getPeersByDirection(Outbound) + + # Then + check: + inPeers.len == 4 + outPeers.len == 1 + + test "getDisconnectedPeers()": + # When + let disconnedtedPeers = peerStore.getDisconnectedPeers() + + # Then + check: + disconnedtedPeers.len == 2 + disconnedtedPeers.anyIt(it.peerId == p4) + disconnedtedPeers.anyIt(it.peerId == p5) + not disconnedtedPeers.anyIt(it.connectedness == Connected) + + test "del() successfully deletes waku custom books": + # Given + let peerStore = PeerStore.new(nil, capacity = 5) + var p1: PeerId + require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW1") + + let remotePeer = RemotePeerInfo.init( + peerId = p1, + addrs = @[MultiAddress.init("/ip4/127.0.0.1/tcp/1").tryGet()], + protocols = @["proto"], + publicKey = generateEcdsaKeyPair().pubkey, + agent = "agent", + protoVersion = "version", + lastFailedConn = Moment.init(getTime().toUnix, Second), + numberFailedConn = 1, + connectedness = Connected, + 
disconnectTime = 0, + origin = Discv5, + direction = Inbound, + ) + + peerStore.addPeer(remotePeer) + + # When + peerStore.delete(p1) + + # Then + check: + peerStore[AddressBook][p1] == newSeq[MultiAddress](0) + peerStore[ProtoBook][p1] == newSeq[string](0) + peerStore[KeyBook][p1] == default(PublicKey) + peerStore[AgentBook][p1] == "" + peerStore[ProtoVersionBook][p1] == "" + peerStore[LastFailedConnBook][p1] == default(Moment) + peerStore[NumberFailedConnBook][p1] == 0 + peerStore[ConnectionBook][p1] == default(Connectedness) + peerStore[DisconnectBook][p1] == 0 + peerStore[SourceBook][p1] == default(PeerOrigin) + peerStore[DirectionBook][p1] == default(PeerDirection) diff --git a/third-party/nwaku/tests/test_relay_peer_exchange.nim b/third-party/nwaku/tests/test_relay_peer_exchange.nim new file mode 100644 index 0000000..84976bd --- /dev/null +++ b/third-party/nwaku/tests/test_relay_peer_exchange.nim @@ -0,0 +1,111 @@ +{.used.} + +import + std/[sequtils, options], + testutils/unittests, + chronos, + libp2p/peerid, + libp2p/protocols/pubsub/gossipsub + +import waku/waku_core, waku/waku_node, ./testlib/wakucore, ./testlib/wakunode + +procSuite "Relay (GossipSub) Peer Exchange": + asyncTest "Mount relay without peer exchange handler": + # Given two nodes + let + listenAddress = parseIpAddress("0.0.0.0") + port = Port(0) + node1Key = generateSecp256k1Key() + node1 = newTestWakuNode(node1Key, listenAddress, port) + node2Key = generateSecp256k1Key() + node2 = + newTestWakuNode(node2Key, listenAddress, port, sendSignedPeerRecord = true) + + # When both client and server mount relay without a handler + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay(none(RoutingRecordsHandler))).isOkOr: + assert false, "Failed to mount relay" + + # Then the relays are mounted without a handler + check: + node1.wakuRelay.parameters.enablePX == false + node1.wakuRelay.routingRecordsHandler.len == 0 + node2.wakuRelay.parameters.enablePX 
== false + node2.wakuRelay.routingRecordsHandler.len == 0 + + asyncTest "Mount relay with peer exchange handler": + ## Given three nodes + # Create nodes and ENR. These will be added to the discoverable list + let + bindIp = parseIpAddress("0.0.0.0") + port = Port(0) + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, bindIp, port) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, bindIp, port, sendSignedPeerRecord = true) + nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, bindIp, port, sendSignedPeerRecord = true) + + # Given some peer exchange handlers + proc emptyPeerExchangeHandler( + peer: PeerId, topic: string, peers: seq[RoutingRecordsPair] + ) {.gcsafe.} = + discard + + var completionFut = newFuture[bool]() + proc peerExchangeHandler( + peer: PeerId, topic: string, peers: seq[RoutingRecordsPair] + ) {.gcsafe.} = + ## Handle peers received via gossipsub peer exchange + let peerRecords = peers.mapIt(it.record.get()) + + check: + # Node 3 is informed of node 2 via peer exchange + peer == node1.switch.peerInfo.peerId + topic == DefaultPubsubTopic + peerRecords.countIt(it.peerId == node2.switch.peerInfo.peerId) == 1 + + if (not completionFut.completed()): + completionFut.complete(true) + + let + emptyPeerExchangeHandle: RoutingRecordsHandler = emptyPeerExchangeHandler + peerExchangeHandle: RoutingRecordsHandler = peerExchangeHandler + + # Givem the nodes mount relay with a peer exchange handler + (await node1.mountRelay(some(emptyPeerExchangeHandle))).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay(some(emptyPeerExchangeHandle))).isOkOr: + assert false, "Failed to mount relay" + (await node3.mountRelay(some(peerExchangeHandle))).isOkOr: + assert false, "Failed to mount relay" + + # Ensure that node1 prunes all peers after the first connection + node1.wakuRelay.parameters.dHigh = 1 + + await allFutures([node1.start(), node2.start(), node3.start()]) + + # The three nodes 
should be subscribed to the same shard + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node3.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + # When nodes are connected + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + await node3.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()]) + + # Verify that the handlePeerExchange was called (node3) + check: + (await completionFut.withTimeout(5.seconds)) == true + + # Clean up + await allFutures([node1.stop(), node2.stop(), node3.stop()]) diff --git a/third-party/nwaku/tests/test_utils_compat.nim b/third-party/nwaku/tests/test_utils_compat.nim new file mode 100644 index 0000000..121efa4 --- /dev/null +++ b/third-party/nwaku/tests/test_utils_compat.nim @@ -0,0 +1,48 @@ +{.used.} + +import testutils/unittests +import stew/results, waku/waku_core/message, waku/waku_core/time, ./testlib/common + +suite "Waku Payload": + test "Encode/Decode waku message with timestamp": + ## Test encoding and decoding of the timestamp field of a WakuMessage + + ## Given + let + version = 0'u32 + payload = @[byte 0, 1, 2] + timestamp = Timestamp(10) + msg = WakuMessage(payload: payload, version: version, timestamp: timestamp) + + ## When + let pb = msg.encode() + let msgDecoded = WakuMessage.decode(pb.buffer) + + ## Then + check: + msgDecoded.isOk() + + let timestampDecoded = msgDecoded.value.timestamp + check: + timestampDecoded == timestamp + + test "Encode/Decode waku message without timestamp": + ## Test the encoding and decoding of a WakuMessage 
with an empty timestamp field + + ## Given + let + version = 0'u32 + payload = @[byte 0, 1, 2] + msg = WakuMessage(payload: payload, version: version) + + ## When + let pb = msg.encode() + let msgDecoded = WakuMessage.decode(pb.buffer) + + ## Then + check: + msgDecoded.isOk() + + let timestampDecoded = msgDecoded.value.timestamp + check: + timestampDecoded == Timestamp(0) diff --git a/third-party/nwaku/tests/test_waku.nim b/third-party/nwaku/tests/test_waku.nim new file mode 100644 index 0000000..defec12 --- /dev/null +++ b/third-party/nwaku/tests/test_waku.nim @@ -0,0 +1,91 @@ +{.used.} + +import chronos, testutils/unittests, std/options + +import waku + +suite "Waku API - Create node": + asyncTest "Create node with minimal configuration": + ## Given + let nodeConfig = + NodeConfig.init(wakuConfig = WakuConfig.init(entryNodes = @[], clusterId = 1)) + + # This is the actual minimal config but as the node auto-start, it is not suitable for tests + # NodeConfig.init(ethRpcEndpoints = @["http://someaddress"]) + + ## When + let node = (await createNode(nodeConfig)).valueOr: + raiseAssert error + + ## Then + check: + not node.isNil() + node.conf.clusterId == 1 + node.conf.relay == true + + asyncTest "Create node with full configuration": + ## Given + let nodeConfig = NodeConfig.init( + mode = Core, + wakuConfig = WakuConfig.init( + entryNodes = + @[ + "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" + ], + staticStoreNodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + ], + clusterId = 99, + autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16), + messageValidation = + MessageValidation(maxMessageSize: "1024 KiB", rlnConfig: none(RlnConfig)), + ), + ) + + ## When + let node = (await createNode(nodeConfig)).valueOr: + raiseAssert error + + ## Then + check: + not node.isNil() + 
node.conf.clusterId == 99 + node.conf.shardingConf.numShardsInCluster == 16 + node.conf.maxMessageSizeBytes == 1024'u64 * 1024'u64 + node.conf.staticNodes.len == 1 + node.conf.relay == true + node.conf.lightPush == true + node.conf.peerExchangeService == true + node.conf.rendezvous == true + + asyncTest "Create node with mixed entry nodes (enrtree, multiaddr)": + ## Given + let nodeConfig = NodeConfig.init( + mode = Core, + wakuConfig = WakuConfig.init( + entryNodes = + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + ], + clusterId = 42, + ), + ) + + ## When + let node = (await createNode(nodeConfig)).valueOr: + raiseAssert error + + ## Then + check: + not node.isNil() + node.conf.clusterId == 42 + # ENRTree should go to DNS discovery + node.conf.dnsDiscoveryConf.isSome() + node.conf.dnsDiscoveryConf.get().enrTreeUrl == + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" + # Multiaddr should go to static nodes + node.conf.staticNodes.len == 1 + node.conf.staticNodes[0] == + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" diff --git a/third-party/nwaku/tests/test_waku_dnsdisc.nim b/third-party/nwaku/tests/test_waku_dnsdisc.nim new file mode 100644 index 0000000..758bdb3 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_dnsdisc.nim @@ -0,0 +1,117 @@ +{.used.} + +import + std/[sequtils, tables], + results, + stew/base32, + testutils/unittests, + chronicles, + chronos, + libp2p/crypto/crypto, + eth/keys, + dnsdisc/builder +import + waku/node/peer_manager, + waku/waku_node, + waku/discovery/waku_dnsdisc, + ./testlib/common, + ./testlib/wakucore, + ./testlib/wakunode + +suite "Waku DNS Discovery": + asyncTest "Waku DNS Discovery end-to-end": + ## Tests integrated DNS discovery, from building + ## the tree to connecting to discovered nodes + + # 
Create nodes and ENR. These will be added to the discoverable list + let + bindIp = parseIpAddress("0.0.0.0") + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, bindIp, Port(63500)) + enr1 = node1.enr + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, bindIp, Port(63502)) + enr2 = node2.enr + nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, bindIp, Port(63503)) + enr3 = node3.enr + + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await allFutures([node1.start(), node2.start(), node3.start()]) + + # Build and sign tree + var tree = buildTree( + 1, # Seq no + @[enr1, enr2, enr3], # ENR entries + @[], + ) + .get() # No link entries + + let treeKeys = keys.KeyPair.random(rng[]) + + # Sign tree + check: + tree.signTree(treeKeys.seckey()).isOk() + + # Create TXT records at domain + let + domain = "testnodes.aq" + zoneTxts = tree.buildTXT(domain).get() + username = Base32.encode(treeKeys.pubkey().toRawCompressed()) + location = LinkPrefix & username & "@" & domain + # See EIP-1459: https://eips.ethereum.org/EIPS/eip-1459 + + # Create a resolver for the domain + + proc resolver(domain: string): Future[string] {.async, gcsafe.} = + return zoneTxts[domain] + + # Create Waku DNS discovery client on a new Waku v2 node using the resolver + + let + nodeKey4 = generateSecp256k1Key() + node4 = newTestWakuNode(nodeKey4, bindIp, Port(63504)) + + (await node4.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await node4.start() + + var wakuDnsDisc = WakuDnsDiscovery.init(location, resolver).get() + + let res = await wakuDnsDisc.findPeers() + + check: + # We have discovered all three nodes + res.isOk() + res[].len == 3 + res[].mapIt(it.peerId).contains(node1.switch.peerInfo.peerId) + 
res[].mapIt(it.peerId).contains(node2.switch.peerInfo.peerId) + res[].mapIt(it.peerId).contains(node3.switch.peerInfo.peerId) + + # Connect to discovered nodes + await node4.connectToNodes(res[]) + + check: + # We have successfully connected to all discovered nodes + node4.peerManager.switch.peerStore.peers().anyIt( + it.peerId == node1.switch.peerInfo.peerId + ) + node4.peerManager.switch.peerStore.connectedness(node1.switch.peerInfo.peerId) == + Connected + node4.peerManager.switch.peerStore.peers().anyIt( + it.peerId == node2.switch.peerInfo.peerId + ) + node4.peerManager.switch.peerStore.connectedness(node2.switch.peerInfo.peerId) == + Connected + node4.peerManager.switch.peerStore.peers().anyIt( + it.peerId == node3.switch.peerInfo.peerId + ) + node4.peerManager.switch.peerStore.connectedness(node3.switch.peerInfo.peerId) == + Connected + + await allFutures([node1.stop(), node2.stop(), node3.stop(), node4.stop()]) diff --git a/third-party/nwaku/tests/test_waku_enr.nim b/third-party/nwaku/tests/test_waku_enr.nim new file mode 100644 index 0000000..2ffff5e --- /dev/null +++ b/third-party/nwaku/tests/test_waku_enr.nim @@ -0,0 +1,448 @@ +{.used.} + +import std/[options, sequtils], results, testutils/unittests +import waku/waku_core, waku/waku_enr, ./testlib/wakucore + +suite "Waku ENR - Capabilities bitfield": + test "check capabilities support": + ## Given + let bitfield: CapabilitiesBitfield = 0b0000_1101u8 # Lightpush, Filter, Relay + + ## Then + check: + bitfield.supportsCapability(Capabilities.Relay) + not bitfield.supportsCapability(Capabilities.Store) + bitfield.supportsCapability(Capabilities.Filter) + bitfield.supportsCapability(Capabilities.Lightpush) + + test "bitfield to capabilities list": + ## Given + let bitfield = CapabilitiesBitfield.init( + relay = true, store = false, lightpush = true, filter = true + ) + + ## When + let caps = bitfield.toCapabilities() + + ## Then + check: + caps == @[Capabilities.Relay, Capabilities.Filter, 
Capabilities.Lightpush] + + test "encode and decode record with capabilities field (EnrBuilder ext)": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + ## When + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + builder.withWakuCapabilities(Capabilities.Relay, Capabilities.Store) + + let recordRes = builder.build() + + ## Then + check recordRes.isOk() + let record = recordRes.tryGet() + + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let bitfieldOpt = typedRecord.value.waku2 + check bitfieldOpt.isSome() + + let bitfield = bitfieldOpt.get() + check: + bitfield.toCapabilities() == @[Capabilities.Relay, Capabilities.Store] + + test "cannot decode capabilities from record": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let record = EnrBuilder.init(enrPrivKey, enrSeqNum).build().tryGet() + + ## When + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let bitfieldOpt = typedRecord.value.waku2 + + ## Then + check bitfieldOpt.isNone() + + test "check capabilities on a waku node record": + ## Given + let wakuRecord = + "-Hy4QC73_E3B_FkZhsOakaD4pHe-U--UoGASdG9N0F3SFFUDY_jdQbud8" & + "EXVyrlOZ5pZ7VYFBDPMRCENwy87Lh74dFIBgmlkgnY0iXNlY3AyNTZrMaECvNt1jIWbWGp" & + "AWWdlLGYm1E1OjlkQk3ONoxDC5sfw8oOFd2FrdTID" + + ## When + var record: Record + require waku_enr.fromBase64(record, wakuRecord) + + ## Then + let typedRecordRes = record.toTyped() + require typedRecordRes.isOk() + + let bitfieldOpt = typedRecordRes.value.waku2 + require bitfieldOpt.isSome() + + let bitfield = bitfieldOpt.get() + check: + bitfield.supportsCapability(Capabilities.Relay) == true + bitfield.supportsCapability(Capabilities.Store) == true + bitfield.supportsCapability(Capabilities.Filter) == false + bitfield.supportsCapability(Capabilities.Lightpush) == false + bitfield.toCapabilities() == @[Capabilities.Relay, Capabilities.Store] + + test "get capabilities codecs from record": + ## Given + let + 
enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + ## When + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + builder.withWakuCapabilities(Capabilities.Relay, Capabilities.Store) + + let recordRes = builder.build() + + ## Then + assert recordRes.isOk(), $recordRes.error + let record = recordRes.tryGet() + + let codecs = record.getCapabilitiesCodecs() + check: + codecs.len == 2 + codecs.contains(WakuRelayCodec) + codecs.contains(WakuStoreCodec) + + test "check capabilities on a non-waku node record": + ## Given + # non waku enr, i.e. Ethereum one + let nonWakuEnr = + "enr:-KG4QOtcP9X1FbIMOe17QNMKqDxCpm14jcX5tiOE4_TyMrFqbmhPZHK_ZPG2G" & + "xb1GE2xdtodOfx9-cgvNtxnRyHEmC0ghGV0aDKQ9aX9QgAAAAD__________4JpZIJ2NIJpcIQDE8KdiXNl" & + "Y3AyNTZrMaEDhpehBDbZjM_L9ek699Y7vhUJ-eAdMyQW_Fil522Y0fODdGNwgiMog3VkcIIjKA" + + ## When + var record: Record + require waku_enr.fromURI(record, nonWakuEnr) + + ## Then + let typedRecordRes = record.toTyped() + require typedRecordRes.isOk() + + let bitfieldOpt = typedRecordRes.value.waku2 + check bitfieldOpt.isNone() + + check: + record.getCapabilities() == [] + record.supportsCapability(Capabilities.Relay) == false + record.supportsCapability(Capabilities.Store) == false + record.supportsCapability(Capabilities.Filter) == false + record.supportsCapability(Capabilities.Lightpush) == false + +suite "Waku ENR - Multiaddresses": + test "decode record with multiaddrs field": + ## Given + let enrUri = + "enr:-QEMuEAs8JmmyUI3b9v_ADqYtELHUYAsAMS21lA2BMtrzF86tVmyy9cCrhmzfHGH" & + "x_g3nybn7jIRybzXTGNj3C2KzrriAYJpZIJ2NIJpcISf3_Jeim11bHRpYWRkcnO4XAAr" & + "NiZzdG9yZS0wMS5kby1hbXMzLnN0YXR1cy5wcm9kLnN0YXR1cy5pbQZ2XwAtNiZzdG9y" & + "ZS0wMS5kby1hbXMzLnN0YXR1cy5wcm9kLnN0YXR1cy5pbQYBu94DgnJzjQAQBQABACAA" & + "QACAAQCJc2VjcDI1NmsxoQLfoaQH3oSYW59yxEBfeAZbltmUnC4BzYkHqer2VQMTyoN0" & + "Y3CCdl-DdWRwgiMohXdha3UyAw" + + var record: Record + require record.fromURI(enrUri) + + # TODO: get rid of wakuv2 here too. 
Needt to generate a ne ENR record + let + expectedAddr1 = MultiAddress + .init("/dns4/store-01.do-ams3.status.prod.status.im/tcp/30303") + .get() + expectedAddr2 = MultiAddress + .init("/dns4/store-01.do-ams3.status.prod.status.im/tcp/443/wss") + .get() + + ## When + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let multiaddrsOpt = typedRecord.value.multiaddrs + + ## Then + check multiaddrsOpt.isSome() + let multiaddrs = multiaddrsOpt.get() + + check: + multiaddrs.len == 2 + multiaddrs.contains(expectedAddr1) + multiaddrs.contains(expectedAddr2) + + test "encode and decode record with multiaddrs field (EnrBuilder ext)": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let + addr1 = MultiAddress.init("/ip4/127.0.0.1/tcp/80/ws").get() + addr2 = MultiAddress.init("/ip4/127.0.0.1/tcp/443/wss").get() + + ## When + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + builder.withMultiaddrs(addr1, addr2) + + let recordRes = builder.build() + + require recordRes.isOk() + let record = recordRes.tryGet() + + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let multiaddrsOpt = typedRecord.value.multiaddrs + + ## Then + check multiaddrsOpt.isSome() + + let multiaddrs = multiaddrsOpt.get() + check: + multiaddrs.len == 2 + multiaddrs.contains(addr1) + multiaddrs.contains(addr2) + + test "cannot decode multiaddresses from record": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let record = EnrBuilder.init(enrPrivKey, enrSeqNum).build().tryGet() + + ## When + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let fieldOpt = typedRecord.value.multiaddrs + + ## Then + check fieldOpt.isNone() + + test "encode and decode record with multiaddresses field - strip peer ID": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let + addr1 = MultiAddress + .init( + 
"/ip4/127.0.0.1/tcp/80/ws/p2p/16Uiu2HAm4v86W3bmT1BiH6oSPzcsSr31iDQpSN5Qa882BCjjwgrD" + ) + .get() + addr2 = MultiAddress.init("/ip4/127.0.0.1/tcp/443/wss").get() + + let expectedAddr1 = MultiAddress.init("/ip4/127.0.0.1/tcp/80/ws").get() + + ## When + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + builder.withMultiaddrs(addr1, addr2) + + let recordRes = builder.build() + + require recordRes.isOk() + let record = recordRes.tryGet() + + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let multiaddrsOpt = typedRecord.value.multiaddrs + + ## Then + check multiaddrsOpt.isSome() + + let multiaddrs = multiaddrsOpt.get() + check: + multiaddrs.contains(expectedAddr1) + multiaddrs.contains(addr2) + +suite "Waku ENR - Relay static sharding": + test "new relay shards object with single invalid shard id": + ## Given + let + clusterId: uint16 = 22 + shard: uint16 = 1024 + + ## When + let shardsTopics = RelayShards.init(clusterId, shard) + + ## Then + assert shardsTopics.isErr(), $shardsTopics.get() + + test "new relay shards object with single invalid shard id in list": + ## Given + let + clusterId: uint16 = 22 + shardIds: seq[uint16] = @[1u16, 1u16, 2u16, 3u16, 5u16, 8u16, 1024u16] + + ## When + let shardsTopics = RelayShards.init(clusterId, shardIds) + + ## Then + assert shardsTopics.isErr(), $shardsTopics.get() + + test "new relay shards object with single valid shard id": + ## Given + let + clusterId: uint16 = 22 + shardId: uint16 = 1 + + let shard = RelayShard(clusterId: clusterId, shardId: shardId) + + ## When + let shardsTopics = RelayShards.init(clusterId, shardId).expect("Valid Shards") + + ## Then + check: + shardsTopics.clusterId == clusterId + shardsTopics.shardIds == @[1u16] + + let shards = shardsTopics.topics.mapIt($it) + check: + shards == @[$shard] + + check: + shardsTopics.contains(clusterId, shardId) + not shardsTopics.contains(clusterId, 33u16) + not shardsTopics.contains(20u16, 33u16) + + shardsTopics.contains(shard) + 
shardsTopics.contains("/waku/2/rs/22/1") + + test "new relay shards object with repeated but valid shard ids": + ## Given + let + clusterId: uint16 = 22 + shardIds: seq[uint16] = @[1u16, 2u16, 2u16, 3u16, 3u16, 3u16] + + ## When + let shardsTopics = RelayShards.init(clusterId, shardIds).expect("Valid Shards") + + ## Then + check: + shardsTopics.clusterId == clusterId + shardsTopics.shardIds == @[1u16, 2u16, 3u16] + + test "cannot decode relay shards from record if not present": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let record = EnrBuilder.init(enrPrivKey, enrSeqNum).build().tryGet() + + ## When + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let fieldOpt = typedRecord.value.relaySharding + + ## Then + check fieldOpt.isNone() + + test "encode and decode record with relay shards field (EnrBuilder ext - shardIds list)": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let + clusterId: uint16 = 22 + shardIds: seq[uint16] = @[1u16, 1u16, 2u16, 3u16, 5u16, 8u16] + + let shardsTopics = RelayShards.init(clusterId, shardIds).expect("Valid Shards") + + ## When + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + + let recordRes = builder.build() + + ## Then + check recordRes.isOk() + let record = recordRes.tryGet() + + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let shardsOpt = typedRecord.value.relaySharding + check: + shardsOpt.isSome() + shardsOpt.get() == shardsTopics + + test "encode and decode record with relay shards field (EnrBuilder ext - bit vector)": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let shardsTopics = + RelayShards.init(33, toSeq(0u16 ..< 64u16)).expect("Valid Shards") + + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + + let recordRes = builder.build() + 
require recordRes.isOk() + + let record = recordRes.tryGet() + + ## When + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let shardsOpt = typedRecord.value.relaySharding + + ## Then + check: + shardsOpt.isSome() + shardsOpt.get() == shardsTopics + + test "decode record with relay shards shard list and bit vector fields": + ## Given + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let + relayShardsIndicesList = RelayShards + .init(22, @[1u16, 1u16, 2u16, 3u16, 5u16, 8u16]) + .expect("Valid Shards") + relayShardsBitVector = RelayShards + .init(33, @[13u16, 24u16, 37u16, 61u16, 98u16, 159u16]) + .expect("Valid Shards") + + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelayShardingIndicesList(relayShardsIndicesList).isOk() + require builder.withWakuRelayShardingBitVector(relayShardsBitVector).isOk() + + let recordRes = builder.build() + require recordRes.isOk() + + let record = recordRes.tryGet() + + ## When + let typedRecord = record.toTyped() + require typedRecord.isOk() + + let shardsOpt = typedRecord.value.relaySharding + + ## Then + check: + shardsOpt.isSome() + shardsOpt.get() == relayShardsIndicesList diff --git a/third-party/nwaku/tests/test_waku_keepalive.nim b/third-party/nwaku/tests/test_waku_keepalive.nim new file mode 100644 index 0000000..f6a9e63 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_keepalive.nim @@ -0,0 +1,56 @@ +{.used.} + +import + testutils/unittests, + chronos, + chronicles, + libp2p/switch, + libp2p/protocols/ping, + libp2p/stream/bufferstream, + libp2p/stream/connection, + libp2p/crypto/crypto +import waku/waku_core, waku/waku_node, ./testlib/wakucore, ./testlib/wakunode + +suite "Waku Keepalive": + asyncTest "handle ping keepalives": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), 
Port(0)) + + var completionFut = newFuture[bool]() + + proc pingHandler(peerId: PeerID) {.async, gcsafe.} = + debug "Ping received" + + check: + peerId == node1.switch.peerInfo.peerId + + completionFut.complete(true) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await node1.mountLibp2pPing() + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + let pingProto = Ping.new(handler = pingHandler) + await pingProto.start() + node2.switch.mount(pingProto) + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + let healthMonitor = NodeHealthMonitor() + healthMonitor.setNodeToHealthMonitor(node1) + healthMonitor.startKeepalive(2.seconds).isOkOr: + assert false, "Failed to start keepalive" + + check: + (await completionFut.withTimeout(5.seconds)) == true + + await node2.stop() + await node1.stop() diff --git a/third-party/nwaku/tests/test_waku_keystore.nim b/third-party/nwaku/tests/test_waku_keystore.nim new file mode 100644 index 0000000..8fd8ad2 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_keystore.nim @@ -0,0 +1,305 @@ +{.used.} + +import std/[os, json], chronos, testutils/unittests +import waku/waku_keystore, ./testlib/common + +from waku/waku_noise/noise_utils import randomSeqByte + +procSuite "Credentials test suite": + let testAppInfo = AppInfo(application: "test", appIdentifier: "1234", version: "0.1") + + test "Create keystore": + let filepath = "./testAppKeystore.txt" + defer: + removeFile(filepath) + + let keystoreRes = createAppKeystore(path = filepath, appInfo = testAppInfo) + + check: + keystoreRes.isOk() + + test "Load keystore": + let filepath = "./testAppKeystore.txt" + defer: + removeFile(filepath) + + # If no keystore exists at filepath, a new one is created for appInfo and empty credentials + let keystoreRes = loadAppKeystore(path = filepath, appInfo = testAppInfo) + + check: + keystoreRes.isOk() + + let keystore = 
keystoreRes.get() + + check: + keystore.hasKeys(["application", "appIdentifier", "version", "credentials"]) + keystore["application"].getStr() == testAppInfo.application + keystore["appIdentifier"].getStr() == testAppInfo.appIdentifier + keystore["version"].getStr() == testAppInfo.version + # We assume the loaded keystore to not have credentials set (previous tests delete the keystore at filepath) + keystore["credentials"].len() == 0 + + test "Add credentials to keystore": + let filepath = "./testAppKeystore.txt" + defer: + removeFile(filepath) + + # We generate a random identity credential (inter-value constrains are not enforced, otherwise we need to load e.g. zerokit RLN keygen) + var + idTrapdoor = randomSeqByte(rng[], 32) + idNullifier = randomSeqByte(rng[], 32) + idSecretHash = randomSeqByte(rng[], 32) + idCommitment = randomSeqByte(rng[], 32) + + var idCredential = IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + + var contract = MembershipContract( + chainId: "5", address: "0x0123456789012345678901234567890123456789" + ) + var index = MembershipIndex(1) + + let membershipCredential = KeystoreMembership( + membershipContract: contract, treeIndex: index, identityCredential: idCredential + ) + let password = "%m0um0ucoW%" + + let keystoreRes = addMembershipCredentials( + path = filepath, + membership = membershipCredential, + password = password, + appInfo = testAppInfo, + ) + + check: + keystoreRes.isOk() + + test "Add/retrieve credentials in keystore": + let filepath = "./testAppKeystore.txt" + defer: + removeFile(filepath) + + # We generate two random identity credentials (inter-value constrains are not enforced, otherwise we need to load e.g. 
zerokit RLN keygen) + var + idTrapdoor = randomSeqByte(rng[], 32) + idNullifier = randomSeqByte(rng[], 32) + idSecretHash = randomSeqByte(rng[], 32) + idCommitment = randomSeqByte(rng[], 32) + idCredential = IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + + # We generate two distinct membership groups + var contract = MembershipContract( + chainId: "5", address: "0x0123456789012345678901234567890123456789" + ) + var index = MembershipIndex(1) + var membershipCredential = KeystoreMembership( + membershipContract: contract, treeIndex: index, identityCredential: idCredential + ) + + let password = "%m0um0ucoW%" + + # We add credentials to the keystore. Note that only 3 credentials should be effectively added, since rlnMembershipCredentials3 is equal to membershipCredentials2 + let keystoreRes = addMembershipCredentials( + path = filepath, + membership = membershipCredential, + password = password, + appInfo = testAppInfo, + ) + + check: + keystoreRes.isOk() + + # We test retrieval of credentials. + var expectedMembership = membershipCredential + let membershipQuery = + KeystoreMembership(membershipContract: contract, treeIndex: index) + + var recoveredCredentialsRes = getMembershipCredentials( + path = filepath, + password = password, + query = membershipQuery, + appInfo = testAppInfo, + ) + + check: + recoveredCredentialsRes.isOk() + recoveredCredentialsRes.get() == expectedMembership + + test "if the keystore contains only one credential, fetch that irrespective of treeIndex": + let filepath = "./testAppKeystore.txt" + defer: + removeFile(filepath) + + # We generate random identity credentials (inter-value constrains are not enforced, otherwise we need to load e.g. 
zerokit RLN keygen) + let + idTrapdoor = randomSeqByte(rng[], 32) + idNullifier = randomSeqByte(rng[], 32) + idSecretHash = randomSeqByte(rng[], 32) + idCommitment = randomSeqByte(rng[], 32) + idCredential = IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + + let contract = MembershipContract( + chainId: "5", address: "0x0123456789012345678901234567890123456789" + ) + let index = MembershipIndex(1) + let membershipCredential = KeystoreMembership( + membershipContract: contract, treeIndex: index, identityCredential: idCredential + ) + + let password = "%m0um0ucoW%" + + let keystoreRes = addMembershipCredentials( + path = filepath, + membership = membershipCredential, + password = password, + appInfo = testAppInfo, + ) + + assert(keystoreRes.isOk(), $keystoreRes.error) + + # We test retrieval of credentials. + let expectedMembership = membershipCredential + let membershipQuery = KeystoreMembership(membershipContract: contract) + + let recoveredCredentialsRes = getMembershipCredentials( + path = filepath, + password = password, + query = membershipQuery, + appInfo = testAppInfo, + ) + + assert(recoveredCredentialsRes.isOk(), $recoveredCredentialsRes.error) + check: + recoveredCredentialsRes.get() == expectedMembership + + test "if the keystore contains multiple credentials, then error out if treeIndex has not been passed in": + let filepath = "./testAppKeystore.txt" + defer: + removeFile(filepath) + + # We generate random identity credentials (inter-value constrains are not enforced, otherwise we need to load e.g. 
zerokit RLN keygen) + let + idTrapdoor = randomSeqByte(rng[], 32) + idNullifier = randomSeqByte(rng[], 32) + idSecretHash = randomSeqByte(rng[], 32) + idCommitment = randomSeqByte(rng[], 32) + idCredential = IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + + # We generate two distinct membership groups + let contract = MembershipContract( + chainId: "5", address: "0x0123456789012345678901234567890123456789" + ) + let index = MembershipIndex(1) + var membershipCredential = KeystoreMembership( + membershipContract: contract, treeIndex: index, identityCredential: idCredential + ) + + let password = "%m0um0ucoW%" + + let keystoreRes = addMembershipCredentials( + path = filepath, + membership = membershipCredential, + password = password, + appInfo = testAppInfo, + ) + + assert(keystoreRes.isOk(), $keystoreRes.error) + + membershipCredential.treeIndex = MembershipIndex(2) + let keystoreRes2 = addMembershipCredentials( + path = filepath, + membership = membershipCredential, + password = password, + appInfo = testAppInfo, + ) + assert(keystoreRes2.isOk(), $keystoreRes2.error) + + # We test retrieval of credentials. + let membershipQuery = KeystoreMembership(membershipContract: contract) + + let recoveredCredentialsRes = getMembershipCredentials( + path = filepath, + password = password, + query = membershipQuery, + appInfo = testAppInfo, + ) + + check: + recoveredCredentialsRes.isErr() + recoveredCredentialsRes.error.kind == KeystoreCredentialNotFoundError + + test "if a keystore exists, but the keystoreQuery doesn't match it": + let filepath = "./testAppKeystore.txt" + defer: + removeFile(filepath) + + # We generate random identity credentials (inter-value constrains are not enforced, otherwise we need to load e.g. 
zerokit RLN keygen) + let + idTrapdoor = randomSeqByte(rng[], 32) + idNullifier = randomSeqByte(rng[], 32) + idSecretHash = randomSeqByte(rng[], 32) + idCommitment = randomSeqByte(rng[], 32) + idCredential = IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + + # We generate two distinct membership groups + let contract = MembershipContract( + chainId: "5", address: "0x0123456789012345678901234567890123456789" + ) + let index = MembershipIndex(1) + var membershipCredential = KeystoreMembership( + membershipContract: contract, treeIndex: index, identityCredential: idCredential + ) + + let password = "%m0um0ucoW%" + + let keystoreRes = addMembershipCredentials( + path = filepath, + membership = membershipCredential, + password = password, + appInfo = testAppInfo, + ) + + assert(keystoreRes.isOk(), $keystoreRes.error) + + let badTestAppInfo = + AppInfo(application: "_bad_test_", appIdentifier: "1234", version: "0.1") + + # We test retrieval of credentials. + let membershipQuery = KeystoreMembership(membershipContract: contract) + + let recoveredCredentialsRes = getMembershipCredentials( + path = filepath, + password = password, + query = membershipQuery, + appInfo = badTestAppInfo, + ) + + check: + recoveredCredentialsRes.isErr() + recoveredCredentialsRes.error.kind == KeystoreJsonValueMismatchError + recoveredCredentialsRes.error.msg == + "Application does not match. 
Expected '_bad_test_' but got 'test'" diff --git a/third-party/nwaku/tests/test_waku_keystore_keyfile.nim b/third-party/nwaku/tests/test_waku_keystore_keyfile.nim new file mode 100644 index 0000000..5f4c745 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_keystore_keyfile.nim @@ -0,0 +1,366 @@ +{.used.} + +import std/[json, os], stew/byteutils, testutils/unittests, chronos, eth/keys +import waku/waku_keystore, ./testlib/common + +from waku/waku_noise/noise_utils import randomSeqByte + +suite "KeyFile test suite": + test "Create/Save/Load single keyfile": + # The password we use to encrypt our secret + let password = "randompassword" + + # The filepath were the keyfile will be stored + let filepath = "./test.keyfile" + defer: + removeFile(filepath) + + # The secret + var secret = randomSeqByte(rng[], 300) + + # We create a keyfile encrypting the secret with password + let keyfile = createKeyFileJson(secret, password) + + check: + keyfile.isOk() + # We save to disk the keyfile + saveKeyFile(filepath, keyfile.get()).isOk() + + # We load from the file all the decrypted keyfiles encrypted under password + var decodedKeyfiles = loadKeyFiles(filepath, password) + + check: + decodedKeyfiles.isOk() + # Since only one secret was stored in file, we expect only one keyfile being decrypted + decodedKeyfiles.get().len == 1 + + # We check if the decrypted secret is the same as the original secret + let decodedSecret = decodedKeyfiles.get()[0] + + check: + secret == decodedSecret.get() + + test "Create/Save/Load multiple keyfiles in same file": + # We set different passwords for different keyfiles that will be stored in same file + let password1 = string.fromBytes(randomSeqByte(rng[], 20)) + let password2 = "" + let password3 = string.fromBytes(randomSeqByte(rng[], 20)) + var keyfile: KfResult[JsonNode] + + let filepath = "./test.keyfile" + defer: + removeFile(filepath) + + # We generate 6 different secrets and we encrypt them using 3 different passwords, and we store the 
obtained keystore + + let secret1 = randomSeqByte(rng[], 300) + keyfile = createKeyFileJson(secret1, password1) + check: + keyfile.isOk() + saveKeyFile(filepath, keyfile.get()).isOk() + + let secret2 = randomSeqByte(rng[], 300) + keyfile = createKeyFileJson(secret2, password2) + check: + keyfile.isOk() + saveKeyFile(filepath, keyfile.get()).isOk() + + let secret3 = randomSeqByte(rng[], 300) + keyfile = createKeyFileJson(secret3, password3) + check: + keyfile.isOk() + saveKeyFile(filepath, keyfile.get()).isOk() + + # We encrypt secret4 with password3 + let secret4 = randomSeqByte(rng[], 300) + keyfile = createKeyFileJson(secret4, password3) + check: + keyfile.isOk() + saveKeyFile(filepath, keyfile.get()).isOk() + + # We encrypt secret5 with password1 + let secret5 = randomSeqByte(rng[], 300) + keyfile = createKeyFileJson(secret5, password1) + check: + keyfile.isOk() + saveKeyFile(filepath, keyfile.get()).isOk() + + # We encrypt secret6 with password1 + let secret6 = randomSeqByte(rng[], 300) + keyfile = createKeyFileJson(secret6, password1) + check: + keyfile.isOk() + saveKeyFile(filepath, keyfile.get()).isOk() + + # Now there are 6 keyfiles stored in filepath encrypted with 3 different passwords + # We decrypt the keyfiles using the respective passwords and we check that the number of + # successful decryptions corresponds to the number of secrets encrypted under that password + + var decodedKeyfilesPassword1 = loadKeyFiles(filepath, password1) + check: + decodedKeyfilesPassword1.isOk() + decodedKeyfilesPassword1.get().len == 3 + var decodedSecretsPassword1 = decodedKeyfilesPassword1.get() + + var decodedKeyfilesPassword2 = loadKeyFiles(filepath, password2) + check: + decodedKeyfilesPassword2.isOk() + decodedKeyfilesPassword2.get().len == 1 + var decodedSecretsPassword2 = decodedKeyfilesPassword2.get() + + var decodedKeyfilesPassword3 = loadKeyFiles(filepath, password3) + check: + decodedKeyfilesPassword3.isOk() + decodedKeyfilesPassword3.get().len == 2 + var 
decodedSecretsPassword3 = decodedKeyfilesPassword3.get() + + # We check if the corresponding secrets are correct + check: + # Secrets encrypted with password 1 + secret1 == decodedSecretsPassword1[0].get() + secret5 == decodedSecretsPassword1[1].get() + secret6 == decodedSecretsPassword1[2].get() + # Secrets encrypted with password 2 + secret2 == decodedSecretsPassword2[0].get() + # Secrets encrypted with password 3 + secret3 == decodedSecretsPassword3[0].get() + secret4 == decodedSecretsPassword3[1].get() + +# The following tests are originally from the nim-eth keyfile tests module https://github.com/status-im/nim-eth/blob/master/tests/keyfile/test_keyfile.nim +# and are slightly adapted to test backwards compatibility with nim-eth implementation of our customized version of the utils/keyfile module +# Note: the original nim-eth "Create/Save/Load test" is redefined and expanded above in "KeyFile test suite" +suite "KeyFile test suite (adapted from nim-eth keyfile tests)": + # Testvectors originally from https://github.com/status-im/nim-eth/blob/fef47331c37ee8abb8608037222658737ff498a6/tests/keyfile/test_keyfile.nim#L22-L168 + let TestVectors = [ + %*{ + "keyfile": { + "crypto": { + "cipher": "aes-128-ctr", + "cipherparams": {"iv": "6087dab2f9fdbbfaddc31a909735c1e6"}, + "ciphertext": + "5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46", + "kdf": "pbkdf2", + "kdfparams": { + "c": 262144, + "dklen": 32, + "prf": "hmac-sha256", + "salt": "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd", + }, + "mac": "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e9b2", + }, + "id": "3198bc9c-6672-5ab3-d995-4942343ae5b6", + "version": 3, + }, + "name": "test1", + "password": "testpassword", + "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d", + }, + %*{ + "keyfile": { + "version": 3, + "crypto": { + "ciphertext": + "ee75456c006b1e468133c5d2a916bacd3cf515ced4d9b021b5c59978007d1e87", + "version": 1, + "kdf": 
"pbkdf2", + "kdfparams": { + "dklen": 32, + "c": 262144, + "prf": "hmac-sha256", + "salt": "504490577620f64f43d73f29479c2cf0", + }, + "mac": "196815708465de9af7504144a1360d08874fc3c30bb0e648ce88fbc36830d35d", + "cipherparams": {"iv": "514ccc8c4fb3e60e5538e0cf1e27c233"}, + "cipher": "aes-128-ctr", + }, + "id": "98d193c7-5174-4c7c-5345-c1daf95477b5", + }, + "name": "python_generated_test_with_odd_iv", + "password": "foo", + "priv": "0101010101010101010101010101010101010101010101010101010101010101", + }, + %*{ + "keyfile": { + "version": 3, + "crypto": { + "ciphertext": + "d69313b6470ac1942f75d72ebf8818a0d484ac78478a132ee081cd954d6bd7a9", + "cipherparams": {"iv": "ffffffffffffffffffffffffffffffff"}, + "kdf": "pbkdf2", + "kdfparams": { + "dklen": 32, + "c": 262144, + "prf": "hmac-sha256", + "salt": "c82ef14476014cbf438081a42709e2ed", + }, + "mac": "cf6bfbcc77142a22c4a908784b4a16f1023a1d0e2aff404c20158fa4f1587177", + "cipher": "aes-128-ctr", + "version": 1, + }, + "id": "abb67040-8dbe-0dad-fc39-2b082ef0ee5f", + }, + "name": "evilnonce", + "password": "bar", + "priv": "0202020202020202020202020202020202020202020202020202020202020202", + }, + %*{ + "keyfile": { + "version": 3, + "crypto": { + "cipher": "aes-128-ctr", + "cipherparams": {"iv": "83dbcc02d8ccb40e466191a123791e0e"}, + "ciphertext": + "d172bf743a674da9cdad04534d56926ef8358534d458fffccd4e6ad2fbde479c", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "n": 262144, + "r": 1, + "p": 8, + "salt": "ab0c7876052600dd703518d6fc3fe8984592145b591fc8fb5c6d43190334ba19", + }, + "mac": "2103ac29920d71da29f15d75b4a16dbe95cfd7ff8faea1056c33131d846e3097", + }, + "id": "3198bc9c-6672-5ab3-d995-4942343ae5b6", + }, + "name": "test2", + "password": "testpassword", + "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d", + }, + %*{ + "keyfile": { + "version": 3, + "address": "460121576cc7df020759730751f92bd62fd78dd6", + "crypto": { + "ciphertext": + 
"54ae683c6287fa3d58321f09d56e26d94e58a00d4f90bdd95782ae0e4aab618b", + "cipherparams": {"iv": "681679cdb125bba9495d068b002816a4"}, + "cipher": "aes-128-ctr", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "salt": "c3407f363fce02a66e3c4bf4a8f6b7da1c1f54266cef66381f0625c251c32785", + "n": 8192, + "r": 8, + "p": 1, + }, + "mac": "dea6bdf22a2f522166ed82808c22a6311e84c355f4bbe100d4260483ff675a46", + }, + "id": "0eb785e0-340a-4290-9c42-90a11973ee47", + }, + "name": "mycrypto", + "password": "foobartest121", + "priv": "05a4d3eb46c742cb8850440145ce70cbc80b59f891cf5f50fd3e9c280b50c4e4", + }, + %*{ + "keyfile": { + "crypto": { + "cipher": "aes-128-ctr", + "cipherparams": {"iv": "7e7b02d2b4ef45d6c98cb885e75f48d5"}, + "ciphertext": + "a7a5743a6c7eb3fa52396bd3fd94043b79075aac3ccbae8e62d3af94db00397c", + "kdf": "scrypt", + "kdfparams": { + "dklen": 32, + "n": 8192, + "p": 1, + "r": 8, + "salt": "247797c7a357b707a3bdbfaa55f4c553756bca09fec20ddc938e7636d21e4a20", + }, + "mac": "5a3ba5bebfda2c384586eda5fcda9c8397d37c9b0cc347fea86525cf2ea3a468", + }, + "address": "0b6f2de3dee015a95d3330dcb7baf8e08aa0112d", + "id": "3c8efdd6-d538-47ec-b241-36783d3418b9", + "version": 3, + }, + "password": "moomoocow", + "priv": "21eac69b9a52f466bfe9047f0f21c9caf3a5cdaadf84e2750a9b3265d450d481", + "name": "eth-keyfile-conftest", + }, + ] + + test "Testing nim-eth test vectors": + var secret: KfResult[seq[byte]] + var expectedSecret: seq[byte] + + for i in 0 ..< TestVectors.len: + # Decryption with correct password + expectedSecret = decodeHex(TestVectors[i].getOrDefault("priv").getStr()) + secret = decodeKeyFileJson( + TestVectors[i].getOrDefault("keyfile"), + TestVectors[i].getOrDefault("password").getStr(), + ) + check: + secret.isOk() + secret.get() == expectedSecret + + # Decryption with wrong password + secret = + decodeKeyFileJson(TestVectors[i].getOrDefault("keyfile"), "wrongpassword") + + check: + secret.isErr() + secret.error == KeyFileError.KeyfileIncorrectMac + + test "Wrong mac in 
keyfile": + # This keyfile is the same as the first one in TestVectors, + # but the last byte of mac is changed to 00. + # While ciphertext is the correct encryption of priv under password, + # mac verfication should fail and nothing will be decrypted + let keyfileWrongMac = + %*{ + "keyfile": { + "crypto": { + "cipher": "aes-128-ctr", + "cipherparams": {"iv": "6087dab2f9fdbbfaddc31a909735c1e6"}, + "ciphertext": + "5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46", + "kdf": "pbkdf2", + "kdfparams": { + "c": 262144, + "dklen": 32, + "prf": "hmac-sha256", + "salt": "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd", + }, + "mac": "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e900", + }, + "id": "3198bc9c-6672-5ab3-d995-4942343ae5b6", + "version": 3, + }, + "name": "test1", + "password": "testpassword", + "priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d", + } + + # Decryption with correct password + let expectedSecret = decodeHex(keyfileWrongMac.getOrDefault("priv").getStr()) + let secret = decodeKeyFileJson( + keyfileWrongMac.getOrDefault("keyfile"), + keyfileWrongMac.getOrDefault("password").getStr(), + ) + check: + secret.isErr() + secret.error == KeyFileError.KeyFileIncorrectMac + + test "Scrypt keyfiles": + let + expectedSecret = randomSeqByte(rng[], 300) + password = "miawmiawcat" + + # By default, keyfiles' encryption key is derived from password using PBKDF2. 
+ # Here we test keyfiles encypted with a key derived from password using scrypt + jsonKeyfile = createKeyFileJson(expectedSecret, password, 3, AES128CTR, SCRYPT) + + check: + jsonKeyfile.isOk() + + let secret = decodeKeyFileJson(jsonKeyfile.get(), password) + + check: + secret.isOk() + secret.get() == expectedSecret + + test "Load non-existent keyfile test": + check: + loadKeyFiles("nonexistant.keyfile", "password").error == + KeyFileError.KeyfileDoesNotExist diff --git a/third-party/nwaku/tests/test_waku_metadata.nim b/third-party/nwaku/tests/test_waku_metadata.nim new file mode 100644 index 0000000..b30fd17 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_metadata.nim @@ -0,0 +1,66 @@ +{.used.} + +import + std/[options, sequtils, tables], + testutils/unittests, + chronos, + chronicles, + libp2p/switch, + libp2p/peerId, + libp2p/crypto/crypto, + libp2p/multistream, + libp2p/muxers/muxer, + eth/keys, + eth/p2p/discoveryv5/enr +import + waku/ + [ + waku_node, + waku_core/topics, + node/peer_manager, + discovery/waku_discv5, + waku_metadata, + ], + ./testlib/wakucore, + ./testlib/wakunode + +procSuite "Waku Metadata Protocol": + asyncTest "request() returns the supported metadata of the peer": + let clusterId = 10.uint16 + let + node1 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + ) + node2 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + ) + + # Start nodes + await allFutures([node1.start(), node2.start()]) + + node1.topicSubscriptionQueue.emit((kind: PubsubSub, topic: "/waku/2/rs/10/7")) + node1.topicSubscriptionQueue.emit((kind: PubsubSub, topic: "/waku/2/rs/10/6")) + + # Create connection + let connOpt = await node2.peerManager.dialPeer( + node1.switch.peerInfo.toRemotePeerInfo(), WakuMetadataCodec + ) + require: + connOpt.isSome + + # Request metadata + let response1 = await node2.wakuMetadata.request(connOpt.get()) + + # Check 
the response or dont even continue + require: + response1.isOk + + check: + response1.get().clusterId.get() == clusterId + response1.get().shards == @[uint32(6), uint32(7)] diff --git a/third-party/nwaku/tests/test_waku_netconfig.nim b/third-party/nwaku/tests/test_waku_netconfig.nim new file mode 100644 index 0000000..5f9ff4b --- /dev/null +++ b/third-party/nwaku/tests/test_waku_netconfig.nim @@ -0,0 +1,428 @@ +{.used.} + +import chronos, confutils/toml/std/net, libp2p/multiaddress, testutils/unittests + +import ./testlib/wakunode, waku/waku_enr/capabilities + +include + waku/node/net_config, + waku/factory/conf_builder/web_socket_conf_builder, + waku/factory/conf_builder/conf_builder + +proc defaultTestWakuFlags(): CapabilitiesBitfield = + CapabilitiesBitfield.init( + lightpush = false, filter = false, store = false, relay = true + ) + +suite "Waku NetConfig": + asyncTest "Create NetConfig with default values": + let conf = defaultTestWakuConf() + + let wakuFlags = defaultTestWakuFlags() + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extIp = none(IpAddress), + extPort = none(Port), + extMultiAddrs = @[], + wsBindPort = + if conf.webSocketConf.isSome(): + some(conf.webSocketConf.get().port) + else: + none(Port), + wsEnabled = conf.webSocketConf.isSome(), + wssEnabled = + if conf.webSocketConf.isSome(): + conf.webSocketConf.get().secureConf.isSome() + else: + false, + dns4DomainName = none(string), + discv5UdpPort = none(Port), + wakuFlags = some(wakuFlags), + ) + + check: + netConfigRes.isOk() + + asyncTest "AnnouncedAddresses contains only bind address when no external addresses are provided": + let conf = defaultTestWakuConf() + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + 
netConfig.announcedAddresses.len == 1 # Only bind address should be present + netConfig.announcedAddresses[0] == + formatListenAddress( + ip4TcpEndPoint( + conf.endpointConf.p2pListenAddress, conf.endpointConf.p2pTcpPort + ) + ) + + asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided": + let + conf = defaultTestWakuConf() + extIp = parseIpAddress("1.2.3.4") + extPort = Port(1234) + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extIp = some(extIp), + extPort = some(extPort), + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 1 # Only external address should be present + netConfig.announcedAddresses[0] == ip4TcpEndPoint(extIp, extPort) + + asyncTest "AnnouncedAddresses contains dns4DomainName if provided": + let + conf = defaultTestWakuConf() + dns4DomainName = "example.com" + extPort = Port(1234) + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + dns4DomainName = some(dns4DomainName), + extPort = some(extPort), + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 1 # Only DNS address should be present + netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort) + + asyncTest "AnnouncedAddresses includes extMultiAddrs when provided": + let + conf = defaultTestWakuConf() + extIp = parseIpAddress("1.2.3.4") + extPort = Port(1234) + extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)] + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extMultiAddrs = extMultiAddrs, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 
2 # Bind address + extAddress + netConfig.announcedAddresses[1] == extMultiAddrs[0] + + asyncTest "AnnouncedAddresses uses dns4DomainName over extIp when both are provided": + let + conf = defaultTestWakuConf() + dns4DomainName = "example.com" + extIp = parseIpAddress("1.2.3.4") + extPort = Port(1234) + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + dns4DomainName = some(dns4DomainName), + extIp = some(extIp), + extPort = some(extPort), + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 1 # DNS address + netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort) + + asyncTest "AnnouncedAddresses includes WebSocket addresses when enabled": + var confBuilder = defaultTestWakuConfBuilder() + + confBuilder.webSocketConf.withEnabled(true) + confBuilder.webSocketConf.withWebSocketPort(Port(8000)) + + let conf = confBuilder.build().valueOr: + raiseAssert error + + var wssEnabled = false + + var netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + wsEnabled = true, + wssEnabled = wssEnabled, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + var netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress + netConfig.announcedAddresses[1] == ( + ip4TcpEndPoint( + conf.endpointConf.p2pListenAddress, conf.webSocketConf.get().port + ) & wsFlag(wssEnabled) + ) + + ## Now try the same for the case of wssEnabled = true + + wssEnabled = true + + netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + wsEnabled = true, + wssEnabled = wssEnabled, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 2 # Bind 
address + wsHostAddress + netConfig.announcedAddresses[1] == ( + ip4TcpEndPoint( + conf.endpointConf.p2pListenAddress, conf.websocketConf.get().port + ) & wsFlag(wssEnabled) + ) + + asyncTest "Announced WebSocket address contains external IP if provided": + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.webSocketConf.withEnabled(true) + confBuilder.webSocketConf.withWebSocketPort(Port(8000)) + + let conf = confBuilder.build().valueOr: + raiseAssert error + + let + extIp = parseIpAddress("1.2.3.4") + extPort = Port(1234) + wssEnabled = false + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extIp = some(extIp), + extPort = some(extPort), + wsEnabled = true, + wssEnabled = wssEnabled, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 2 # External address + wsHostAddress + netConfig.announcedAddresses[1] == + (ip4TcpEndPoint(extIp, conf.websocketConf.get().port) & wsFlag(wssEnabled)) + + asyncTest "Announced WebSocket address contains dns4DomainName if provided": + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.webSocketConf.withEnabled(true) + confBuilder.webSocketConf.withWebSocketPort(Port(8000)) + + let conf = confBuilder.build().valueOr: + raiseAssert error + + let + dns4DomainName = "example.com" + extPort = Port(1234) + wssEnabled = false + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + dns4DomainName = some(dns4DomainName), + extPort = some(extPort), + wsEnabled = true, + wssEnabled = wssEnabled, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress + netConfig.announcedAddresses[1] == ( + dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) & + 
wsFlag(wssEnabled) + ) + + asyncTest "Announced WebSocket address contains dns4DomainName if provided alongside extIp": + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.webSocketConf.withEnabled(true) + confBuilder.webSocketConf.withWebSocketPort(Port(8000)) + + let conf = confBuilder.build().valueOr: + raiseAssert error + + let + dns4DomainName = "example.com" + extIp = parseIpAddress("1.2.3.4") + extPort = Port(1234) + wssEnabled = false + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + dns4DomainName = some(dns4DomainName), + extIp = some(extIp), + extPort = some(extPort), + wsEnabled = true, + wssEnabled = wssEnabled, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 2 # DNS address + wsHostAddress + netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort) + netConfig.announcedAddresses[1] == ( + dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) & + wsFlag(wssEnabled) + ) + + asyncTest "ENR is set with bindIp/Port if no extIp/Port are provided": + let conf = defaultTestWakuConf() + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.enrIp.get() == conf.endpointConf.p2pListenAddress + netConfig.enrPort.get() == conf.endpointConf.p2pTcpPort + + asyncTest "ENR is set with extIp/Port if provided": + let + conf = defaultTestWakuConf() + extIp = parseIpAddress("1.2.3.4") + extPort = Port(1234) + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extIp = some(extIp), + extPort = some(extPort), + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = 
netConfigRes.get() + + check: + netConfig.extIp.get() == extIp + netConfig.enrPort.get() == extPort + + asyncTest "ENR is set with dns4DomainName if provided": + let + conf = defaultTestWakuConf() + dns4DomainName = "example.com" + extPort = Port(1234) + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + dns4DomainName = some(dns4DomainName), + extPort = some(extPort), + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.enrMultiaddrs.contains(dns4TcpEndPoint(dns4DomainName, extPort)) + + asyncTest "wsHostAddress is not announced if a WS/WSS address is provided in extMultiAddrs": + var + conf = defaultTestWakuConf() + extAddIp = parseIpAddress("1.2.3.4") + extAddPort = Port(1234) + wsEnabled = true + wssEnabled = false + extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))] + + var netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extMultiAddrs = extMultiAddrs, + wsEnabled = wsEnabled, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + var netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 2 # Bind address + extAddress + netConfig.announcedAddresses[1] == extMultiAddrs[0] + + # Now same test for WSS external address + wssEnabled = true + extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))] + + netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extMultiAddrs = extMultiAddrs, + wssEnabled = wssEnabled, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 2 # Bind address + extAddress + netConfig.announcedAddresses[1] == extMultiAddrs[0] + + asyncTest "Only extMultiAddrs are published when enabling 
extMultiAddrsOnly flag": + let + conf = defaultTestWakuConf() + extAddIp = parseIpAddress("1.2.3.4") + extAddPort = Port(1234) + extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)] + + let netConfigRes = NetConfig.init( + bindIp = conf.endpointConf.p2pListenAddress, + bindPort = conf.endpointConf.p2pTcpPort, + extMultiAddrs = extMultiAddrs, + extMultiAddrsOnly = true, + ) + + assert netConfigRes.isOk(), $netConfigRes.error + + let netConfig = netConfigRes.get() + + check: + netConfig.announcedAddresses.len == 1 # ExtAddress + netConfig.announcedAddresses[0] == extMultiAddrs[0] diff --git a/third-party/nwaku/tests/test_waku_noise.nim b/third-party/nwaku/tests/test_waku_noise.nim new file mode 100644 index 0000000..980e752 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_noise.nim @@ -0,0 +1,902 @@ +{.used.} + +import + testutils/unittests, + std/random, + std/tables, + stew/byteutils, + libp2p/crypto/chacha20poly1305, + libp2p/protobuf/minprotobuf, + stew/endians2 +import + waku/[ + utils/noise as waku_message_utils, + waku_noise/noise_types, + waku_noise/noise_utils, + waku_noise/noise, + waku_noise/noise_handshake_processing, + waku_core, + ], + ./testlib/common + +procSuite "Waku Noise": + common.randomize() + + test "PKCS#7 Padding/Unpadding": + # We test padding for different message lengths + let maxMessageLength = 3 * NoisePaddingBlockSize + for messageLen in 0 .. maxMessageLength: + let + message = randomSeqByte(rng[], messageLen) + padded = pkcs7_pad(message, NoisePaddingBlockSize) + unpadded = pkcs7_unpad(padded, NoisePaddingBlockSize) + + check: + padded.len != 0 + padded.len mod NoisePaddingBlockSize == 0 + message == unpadded + + test "ChaChaPoly Encryption/Decryption: random byte sequences": + let cipherState = randomChaChaPolyCipherState(rng[]) + + # We encrypt/decrypt random byte sequences + let + plaintext: seq[byte] = randomSeqByte(rng[], rand(1 .. 
128)) + ciphertext: ChaChaPolyCiphertext = encrypt(cipherState, plaintext) + decryptedCiphertext: seq[byte] = decrypt(cipherState, ciphertext) + + check: + plaintext == decryptedCiphertext + + test "ChaChaPoly Encryption/Decryption: random strings": + let cipherState = randomChaChaPolyCipherState(rng[]) + + # We encrypt/decrypt random strings + var plaintext: string + for _ in 1 .. rand(1 .. 128): + add(plaintext, char(rand(int('A') .. int('z')))) + + let + ciphertext: ChaChaPolyCiphertext = encrypt(cipherState, plaintext.toBytes()) + decryptedCiphertext: seq[byte] = decrypt(cipherState, ciphertext) + + check: + plaintext.toBytes() == decryptedCiphertext + + test "Noise public keys: encrypt and decrypt a public key": + let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[]) + + let + cs: ChaChaPolyCipherState = randomChaChaPolyCipherState(rng[]) + encryptedPk: NoisePublicKey = encryptNoisePublicKey(cs, noisePublicKey) + decryptedPk: NoisePublicKey = decryptNoisePublicKey(cs, encryptedPk) + + check: + noisePublicKey == decryptedPk + + test "Noise public keys: decrypt an unencrypted public key": + let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[]) + + let + cs: ChaChaPolyCipherState = randomChaChaPolyCipherState(rng[]) + decryptedPk: NoisePublicKey = decryptNoisePublicKey(cs, noisePublicKey) + + check: + noisePublicKey == decryptedPk + + test "Noise public keys: encrypt an encrypted public key": + let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[]) + + let + cs: ChaChaPolyCipherState = randomChaChaPolyCipherState(rng[]) + encryptedPk: NoisePublicKey = encryptNoisePublicKey(cs, noisePublicKey) + encryptedPk2: NoisePublicKey = encryptNoisePublicKey(cs, encryptedPk) + + check: + encryptedPk == encryptedPk2 + + test "Noise public keys: encrypt, decrypt and decrypt a public key": + let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[]) + + let + cs: ChaChaPolyCipherState = randomChaChaPolyCipherState(rng[]) + encryptedPk: NoisePublicKey = 
encryptNoisePublicKey(cs, noisePublicKey) + decryptedPk: NoisePublicKey = decryptNoisePublicKey(cs, encryptedPk) + decryptedPk2: NoisePublicKey = decryptNoisePublicKey(cs, decryptedPk) + + check: + decryptedPk == decryptedPk2 + + test "Noise public keys: serialize and deserialize an unencrypted public key": + let + noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[]) + serializedNoisePublicKey: seq[byte] = serializeNoisePublicKey(noisePublicKey) + deserializedNoisePublicKey: NoisePublicKey = + intoNoisePublicKey(serializedNoisePublicKey) + + check: + noisePublicKey == deserializedNoisePublicKey + + test "Noise public keys: encrypt, serialize, deserialize and decrypt a public key": + let noisePublicKey: NoisePublicKey = genNoisePublicKey(rng[]) + + let + cs: ChaChaPolyCipherState = randomChaChaPolyCipherState(rng[]) + encryptedPk: NoisePublicKey = encryptNoisePublicKey(cs, noisePublicKey) + serializedNoisePublicKey: seq[byte] = serializeNoisePublicKey(encryptedPk) + deserializedNoisePublicKey: NoisePublicKey = + intoNoisePublicKey(serializedNoisePublicKey) + decryptedPk: NoisePublicKey = + decryptNoisePublicKey(cs, deserializedNoisePublicKey) + + check: + noisePublicKey == decryptedPk + + test "PayloadV2: serialize/deserialize PayloadV2 to byte sequence": + let + payload2: PayloadV2 = randomPayloadV2(rng[]) + serializedPayload = serializePayloadV2(payload2) + + check: + serializedPayload.isOk() + + let deserializedPayload = deserializePayloadV2(serializedPayload.get()) + + check: + deserializedPayload.isOk() + payload2 == deserializedPayload.get() + + test "PayloadV2: Encode/Decode a Waku Message (version 2) to a PayloadV2": + # We encode to a WakuMessage a random PayloadV2 + let + payload2 = randomPayloadV2(rng[]) + msg = encodePayloadV2(payload2) + + check: + msg.isOk() + + # We create ProtoBuffer from WakuMessage + let pb = msg.get().encode() + + # We decode the WakuMessage from the ProtoBuffer + let msgFromPb = WakuMessage.decode(pb.buffer) + + check: + 
msgFromPb.isOk() + + let decoded = decodePayloadV2(msgFromPb.get()) + + check: + decoded.isOk() + payload2 == decoded.get() + + test "Noise State Machine: Diffie-Hellman operation": + #We generate random keypairs + let + aliceKey = genKeyPair(rng[]) + bobKey = genKeyPair(rng[]) + + # A Diffie-Hellman operation between Alice's private key and Bob's public key must be equal to + # a Diffie-hellman operation between Alice's public key and Bob's private key + let + dh1 = dh(getPrivateKey(aliceKey), getPublicKey(bobKey)) + dh2 = dh(getPrivateKey(bobKey), getPublicKey(aliceKey)) + + check: + dh1 == dh2 + + test "Noise State Machine: Cipher State primitives": + # We generate a random Cipher State, associated data ad and plaintext + var + cipherState: CipherState = randomCipherState(rng[]) + nonce: uint64 = uint64(rand(0 .. int.high)) + ad: seq[byte] = randomSeqByte(rng[], rand(1 .. 128)) + plaintext: seq[byte] = randomSeqByte(rng[], rand(1 .. 128)) + + # We set the random nonce generated in the cipher state + setNonce(cipherState, nonce) + + # We perform encryption + var ciphertext: seq[byte] = encryptWithAd(cipherState, ad, plaintext) + + # After any encryption/decryption operation, the Cipher State's nonce increases by 1 + check: + getNonce(cipherState) == nonce + 1 + + # We set the nonce back to its original value for decryption + setNonce(cipherState, nonce) + + # We decrypt (using the original nonce) + var decrypted: seq[byte] = decryptWithAd(cipherState, ad, ciphertext) + + # We check if encryption and decryption are correct and that nonce correctly increased after decryption + check: + getNonce(cipherState) == nonce + 1 + plaintext == decrypted + + # If a Cipher State has no key set, encryptWithAd should return the plaintext without increasing the nonce + setCipherStateKey(cipherState, EmptyKey) + nonce = getNonce(cipherState) + + plaintext = randomSeqByte(rng[], rand(1 .. 
128)) + ciphertext = encryptWithAd(cipherState, ad, plaintext) + + check: + ciphertext == plaintext + getNonce(cipherState) == nonce + + # If a Cipher State has no key set, decryptWithAd should return the ciphertext without increasing the nonce + setCipherStateKey(cipherState, EmptyKey) + nonce = getNonce(cipherState) + + # Note that we set ciphertext minimum length to 16 to not trigger checks on authentication tag length + ciphertext = randomSeqByte(rng[], rand(16 .. 128)) + plaintext = decryptWithAd(cipherState, ad, ciphertext) + + check: + ciphertext == plaintext + getNonce(cipherState) == nonce + + # A Cipher State cannot have a nonce greater or equal 2^64-1 + # Note that NonceMax is uint64.high - 1 = 2^64-1-1 and that nonce is increased after each encryption and decryption operation + + # We generate a test Cipher State with nonce set to MaxNonce + cipherState = randomCipherState(rng[]) + setNonce(cipherState, NonceMax) + plaintext = randomSeqByte(rng[], rand(1 .. 128)) + + # We test if encryption fails with a NoiseNonceMaxError error. Any subsequent encryption call over the Cipher State should fail similarly and leave the nonce unchanged + for _ in [1 .. 5]: + expect NoiseNonceMaxError: + ciphertext = encryptWithAd(cipherState, ad, plaintext) + + check: + getNonce(cipherState) == NonceMax + 1 + + # We generate a test Cipher State + # Since nonce is increased after decryption as well, we need to generate a proper ciphertext in order to test MaxNonceError error handling + # We cannot call encryptWithAd to encrypt a plaintext using a nonce equal MaxNonce, since this will trigger a MaxNonceError. + # To perform such test, we then need to encrypt a test plaintext using directly ChaChaPoly primitive + cipherState = randomCipherState(rng[]) + setNonce(cipherState, NonceMax) + plaintext = randomSeqByte(rng[], rand(1 .. 
128)) + + # We perform encryption using the Cipher State key, NonceMax and ad + # By Noise specification the nonce is 8 bytes long out of the 12 bytes supported by ChaChaPoly, thus we copy the Little endian conversion of the nonce to a ChaChaPolyNonce + var + encNonce: ChaChaPolyNonce + authorizationTag: ChaChaPolyTag + encNonce[4 ..< 12] = toBytesLE(NonceMax) + ChaChaPoly.encrypt(getKey(cipherState), encNonce, authorizationTag, plaintext, ad) + + # The output ciphertext is stored in the plaintext variable after ChaChaPoly.encrypt is called: we copy it along with the authorization tag. + ciphertext = @[] + ciphertext.add(plaintext) + ciphertext.add(authorizationTag) + + # At this point ciphertext is a proper encryption of the original plaintext obtained with nonce equal to NonceMax + # We can now test if decryption fails with a NoiseNonceMaxError error. Any subsequent decryption call over the Cipher State should fail similarly and leave the nonce unchanged + # Note that decryptWithAd doesn't fail in decrypting the ciphertext (otherwise a NoiseDecryptTagError would have been triggered) + for _ in [1 .. 
5]: + expect NoiseNonceMaxError: + plaintext = decryptWithAd(cipherState, ad, ciphertext) + + check: + getNonce(cipherState) == NonceMax + 1 + + test "Noise State Machine: Symmetric State primitives": + # We select one supported handshake pattern and we initialize a symmetric state + var + hsPattern = NoiseHandshakePatterns["XX"] + symmetricState: SymmetricState = SymmetricState.init(hsPattern) + + # We get all the Symmetric State field + # cs : Cipher State + # ck : chaining key + # h : handshake hash + var + cs = getCipherState(symmetricState) + ck = getChainingKey(symmetricState) + h = getHandshakeHash(symmetricState) + + # When a Symmetric state is initialized, handshake hash and chaining key are (byte-wise) equal + check: + h.data.intoChaChaPolyKey == ck + + ######################################## + # mixHash + ######################################## + + # We generate a random byte sequence and execute a mixHash over it + mixHash(symmetricState, randomSeqByte(rng[], rand(1 .. 128))) + + # mixHash changes only the handshake hash value of the Symmetric state + check: + cs == getCipherState(symmetricState) + ck == getChainingKey(symmetricState) + h != getHandshakeHash(symmetricState) + + # We update test values + h = getHandshakeHash(symmetricState) + + ######################################## + # mixKey + ######################################## + + # We generate random input key material and we execute mixKey + var inputKeyMaterial = randomSeqByte(rng[], rand(1 .. 
128)) + mixKey(symmetricState, inputKeyMaterial) + + # mixKey changes the Symmetric State's chaining key and encryption key of the embedded Cipher State + # It further sets to 0 the nonce of the embedded Cipher State + check: + getKey(cs) != getKey(getCipherState(symmetricState)) + getNonce(getCipherState(symmetricState)) == 0.uint64 + cs != getCipherState(symmetricState) + ck != getChainingKey(symmetricState) + h == getHandshakeHash(symmetricState) + + # We update test values + cs = getCipherState(symmetricState) + ck = getChainingKey(symmetricState) + + ######################################## + # mixKeyAndHash + ######################################## + + # We generate random input key material and we execute mixKeyAndHash + inputKeyMaterial = randomSeqByte(rng[], rand(1 .. 128)) + mixKeyAndHash(symmetricState, inputKeyMaterial) + + # mixKeyAndHash executes a mixKey and a mixHash using the input key material + # All Symmetric State's fields are updated + check: + cs != getCipherState(symmetricState) + ck != getChainingKey(symmetricState) + h != getHandshakeHash(symmetricState) + + # We update test values + cs = getCipherState(symmetricState) + ck = getChainingKey(symmetricState) + h = getHandshakeHash(symmetricState) + + ######################################## + # encryptAndHash and decryptAndHash + ######################################## + + # We store the initial symmetricState in order to correctly perform decryption + var initialSymmetricState = symmetricState + + # We generate random plaintext and we execute encryptAndHash + var plaintext = randomChaChaPolyKey(rng[]) + var nonce = getNonce(getCipherState(symmetricState)) + var ciphertext = encryptAndHash(symmetricState, plaintext) + + # encryptAndHash combines encryptWithAd and mixHash over the ciphertext (encryption increases the nonce of the embedded Cipher State but does not change its key) + # We check if only the handshake hash value and the Symmetric State changed accordingly + check: + cs != 
getCipherState(symmetricState) + getKey(cs) == getKey(getCipherState(symmetricState)) + getNonce(getCipherState(symmetricState)) == nonce + 1 + ck == getChainingKey(symmetricState) + h != getHandshakeHash(symmetricState) + + # We restore the symmetric State to its initial value to test decryption + symmetricState = initialSymmetricState + + # We execute decryptAndHash over the ciphertext + var decrypted = decryptAndHash(symmetricState, ciphertext) + + # decryptAndHash combines decryptWithAd and mixHash over the ciphertext (encryption increases the nonce of the embedded Cipher State but does not change its key) + # We check if only the handshake hash value and the Symmetric State changed accordingly + # We further check if decryption corresponds to the original plaintext + check: + cs != getCipherState(symmetricState) + getKey(cs) == getKey(getCipherState(symmetricState)) + getNonce(getCipherState(symmetricState)) == nonce + 1 + ck == getChainingKey(symmetricState) + h != getHandshakeHash(symmetricState) + decrypted == plaintext + + ######################################## + # split + ######################################## + + # If at least one mixKey is executed (as above), ck is non-empty + check: + getChainingKey(symmetricState) != EmptyKey + + # When a Symmetric State's ck is non-empty, we can execute split, which creates two distinct Cipher States cs1 and cs2 + # with non-empty encryption keys and nonce set to 0 + var (cs1, cs2) = split(symmetricState) + + check: + getKey(cs1) != EmptyKey + getKey(cs2) != EmptyKey + getNonce(cs1) == 0.uint64 + getNonce(cs2) == 0.uint64 + getKey(cs1) != getKey(cs2) + + test "Noise XX Handhshake and message encryption (extended test)": + let hsPattern = NoiseHandshakePatterns["XX"] + + # We initialize Alice's and Bob's Handshake State + let aliceStaticKey = genKeyPair(rng[]) + var aliceHS = + initialize(hsPattern = hsPattern, staticKey = aliceStaticKey, initiator = true) + + let bobStaticKey = genKeyPair(rng[]) + var bobHS = 
initialize(hsPattern = hsPattern, staticKey = bobStaticKey) + + var + sentTransportMessage: seq[byte] + aliceStep, bobStep: HandshakeStepResult + + # Here the handshake starts + # Write and read calls alternate between Alice and Bob: the handhshake progresses by alternatively calling stepHandshake for each user + + ############### + # 1st step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message + # and the (encrypted) transport message + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + + check: + bobStep.transportMessage == sentTransportMessage + + ############### + # 2nd step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # At this step, Bob writes and returns a payload + bobStep = stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage).get() + + # While Alice reads and returns the (decrypted) transport message + aliceStep = stepHandshake(rng[], aliceHS, readPayloadV2 = bobStep.payload2).get() + + check: + aliceStep.transportMessage == sentTransportMessage + + ############### + # 3rd step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + 
+ check: + bobStep.transportMessage == sentTransportMessage + + # Note that for this handshake pattern, no more message patterns are left for processing + # Another call to stepHandshake would return an empty HandshakeStepResult + # We test that extra calls to stepHandshake do not affect parties' handshake states + # and that the intermediate HandshakeStepResult are empty + let prevAliceHS = aliceHS + let prevBobHS = bobHS + + let bobStep1 = + stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage).get() + let aliceStep1 = + stepHandshake(rng[], aliceHS, readPayloadV2 = bobStep1.payload2).get() + let aliceStep2 = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + let bobStep2 = + stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep2.payload2).get() + + check: + aliceStep1 == default(HandshakeStepResult) + aliceStep2 == default(HandshakeStepResult) + bobStep1 == default(HandshakeStepResult) + bobStep2 == default(HandshakeStepResult) + aliceHS == prevAliceHS + bobHS == prevBobHS + + ######################### + # After Handshake + ######################### + + # We finalize the handshake to retrieve the Inbound/Outbound symmetric states + var aliceHSResult, bobHSResult: HandshakeResult + + aliceHSResult = finalizeHandshake(aliceHS) + bobHSResult = finalizeHandshake(bobHS) + + # We test read/write of random messages exchanged between Alice and Bob + var + payload2: PayloadV2 + message: seq[byte] + readMessage: seq[byte] + defaultMessageNametagBuffer: MessageNametagBuffer + + for _ in 0 .. 
10: + # Alice writes to Bob + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage + + # Bob writes to Alice + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage + + test "Noise XXpsk0 Handhshake and message encryption (short test)": + let hsPattern = NoiseHandshakePatterns["XXpsk0"] + + # We generate a random psk + let psk = randomSeqByte(rng[], 32) + + # We initialize Alice's and Bob's Handshake State + let aliceStaticKey = genKeyPair(rng[]) + var aliceHS = initialize( + hsPattern = hsPattern, staticKey = aliceStaticKey, psk = psk, initiator = true + ) + + let bobStaticKey = genKeyPair(rng[]) + var bobHS = initialize(hsPattern = hsPattern, staticKey = bobStaticKey, psk = psk) + + var + sentTransportMessage: seq[byte] + aliceStep, bobStep: HandshakeStepResult + + # Here the handshake starts + # Write and read calls alternate between Alice and Bob: the handhshake progresses by alternatively calling stepHandshake for each user + + ############### + # 1st step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message + # and the (encrypted) transport message + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + + check: + bobStep.transportMessage == sentTransportMessage + + ############### + # 2nd step + ############### + + # We 
generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # At this step, Bob writes and returns a payload + bobStep = stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage).get() + + # While Alice reads and returns the (decrypted) transport message + aliceStep = stepHandshake(rng[], aliceHS, readPayloadV2 = bobStep.payload2).get() + + check: + aliceStep.transportMessage == sentTransportMessage + + ############### + # 3rd step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob reads Alice's payloads, and returns the (decrypted) transportMessage alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + + check: + bobStep.transportMessage == sentTransportMessage + + # Note that for this handshake pattern, no more message patterns are left for processing + + ######################### + # After Handshake + ######################### + + # We finalize the handshake to retrieve the Inbound/Outbound Symmetric States + var aliceHSResult, bobHSResult: HandshakeResult + + aliceHSResult = finalizeHandshake(aliceHS) + bobHSResult = finalizeHandshake(bobHS) + + # We test read/write of random messages exchanged between Alice and Bob + var + payload2: PayloadV2 + message: seq[byte] + readMessage: seq[byte] + defaultMessageNametagBuffer: MessageNametagBuffer + + for _ in 0 .. 
10: + # Alice writes to Bob + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage + + # Bob writes to Alice + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage + + test "Noise K1K1 Handhshake and message encryption (short test)": + let hsPattern = NoiseHandshakePatterns["K1K1"] + + # We initialize Alice's and Bob's Handshake State + let aliceStaticKey = genKeyPair(rng[]) + let bobStaticKey = genKeyPair(rng[]) + + # This handshake has the following pre-message pattern: + # -> s + # <- s + # ... + # So we define accordingly the sequence of the pre-message public keys + let preMessagePKs: seq[NoisePublicKey] = + @[ + toNoisePublicKey(getPublicKey(aliceStaticKey)), + toNoisePublicKey(getPublicKey(bobStaticKey)), + ] + + var aliceHS = initialize( + hsPattern = hsPattern, + staticKey = aliceStaticKey, + preMessagePKs = preMessagePKs, + initiator = true, + ) + var bobHS = initialize( + hsPattern = hsPattern, staticKey = bobStaticKey, preMessagePKs = preMessagePKs + ) + + var + sentTransportMessage: seq[byte] + aliceStep, bobStep: HandshakeStepResult + + # Here the handshake starts + # Write and read calls alternate between Alice and Bob: the handhshake progresses by alternatively calling stepHandshake for each user + + ############### + # 1st step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message + # and the (encrypted) transport message + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob 
reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + + check: + bobStep.transportMessage == sentTransportMessage + + ############### + # 2nd step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # At this step, Bob writes and returns a payload + bobStep = stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage).get() + + # While Alice reads and returns the (decrypted) transport message + aliceStep = stepHandshake(rng[], aliceHS, readPayloadV2 = bobStep.payload2).get() + + check: + aliceStep.transportMessage == sentTransportMessage + + ############### + # 3rd step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # Similarly as in first step, Alice writes a Waku2 payload containing the handshake_message and the (encrypted) transportMessage + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob reads Alice's payloads, and returns the (decrypted) transportMessage alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + + check: + bobStep.transportMessage == sentTransportMessage + + # Note that for this handshake pattern, no more message patterns are left for processing + + ######################### + # After Handshake + ######################### + + # We finalize the handshake to retrieve the Inbound/Outbound Symmetric States + var aliceHSResult, bobHSResult: HandshakeResult + + aliceHSResult = finalizeHandshake(aliceHS) + bobHSResult = finalizeHandshake(bobHS) + + # We test read/write of random messages between Alice and Bob + var + payload2: PayloadV2 + message: seq[byte] + readMessage: seq[byte] + defaultMessageNametagBuffer: MessageNametagBuffer + + for _ in 0 .. 
10: + # Alice writes to Bob + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage + + # Bob writes to Alice + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage + + test "Noise XK1 Handhshake and message encryption (short test)": + let hsPattern = NoiseHandshakePatterns["XK1"] + + # We initialize Alice's and Bob's Handshake State + let aliceStaticKey = genKeyPair(rng[]) + let bobStaticKey = genKeyPair(rng[]) + + # This handshake has the following pre-message pattern: + # <- s + # ... + # So we define accordingly the sequence of the pre-message public keys + let preMessagePKs: seq[NoisePublicKey] = + @[toNoisePublicKey(getPublicKey(bobStaticKey))] + + var aliceHS = initialize( + hsPattern = hsPattern, + staticKey = aliceStaticKey, + preMessagePKs = preMessagePKs, + initiator = true, + ) + var bobHS = initialize( + hsPattern = hsPattern, staticKey = bobStaticKey, preMessagePKs = preMessagePKs + ) + + var + sentTransportMessage: seq[byte] + aliceStep, bobStep: HandshakeStepResult + + # Here the handshake starts + # Write and read calls alternate between Alice and Bob: the handhshake progresses by alternatively calling stepHandshake for each user + + ############### + # 1st step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message + # and the (encrypted) transport message + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob reads Alice's payloads, and returns the (decrypted) transport message 
Alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + + check: + bobStep.transportMessage == sentTransportMessage + + ############### + # 2nd step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # At this step, Bob writes and returns a payload + bobStep = stepHandshake(rng[], bobHS, transportMessage = sentTransportMessage).get() + + # While Alice reads and returns the (decrypted) transport message + aliceStep = stepHandshake(rng[], aliceHS, readPayloadV2 = bobStep.payload2).get() + + check: + aliceStep.transportMessage == sentTransportMessage + + ############### + # 3rd step + ############### + + # We generate a random transport message + sentTransportMessage = randomSeqByte(rng[], 32) + + # Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message + aliceStep = + stepHandshake(rng[], aliceHS, transportMessage = sentTransportMessage).get() + + # Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him + bobStep = stepHandshake(rng[], bobHS, readPayloadV2 = aliceStep.payload2).get() + + check: + bobStep.transportMessage == sentTransportMessage + + # Note that for this handshake pattern, no more message patterns are left for processing + + ######################### + # After Handshake + ######################### + + # We finalize the handshake to retrieve the Inbound/Outbound Symmetric States + var aliceHSResult, bobHSResult: HandshakeResult + + aliceHSResult = finalizeHandshake(aliceHS) + bobHSResult = finalizeHandshake(bobHS) + + # We test read/write of random messages exchanged between Alice and Bob + var + payload2: PayloadV2 + message: seq[byte] + readMessage: seq[byte] + defaultMessageNametagBuffer: MessageNametagBuffer + + for _ in 0 .. 
10: + # Alice writes to Bob + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(aliceHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(bobHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage + + # Bob writes to Alice + message = randomSeqByte(rng[], 32) + payload2 = writeMessage(bobHSResult, message, defaultMessageNametagBuffer) + readMessage = + readMessage(aliceHSResult, payload2, defaultMessageNametagBuffer).get() + + check: + message == readMessage diff --git a/third-party/nwaku/tests/test_waku_noise_sessions.nim b/third-party/nwaku/tests/test_waku_noise_sessions.nim new file mode 100644 index 0000000..5436539 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_noise_sessions.nim @@ -0,0 +1,421 @@ +{.used.} + +import std/tables, results, stew/byteutils, testutils/unittests +import + waku/[ + common/protobuf, + utils/noise as waku_message_utils, + waku_noise/noise_types, + waku_noise/noise_utils, + waku_noise/noise_handshake_processing, + waku_core, + ], + ./testlib/common + +procSuite "Waku Noise Sessions": + randomize() + + # This test implements the Device pairing and Secure Transfers with Noise + # detailed in the 43/WAKU2-DEVICE-PAIRING RFC https://rfc.vac.dev/spec/43/ + test "Noise Waku Pairing Handhshake and Secure transfer": + ######################### + # Pairing Phase + ######################### + + let hsPattern = NoiseHandshakePatterns["WakuPairing"] + + # Alice static/ephemeral key initialization and commitment + let aliceStaticKey = genKeyPair(rng[]) + let aliceEphemeralKey = genKeyPair(rng[]) + let s = randomSeqByte(rng[], 32) + let aliceCommittedStaticKey = commitPublicKey(getPublicKey(aliceStaticKey), s) + + # Bob static/ephemeral key initialization and commitment + let bobStaticKey = genKeyPair(rng[]) + let bobEphemeralKey = genKeyPair(rng[]) + let r = randomSeqByte(rng[], 32) + let bobCommittedStaticKey = commitPublicKey(getPublicKey(bobStaticKey), r) + + # Content 
Topic information + let applicationName = "waku-noise-sessions" + let applicationVersion = "0.1" + let shardId = "10" + let qrMessageNametag = randomSeqByte(rng[], MessageNametagLength) + + # Out-of-band Communication + + # Bob prepares the QR and sends it out-of-band to Alice + let qr = toQr( + applicationName, + applicationVersion, + shardId, + getPublicKey(bobEphemeralKey), + bobCommittedStaticKey, + ) + + # Alice deserializes the QR code + let ( + readApplicationName, readApplicationVersion, readShardId, readEphemeralKey, + readCommittedStaticKey, + ) = fromQr(qr) + + # We check if QR serialization/deserialization works + check: + applicationName == readApplicationName + applicationVersion == readApplicationVersion + shardId == readShardId + getPublicKey(bobEphemeralKey) == readEphemeralKey + bobCommittedStaticKey == readCommittedStaticKey + + # We set the contentTopic from the content topic parameters exchanged in the QR + let contentTopic: ContentTopic = + "/" & applicationName & "/" & applicationVersion & "/wakunoise/1/sessions_shard-" & + shardId & "/proto" + + ############### + # Pre-handshake message + # + # <- eB {H(sB||r), contentTopicParams, messageNametag} + ############### + let preMessagePKs: seq[NoisePublicKey] = + @[toNoisePublicKey(getPublicKey(bobEphemeralKey))] + + # We initialize the Handshake states. 
+ # Note that we pass the whole qr serialization as prologue information + var aliceHS = initialize( + hsPattern = hsPattern, + ephemeralKey = aliceEphemeralKey, + staticKey = aliceStaticKey, + prologue = qr.toBytes, + preMessagePKs = preMessagePKs, + initiator = true, + ) + var bobHS = initialize( + hsPattern = hsPattern, + ephemeralKey = bobEphemeralKey, + staticKey = bobStaticKey, + prologue = qr.toBytes, + preMessagePKs = preMessagePKs, + ) + + ############### + # Pairing Handshake + ############### + + var + sentTransportMessage: seq[byte] + aliceStep, bobStep: HandshakeStepResult + msgFromPb: ProtobufResult[WakuMessage] + wakuMsg: Result[WakuMessage, cstring] + pb: ProtoBuffer + readPayloadV2: PayloadV2 + aliceMessageNametag, bobMessageNametag: MessageNametag + + # Write and read calls alternate between Alice and Bob: the handhshake progresses by alternatively calling stepHandshake for each user + + ############### + # 1st step + # + # -> eA, eAeB {H(sA||s)} [authcode] + ############### + + # The messageNametag for the first handshake message is randomly generated and exchanged out-of-band + # and corresponds to qrMessageNametag + + # We set the transport message to be H(sA||s) + sentTransportMessage = digestToSeq(aliceCommittedStaticKey) + + # We ensure that digestToSeq and its inverse seqToDigest256 are correct + check: + seqToDigest256(sentTransportMessage) == aliceCommittedStaticKey + + # By being the handshake initiator, Alice writes a Waku2 payload v2 containing her handshake message + # and the (encrypted) transport message + # The message is sent with a messageNametag equal to the one received through the QR code + aliceStep = stepHandshake( + rng[], + aliceHS, + transportMessage = sentTransportMessage, + messageNametag = qrMessageNametag, + ) + .get() + + ############################################### + # We prepare a Waku message from Alice's payload2 + wakuMsg = encodePayloadV2(aliceStep.payload2, contentTopic) + + check: + wakuMsg.isOk() + 
wakuMsg.get().contentTopic == contentTopic + + # At this point wakuMsg is sent over the Waku network and is received + # We simulate this by creating the ProtoBuffer from wakuMsg + pb = wakuMsg.get().encode() + + # We decode the WakuMessage from the ProtoBuffer + msgFromPb = WakuMessage.decode(pb.buffer) + + check: + msgFromPb.isOk() + + # We decode the payloadV2 from the WakuMessage + readPayloadV2 = decodePayloadV2(msgFromPb.get()).get() + + check: + readPayloadV2 == aliceStep.payload2 + ############################################### + + # Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him + # Note that Bob verifies if the received payloadv2 has the expected messageNametag set + bobStep = stepHandshake( + rng[], bobHS, readPayloadV2 = readPayloadV2, messageNametag = qrMessageNametag + ) + .get() + + check: + bobStep.transportMessage == sentTransportMessage + + # We generate an authorization code using the handshake state + let aliceAuthcode = genAuthcode(aliceHS) + let bobAuthcode = genAuthcode(bobHS) + + # We check that they are equal. Note that this check has to be confirmed with a user interaction. 
+ check: + aliceAuthcode == bobAuthcode + + ############### + # 2nd step + # + # <- sB, eAsB {r} + ############### + + # Alice and Bob update their local next messageNametag using the available handshake information + # During the handshake, messageNametag = HKDF(h), where h is the handshake hash value at the end of the last processed message + aliceMessageNametag = toMessageNametag(aliceHS) + bobMessageNametag = toMessageNametag(bobHS) + + # We set as a transport message the commitment randomness r + sentTransportMessage = r + + # At this step, Bob writes and returns a payload + bobStep = stepHandshake( + rng[], + bobHS, + transportMessage = sentTransportMessage, + messageNametag = bobMessageNametag, + ) + .get() + + ############################################### + # We prepare a Waku message from Bob's payload2 + wakuMsg = encodePayloadV2(bobStep.payload2, contentTopic) + + check: + wakuMsg.isOk() + wakuMsg.get().contentTopic == contentTopic + + # At this point wakuMsg is sent over the Waku network and is received + # We simulate this by creating the ProtoBuffer from wakuMsg + pb = wakuMsg.get().encode() + + # We decode the WakuMessage from the ProtoBuffer + msgFromPb = WakuMessage.decode(pb.buffer) + + check: + msgFromPb.isOk() + + # We decode the payloadV2 from the WakuMessage + readPayloadV2 = decodePayloadV2(msgFromPb.get()).get() + + check: + readPayloadV2 == bobStep.payload2 + ############################################### + + # While Alice reads and returns the (decrypted) transport message + aliceStep = stepHandshake( + rng[], + aliceHS, + readPayloadV2 = readPayloadV2, + messageNametag = aliceMessageNametag, + ) + .get() + + check: + aliceStep.transportMessage == sentTransportMessage + + # Alice further checks if Bob's commitment opens to Bob's static key she just received + let expectedBobCommittedStaticKey = + commitPublicKey(aliceHS.rs, aliceStep.transportMessage) + + check: + expectedBobCommittedStaticKey == bobCommittedStaticKey + + 
############### + # 3rd step + # + # -> sA, sAeB, sAsB {s} + ############### + + # Alice and Bob update their local next messageNametag using the available handshake information + aliceMessageNametag = toMessageNametag(aliceHS) + bobMessageNametag = toMessageNametag(bobHS) + + # We set as a transport message the commitment randomness s + sentTransportMessage = s + + # Similarly as in first step, Alice writes a Waku2 payload containing the handshake message and the (encrypted) transport message + aliceStep = stepHandshake( + rng[], + aliceHS, + transportMessage = sentTransportMessage, + messageNametag = aliceMessageNametag, + ) + .get() + + ############################################### + # We prepare a Waku message from Bob's payload2 + wakuMsg = encodePayloadV2(aliceStep.payload2, contentTopic) + + check: + wakuMsg.isOk() + wakuMsg.get().contentTopic == contentTopic + + # At this point wakuMsg is sent over the Waku network and is received + # We simulate this by creating the ProtoBuffer from wakuMsg + pb = wakuMsg.get().encode() + + # We decode the WakuMessage from the ProtoBuffer + msgFromPb = WakuMessage.decode(pb.buffer) + + check: + msgFromPb.isOk() + + # We decode the payloadV2 from the WakuMessage + readPayloadV2 = decodePayloadV2(msgFromPb.get()).get() + + check: + readPayloadV2 == aliceStep.payload2 + ############################################### + + # Bob reads Alice's payloads, and returns the (decrypted) transport message Alice sent to him + bobStep = stepHandshake( + rng[], bobHS, readPayloadV2 = readPayloadV2, messageNametag = bobMessageNametag + ) + .get() + + check: + bobStep.transportMessage == sentTransportMessage + + # Bob further checks if Alice's commitment opens to Alice's static key he just received + let expectedAliceCommittedStaticKey = + commitPublicKey(bobHS.rs, bobStep.transportMessage) + + check: + expectedAliceCommittedStaticKey == aliceCommittedStaticKey + + ######################### + # Secure Transfer Phase + 
######################### + + # We finalize the handshake to retrieve the Inbound/Outbound Symmetric States + var aliceHSResult, bobHSResult: HandshakeResult + + aliceHSResult = finalizeHandshake(aliceHS) + bobHSResult = finalizeHandshake(bobHS) + + # We test read/write of random messages exchanged between Alice and Bob + var + payload2: PayloadV2 + message: seq[byte] + readMessage: seq[byte] + + # We test message exchange + # Note that we exchange more than the number of messages contained in the nametag buffer to test if they are filled correctly as the communication proceeds + for i in 0 .. 10 * MessageNametagBufferSize: + # Alice writes to Bob + message = randomSeqByte(rng[], 32) + payload2 = writeMessage( + aliceHSResult, + message, + outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound, + ) + readMessage = readMessage( + bobHSResult, + payload2, + inboundMessageNametagBuffer = bobHSResult.nametagsInbound, + ) + .get() + + check: + message == readMessage + + # Bob writes to Alice + message = randomSeqByte(rng[], 32) + payload2 = writeMessage( + bobHSResult, + message, + outboundMessageNametagBuffer = bobHSResult.nametagsOutbound, + ) + readMessage = readMessage( + aliceHSResult, + payload2, + inboundMessageNametagBuffer = aliceHSResult.nametagsInbound, + ) + .get() + + check: + message == readMessage + + # We test how nametag buffers help in detecting lost messages + # Alice writes two messages to Bob, but only the second is received + message = randomSeqByte(rng[], 32) + payload2 = writeMessage( + aliceHSResult, + message, + outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound, + ) + message = randomSeqByte(rng[], 32) + payload2 = writeMessage( + aliceHSResult, + message, + outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound, + ) + expect NoiseSomeMessagesWereLost: + readMessage = readMessage( + bobHSResult, + payload2, + inboundMessageNametagBuffer = bobHSResult.nametagsInbound, + ) + .get() + + # We adjust bob nametag buffer for 
next test (i.e. the missed message is correctly recovered) + delete(bobHSResult.nametagsInbound, 2) + message = randomSeqByte(rng[], 32) + payload2 = writeMessage( + bobHSResult, message, outboundMessageNametagBuffer = bobHSResult.nametagsOutbound + ) + readMessage = readMessage( + aliceHSResult, + payload2, + inboundMessageNametagBuffer = aliceHSResult.nametagsInbound, + ) + .get() + + check: + message == readMessage + + # We test if a missing nametag is correctly detected + message = randomSeqByte(rng[], 32) + payload2 = writeMessage( + aliceHSResult, + message, + outboundMessageNametagBuffer = aliceHSResult.nametagsOutbound, + ) + delete(bobHSResult.nametagsInbound, 1) + expect NoiseMessageNametagError: + readMessage = readMessage( + bobHSResult, + payload2, + inboundMessageNametagBuffer = bobHSResult.nametagsInbound, + ) + .get() diff --git a/third-party/nwaku/tests/test_waku_protobufs.nim b/third-party/nwaku/tests/test_waku_protobufs.nim new file mode 100644 index 0000000..cd5e3dd --- /dev/null +++ b/third-party/nwaku/tests/test_waku_protobufs.nim @@ -0,0 +1,30 @@ +{.used.} + +import std/[options, sequtils, tables], testutils/unittests, chronos, chronicles +import + waku/waku_metadata, waku/waku_metadata/rpc, ./testlib/wakucore, ./testlib/wakunode + +procSuite "Waku Protobufs": + # TODO: Missing test coverage in many encode/decode protobuf functions + + test "WakuMetadataResponse": + let res = WakuMetadataResponse(clusterId: some(7), shards: @[10, 23, 33]) + + let buffer = res.encode() + + let decodedBuff = WakuMetadataResponse.decode(buffer.buffer) + check: + decodedBuff.isOk() + decodedBuff.get().clusterId.get() == res.clusterId.get() + decodedBuff.get().shards == res.shards + + test "WakuMetadataRequest": + let req = WakuMetadataRequest(clusterId: some(5), shards: @[100, 2, 0]) + + let buffer = req.encode() + + let decodedBuff = WakuMetadataRequest.decode(buffer.buffer) + check: + decodedBuff.isOk() + decodedBuff.get().clusterId.get() == req.clusterId.get() 
+ decodedBuff.get().shards == req.shards diff --git a/third-party/nwaku/tests/test_waku_rendezvous.nim b/third-party/nwaku/tests/test_waku_rendezvous.nim new file mode 100644 index 0000000..fa2efbd --- /dev/null +++ b/third-party/nwaku/tests/test_waku_rendezvous.nim @@ -0,0 +1,67 @@ +{.used.} + +import std/options, chronos, testutils/unittests, libp2p/builders + +import + waku/waku_core/peers, + waku/node/waku_node, + waku/node/peer_manager/peer_manager, + waku/waku_rendezvous/protocol, + ./testlib/[wakucore, wakunode] + +procSuite "Waku Rendezvous": + asyncTest "Simple remote test": + let + clusterId = 10.uint16 + node1 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + ) + node2 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + ) + node3 = newTestWakuNode( + generateSecp256k1Key(), + parseIpAddress("0.0.0.0"), + Port(0), + clusterId = clusterId, + ) + + await allFutures( + [ + node1.mountRendezvous(clusterId), + node2.mountRendezvous(clusterId), + node3.mountRendezvous(clusterId), + ] + ) + await allFutures([node1.start(), node2.start(), node3.start()]) + + let peerInfo1 = node1.switch.peerInfo.toRemotePeerInfo() + let peerInfo2 = node2.switch.peerInfo.toRemotePeerInfo() + let peerInfo3 = node3.switch.peerInfo.toRemotePeerInfo() + + node1.peerManager.addPeer(peerInfo2) + node2.peerManager.addPeer(peerInfo1) + node2.peerManager.addPeer(peerInfo3) + node3.peerManager.addPeer(peerInfo2) + + let namespace = "test/name/space" + + let res = await node1.wakuRendezvous.batchAdvertise( + namespace, 60.seconds, @[peerInfo2.peerId] + ) + assert res.isOk(), $res.error + + let response = + await node3.wakuRendezvous.batchRequest(namespace, 1, @[peerInfo2.peerId]) + assert response.isOk(), $response.error + let records = response.get() + + check: + records.len == 1 + records[0].peerId == peerInfo1.peerId diff --git 
a/third-party/nwaku/tests/test_waku_switch.nim b/third-party/nwaku/tests/test_waku_switch.nim new file mode 100644 index 0000000..3e6fd08 --- /dev/null +++ b/third-party/nwaku/tests/test_waku_switch.nim @@ -0,0 +1,114 @@ +{.used.} + +import + testutils/unittests, + chronos, + libp2p/builders, + libp2p/protocols/connectivity/autonat/client, + libp2p/protocols/connectivity/relay/relay, + libp2p/protocols/connectivity/relay/client, + stew/byteutils +import waku/node/waku_switch, ./testlib/common, ./testlib/wakucore + +proc newCircuitRelayClientSwitch(relayClient: RelayClient): Switch = + SwitchBuilder + .new() + .withRng(rng()) + .withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()]) + .withTcpTransport() + .withMplex() + .withNoise() + .withCircuitRelay(relayClient) + .build() + +suite "Waku Switch": + asyncTest "Waku Switch works with AutoNat": + ## Given + let + sourceSwitch = newTestSwitch() + wakuSwitch = newWakuSwitch(rng = rng(), circuitRelay = Relay.new()) + await sourceSwitch.start() + await wakuSwitch.start() + + ## When + await sourceSwitch.connect(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs) + let ma = await AutonatClient.new().dialMe( + sourceSwitch, wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs + ) + + ## Then + check: + ma == sourceSwitch.peerInfo.addrs[0] + + ## Teardown + await allFutures(sourceSwitch.stop(), wakuSwitch.stop()) + + asyncTest "Waku Switch acts as circuit relayer": + ## Setup + let + wakuSwitch = newWakuSwitch(rng = rng(), circuitRelay = Relay.new()) + sourceClient = RelayClient.new() + destClient = RelayClient.new() + sourceSwitch = newCircuitRelayClientSwitch(sourceClient) + destSwitch = newCircuitRelayClientSwitch(destClient) + + # Setup client relays + sourceClient.setup(sourceSwitch) + destClient.setup(destSwitch) + + await allFutures(wakuSwitch.start(), sourceSwitch.start(), destSwitch.start()) + + ## Given + let + # Create a relay address to destSwitch using wakuSwitch as the relay + addrs = 
MultiAddress + .init( + $wakuSwitch.peerInfo.addrs[0] & "/p2p/" & $wakuSwitch.peerInfo.peerId & + "/p2p-circuit" + ) + .get() + msg = "Just one relay away..." + + # Create a custom protocol + let customProtoCodec = "/vac/waku/test/1.0.0" + var + completionFut = newFuture[bool]() + proto = new LPProtocol + proto.codec = customProtoCodec + proto.handler = proc( + conn: Connection, proto: string + ) {.async: (raises: [CancelledError]).} = + try: + assert (await conn.readLp(1024)) == msg.toBytes() + except LPStreamError: + error "Connection read error", error = getCurrentExceptionMsg() + assert false, getCurrentExceptionMsg() + + completionFut.complete(true) + + await proto.start() + destSwitch.mount(proto) + + ## When + # Connect destSwitch to the relay + await destSwitch.connect(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs) + + # Connect sourceSwitch to the relay + await sourceSwitch.connect(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs) + + # destClient reserves a slot on the relay. 
+ let rsvp = + await destClient.reserve(wakuSwitch.peerInfo.peerId, wakuSwitch.peerInfo.addrs) + + # sourceSwitch dial destSwitch using the relay + let conn = + await sourceSwitch.dial(destSwitch.peerInfo.peerId, @[addrs], customProtoCodec) + + await conn.writeLp(msg) + + ## Then + check: + await completionFut.withTimeout(3.seconds) + + ## Teardown + await allFutures(wakuSwitch.stop(), sourceSwitch.stop(), destSwitch.stop()) diff --git a/third-party/nwaku/tests/test_wakunode.nim b/third-party/nwaku/tests/test_wakunode.nim new file mode 100644 index 0000000..a7f1084 --- /dev/null +++ b/third-party/nwaku/tests/test_wakunode.nim @@ -0,0 +1,372 @@ +{.used.} + +import + std/[sequtils, strutils, net], + stew/byteutils, + testutils/unittests, + chronicles, + chronos, + libp2p/crypto/crypto, + libp2p/crypto/secp, + libp2p/multiaddress, + libp2p/switch, + libp2p/protocols/pubsub/rpc/messages, + libp2p/protocols/pubsub/pubsub, + libp2p/protocols/pubsub/gossipsub, + libp2p/nameresolving/mockresolver, + eth/p2p/discoveryv5/enr, + eth/net/utils +import + waku/[waku_core, waku_node, node/peer_manager], ./testlib/wakucore, ./testlib/wakunode + +suite "WakuNode": + asyncTest "Protocol matcher works as expected": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(61000)) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61002)) + shard = DefaultRelayShard + contentTopic = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message = WakuMessage(payload: payload, contentTopic: contentTopic) + + # Setup node 1 with stable codec "/vac/waku/relay/2.0.0" + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + node1.wakuRelay.codec = "/vac/waku/relay/2.0.0" + + # Setup node 2 with beta codec "/vac/waku/relay/2.0.0-beta2" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, 
"Failed to mount relay" + node2.wakuRelay.codec = "/vac/waku/relay/2.0.0-beta2" + + check: + # Check that mounted codecs are actually different + node1.wakuRelay.codec == "/vac/waku/relay/2.0.0" + node2.wakuRelay.codec == "/vac/waku/relay/2.0.0-beta2" + + # Now verify that protocol matcher returns `true` and relay works + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + msg.contentTopic == contentTopic + msg.payload == payload + completionFut.complete(true) + + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. + node2.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node2.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic" + await sleepAsync(2000.millis) + + var res = await node1.publish(some($shard), message) + assert res.isOk(), $res.error + + await sleepAsync(2000.millis) + + check: + (await completionFut.withTimeout(5.seconds)) == true + + await allFutures(node1.stop(), node2.stop()) + + asyncTest "resolve and connect to dns multiaddrs": + let resolver = MockResolver.new() + + resolver.ipResponses[("localhost", false)] = @["127.0.0.1"] + + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + nodeKey1, parseIpAddress("0.0.0.0"), Port(61020), nameResolver = resolver + ) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61022)) + + # Construct DNS multiaddr for node2 + let + node2PeerId = $(node2.switch.peerInfo.peerId) + node2Dns4Addr = "/dns4/localhost/tcp/61022/p2p/" & node2PeerId + + (await 
node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await allFutures([node1.start(), node2.start()]) + + await node1.connectToNodes(@[node2Dns4Addr]) + + check: + node1.switch.connManager.connCount(node2.switch.peerInfo.peerId) == 1 + + await allFutures([node1.stop(), node2.stop()]) + + asyncTest "Maximum connections can be configured with 20 nodes": + let + maxConnections = 20 + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("127.0.0.1"), + Port(60010), + maxConnections = maxConnections, + ) + + # Initialize and start node1 + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # Create an array to hold the other nodes + var otherNodes: seq[WakuNode] = @[] + + # Create and start 20 other nodes + for i in 0 ..< maxConnections + 1: + let + nodeKey = generateSecp256k1Key() + port = 60012 + i * 2 # Ensure unique ports for each node + node = newTestWakuNode(nodeKey, parseIpAddress("127.0.0.1"), Port(port)) + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + otherNodes.add(node) + + # Connect all other nodes to node1 + for node in otherNodes: + discard + await node1.peerManager.connectPeer(node.switch.peerInfo.toRemotePeerInfo()) + await sleepAsync(2.seconds) # Small delay to avoid hammering the connection process + + # Check that the number of connections matches the maxConnections + check: + node1.switch.isConnected(otherNodes[0].switch.peerInfo.peerId) + node1.switch.isConnected(otherNodes[8].switch.peerInfo.peerId) + node1.switch.isConnected(otherNodes[14].switch.peerInfo.peerId) + node1.switch.isConnected(otherNodes[20].switch.peerInfo.peerId) == false + + # Stop all nodes + var stopFutures = @[node1.stop()] + for node in otherNodes: + stopFutures.add(node.stop()) + await allFutures(stopFutures) + + asyncTest "Messages fails with 
wrong key path": + let nodeKey1 = generateSecp256k1Key() + + expect ResultDefect: + # gibberish + discard newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(61004), + wsBindPort = Port(8000), + wssEnabled = true, + secureKey = "../../waku/node/key_dummy.txt", + ) + + asyncTest "Peer info updates with correct announced addresses": + let + nodeKey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + bindPort = Port(61006) + extIp = some(getPrimaryIPAddr()) + extPort = some(Port(61008)) + node = newTestWakuNode(nodeKey, bindIp, bindPort, extIp, extPort) + + let + bindEndpoint = MultiAddress.init(bindIp, tcpProtocol, bindPort) + announcedEndpoint = MultiAddress.init(extIp.get(), tcpProtocol, extPort.get()) + + check: + # Check that underlying peer info contains only bindIp before starting + node.switch.peerInfo.listenAddrs.len == 1 + node.switch.peerInfo.listenAddrs.contains(bindEndpoint) + # Underlying peer info has not updated addrs before starting + node.switch.peerInfo.addrs.len == 0 + + node.announcedAddresses.len == 1 + node.announcedAddresses.contains(announcedEndpoint) + + await node.start() + + check: + node.started + # Underlying peer info listenAddrs has not changed + node.switch.peerInfo.listenAddrs.len == 1 + node.switch.peerInfo.listenAddrs.contains(bindEndpoint) + # Check that underlying peer info is updated with announced address + node.switch.peerInfo.addrs.len == 1 + node.switch.peerInfo.addrs.contains(announcedEndpoint) + + await node.stop() + + asyncTest "Node can use dns4 in announced addresses": + let + nodeKey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + bindPort = Port(61010) + extIp = some(getPrimaryIPAddr()) + extPort = some(Port(61012)) + domainName = "example.com" + expectedDns4Addr = + MultiAddress.init("/dns4/" & domainName & "/tcp/" & $(extPort.get())).get() + node = newTestWakuNode( + nodeKey, bindIp, bindPort, extIp, extPort, dns4DomainName = some(domainName) + ) + + check: + 
node.announcedAddresses.len == 1 + node.announcedAddresses.contains(expectedDns4Addr) + + asyncTest "Node uses dns4 resolved ip in announced addresses if no extIp is provided": + let + nodeKey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + bindPort = Port(0) + + domainName = "status.im" + node = + newTestWakuNode(nodeKey, bindIp, bindPort, dns4DomainName = some(domainName)) + + var ipStr = "" + var enrIp = node.enr.tryGet("ip", array[4, byte]) + + if enrIp.isSome(): + ipStr &= $ipv4(enrIp.get()) + + # Check that the IP filled is the one received by the DNS lookup + # As IPs may change, we check that it's not empty, not the 0 IP and not localhost + check: + ipStr.len() > 0 + not ipStr.contains("0.0.0.0") + not ipStr.contains("127.0.0.1") + + asyncTest "Node creation fails when invalid dns4 address is provided": + let + nodeKey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + bindPort = Port(0) + + inexistentDomain = "thisdomain.doesnot.exist" + invalidDomain = "" + expectedError = "Could not resolve IP from DNS: empty response" + + var inexistentDomainErr, invalidDomainErr: string = "" + + # Create node with inexistent domain + try: + let node = newTestWakuNode( + nodeKey, bindIp, bindPort, dns4DomainName = some(inexistentDomain) + ) + except Exception as e: + inexistentDomainErr = e.msg + + # Create node with invalid domain + try: + let node = + newTestWakuNode(nodeKey, bindIp, bindPort, dns4DomainName = some(invalidDomain)) + except Exception as e: + invalidDomainErr = e.msg + + # Check that exceptions were raised in both cases + check: + inexistentDomainErr == expectedError + invalidDomainErr == expectedError + + asyncTest "Agent string is set and advertised correctly": + let + # custom agent string + expectedAgentString1 = "node1-agent-string" + + # bump when updating nim-libp2p + expectedAgentString2 = "nim-libp2p/0.0.1" + let + # node with custom agent string + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + 
nodeKey1, + parseIpAddress("0.0.0.0"), + Port(61014), + agentString = some(expectedAgentString1), + ) + + # node with default agent string from libp2p + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61016)) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + await node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()]) + + let node1Agent = + node2.switch.peerStore[AgentBook][node1.switch.peerInfo.toRemotePeerInfo().peerId] + let node2Agent = + node1.switch.peerStore[AgentBook][node2.switch.peerInfo.toRemotePeerInfo().peerId] + + check: + node1Agent == expectedAgentString1 + node2Agent == expectedAgentString2 + + await allFutures(node1.stop(), node2.stop()) + + asyncTest "Custom multiaddresses are set and advertised correctly": + let + # custom multiaddress + expectedMultiaddress1 = MultiAddress.init("/ip4/200.200.200.200/tcp/1234").get() + + # Note: this could have been done with a single node, but it is useful to + # have two nodes to check that the multiaddress is advertised correctly + let + # node with custom multiaddress + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + Port(61018), + extMultiAddrs = @[expectedMultiaddress1], + ) + + # node with default multiaddress + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(61020)) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + await 
node2.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()]) + + let node1MultiAddrs = node2.switch.peerStore[AddressBook][ + node1.switch.peerInfo.toRemotePeerInfo().peerId + ] + + check: + node1MultiAddrs.contains(expectedMultiaddress1) + + await allFutures(node1.stop(), node2.stop()) diff --git a/third-party/nwaku/tests/testlib/assertions.nim b/third-party/nwaku/tests/testlib/assertions.nim new file mode 100644 index 0000000..0347839 --- /dev/null +++ b/third-party/nwaku/tests/testlib/assertions.nim @@ -0,0 +1,14 @@ +import chronos + +template assertResultOk*[T, E](result: Result[T, E]) = + assert result.isOk(), $result.error() + +template assertResultOk*(result: Result[void, string]) = + assert result.isOk(), $result.error() + +template typeEq*(t: typedesc, u: typedesc): bool = + # is also true if a is subtype of b + t is u and u is t # Only true if actually equal types + +template typeEq*(t: auto, u: typedesc): bool = + typeEq(type(t), u) diff --git a/third-party/nwaku/tests/testlib/common.nim b/third-party/nwaku/tests/testlib/common.nim new file mode 100644 index 0000000..2163206 --- /dev/null +++ b/third-party/nwaku/tests/testlib/common.nim @@ -0,0 +1,32 @@ +import std/[times, random], bearssl/rand, libp2p/crypto/crypto + +## Randomization + +proc randomize*() = + ## Initializes the default random number generator with the given seed. + ## From: https://nim-lang.org/docs/random.html#randomize,int64 + let now = getTime() + randomize(now.toUnix() * 1_000_000_000 + now.nanosecond) + +## RNG +# Copied from here: https://github.com/status-im/nim-libp2p/blob/d522537b19a532bc4af94fcd146f779c1f23bad0/tests/helpers.nim#L28 + +type Rng = object + rng: ref HmacDrbgContext + +# Typically having a module variable is considered bad design. This case should +# be considered as an exception and it should be used only in the tests. 
+var rngVar: Rng + +proc getRng(): ref HmacDrbgContext = + # TODO: if `rngVar` is a threadvar like it should be, there are random and + # spurious compile failures on mac - this is not gcsafe but for the + # purpose of the tests, it's ok as long as we only use a single thread + {.gcsafe.}: + if rngVar.rng.isNil(): + rngVar.rng = crypto.newRng() + + rngVar.rng + +template rng*(): ref HmacDrbgContext = + getRng() diff --git a/third-party/nwaku/tests/testlib/comparisons.nim b/third-party/nwaku/tests/testlib/comparisons.nim new file mode 100644 index 0000000..cdc2404 --- /dev/null +++ b/third-party/nwaku/tests/testlib/comparisons.nim @@ -0,0 +1,2 @@ +template chainedComparison*(a: untyped, b: untyped, c: untyped): bool = + a == b and b == c diff --git a/third-party/nwaku/tests/testlib/futures.nim b/third-party/nwaku/tests/testlib/futures.nim new file mode 100644 index 0000000..e9a7933 --- /dev/null +++ b/third-party/nwaku/tests/testlib/futures.nim @@ -0,0 +1,49 @@ +import chronos + +import waku/[waku_core/message, waku_store, waku_store_legacy] + +const + FUTURE_TIMEOUT* = 1.seconds + FUTURE_TIMEOUT_MEDIUM* = 5.seconds + FUTURE_TIMEOUT_LONG* = 10.seconds + FUTURE_TIMEOUT_SHORT* = 100.milliseconds + FUTURE_TIMEOUT_SCORING* = 13.seconds # Scoring is 12s, so we need to wait more + +proc newPushHandlerFuture*(): Future[(string, WakuMessage)] = + newFuture[(string, WakuMessage)]() + +proc newBoolFuture*(): Future[bool] = + newFuture[bool]() + +proc newHistoryFuture*(): Future[StoreQueryRequest] = + newFuture[StoreQueryRequest]() + +proc newLegacyHistoryFuture*(): Future[waku_store_legacy.HistoryQuery] = + newFuture[waku_store_legacy.HistoryQuery]() + +proc toResult*[T](future: Future[T]): Result[T, string] = + if future.cancelled(): + return chronos.err("Future timeouted before completing.") + elif future.finished() and not future.failed(): + return chronos.ok(future.read()) + else: + return chronos.err("Future finished but failed.") + +proc toResult*(future: Future[void]): 
Result[void, string] = + if future.cancelled(): + return chronos.err("Future timeouted before completing.") + elif future.finished() and not future.failed(): + return chronos.ok() + else: + return chronos.err("Future finished but failed.") + +proc waitForResult*[T]( + future: Future[T], timeout = FUTURE_TIMEOUT +): Future[Result[T, string]] {.async.} = + discard await future.withTimeout(timeout) + return future.toResult() + +proc reset*[T](future: Future[T]): void = + # Likely an incomplete reset, but good enough for testing purposes (for now) + future.internalError = nil + future.internalState = FutureState.Pending diff --git a/third-party/nwaku/tests/testlib/postgres.nim b/third-party/nwaku/tests/testlib/postgres.nim new file mode 100644 index 0000000..1449a59 --- /dev/null +++ b/third-party/nwaku/tests/testlib/postgres.nim @@ -0,0 +1,27 @@ +import chronicles, chronos +import + waku/[ + waku_archive, + waku_archive/driver as driver_module, + waku_archive/driver/builder, + waku_archive/driver/postgres_driver, + ] + +const storeMessageDbUrl = "postgres://postgres:test123@localhost:5432/postgres" + +proc newTestPostgresDriver*(): Future[Result[ArchiveDriver, string]] {.async.} = + proc onErr(errMsg: string) {.gcsafe, closure.} = + error "error creating ArchiveDriver", error = errMsg + quit(QuitFailure) + + let + vacuum = false + migrate = true + maxNumConn = 50 + + let driverRes = + await ArchiveDriver.new(storeMessageDbUrl, vacuum, migrate, maxNumConn, onErr) + if driverRes.isErr(): + onErr("could not create archive driver: " & driverRes.error) + + return ok(driverRes.get()) diff --git a/third-party/nwaku/tests/testlib/postgres_legacy.nim b/third-party/nwaku/tests/testlib/postgres_legacy.nim new file mode 100644 index 0000000..50988c6 --- /dev/null +++ b/third-party/nwaku/tests/testlib/postgres_legacy.nim @@ -0,0 +1,27 @@ +import chronicles, chronos +import + waku/waku_archive_legacy, + waku/waku_archive_legacy/driver as driver_module, + 
waku/waku_archive_legacy/driver/builder, + waku/waku_archive_legacy/driver/postgres_driver + +const storeMessageDbUrl = "postgres://postgres:test123@localhost:5432/postgres" + +proc newTestPostgresDriver*(): Future[Result[ArchiveDriver, string]] {. + async, deprecated +.} = + proc onErr(errMsg: string) {.gcsafe, closure.} = + error "error creating ArchiveDriver", error = errMsg + quit(QuitFailure) + + let + vacuum = false + migrate = true + maxNumConn = 50 + + let driverRes = + await ArchiveDriver.new(storeMessageDbUrl, vacuum, migrate, maxNumConn, onErr) + if driverRes.isErr(): + onErr("could not create archive driver: " & driverRes.error) + + return ok(driverRes.get()) diff --git a/third-party/nwaku/tests/testlib/sequtils.nim b/third-party/nwaku/tests/testlib/sequtils.nim new file mode 100644 index 0000000..5fd3d41 --- /dev/null +++ b/third-party/nwaku/tests/testlib/sequtils.nim @@ -0,0 +1,2 @@ +proc toString*(bytes: seq[byte]): string = + cast[string](bytes) diff --git a/third-party/nwaku/tests/testlib/simple_mock.nim b/third-party/nwaku/tests/testlib/simple_mock.nim new file mode 100644 index 0000000..91ec192 --- /dev/null +++ b/third-party/nwaku/tests/testlib/simple_mock.nim @@ -0,0 +1,47 @@ +# Sourced from https://forum.nim-lang.org/t/9255#60617 + +import posix + +type Instr {.union.} = object + bytes: array[8, byte] + value: uint64 + +proc mockImpl*(target, replacement: pointer) = + # YOLO who needs alignment + #doAssert (cast[ByteAddress](target) and ByteAddress(0x07)) == 0 + var page = cast[pointer](cast[uint](target) and (not 0xfff)) + doAssert mprotect(page, 4096, PROT_WRITE or PROT_EXEC) == 0 + let rel = cast[uint](replacement) - cast[uint](target) - 5 + var instr = Instr( + bytes: [ + 0xe9.byte, + (rel shr 0).byte, + (rel shr 8).byte, + (rel shr 16).byte, + (rel shr 24).byte, + 0, + 0, + 0, + ] + ) + cast[ptr uint64](target)[] = instr.value + doAssert mprotect(page, 4096, PROT_EXEC) == 0 + +# Note: Requires manual cleanup +# Usage Example: +# proc 
helloWorld(): string = +# "Hello, World!" +# +# echo helloWorld() # "Hello, World!" +# +# let backup = helloWorld +# mock(helloWorld): +# proc mockedHellWorld(): string = +# "Mocked Hello, World!" +# mockedMigrate +# +# echo helloWorld() # "Mocked Hello, World!" +# +# helloWorld = backup # Restore the original function +template mock*(target, replacement: untyped): untyped = + mockImpl(cast[pointer](target), cast[pointer](replacement)) diff --git a/third-party/nwaku/tests/testlib/tables.nim b/third-party/nwaku/tests/testlib/tables.nim new file mode 100644 index 0000000..2abb2d6 --- /dev/null +++ b/third-party/nwaku/tests/testlib/tables.nim @@ -0,0 +1,21 @@ +import std/[tables, sequtils, options] + +import waku/waku_core/topics, ../testlib/wakucore + +proc `==`*( + table: Table[pubsub_topic.RelayShard, seq[NsContentTopic]], + other: array[0 .. 0, (string, seq[string])], +): bool = + let otherTyped = other.map( + proc(item: (string, seq[string])): (RelayShard, seq[NsContentTopic]) = + let + (pubsubTopic, contentTopics) = item + shard = RelayShard.parse(pubsubTopic).value() + nsContentTopics = contentTopics.map( + proc(contentTopic: string): NsContentTopic = + NsContentTopic.parse(contentTopic).value() + ) + return (shard, nsContentTopics) + ) + + table == otherTyped.toTable() diff --git a/third-party/nwaku/tests/testlib/testasync.nim b/third-party/nwaku/tests/testlib/testasync.nim new file mode 100644 index 0000000..78ff856 --- /dev/null +++ b/third-party/nwaku/tests/testlib/testasync.nim @@ -0,0 +1,20 @@ +# Sourced from: vendor/nim-libp2p/tests/testutils.nim +# Adds the ability for asyncSetup and asyncTeardown to be used in unittest2 + +template asyncTeardown*(body: untyped): untyped = + teardown: + waitFor( + ( + proc() {.async, gcsafe.} = + body + )() + ) + +template asyncSetup*(body: untyped): untyped = + setup: + waitFor( + ( + proc() {.async, gcsafe.} = + body + )() + ) diff --git a/third-party/nwaku/tests/testlib/testutils.nim 
b/third-party/nwaku/tests/testlib/testutils.nim new file mode 100644 index 0000000..b436c6a --- /dev/null +++ b/third-party/nwaku/tests/testlib/testutils.nim @@ -0,0 +1,37 @@ +import testutils/unittests, chronos + +template xsuite*(name: string, body: untyped) = + discard + +template suitex*(name: string, body: untyped) = + discard + +template xprocSuite*(name: string, body: untyped) = + discard + +template procSuitex*(name: string, body: untyped) = + discard + +template xtest*(name: string, body: untyped) = + test name: + skip() + +template testx*(name: string, body: untyped) = + test name: + skip() + +template xasyncTest*(name: string, body: untyped) = + test name: + skip() + +template asyncTestx*(name: string, body: untyped) = + test name: + skip() + +template waitActive*(condition: bool) = + for i in 0 ..< 200: + if condition: + break + await sleepAsync(10) + + assert condition diff --git a/third-party/nwaku/tests/testlib/wakucore.nim b/third-party/nwaku/tests/testlib/wakucore.nim new file mode 100644 index 0000000..c5e16d0 --- /dev/null +++ b/third-party/nwaku/tests/testlib/wakucore.nim @@ -0,0 +1,76 @@ +import + std/[options, times], + results, + stew/byteutils, + chronos, + libp2p/switch, + libp2p/builders, + libp2p/crypto/crypto as libp2p_keys, + eth/keys as eth_keys +import waku/waku_core, ./common + +export switch + +# Time + +proc now*(): Timestamp = + getNanosecondTime(getTime().toUnixFloat()) + +proc ts*(offset = 0, origin = now()): Timestamp = + origin + getNanosecondTime(int64(offset)) + +# Switch + +proc generateEcdsaKey*(): libp2p_keys.PrivateKey = + libp2p_keys.PrivateKey.random(ECDSA, rng[]).get() + +proc generateEcdsaKeyPair*(): libp2p_keys.KeyPair = + libp2p_keys.KeyPair.random(ECDSA, rng[]).get() + +proc generateSecp256k1Key*(): libp2p_keys.PrivateKey = + libp2p_keys.PrivateKey.random(Secp256k1, rng[]).get() + +proc ethSecp256k1Key*(hex: string): eth_keys.PrivateKey = + eth_keys.PrivateKey.fromHex(hex).get() + +proc newTestSwitch*( + key = 
none(libp2p_keys.PrivateKey), address = none(MultiAddress) +): Switch = + let peerKey = key.get(generateSecp256k1Key()) + let peerAddr = address.get(MultiAddress.init("/ip4/127.0.0.1/tcp/0").get()) + return newStandardSwitch(some(peerKey), addrs = peerAddr) + +# Waku message + +export waku_core.DefaultPubsubTopic, waku_core.DefaultContentTopic + +proc fakeWakuMessage*( + payload: string | seq[byte] = "TEST-PAYLOAD", + contentTopic = DefaultContentTopic, + meta: string | seq[byte] = newSeq[byte](), + ts = now(), + ephemeral = false, + proof = newSeq[byte](), +): WakuMessage = + var payloadBytes: seq[byte] + var metaBytes: seq[byte] + + when payload is string: + payloadBytes = toBytes(payload) + else: + payloadBytes = payload + + when meta is string: + metaBytes = toBytes(meta) + else: + metaBytes = meta + + WakuMessage( + payload: payloadBytes, + contentTopic: contentTopic, + meta: metaBytes, + version: 2, + timestamp: ts, + ephemeral: ephemeral, + proof: proof, + ) diff --git a/third-party/nwaku/tests/testlib/wakunode.nim b/third-party/nwaku/tests/testlib/wakunode.nim new file mode 100644 index 0000000..105dc4f --- /dev/null +++ b/third-party/nwaku/tests/testlib/wakunode.nim @@ -0,0 +1,154 @@ +import + std/options, + results, + chronos, + libp2p/switch, + libp2p/builders, + libp2p/nameresolving/nameresolver, + libp2p/crypto/crypto as libp2p_keys, + eth/keys as eth_keys +import + waku/[ + waku_node, + waku_core/topics, + node/peer_manager, + waku_enr, + discovery/waku_discv5, + factory/internal_config, + factory/waku_conf, + factory/conf_builder/conf_builder, + factory/builder, + ], + ./common + +# Waku node + +# TODO: migrate to usage of a test cluster conf +proc defaultTestWakuConfBuilder*(): WakuConfBuilder = + var builder = WakuConfBuilder.init() + builder.withP2pTcpPort(Port(60000)) + builder.withP2pListenAddress(parseIpAddress("0.0.0.0")) + builder.restServerConf.withListenAddress(parseIpAddress("127.0.0.1")) + builder.withDnsAddrsNameServers( + 
@[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] + ) + builder.withNatStrategy("any") + builder.withMaxConnections(50) + builder.withRelayServiceRatio("60:40") + builder.withMaxMessageSize("1024 KiB") + builder.withClusterId(DefaultClusterId) + builder.withSubscribeShards(@[DefaultShardId]) + builder.withRelay(true) + builder.withRendezvous(true) + builder.storeServiceConf.withDbMigration(false) + builder.storeServiceConf.withSupportV2(false) + return builder + +proc defaultTestWakuConf*(): WakuConf = + var builder = defaultTestWakuConfBuilder() + return builder.build().value + +proc newTestWakuNode*( + nodeKey: crypto.PrivateKey, + bindIp: IpAddress, + bindPort: Port, + extIp = none(IpAddress), + extPort = none(Port), + extMultiAddrs = newSeq[MultiAddress](), + peerStorage: PeerStorage = nil, + maxConnections = builders.MaxConnections, + wsBindPort: Port = (Port) 8000, + wsEnabled: bool = false, + wssEnabled: bool = false, + secureKey: string = "", + secureCert: string = "", + wakuFlags = none(CapabilitiesBitfield), + nameResolver: NameResolver = nil, + sendSignedPeerRecord = false, + dns4DomainName = none(string), + discv5UdpPort = none(Port), + agentString = none(string), + peerStoreCapacity = none(int), + clusterId = DefaultClusterId, + subscribeShards = @[DefaultShardId], +): WakuNode = + var resolvedExtIp = extIp + + # Update extPort to default value if it's missing and there's an extIp or a DNS domain + let extPort = + if (extIp.isSome() or dns4DomainName.isSome()) and extPort.isNone(): + some(Port(60000)) + else: + extPort + + var conf = defaultTestWakuConf() + + conf.clusterId = clusterId + conf.subscribeShards = subscribeShards + + if dns4DomainName.isSome() and extIp.isNone(): + # If there's an error resolving the IP, an exception is thrown and test fails + let dns = (waitFor dnsResolve(dns4DomainName.get(), conf.dnsAddrsNameServers)).valueOr: + raise newException(Defect, error) + + resolvedExtIp = some(parseIpAddress(dns)) + + let netConf = 
NetConfig.init( + clusterId = conf.clusterId, + bindIp = bindIp, + bindPort = bindPort, + extIp = resolvedExtIp, + extPort = extPort, + extMultiAddrs = extMultiAddrs, + wsBindPort = some(wsBindPort), + wsEnabled = wsEnabled, + wssEnabled = wssEnabled, + dns4DomainName = dns4DomainName, + discv5UdpPort = discv5UdpPort, + wakuFlags = wakuFlags, + ).valueOr: + raise newException(Defect, "Invalid network configuration: " & error) + + var enrBuilder = EnrBuilder.init(nodeKey) + + enrBuilder.withWakuRelaySharding( + RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards) + ).isOkOr: + raise newException(Defect, "Invalid record: " & $error) + + enrBuilder.withIpAddressAndPorts( + ipAddr = netConf.enrIp, tcpPort = netConf.enrPort, udpPort = netConf.discv5UdpPort + ) + + enrBuilder.withMultiaddrs(netConf.enrMultiaddrs) + + if netConf.wakuFlags.isSome(): + enrBuilder.withWakuCapabilities(netConf.wakuFlags.get()) + + let record = enrBuilder.build().valueOr: + raise newException(Defect, "Invalid record: " & $error) + + var builder = WakuNodeBuilder.init() + builder.withRng(rng()) + builder.withNodeKey(nodeKey) + builder.withRecord(record) + builder.withNetworkConfiguration(netConf) + builder.withPeerStorage(peerStorage, capacity = peerStoreCapacity) + builder.withSwitchConfiguration( + maxConnections = some(maxConnections), + nameResolver = nameResolver, + sendSignedPeerRecord = sendSignedPeerRecord, + secureKey = + if secureKey != "": + some(secureKey) + else: + none(string), + secureCert = + if secureCert != "": + some(secureCert) + else: + none(string), + agentString = agentString, + ) + + return builder.build().get() diff --git a/third-party/nwaku/tests/tools/test_all.nim b/third-party/nwaku/tests/tools/test_all.nim new file mode 100644 index 0000000..e5ace3d --- /dev/null +++ b/third-party/nwaku/tests/tools/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_confutils_envvar, ./test_confutils_envvar_serialization.nim diff --git 
a/third-party/nwaku/tests/tools/test_confutils_envvar.nim b/third-party/nwaku/tests/tools/test_confutils_envvar.nim new file mode 100644 index 0000000..ed559ad --- /dev/null +++ b/third-party/nwaku/tests/tools/test_confutils_envvar.nim @@ -0,0 +1,73 @@ +{.used.} + +import + std/[os, options], + results, + testutils/unittests, + confutils, + confutils/defs, + confutils/std/net +import ../../tools/confutils/[envvar as confEnvvarDefs, envvar_net as confEnvvarNet] + +type ConfResult[T] = Result[T, string] + +type TestConf = object + configFile* {.desc: "Configuration file path", name: "config-file".}: + Option[InputFile] + + testFile* {.desc: "Configuration test file path", name: "test-file".}: + Option[InputFile] + + listenAddress* {. + defaultValue: parseIpAddress("127.0.0.1"), + desc: "Listening address", + name: "listen-address" + .}: IpAddress + + tcpPort* {.desc: "TCP listening port", defaultValue: 60000, name: "tcp-port".}: Port + +{.push warning[ProveInit]: off.} + +proc load*(T: type TestConf, prefix: string): ConfResult[T] = + try: + let conf = TestConf.load( + secondarySources = proc( + conf: TestConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = + sources.addConfigFile(Envvar, InputFile(prefix)) + ) + ok(conf) + except CatchableError: + err(getCurrentExceptionMsg()) + +{.pop.} + +suite "nim-confutils - envvar": + test "load configuration from environment variables": + ## Given + let prefix = "test-prefix" + + let + listenAddress = "1.1.1.1" + tcpPort = "8080" + configFile = "/tmp/test.conf" + + ## When + os.putEnv("TEST_PREFIX_CONFIG_FILE", configFile) + os.putEnv("TEST_PREFIX_LISTEN_ADDRESS", listenAddress) + os.putEnv("TEST_PREFIX_TCP_PORT", tcpPort) + + let confLoadRes = TestConf.load(prefix) + + ## Then + check confLoadRes.isOk() + + let conf = confLoadRes.get() + check: + conf.listenAddress == parseIpAddress(listenAddress) + conf.tcpPort == Port(8080) + + conf.configFile.isSome() + conf.configFile.get().string == configFile + + 
conf.testFile.isNone() diff --git a/third-party/nwaku/tests/tools/test_confutils_envvar_serialization.nim b/third-party/nwaku/tests/tools/test_confutils_envvar_serialization.nim new file mode 100644 index 0000000..c9639e2 --- /dev/null +++ b/third-party/nwaku/tests/tools/test_confutils_envvar_serialization.nim @@ -0,0 +1,17 @@ +{.used.} + +import testutils/unittests +import ../../tools/confutils/envvar_serialization/utils + +suite "nim-envvar-serialization - utils": + test "construct env var key": + ## Given + let prefix = "some-prefix" + let name = @["db-url"] + + ## When + let key = constructKey(prefix, name) + + ## Then + check: + key == "SOME_PREFIX_DB_URL" diff --git a/third-party/nwaku/tests/waku_archive/archive_utils.nim b/third-party/nwaku/tests/waku_archive/archive_utils.nim new file mode 100644 index 0000000..4988550 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/archive_utils.nim @@ -0,0 +1,39 @@ +{.used.} + +import std/options, results, chronos, libp2p/crypto/crypto + +import + waku/[ + node/peer_manager, + waku_core, + waku_archive, + waku_archive/driver/sqlite_driver, + waku_archive/driver/sqlite_driver/migrations, + common/databases/db_sqlite, + ], + ../testlib/[wakucore] + +proc newSqliteDatabase*(path: Option[string] = string.none()): SqliteDatabase = + SqliteDatabase.new(path.get(":memory:")).tryGet() + +proc newSqliteArchiveDriver*(): ArchiveDriver = + let database = newSqliteDatabase() + migrate(database).tryGet() + return SqliteDriver.new(database).tryGet() + +proc newWakuArchive*(driver: ArchiveDriver): WakuArchive = + WakuArchive.new(driver).get() + +proc put*( + driver: ArchiveDriver, pubsubTopic: PubSubTopic, msgList: seq[WakuMessage] +): ArchiveDriver = + for msg in msgList: + let _ = waitFor driver.put(computeMessageHash(pubsubTopic, msg), pubsubTopic, msg) + return driver + +proc newArchiveDriverWithMessages*( + pubsubTopic: PubSubTopic, msgList: seq[WakuMessage] +): ArchiveDriver = + var driver = newSqliteArchiveDriver() + 
driver = driver.put(pubsubTopic, msgList) + return driver diff --git a/third-party/nwaku/tests/waku_archive/test_all.nim b/third-party/nwaku/tests/waku_archive/test_all.nim new file mode 100644 index 0000000..9b97cc2 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_all.nim @@ -0,0 +1,14 @@ +{.used.} + +import + ./test_driver_postgres_query, + ./test_driver_postgres, + ./test_driver_queue_index, + ./test_driver_queue_pagination, + ./test_driver_queue_query, + ./test_driver_queue, + ./test_driver_sqlite_query, + ./test_driver_sqlite, + ./test_partition_manager, + ./test_retention_policy, + ./test_waku_archive diff --git a/third-party/nwaku/tests/waku_archive/test_driver_postgres.nim b/third-party/nwaku/tests/waku_archive/test_driver_postgres.nim new file mode 100644 index 0000000..34a4286 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_postgres.nim @@ -0,0 +1,170 @@ +{.used.} + +import std/[sequtils, options], testutils/unittests, chronos +import + waku/[ + waku_archive, + waku_archive/driver/postgres_driver, + waku_core, + waku_core/message/digest, + ], + ../testlib/wakucore, + ../testlib/testasync, + ../testlib/postgres + +suite "Postgres driver": + ## Unique driver instance + var driver {.threadvar.}: PostgresDriver + + asyncSetup: + let driverRes = await newTestPostgresDriver() + if driverRes.isErr(): + assert false, driverRes.error + + driver = PostgresDriver(driverRes.get()) + + asyncTeardown: + let resetRes = await driver.reset() + if resetRes.isErr(): + assert false, resetRes.error + + (await driver.close()).expect("driver to close") + + asyncTest "Asynchronous queries": + var futures = newSeq[Future[ArchiveDriverResult[void]]](0) + + let beforeSleep = now() + + for _ in 1 .. 
25: + futures.add(driver.sleep(1)) + + await allFutures(futures) + + let diff = now() - beforeSleep + + assert diff < 2_000_000_000 ## nanoseconds + + asyncTest "Insert a message": + const contentTopic = "test-content-topic" + const meta = "test meta" + + let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta) + + let putRes = await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + assert putRes.isOk(), putRes.error + + let storedMsg = (await driver.getAllMessages()).tryGet() + + assert storedMsg.len == 1 + + let (_, pubsubTopic, actualMsg) = storedMsg[0] + assert actualMsg.contentTopic == contentTopic + assert pubsubTopic == DefaultPubsubTopic + assert toHex(actualMsg.payload) == toHex(msg.payload) + assert toHex(actualMsg.meta) == toHex(msg.meta) + + asyncTest "Insert and query message": + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const pubsubTopic1 = "pubsubtopic-1" + const pubsubTopic2 = "pubsubtopic-2" + + let msg1 = fakeWakuMessage(contentTopic = contentTopic1) + + var putRes = + await driver.put(computeMessageHash(pubsubTopic1, msg1), pubsubTopic1, msg1) + assert putRes.isOk(), putRes.error + + let msg2 = fakeWakuMessage(contentTopic = contentTopic2) + + putRes = + await driver.put(computeMessageHash(pubsubTopic2, msg2), pubsubTopic2, msg2) + assert putRes.isOk(), putRes.error + + let countMessagesRes = await driver.getMessagesCount() + + assert countMessagesRes.isOk(), $countMessagesRes.error + assert countMessagesRes.get() == 2 + + var messagesRes = await driver.getMessages(contentTopics = @[contentTopic1]) + + assert messagesRes.isOk(), $messagesRes.error + assert messagesRes.get().len == 1 + + # Get both content topics, check ordering + messagesRes = + await driver.getMessages(contentTopics = @[contentTopic1, contentTopic2]) + assert messagesRes.isOk(), messagesRes.error + + assert messagesRes.get().len == 2 + assert messagesRes.get()[0][2].contentTopic 
== contentTopic1 + + # Descending order + messagesRes = await driver.getMessages( + contentTopics = @[contentTopic1, contentTopic2], ascendingOrder = false + ) + assert messagesRes.isOk(), messagesRes.error + + assert messagesRes.get().len == 2 + assert messagesRes.get()[0][2].contentTopic == contentTopic2 + + # cursor + # Get both content topics + messagesRes = await driver.getMessages( + contentTopics = @[contentTopic1, contentTopic2], + cursor = some(computeMessageHash(pubsubTopic1, messagesRes.get()[1][2])), + ) + assert messagesRes.isOk() + assert messagesRes.get().len == 1 + + # Get both content topics but one pubsub topic + messagesRes = await driver.getMessages( + contentTopics = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1) + ) + assert messagesRes.isOk(), messagesRes.error + + assert messagesRes.get().len == 1 + assert messagesRes.get()[0][2].contentTopic == contentTopic1 + + # Limit + messagesRes = await driver.getMessages( + contentTopics = @[contentTopic1, contentTopic2], maxPageSize = 1 + ) + assert messagesRes.isOk(), messagesRes.error + assert messagesRes.get().len == 1 + + asyncTest "Insert true duplicated messages": + # Validates that two completely equal messages can not be stored. 
+ + let now = now() + + let msg1 = fakeWakuMessage(ts = now) + let msg2 = fakeWakuMessage(ts = now) + + let initialNumMsgs = (await driver.getMessagesCount()).valueOr: + raiseAssert "could not get num mgs correctly: " & $error + + var putRes = await driver.put( + computeMessageHash(DefaultPubsubTopic, msg1), DefaultPubsubTopic, msg1 + ) + assert putRes.isOk(), putRes.error + + var newNumMsgs = (await driver.getMessagesCount()).valueOr: + raiseAssert "could not get num mgs correctly: " & $error + + assert newNumMsgs == (initialNumMsgs + 1.int64), + "wrong number of messages: " & $newNumMsgs + + putRes = await driver.put( + computeMessageHash(DefaultPubsubTopic, msg2), DefaultPubsubTopic, msg2 + ) + + assert putRes.isOk() + + newNumMsgs = (await driver.getMessagesCount()).valueOr: + raiseAssert "could not get num mgs correctly: " & $error + + assert newNumMsgs == (initialNumMsgs + 1.int64), + "wrong number of messages: " & $newNumMsgs diff --git a/third-party/nwaku/tests/waku_archive/test_driver_postgres_query.nim b/third-party/nwaku/tests/waku_archive/test_driver_postgres_query.nim new file mode 100644 index 0000000..7a135a7 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_postgres_query.nim @@ -0,0 +1,1819 @@ +{.used.} + +import + std/[options, sequtils, strformat, random, algorithm], + testutils/unittests, + chronos, + chronicles +import + waku/[ + waku_archive, + waku_archive/driver as driver_module, + waku_archive/driver/postgres_driver, + waku_core, + waku_core/message/digest, + ], + ../testlib/common, + ../testlib/wakucore, + ../testlib/testasync, + ../testlib/postgres + +logScope: + topics = "test archive postgres driver" + +## This whole file is copied from the 'test_driver_sqlite_query.nim' file +## and it tests the same use cases but using the postgres driver. 
+ +# Initialize the random number generator +common.randomize() + +suite "Postgres driver - queries": + ## Unique driver instance + var driver {.threadvar.}: PostgresDriver + + asyncSetup: + let driverRes = await newTestPostgresDriver() + + assert driverRes.isOk(), $driverRes.error + + driver = PostgresDriver(driverRes.get()) + + asyncTeardown: + let resetRes = await driver.reset() + + assert resetRes.isOk(), $resetRes.error + + (await driver.close()).expect("driver to close") + + asyncTest "no content topic": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let putRes = await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + assert putRes.isOk(), $putRes.error + + ## When + let res = await driver.getMessages(maxPageSize = 5, ascendingOrder = true) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[0 .. 
4] + + asyncTest "single content topic": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3] + + asyncTest "single content topic with meta field": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00), meta = "meta-0"), + fakeWakuMessage(@[byte 1], ts = ts(10), meta = "meta-1"), + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20), meta = "meta-2" + ), + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30), meta = "meta-3" + ), + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40), meta = "meta-4" + ), + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50), meta = "meta-5" + ), + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60), meta = "meta-6" + ), + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70), meta = "meta-7" + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3] + + asyncTest "single content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[6 .. 
7].reversed() + + asyncTest "multiple content topic": + ## Given + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const contentTopic3 = "test-content-topic-3" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic1, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic2, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic3, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic1, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic2, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic3, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + var res = await driver.getMessages( + contentTopics = @[contentTopic1, contentTopic2], + pubsubTopic = some(DefaultPubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + startTime = some(ts(00)), + endTime = some(ts(40)), + ) + + ## Then + assert res.isOk(), res.error + var filteredMessages = res.tryGet().mapIt(it[2]) + check filteredMessages == expected[2 .. 
3] + + ## When + ## This is very similar to the previous one but we enforce to use the prepared + ## statement by querying one single content topic + res = await driver.getMessages( + contentTopics = @[contentTopic1], + pubsubTopic = some(DefaultPubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + startTime = some(ts(00)), + endTime = some(ts(40)), + ) + + ## Then + assert res.isOk(), res.error + filteredMessages = res.tryGet().mapIt(it[2]) + check filteredMessages == @[expected[2]] + + asyncTest "single content topic - no results": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic, ts = ts(40)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + asyncTest "content topic and max page size - not enough messages stored": + ## Given + const pageSize: uint = 50 + + for t in 0 ..< 40: + let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t)) + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[DefaultContentTopic], + maxPageSize 
= pageSize, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 40 + + asyncTest "pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + asyncTest "no pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[0 .. 
1] + + asyncTest "content topic and pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + asyncTest "only cursor": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[5 .. 
6] + + asyncTest "only cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = false + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3].reversed() + + asyncTest "only cursor - invalid": + ## Given + const contentTopic = "test-content-topic" + + var messages = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) + + ## When + let res = await driver.getMessages( + includeData = true, + contentTopics = @[DefaultContentTopic], + pubsubTopic = none(PubsubTopic), + cursor = some(cursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = @[], + maxPageSize = 5, + ascendingOrder = true, + ) + + ## Then + assert res.isErr(), $res.value + + check: + res.error == "cursor not found" + + asyncTest "content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], ts = ts(70)), + ] + 
var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[5 .. 6] + + asyncTest "content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
5].reversed() + + asyncTest "pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[5][0], expected[5][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == 
expectedMessages[6 .. 7] + + asyncTest "pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[6][0], expected[6][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + 
filteredMessages == expectedMessages[4 .. 5].reversed() + + asyncTest "only hashes - descending order": + ## Given + let timeOrigin = now() + var expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it)) + + for (msg, hash) in messages.zip(hashes): + require (await driver.put(hash, DefaultPubsubTopic, msg)).isOk() + + ## When + let res = await driver.getMessages(hashes = hashes, ascendingOrder = false) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.reversed() + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages + + asyncTest "start time only": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 
6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 6] + + asyncTest "end time only": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + endTime = some(ts(45, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[0 .. 
4] + + asyncTest "start time and end time": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # start_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[2 .. 
4] + + asyncTest "invalid time range - no results": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(45, timeOrigin)), + endTime = some(ts(15, timeOrigin)), + maxPageSize = 2, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + asyncTest "time range start and content topic": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + 
fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 6] + + asyncTest "time range start and content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let 
res = await driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 6].reversed() + + asyncTest "time range start, single content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[3]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + 
filteredMessages == expected[4 .. 9] + + asyncTest "time range start, single content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[3 .. 
4].reversed() + + asyncTest "time range, content topic, pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + # start_time + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(0, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = 
res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[3 .. 4] + + asyncTest "time range, content topic, pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[7][0], expected[7][1]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + assert res.isOk(), 
res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + 
endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5] + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range, descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + 
contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + asyncTest "Get oldest and newest message timestamp": + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let oldestTime = ts(00, timeOrigin) + let newestTime = ts(100, timeOrigin) + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = oldestTime), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = newestTime), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## just keep the second resolution. + ## Notice that the oldest timestamps considers the minimum partition timestamp, which + ## is expressed in seconds. + let oldestPartitionTimestamp = + Timestamp(float(oldestTime) / 1_000_000_000) * 1_000_000_000 + + var res = await driver.getOldestMessageTimestamp() + assert res.isOk(), res.error + + ## We give certain margin of error. 
The oldest timestamp is obtained from + ## the oldest partition timestamp and there might be at most one second of difference + ## between the time created in the test and the oldest-partition-timestamp created within + ## the driver logic. + assert abs(res.get() - oldestPartitionTimestamp) < (2 * 1_000_000_000), + fmt"Failed to retrieve the latest timestamp {res.get()} != {oldestPartitionTimestamp}" + + res = await driver.getNewestMessageTimestamp() + assert res.isOk(), res.error + assert res.get() == newestTime, "Failed to retrieve the newest timestamp" + + asyncTest "Delete messages older than certain timestamp": + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let targetTime = ts(40, timeOrigin) + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = targetTime), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + var res = await driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 7, "Failed to retrieve the initial number of messages" + + let deleteRes = await driver.deleteMessagesOlderThanTimestamp(targetTime) + assert deleteRes.isOk(), deleteRes.error + + res = await driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 3, "Failed 
to retrieve the # of messages after deletion" + + asyncTest "Keep last n messages": + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + var res = await driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 7, "Failed to retrieve the initial number of messages" + + let deleteRes = await driver.deleteOldestMessagesNotWithinLimit(2) + assert deleteRes.isOk(), deleteRes.error + + res = await driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 2, "Failed to retrieve the # of messages after deletion" + + asyncTest "Exists table": + var existsRes = await driver.existsTable("version") + assert existsRes.isOk(), existsRes.error + check existsRes.get() == true + + asyncTest "Query by message hash only": + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = 
ts(20, timeOrigin)), + ] + var messages = expected + + var hashes = newSeq[WakuMessageHash](0) + for msg in messages: + let hash = computeMessageHash(DefaultPubsubTopic, msg) + hashes.add(hash) + let ret = await driver.put(hash, DefaultPubsubTopic, msg) + assert ret.isOk(), ret.error + + let ret = (await driver.getMessages(hashes = hashes)).valueOr: + assert false, $error + return + + check: + ret.len == 3 + ret[2][0] == hashes[0] + ret[1][0] == hashes[1] + ret[0][0] == hashes[2] diff --git a/third-party/nwaku/tests/waku_archive/test_driver_queue.nim b/third-party/nwaku/tests/waku_archive/test_driver_queue.nim new file mode 100644 index 0000000..584ea9d --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_queue.nim @@ -0,0 +1,182 @@ +{.used.} + +import std/options, results, testutils/unittests +import + waku/[ + waku_archive, + waku_archive/driver/queue_driver/queue_driver {.all.}, + waku_archive/driver/queue_driver/index, + waku_core, + ] + +# Helper functions + +proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) = + ## Use i to generate an Index WakuMessage + var data {.noinit.}: array[32, byte] + for x in data.mitems: + x = i.byte + + let + message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i)) + pubsubTopic = "test-pubsub-topic" + cursor = Index( + time: Timestamp(i), + hash: computeMessageHash(pubsubTopic, message), + pubsubTopic: pubsubTopic, + ) + + (cursor, message) + +proc getPrepopulatedTestQueue(unsortedSet: auto, capacity: int): QueueDriver = + let driver = QueueDriver.new(capacity) + + for i in unsortedSet: + let (index, message) = genIndexedWakuMessage(i.int8) + discard driver.add(index, message) + + driver + +procSuite "Sorted driver queue": + test "queue capacity - add a message over the limit": + ## Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + # Fill up the queue + for i in 1 .. 
capacity: + let (index, message) = genIndexedWakuMessage(i.int8) + require(driver.add(index, message).isOk()) + + # Add one more. Capacity should not be exceeded + let (index, message) = genIndexedWakuMessage(capacity.int8 + 1) + require(driver.add(index, message).isOk()) + + ## Then + check: + driver.len == capacity + + test "queue capacity - add message older than oldest in the queue": + ## Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + # Fill up the queue + for i in 1 .. capacity: + let (index, message) = genIndexedWakuMessage(i.int8) + require(driver.add(index, message).isOk()) + + # Attempt to add message with older value than oldest in queue should fail + let + oldestTimestamp = driver.first().get().time + (index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1) + addRes = driver.add(index, message) + + ## Then + check: + addRes.isErr() + addRes.error() == "too_old" + + check: + driver.len == capacity + + test "queue sort-on-insert": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + # Walk forward through the set and verify ascending order + var (prevSmaller, _) = genIndexedWakuMessage(min(unsortedSet).int8 - 1) + for i in driver.fwdIterator: + let (index, _) = i + check cmp(index, prevSmaller) > 0 + prevSmaller = index + + # Walk backward through the set and verify descending order + var (prevLarger, _) = genIndexedWakuMessage(max(unsortedSet).int8 + 1) + for i in driver.bwdIterator: + let (index, _) = i + check cmp(index, prevLarger) < 0 + prevLarger = index + + test "access first item from queue": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + ## When + let firstRes = driver.first() + + ## Then + check: + firstRes.isOk() + + let first = firstRes.tryGet() + check: + first.time == Timestamp(1) + + test "get first item from empty queue should fail": + ## 
Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + let firstRes = driver.first() + + ## Then + check: + firstRes.isErr() + firstRes.error() == "Not found" + + test "access last item from queue": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + ## When + let lastRes = driver.last() + + ## Then + check: + lastRes.isOk() + + let last = lastRes.tryGet() + check: + last.time == Timestamp(5) + + test "get last item from empty queue should fail": + ## Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + let lastRes = driver.last() + + ## Then + check: + lastRes.isErr() + lastRes.error() == "Not found" + + test "verify if queue contains an index": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + let + (existingIndex, _) = genIndexedWakuMessage(4) + (nonExistingIndex, _) = genIndexedWakuMessage(99) + + ## Then + check: + driver.contains(existingIndex) == true + driver.contains(nonExistingIndex) == false diff --git a/third-party/nwaku/tests/waku_archive/test_driver_queue_index.nim b/third-party/nwaku/tests/waku_archive/test_driver_queue_index.nim new file mode 100644 index 0000000..f34e181 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_queue_index.nim @@ -0,0 +1,47 @@ +{.used.} + +import std/random, testutils/unittests +import waku/waku_core, waku/waku_archive/driver/queue_driver/index + +var rng = initRand() + +## Helpers + +proc randomHash(): WakuMessageHash = + var hash: WakuMessageHash + + for i in 0 ..< hash.len: + let numb: byte = byte(rng.next()) + hash[i] = numb + + hash + +suite "Queue Driver - index": + ## Test vars + let + hash = randomHash() + eqIndex1 = Index(time: getNanosecondTime(54321), hash: hash) + eqIndex2 = Index(time: getNanosecondTime(54321), hash: hash) + eqIndex3 = Index(time: getNanosecondTime(54321), hash: 
randomHash()) + eqIndex4 = Index(time: getNanosecondTime(65432), hash: hash) + + test "Index comparison": + check: + # equality + cmp(eqIndex1, eqIndex2) == 0 + cmp(eqIndex1, eqIndex3) != 0 + cmp(eqIndex1, eqIndex4) != 0 + + # ordering + cmp(eqIndex3, eqIndex4) < 0 + cmp(eqIndex4, eqIndex3) > 0 # Test symmetry + + cmp(eqIndex2, eqIndex4) < 0 + cmp(eqIndex4, eqIndex2) > 0 # Test symmetry + + test "Index equality": + check: + eqIndex1 == eqIndex2 + eqIndex1 == eqIndex4 + eqIndex2 != eqIndex3 + eqIndex4 != eqIndex3 diff --git a/third-party/nwaku/tests/waku_archive/test_driver_queue_pagination.nim b/third-party/nwaku/tests/waku_archive/test_driver_queue_pagination.nim new file mode 100644 index 0000000..45543c5 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_queue_pagination.nim @@ -0,0 +1,396 @@ +{.used.} + +import + std/[options, sequtils, algorithm], testutils/unittests, libp2p/protobuf/minprotobuf +import + waku/[ + waku_archive, + waku_archive/driver/queue_driver/queue_driver {.all.}, + waku_archive/driver/queue_driver/index, + waku_core, + ], + ../testlib/wakucore + +proc getTestQueueDriver(numMessages: int): QueueDriver = + let testQueueDriver = QueueDriver.new(numMessages) + + var data {.noinit.}: array[32, byte] + for x in data.mitems: + x = 1 + + for i in 0 ..< numMessages: + let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i)) + + let index = Index( + time: Timestamp(i), + hash: computeMessageHash(DefaultPubsubTopic, msg), + pubsubTopic: DefaultPubsubTopic, + ) + + discard testQueueDriver.add(index, msg) + + return testQueueDriver + +procSuite "Queue driver - pagination": + let driver = getTestQueueDriver(10) + let + indexList: seq[Index] = toSeq(driver.fwdIterator()).mapIt(it[0]) + msgList: seq[WakuMessage] = toSeq(driver.fwdIterator()).mapIt(it[1]) + + test "Forward pagination - normal pagination": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = some(indexList[3]) + forward: bool = true + + ## When + let 
page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 2 + data == msgList[4 .. 5] + + test "Forward pagination - initial pagination request with an empty cursor": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 2 + data == msgList[0 .. 1] + + test "Forward pagination - initial pagination request with an empty cursor to fetch the entire history": + ## Given + let + pageSize: uint = 13 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 10 + data == msgList[0 .. 9] + + test "Forward pagination - empty msgList": + ## Given + let driver = getTestQueueDriver(0) + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 0 + + test "Forward pagination - page size larger than the remaining messages": + ## Given + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[3]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 6 + data == msgList[4 .. 
9] + + test "Forward pagination - page size larger than the maximum allowed page size": + ## Given + let + pageSize: uint = MaxPageSize + 1 + cursor: Option[Index] = some(indexList[3]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + uint(data.len) <= MaxPageSize + + test "Forward pagination - cursor pointing to the end of the message list": + ## Given + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[9]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 0 + + test "Forward pagination - invalid cursor": + ## Given + let msg = fakeWakuMessage(payload = @[byte 10]) + let index = Index(hash: computeMessageHash(DefaultPubsubTopic, msg)) + + let + pageSize: uint = 10 + cursor: Option[Index] = some(index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let error = page.tryError() + check: + error == QueueDriverErrorKind.INVALID_CURSOR + + test "Forward pagination - initial paging query over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 1 + + test "Forward pagination - pagination over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[0]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = 
page.tryGet().mapIt(it[2]) + check: + data.len == 0 + + test "Forward pagination - with pradicate": + ## Given + let + pageSize: uint = 3 + cursor: Option[Index] = none(Index) + forward = true + + proc onlyEvenTimes(index: Index, msg: WakuMessage): bool = + msg.timestamp.int64 mod 2 == 0 + + ## When + let page = driver.getPage( + pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyEvenTimes + ) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.mapIt(it.timestamp.int) == @[0, 2, 4] + + test "Backward pagination - normal pagination": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = some(indexList[3]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data == msgList[1 .. 2].reversed + + test "Backward pagination - empty msgList": + ## Given + let driver = getTestQueueDriver(0) + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 0 + + test "Backward pagination - initial pagination request with an empty cursor": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 2 + data == msgList[8 .. 
9].reversed + + test "Backward pagination - initial pagination request with an empty cursor to fetch the entire history": + ## Given + let + pageSize: uint = 13 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 10 + data == msgList[0 .. 9].reversed + + test "Backward pagination - page size larger than the remaining messages": + ## Given + let + pageSize: uint = 5 + cursor: Option[Index] = some(indexList[3]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data == msgList[0 .. 2].reversed + + test "Backward pagination - page size larger than the Maximum allowed page size": + ## Given + let + pageSize: uint = MaxPageSize + 1 + cursor: Option[Index] = some(indexList[3]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + uint(data.len) <= MaxPageSize + + test "Backward pagination - cursor pointing to the begining of the message list": + ## Given + let + pageSize: uint = 5 + cursor: Option[Index] = some(indexList[0]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 0 + + test "Backward pagination - invalid cursor": + ## Given + let msg = fakeWakuMessage(payload = @[byte 10]) + let index = Index(hash: computeMessageHash(DefaultPubsubTopic, msg)) + + let + pageSize: uint = 2 + cursor: Option[Index] = some(index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let error = page.tryError() + check: + 
error == QueueDriverErrorKind.INVALID_CURSOR + + test "Backward pagination - initial paging query over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 1 + + test "Backward pagination - paging query over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[0]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.len == 0 + + test "Backward pagination - with predicate": + ## Given + let + pageSize: uint = 3 + cursor: Option[Index] = none(Index) + forward = false + + proc onlyOddTimes(index: Index, msg: WakuMessage): bool = + msg.timestamp.int64 mod 2 != 0 + + ## When + let page = driver.getPage( + pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyOddTimes + ) + + ## Then + let data = page.tryGet().mapIt(it[2]) + check: + data.mapIt(it.timestamp.int) == @[5, 7, 9].reversed diff --git a/third-party/nwaku/tests/waku_archive/test_driver_queue_query.nim b/third-party/nwaku/tests/waku_archive/test_driver_queue_query.nim new file mode 100644 index 0000000..34d8087 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_queue_query.nim @@ -0,0 +1,1696 @@ +{.used.} + +import + std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles +import + waku/ + [ + waku_archive, + waku_archive/driver/queue_driver, + waku_core, + waku_core/message/digest, + ], + ../testlib/common, + ../testlib/wakucore + +logScope: + topics = "test archive queue_driver" + +# Initialize the random number generator 
+common.randomize() + +proc newTestSqliteDriver(): ArchiveDriver = + QueueDriver.new(capacity = 50) + +suite "Queue driver - query by content topic": + test "no content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages(maxPageSize = 5, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[0 .. 
4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "single content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "single content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[6 .. 
7].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "multiple content topic": + ## Given + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const contentTopic3 = "test-content-topic-3" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic1, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic2, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic3, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic1, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic2, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic3, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic1, contentTopic2], + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "single content topic - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic, ts = ts(40)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and max page size - not enough messages stored": + ## Given + const pageSize: uint = 50 + + let driver = newTestSqliteDriver() + + for t in 0 ..< 40: + let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t)) + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[DefaultContentTopic], + maxPageSize = pageSize, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 40 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + +suite "SQLite driver - 
query by pubsub topic": + test "pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "no pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages(maxPageSize = 2, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[0 .. 
1] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + +suite "Queue driver - query by cursor": + test "only cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = waitFor driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[5 .. 
6] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "only cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = waitFor driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "only cursor - invalid": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + var messages = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) + + ## When + let res = waitFor driver.getMessages( + includeData = true, + contentTopics = @[DefaultContentTopic], + pubsubTopic = none(PubsubTopic), + cursor = some(cursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = @[], + maxPageSize = 5, + ascendingOrder = true, + ) + + ## Then + check: + res.isErr() + res.error == "invalid_cursor" + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << 
cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[5 .. 6] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) + 
+ ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 5].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + 
+ let cursor = computeMessageHash(expected[5][0], expected[5][1]) + + ## When + let res = waitFor driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[6 .. 7] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: 
+ let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + let cursor = computeMessageHash(expected[6][0], expected[6][1]) + + ## When + let res = waitFor driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + +suite "Queue driver - query by time range": + test "start time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == 
expected[2 .. 6] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "end time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + endTime = some(ts(45, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[0 .. 
4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "start time and end time": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # start_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + startTime = some(ts(15, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = 
res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[2 .. 4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "invalid time range - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(45, timeOrigin)), + endTime = some(ts(15, timeOrigin)), + maxPageSize = 2, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + asynctest "time range start and content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, 
timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + test "time range start and content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
6].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + asynctest "time range start, single content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[3]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[4 .. 
9] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asynctest "time range start, single content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[3 .. 
4].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + # start_time + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(0, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = 
true, + ) + + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[3 .. 4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + let cursor = computeMessageHash(expected[7][0], expected[7][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopics = 
@[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = 
waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + let cursor = computeMessageHash(expected[1][0], expected[1][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range, descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts 
= ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put(computeMessageHash(topic, msg), topic, msg) + require retFut.isOk() + + let cursor = computeMessageHash(expected[1][0], expected[1][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") diff --git a/third-party/nwaku/tests/waku_archive/test_driver_sqlite.nim b/third-party/nwaku/tests/waku_archive/test_driver_sqlite.nim new file mode 100644 index 0000000..5809a84 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_sqlite.nim @@ -0,0 +1,54 @@ +{.used.} + +import std/sequtils, testutils/unittests, chronos +import + waku/[waku_archive, waku_archive/driver/sqlite_driver, waku_core], + ../waku_archive/archive_utils, + ../testlib/wakucore + +suite "SQLite driver": + test "init driver and database": + ## Given + let database = newSqliteDatabase() + + ## When + let driverRes = SqliteDriver.new(database) + + ## Then + check: + driverRes.isOk() + + let driver: ArchiveDriver = driverRes.tryGet() + check: + not driver.isNil() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "insert a message": + ## Given + const contentTopic = "test-content-topic" + const meta = "test meta" + + let driver = newSqliteArchiveDriver() + + let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta) + let msgHash = computeMessageHash(DefaultPubsubTopic, msg) + + ## When + let putRes = 
waitFor driver.put(msgHash, DefaultPubsubTopic, msg) + + ## Then + check: + putRes.isOk() + + let storedMsg = (waitFor driver.getAllMessages()).tryGet() + check: + storedMsg.len == 1 + storedMsg.all do(item: auto) -> bool: + let (hash, pubsubTopic, actualMsg) = item + actualMsg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and + hash == msgHash and msg.meta == actualMsg.meta + + ## Cleanup + (waitFor driver.close()).expect("driver to close") diff --git a/third-party/nwaku/tests/waku_archive/test_driver_sqlite_query.nim b/third-party/nwaku/tests/waku_archive/test_driver_sqlite_query.nim new file mode 100644 index 0000000..327ae17 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_driver_sqlite_query.nim @@ -0,0 +1,1754 @@ +{.used.} + +import + std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles + +import + waku/[waku_archive, waku_core, waku_core/message/digest], + ../testlib/common, + ../testlib/wakucore, + ../waku_archive/archive_utils + +logScope: + topics = "test archive _driver" + +# Initialize the random number generator +common.randomize() + +suite "SQLite driver - query by content topic": + asyncTest "no content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized 
message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages(maxPageSize = 5, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[0 .. 4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic with meta field": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00), meta = "meta-0"), + fakeWakuMessage(@[byte 1], ts = ts(10), meta = "meta-1"), + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20), meta = "meta-2" + ), + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30), meta = "meta-3" + ), + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40), meta = "meta-4" + ), + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50), meta = "meta-5" + ), + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60), meta = "meta-6" + ), + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70), meta = "meta-7" + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[6 .. 
7].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "multiple content topic": + ## Given + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const contentTopic3 = "test-content-topic-3" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic1, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic2, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic3, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic1, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic2, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic3, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic1, contentTopic2], + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic, ts = ts(40)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and max page size - not enough messages stored": + ## Given + const pageSize: uint = 50 + + let driver = newSqliteArchiveDriver() + + for t in 0 ..< 40: + let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t)) + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[DefaultContentTopic], + maxPageSize = pageSize, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 40 + + ## Cleanup + (await driver.close()).expect("driver to close") + +suite "SQLite driver - query by pubsub topic": + 
asyncTest "pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "no pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[0 .. 
1] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (await driver.close()).expect("driver to close") + +suite "SQLite driver - query by cursor": + asyncTest "only cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[5 .. 
6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "only cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
3].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "only cursor - invalid": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + var messages = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) + + ## When + let res = await driver.getMessages( + includeData = true, + contentTopics = @[DefaultContentTopic], + pubsubTopic = none(PubsubTopic), + cursor = some(cursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = @[], + maxPageSize = 5, + ascendingOrder = true, + ) + + ## Then + check: + res.isErr() + res.error == "cursor not found" + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << 
cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[5 .. 6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) + + ## When + let res = await 
driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 5].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = 
computeMessageHash(expected[5][0], expected[5][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[6 .. 7] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let 
(topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[6][0], expected[6][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + +suite "SQLite driver - query by time range": + asyncTest "start time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "end time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + endTime = some(ts(45, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[0 .. 
4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "start time and end time": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # start_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + ## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = 
res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[2 .. 4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "invalid time range - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(45, timeOrigin)), + endTime = some(ts(15, timeOrigin)), + maxPageSize = 2, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start and content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + 
# start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start and content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[2 .. 
6].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start, single content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[3]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[4 .. 
9] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start, single content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expected[3 .. 
4].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + # start_time + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(DefaultPubsubTopic, expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(0, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + 
check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[3 .. 4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[7][0], expected[7][1]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = 
some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await 
driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages == expectedMessages[4 .. 5] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range, descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, 
timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require (await driver.put(computeMessageHash(topic, msg), topic, msg)).isOk() + + let cursor = computeMessageHash(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopics = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[2]) + check: + filteredMessages.len == 0 + + ## Cleanup + (await driver.close()).expect("driver to close") diff --git a/third-party/nwaku/tests/waku_archive/test_partition_manager.nim b/third-party/nwaku/tests/waku_archive/test_partition_manager.nim new file mode 100644 index 0000000..dbfdd84 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_partition_manager.nim @@ -0,0 +1,14 @@ +{.used.} + +import testutils/unittests, chronos +import waku/waku_archive/driver/postgres_driver/partitions_manager, waku/waku_core/time + +suite "Partition Manager": + test "Calculate end partition time": + # 1717372850 == Mon Jun 03 2024 00:00:50 GMT+0000 + # 1717376400 == Mon Jun 03 2024 01:00:00 GMT+0000 + check 1717376400 == partitions_manager.calcEndPartitionTime(Timestamp(1717372850)) + + # 1717372800 == Mon Jun 03 2024 00:00:00 GMT+0000 + # 1717376400 == Mon Jun 03 2024 01:00:00 GMT+0000 + check 1717376400 == partitions_manager.calcEndPartitionTime(Timestamp(1717372800)) diff --git a/third-party/nwaku/tests/waku_archive/test_retention_policy.nim b/third-party/nwaku/tests/waku_archive/test_retention_policy.nim new file mode 100644 index 0000000..ea86e1d --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_retention_policy.nim @@ -0,0 +1,153 @@ +{.used.} + 
+import std/[sequtils, times], results, testutils/unittests, chronos +import + waku/[ + waku_core, + waku_core/message/digest, + waku_archive, + waku_archive/retention_policy, + waku_archive/retention_policy/retention_policy_capacity, + waku_archive/retention_policy/retention_policy_size, + ], + ../waku_archive/archive_utils, + ../testlib/wakucore + +suite "Waku Archive - Retention policy": + test "capacity retention policy - windowed message deletion": + ## Given + let + capacity = 100 + excess = 60 + + let driver = newSqliteArchiveDriver() + + let retentionPolicy: RetentionPolicy = + CapacityRetentionPolicy.new(capacity = capacity) + var putFutures = newSeq[Future[ArchiveDriverResult[void]]]() + + ## When + for i in 1 .. capacity + excess: + let msg = fakeWakuMessage( + payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i) + ) + putFutures.add( + driver.put(computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg) + ) + + discard waitFor allFinished(putFutures) + + let res = waitFor retentionPolicy.execute(driver) + assert res.isOk(), $res.error + + ## Then + let numMessages = (waitFor driver.getMessagesCount()).tryGet() + check: + # Expected number of messages is 120 because + # (capacity = 100) + (half of the overflow window = 15) + (5 messages added after after the last delete) + # the window size changes when changing `const maxStoreOverflow = 1.3 in sqlite_store + numMessages == 115 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "size retention policy - windowed message deletion": + ## Given + let + # in bytes + sizeLimit: int64 = 52428 + excess = 325 + + let driver = newSqliteArchiveDriver() + + let retentionPolicy: RetentionPolicy = SizeRetentionPolicy.new(size = sizeLimit) + var putFutures = newSeq[Future[ArchiveDriverResult[void]]]() + + # make sure that the db is empty to before test begins + let storedMsg = (waitFor driver.getAllMessages()).tryGet() + # if there are messages in db, empty 
them + if storedMsg.len > 0: + let now = getNanosecondTime(getTime().toUnixFloat()) + require (waitFor driver.deleteMessagesOlderThanTimestamp(ts = now)).isOk() + require (waitFor driver.performVacuum()).isOk() + + ## When + ## + + # create a number of messages so that the size of the DB overshoots + for i in 1 .. excess: + let msg = fakeWakuMessage( + payload = @[byte i], contentTopic = DefaultContentTopic, ts = Timestamp(i) + ) + putFutures.add( + driver.put(computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg) + ) + + # waitFor is used to synchronously wait for the futures to complete. + discard waitFor allFinished(putFutures) + + ## Then + # calculate the current database size + let sizeDB = int64((waitFor driver.getDatabaseSize()).tryGet()) + + # NOTE: since vacuumin is done manually, this needs to be revisited if vacuuming done automatically + + # get the rows count pre-deletion + let rowsCountBeforeDeletion = (waitFor driver.getMessagesCount()).tryGet() + + # execute policy provided the current db size oveflows, results in rows deletion + require (sizeDB >= sizeLimit) + require (waitFor retentionPolicy.execute(driver)).isOk() + + # get the number or rows from database + let rowCountAfterDeletion = (waitFor driver.getMessagesCount()).tryGet() + + check: + # size of the database is used to check if the storage limit has been preserved + # check the current database size with the limitSize provided by the user + # it should be lower + rowCountAfterDeletion <= rowsCountBeforeDeletion + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "store capacity should be limited": + ## Given + const capacity = 5 + const contentTopic = "test-content-topic" + + let + driver = newSqliteArchiveDriver() + retentionPolicy: RetentionPolicy = + CapacityRetentionPolicy.new(capacity = capacity) + + let messages = + @[ + fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(0)), + fakeWakuMessage(contentTopic = DefaultContentTopic, ts = 
ts(1)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(2)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(3)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(4)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(5)), + fakeWakuMessage(contentTopic = contentTopic, ts = ts(6)), + ] + + ## When + for msg in messages: + require ( + waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + require (waitFor retentionPolicy.execute(driver)).isOk() + + ## Then + let storedMsg = (waitFor driver.getAllMessages()).tryGet() + check: + storedMsg.len == capacity + storedMsg.all do(item: auto) -> bool: + let (_, pubsubTopic, msg) = item + msg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic + + ## Cleanup + (waitFor driver.close()).expect("driver to close") diff --git a/third-party/nwaku/tests/waku_archive/test_waku_archive.nim b/third-party/nwaku/tests/waku_archive/test_waku_archive.nim new file mode 100644 index 0000000..802473d --- /dev/null +++ b/third-party/nwaku/tests/waku_archive/test_waku_archive.nim @@ -0,0 +1,544 @@ +{.used.} + +import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + common/databases/db_postgres/dbconn, + common/paging, + waku_core, + waku_core/message/digest, + waku_archive, + ], + ../waku_archive/archive_utils, + ../testlib/wakucore + +suite "Waku Archive - message handling": + test "it should archive a valid and non-ephemeral message": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let validSenderTime = now() + let message = fakeWakuMessage(ephemeral = false, ts = validSenderTime) + + ## When + waitFor archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 1 + + test "it should not archive ephemeral messages": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = 
newWakuArchive(driver) + + ## Given + let msgList = + @[ + fakeWakuMessage(ephemeral = false, payload = "1"), + fakeWakuMessage(ephemeral = true, payload = "2"), + fakeWakuMessage(ephemeral = true, payload = "3"), + fakeWakuMessage(ephemeral = true, payload = "4"), + fakeWakuMessage(ephemeral = false, payload = "5"), + ] + + ## When + for msg in msgList: + waitFor archive.handleMessage(DefaultPubsubTopic, msg) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 2 + + test "it should not archive a message with no sender timestamp": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let invalidSenderTime = 0 + let message = fakeWakuMessage(ts = invalidSenderTime) + + ## When + waitFor archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 0 + + test "it should not archive a message with a sender time variance greater than max time variance (future)": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let + now = now() + invalidSenderTime = now + MaxMessageTimestampVariance + 1_000_000_000 + # 1 second over the max variance + + let message = fakeWakuMessage(ts = invalidSenderTime) + + ## When + waitFor archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 0 + + test "it should not archive a message with a sender time variance greater than max time variance (past)": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let + now = now() + invalidSenderTime = now - MaxMessageTimestampVariance - 1 + + let message = fakeWakuMessage(ts = invalidSenderTime) + + ## When + waitFor archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 0 + + test "convert query to label": + check: + 
convertQueryToMetricLabel("SELECT version();") == "select_version" + convertQueryToMetricLabel( + "SELECT messageHash FROM messages WHERE pubsubTopic = ? AND timestamp >= ? AND timestamp <= ? ORDER BY timestamp DESC, messageHash DESC LIMIT ?" + ) == "msg_hash_no_ctopic" + convertQueryToMetricLabel( + """ SELECT child.relname AS partition_name + FROM pg_inherits + JOIN pg_class parent ON pg_inherits.inhparent = parent.oid + JOIN pg_class child ON pg_inherits.inhrelid = child.oid + JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace + JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace + WHERE parent.relname='messages""" + ) == "get_partitions_list" + +procSuite "Waku Archive - find messages": + ## Fixtures + let timeOrigin = now() + let msgListA = + @[ + fakeWakuMessage( + @[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin) + ), + fakeWakuMessage( + @[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin) + ), + fakeWakuMessage( + @[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin) + ), + fakeWakuMessage( + @[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin) + ), + fakeWakuMessage( + @[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin) + ), + fakeWakuMessage( + @[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin) + ), + fakeWakuMessage( + @[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin) + ), + fakeWakuMessage( + @[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin) + ), + fakeWakuMessage( + @[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin) + ), + fakeWakuMessage( + @[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin) + ), + ] + + let archiveA = block: + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + for msg in msgListA: + require ( + waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + 
).isOk() + + archive + + test "handle query": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let topic = ContentTopic("1") + let + msg1 = fakeWakuMessage(contentTopic = topic) + msg2 = fakeWakuMessage() + + waitFor archive.handleMessage("foo", msg1) + waitFor archive.handleMessage("foo", msg2) + + ## Given + let req = ArchiveQuery(includeData: true, contentTopics: @[topic]) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + check: + queryRes.isOk() + + let response = queryRes.tryGet() + check: + response.messages.len == 1 + response.messages == @[msg1] + + test "handle query with multiple content filters": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let + topic1 = ContentTopic("1") + topic2 = ContentTopic("2") + topic3 = ContentTopic("3") + + let + msg1 = fakeWakuMessage(contentTopic = topic1) + msg2 = fakeWakuMessage(contentTopic = topic2) + msg3 = fakeWakuMessage(contentTopic = topic3) + + waitFor archive.handleMessage("foo", msg1) + waitFor archive.handleMessage("foo", msg2) + waitFor archive.handleMessage("foo", msg3) + + ## Given + let req = ArchiveQuery(includeData: true, contentTopics: @[topic1, topic3]) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + check: + queryRes.isOk() + + let response = queryRes.tryGet() + check: + response.messages.len() == 2 + response.messages.anyIt(it == msg1) + response.messages.anyIt(it == msg3) + + test "handle query with more than 100 content filters": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let queryTopics = toSeq(1 .. 
150).mapIt(ContentTopic($it)) + + ## Given + let req = ArchiveQuery(contentTopics: queryTopics) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + assert queryRes.isOk(), $queryRes.error + + let response = queryRes.tryGet() + check: + response.messages.len() == 0 + + test "handle query with pubsub topic filter": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let + pubsubTopic1 = "queried-topic" + pubsubTopic2 = "non-queried-topic" + + let + contentTopic1 = ContentTopic("1") + contentTopic2 = ContentTopic("2") + contentTopic3 = ContentTopic("3") + + let + msg1 = fakeWakuMessage(contentTopic = contentTopic1) + msg2 = fakeWakuMessage(contentTopic = contentTopic2) + msg3 = fakeWakuMessage(contentTopic = contentTopic3) + + waitFor archive.handleMessage(pubsubtopic1, msg1) + waitFor archive.handleMessage(pubsubtopic2, msg2) + waitFor archive.handleMessage(pubsubtopic2, msg3) + + ## Given + # This query targets: pubsubtopic1 AND (contentTopic1 OR contentTopic3) + let req = ArchiveQuery( + includeData: true, + pubsubTopic: some(pubsubTopic1), + contentTopics: @[contentTopic1, contentTopic3], + ) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + check: + queryRes.isOk() + + let response = queryRes.tryGet() + check: + response.messages.len() == 1 + response.messages.anyIt(it == msg1) + + test "handle query with pubsub topic filter - no match": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let + pubsubtopic1 = "queried-topic" + pubsubtopic2 = "non-queried-topic" + + let + msg1 = fakeWakuMessage() + msg2 = fakeWakuMessage() + msg3 = fakeWakuMessage() + + waitFor archive.handleMessage(pubsubtopic2, msg1) + waitFor archive.handleMessage(pubsubtopic2, msg2) + waitFor archive.handleMessage(pubsubtopic2, msg3) + + ## Given + let req = ArchiveQuery(pubsubTopic: some(pubsubTopic1)) + + ## When + let res = waitFor archive.findMessages(req) + + 
## Then + check: + res.isOk() + + let response = res.tryGet() + check: + response.messages.len() == 0 + + test "handle query with pubsub topic filter - match the entire stored messages": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let pubsubTopic = "queried-topic" + + let + msg1 = fakeWakuMessage(payload = "TEST-1") + msg2 = fakeWakuMessage(payload = "TEST-2") + msg3 = fakeWakuMessage(payload = "TEST-3") + + waitFor archive.handleMessage(pubsubTopic, msg1) + waitFor archive.handleMessage(pubsubTopic, msg2) + waitFor archive.handleMessage(pubsubTopic, msg3) + + ## Given + let req = ArchiveQuery(includeData: true, pubsubTopic: some(pubsubTopic)) + + ## When + let res = waitFor archive.findMessages(req) + + ## Then + check: + res.isOk() + + let response = res.tryGet() + check: + response.messages.len() == 3 + response.messages.anyIt(it == msg1) + response.messages.anyIt(it == msg2) + response.messages.anyIt(it == msg3) + + test "handle query with forward pagination": + ## Given + let req = + ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.FORWARD) + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](3) + var cursors = newSeq[Option[ArchiveCursor]](3) + + for i in 0 ..< 3: + let res = waitFor archiveA.findMessages(nextReq) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeMessageHash(DefaultPubsubTopic, msgListA[3])) + cursors[1] == some(computeMessageHash(DefaultPubsubTopic, msgListA[7])) + cursors[2] == none(ArchiveCursor) + + check: + pages[0] == msgListA[0 .. 3] + pages[1] == msgListA[4 .. 7] + pages[2] == msgListA[8 .. 
9] + + test "handle query with backward pagination": + ## Given + let req = + ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.BACKWARD) + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](3) + var cursors = newSeq[Option[ArchiveCursor]](3) + + for i in 0 ..< 3: + let res = waitFor archiveA.findMessages(nextReq) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeMessageHash(DefaultPubsubTopic, msgListA[6])) + cursors[1] == some(computeMessageHash(DefaultPubsubTopic, msgListA[2])) + cursors[2] == none(ArchiveCursor) + + check: + pages[0] == msgListA[6 .. 9] + pages[1] == msgListA[2 .. 5] + pages[2] == msgListA[0 .. 1] + + test "handle query with no paging info - auto-pagination": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2")), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 5], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 6], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 7], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 8], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2")), + ] + + for msg in msgList: + require ( + waitFor driver.put( + computeMessageHash(DefaultPubsubTopic, msg), DefaultPubsubTopic, msg + ) + ).isOk() + + ## Given + let req = ArchiveQuery(includeData: true, contentTopics: @[DefaultContentTopic]) + + ## When + let res = waitFor 
archive.findMessages(req) + + ## Then + check: + res.isOk() + + let response = res.tryGet() + check: + ## No pagination specified. Response will be auto-paginated with + ## up to MaxPageSize messages per page. + response.messages.len() == 8 + response.cursor.isNone() + + test "handle temporal history query with a valid time window": + ## Given + let req = ArchiveQuery( + includeData: true, + contentTopics: @[ContentTopic("1")], + startTime: some(ts(15, timeOrigin)), + endTime: some(ts(55, timeOrigin)), + direction: PagingDirection.FORWARD, + ) + + ## When + let res = waitFor archiveA.findMessages(req) + + ## Then + check res.isOk() + + let response = res.tryGet() + check: + response.messages.len() == 2 + response.messages.mapIt(it.timestamp) == @[ts(30, timeOrigin), ts(50, timeOrigin)] + + test "handle temporal history query with a zero-size time window": + ## A zero-size window results in an empty list of history messages + ## Given + let req = ArchiveQuery( + contentTopics: @[ContentTopic("1")], + startTime: some(Timestamp(2)), + endTime: some(Timestamp(2)), + ) + + ## When + let res = waitFor archiveA.findMessages(req) + + ## Then + check res.isOk() + + let response = res.tryGet() + check: + response.messages.len == 0 + + test "handle temporal history query with an invalid time window": + ## A history query with an invalid time range results in an empty list of history messages + ## Given + let req = ArchiveQuery( + contentTopics: @[ContentTopic("1")], + startTime: some(Timestamp(5)), + endTime: some(Timestamp(2)), + ) + + ## When + let res = waitFor archiveA.findMessages(req) + + ## Then + check res.isOk() + + let response = res.tryGet() + check: + response.messages.len == 0 diff --git a/third-party/nwaku/tests/waku_archive_legacy/archive_utils.nim b/third-party/nwaku/tests/waku_archive_legacy/archive_utils.nim new file mode 100644 index 0000000..8df0f5d --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/archive_utils.nim @@ -0,0 +1,55 @@ +{.used.} 
+ +import std/options, results, chronos, libp2p/crypto/crypto + +import + waku/[ + node/peer_manager, + waku_core, + waku_archive_legacy, + waku_archive_legacy/common, + waku_archive_legacy/driver/sqlite_driver, + waku_archive_legacy/driver/sqlite_driver/migrations, + common/databases/db_sqlite, + ], + ../testlib/[wakucore] + +proc newSqliteDatabase*(path: Option[string] = string.none()): SqliteDatabase = + SqliteDatabase.new(path.get(":memory:")).tryGet() + +proc newSqliteArchiveDriver*(): ArchiveDriver = + let database = newSqliteDatabase() + migrate(database).tryGet() + return SqliteDriver.new(database).tryGet() + +proc newWakuArchive*(driver: ArchiveDriver): WakuArchive = + WakuArchive.new(driver).get() + +proc computeArchiveCursor*( + pubsubTopic: PubsubTopic, message: WakuMessage +): ArchiveCursor = + ArchiveCursor( + pubsubTopic: pubsubTopic, + senderTime: message.timestamp, + storeTime: message.timestamp, + digest: computeDigest(message), + hash: computeMessageHash(pubsubTopic, message), + ) + +proc put*( + driver: ArchiveDriver, pubsubTopic: PubSubTopic, msgList: seq[WakuMessage] +): ArchiveDriver = + for msg in msgList: + let + msgDigest = computeDigest(msg) + msgHash = computeMessageHash(pubsubTopic, msg) + _ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp) + # discard crashes + return driver + +proc newArchiveDriverWithMessages*( + pubsubTopic: PubSubTopic, msgList: seq[WakuMessage] +): ArchiveDriver = + var driver = newSqliteArchiveDriver() + driver = driver.put(pubsubTopic, msgList) + return driver diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_all.nim b/third-party/nwaku/tests/waku_archive_legacy/test_all.nim new file mode 100644 index 0000000..9d45d99 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_all.nim @@ -0,0 +1,13 @@ +{.used.} + +import + ./test_driver_postgres_query, + ./test_driver_postgres, + ./test_driver_queue_index, + ./test_driver_queue_pagination, + ./test_driver_queue_query, 
+ ./test_driver_queue, + ./test_driver_sqlite_query, + ./test_driver_sqlite, + ./test_retention_policy, + ./test_waku_archive diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_postgres.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_postgres.nim new file mode 100644 index 0000000..7657b6e --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_postgres.nim @@ -0,0 +1,220 @@ +{.used.} + +import std/[sequtils, options], testutils/unittests, chronos +import + waku/waku_archive_legacy, + waku/waku_archive_legacy/driver/postgres_driver, + waku/waku_archive/driver/postgres_driver as new_postgres_driver, + waku/waku_core, + waku/waku_core/message/digest, + ../testlib/wakucore, + ../testlib/testasync, + ../testlib/postgres_legacy, + ../testlib/postgres as new_postgres + +proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor = + ArchiveCursor( + pubsubTopic: pubsubTopic, + senderTime: message.timestamp, + storeTime: message.timestamp, + digest: computeDigest(message), + hash: computeMessageHash(pubsubTopic, message), + ) + +suite "Postgres driver": + ## Unique driver instance + var driver {.threadvar.}: postgres_driver.PostgresDriver + + ## We need to artificially create an instance of the "newDriver" + ## because this is the only one in charge of creating partitions + ## We will clean legacy store soon and this file will get removed. 
+ var newDriver {.threadvar.}: new_postgres_driver.PostgresDriver + + asyncSetup: + let driverRes = await postgres_legacy.newTestPostgresDriver() + if driverRes.isErr(): + assert false, driverRes.error + + driver = postgres_driver.PostgresDriver(driverRes.get()) + + let newDriverRes = await new_postgres.newTestPostgresDriver() + if newDriverRes.isErr(): + assert false, newDriverRes.error + + newDriver = new_postgres_driver.PostgresDriver(newDriverRes.get()) + + asyncTeardown: + var resetRes = await driver.reset() + if resetRes.isErr(): + assert false, resetRes.error + + (await driver.close()).expect("driver to close") + + resetRes = await newDriver.reset() + if resetRes.isErr(): + assert false, resetRes.error + + (await newDriver.close()).expect("driver to close") + + asyncTest "Asynchronous queries": + var futures = newSeq[Future[ArchiveDriverResult[void]]](0) + + let beforeSleep = now() + for _ in 1 .. 100: + futures.add(driver.sleep(1)) + + await allFutures(futures) + + let diff = now() - beforeSleep + # Actually, the diff randomly goes between 1 and 2 seconds. + # although in theory it should spend 1s because we establish 100 + # connections and we spawn 100 tasks that spend ~1s each.
+ assert diff < 20_000_000_000 + + asyncTest "Insert a message": + const contentTopic = "test-content-topic" + const meta = "test meta" + + let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta) + + let computedDigest = computeDigest(msg) + let computedHash = computeMessageHash(DefaultPubsubTopic, msg) + + let putRes = await driver.put( + DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp + ) + assert putRes.isOk(), putRes.error + + let storedMsg = (await driver.getAllMessages()).tryGet() + + assert storedMsg.len == 1 + + let (pubsubTopic, actualMsg, digest, _, hash) = storedMsg[0] + assert actualMsg.contentTopic == contentTopic + assert pubsubTopic == DefaultPubsubTopic + assert toHex(computedDigest.data) == toHex(digest) + assert toHex(actualMsg.payload) == toHex(msg.payload) + assert toHex(computedHash) == toHex(hash) + assert toHex(actualMsg.meta) == toHex(msg.meta) + + asyncTest "Insert and query message": + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const pubsubTopic1 = "pubsubtopic-1" + const pubsubTopic2 = "pubsubtopic-2" + + let msg1 = fakeWakuMessage(contentTopic = contentTopic1) + + var putRes = await driver.put( + pubsubTopic1, + msg1, + computeDigest(msg1), + computeMessageHash(pubsubTopic1, msg1), + msg1.timestamp, + ) + assert putRes.isOk(), putRes.error + + let msg2 = fakeWakuMessage(contentTopic = contentTopic2) + + putRes = await driver.put( + pubsubTopic2, + msg2, + computeDigest(msg2), + computeMessageHash(pubsubTopic2, msg2), + msg2.timestamp, + ) + assert putRes.isOk(), putRes.error + + let countMessagesRes = await driver.getMessagesCount() + + assert countMessagesRes.isOk(), $countMessagesRes.error + assert countMessagesRes.get() == 2 + + var messagesRes = await driver.getMessages(contentTopic = @[contentTopic1]) + + assert messagesRes.isOk(), $messagesRes.error + assert messagesRes.get().len == 1 + + # Get both content topics, check ordering + messagesRes = + 
await driver.getMessages(contentTopic = @[contentTopic1, contentTopic2]) + assert messagesRes.isOk(), messagesRes.error + + assert messagesRes.get().len == 2 + assert messagesRes.get()[0][1].contentTopic == contentTopic1 + + # Descending order + messagesRes = await driver.getMessages( + contentTopic = @[contentTopic1, contentTopic2], ascendingOrder = false + ) + assert messagesRes.isOk(), messagesRes.error + + assert messagesRes.get().len == 2 + assert messagesRes.get()[0][1].contentTopic == contentTopic2 + + # cursor + # Get both content topics + messagesRes = await driver.getMessages( + contentTopic = @[contentTopic1, contentTopic2], + cursor = some(computeTestCursor(pubsubTopic1, messagesRes.get()[1][1])), + ) + assert messagesRes.isOk() + assert messagesRes.get().len == 1 + + # Get both content topics but one pubsub topic + messagesRes = await driver.getMessages( + contentTopic = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1) + ) + assert messagesRes.isOk(), messagesRes.error + + assert messagesRes.get().len == 1 + assert messagesRes.get()[0][1].contentTopic == contentTopic1 + + # Limit + messagesRes = await driver.getMessages( + contentTopic = @[contentTopic1, contentTopic2], maxPageSize = 1 + ) + assert messagesRes.isOk(), messagesRes.error + assert messagesRes.get().len == 1 + + asyncTest "Insert true duplicated messages": + # Validates that two completely equal messages can not be stored. 
+ + let now = now() + + let msg1 = fakeWakuMessage(ts = now) + let msg2 = fakeWakuMessage(ts = now) + + let initialNumMsgs = (await driver.getMessagesCount()).valueOr: + raiseAssert "could not get num mgs correctly: " & $error + + var putRes = await driver.put( + DefaultPubsubTopic, + msg1, + computeDigest(msg1), + computeMessageHash(DefaultPubsubTopic, msg1), + msg1.timestamp, + ) + assert putRes.isOk(), putRes.error + + var newNumMsgs = (await driver.getMessagesCount()).valueOr: + raiseAssert "could not get num mgs correctly: " & $error + + assert newNumMsgs == (initialNumMsgs + 1.int64), + "wrong number of messages: " & $newNumMsgs + + putRes = await driver.put( + DefaultPubsubTopic, + msg2, + computeDigest(msg2), + computeMessageHash(DefaultPubsubTopic, msg2), + msg2.timestamp, + ) + + assert putRes.isOk() + + newNumMsgs = (await driver.getMessagesCount()).valueOr: + raiseAssert "could not get num mgs correctly: " & $error + + assert newNumMsgs == (initialNumMsgs + 1.int64), + "wrong number of messages: " & $newNumMsgs diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_postgres_query.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_postgres_query.nim new file mode 100644 index 0000000..29c8e07 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_postgres_query.nim @@ -0,0 +1,1987 @@ +{.used.} + +import + std/[options, sequtils, strformat, random, algorithm], + testutils/unittests, + chronos, + chronicles +import + waku/waku_archive_legacy, + waku/waku_archive_legacy/driver as driver_module, + waku/waku_archive_legacy/driver/postgres_driver, + waku/waku_archive/driver/postgres_driver as new_postgres_driver, + waku/waku_core, + waku/waku_core/message/digest, + ../testlib/common, + ../testlib/wakucore, + ../testlib/testasync, + ../testlib/postgres_legacy, + ../testlib/postgres as new_postgres, + ../testlib/testutils + +logScope: + topics = "test archive postgres driver" + +## This whole file is copied from the 
'test_driver_sqlite_query.nim' file +## and it tests the same use cases but using the postgres driver. + +# Initialize the random number generator +common.randomize() + +proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor = + ArchiveCursor( + pubsubTopic: pubsubTopic, + senderTime: message.timestamp, + storeTime: message.timestamp, + digest: computeDigest(message), + hash: computeMessageHash(pubsubTopic, message), + ) + +suite "Postgres driver - queries": + ## Unique driver instance + var driver {.threadvar.}: postgres_driver.PostgresDriver + + ## We need to artificially create an instance of the "newDriver" + ## because this is the only one in charge of creating partitions + ## We will clean legacy store soon and this file will get removed. + var newDriver {.threadvar.}: new_postgres_driver.PostgresDriver + + asyncSetup: + let driverRes = await postgres_legacy.newTestPostgresDriver() + if driverRes.isErr(): + assert false, driverRes.error + + driver = postgres_driver.PostgresDriver(driverRes.get()) + + let newDriverRes = await new_postgres.newTestPostgresDriver() + if newDriverRes.isErr(): + assert false, newDriverRes.error + + newDriver = new_postgres_driver.PostgresDriver(newDriverRes.get()) + + asyncTeardown: + var resetRes = await driver.reset() + if resetRes.isErr(): + assert false, resetRes.error + + (await driver.close()).expect("driver to close") + + resetRes = await newDriver.reset() + if resetRes.isErr(): + assert false, resetRes.error + + (await newDriver.close()).expect("driver to close") + + asyncTest "no content topic": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4],
contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages(maxPageSize = 5, ascendingOrder = true) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[0 .. 4] + + asyncTest "single content topic": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = 
res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 3] + + asyncTest "single content topic with meta field": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00), meta = "meta-0"), + fakeWakuMessage(@[byte 1], ts = ts(10), meta = "meta-1"), + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20), meta = "meta-2" + ), + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30), meta = "meta-3" + ), + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40), meta = "meta-4" + ), + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50), meta = "meta-5" + ), + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60), meta = "meta-6" + ), + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70), meta = "meta-7" + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3] + + asyncTest "single content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[6 .. 
7].reversed() + + asyncTest "multiple content topic": + ## Given + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const contentTopic3 = "test-content-topic-3" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic1, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic2, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic3, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic1, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic2, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic3, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + var res = await driver.getMessages( + contentTopic = @[contentTopic1, contentTopic2], + pubsubTopic = some(DefaultPubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + startTime = some(ts(00)), + endTime = some(ts(40)), + ) + + ## Then + assert res.isOk(), res.error + var filteredMessages = res.tryGet().mapIt(it[1]) + check filteredMessages == expected[2 .. 
3] + + ## When + ## This is very similar to the previous one but we enforce to use the prepared + ## statement by querying one single content topic + res = await driver.getMessages( + contentTopic = @[contentTopic1], + pubsubTopic = some(DefaultPubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + startTime = some(ts(00)), + endTime = some(ts(40)), + ) + + ## Then + assert res.isOk(), res.error + filteredMessages = res.tryGet().mapIt(it[1]) + check filteredMessages == @[expected[2]] + + asyncTest "single content topic - no results": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic, ts = ts(40)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + asyncTest "content topic and max page size - not enough messages stored": + ## Given + const pageSize: uint = 50 + + for t in 0 ..< 40: + let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t)) + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let 
res = await driver.getMessages( + contentTopic = @[DefaultContentTopic], + maxPageSize = pageSize, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 40 + + asyncTest "pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + asyncTest "no pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[0 .. 
1] + + asyncTest "content topic and pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + asyncTest "only cursor": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[5 .. 
6] + + asyncTest "only cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = false + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3].reversed() + + asyncTest "only cursor - invalid": + ## Given + const contentTopic = "test-content-topic" + + var messages = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) + let cursor = ArchiveCursor(hash: fakeCursor) + + ## When + let res = await driver.getMessages( + includeData = true, + contentTopicSeq = @[DefaultContentTopic], + pubsubTopic = none(PubsubTopic), + cursor = some(cursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = @[], + maxPageSize = 5, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + check: + res.value.len == 0 + + asyncTest "content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], 
contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[5 .. 6] + + asyncTest "content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = 
false, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 5].reversed() + + asyncTest "pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeTestCursor(expected[5][0], expected[5][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = 
true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[6 .. 7] + + asyncTest "pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeTestCursor(expected[6][0], expected[6][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + 
cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + asyncTest "only hashes - descending order": + ## Given + let timeOrigin = now() + var expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + let hashes = messages.mapIt(computeMessageHash(DefaultPubsubTopic, it)) + + for (msg, hash) in messages.zip(hashes): + require ( + await driver.put( + DefaultPubsubTopic, msg, computeDigest(msg), hash, msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages(hashes = hashes, ascendingOrder = false) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.reversed() + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages + + asyncTest "start time only": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + 
fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 6] + + asyncTest "end time only": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() 
+ + ## When + let res = await driver.getMessages( + endTime = some(ts(45, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[0 .. 4] + + asyncTest "start time and end time": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # start_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), + 
endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[2 .. 4] + + asyncTest "invalid time range - no results": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(45, timeOrigin)), + endTime = some(ts(15, timeOrigin)), + maxPageSize = 2, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + asyncTest "time range start and content topic": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = 
contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
6] + + asyncTest "time range start and content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
6].reversed() + + asyncTest "time range start, single content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[3]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[4 .. 
9] + + asyncTest "time range start, single content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[3 .. 
4].reversed() + + asyncTest "time range, content topic, pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + # start_time + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(0, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + assert res.isOk(), res.error + + let expectedMessages = 
expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[3 .. 4] + + asyncTest "time range, content topic, pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeTestCursor(expected[7][0], expected[7][1]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, 
timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeTestCursor(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + 
contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + assert res.isOk(), res.error + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5] + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range, descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), 
computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeTestCursor(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + assert res.isOk(), res.error + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + xasyncTest "Get oldest and newest message timestamp": + ## This test no longer makes sense because that will always be controlled by the newDriver + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let oldestTime = ts(00, timeOrigin) + let newestTime = ts(100, timeOrigin) + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = oldestTime), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = newestTime), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## just keep the second resolution. + ## Notice that the oldest timestamps considers the minimum partition timestamp, which + ## is expressed in seconds. 
+ let oldestPartitionTimestamp = + Timestamp(float(oldestTime) / 1_000_000_000) * 1_000_000_000 + + var res = await driver.getOldestMessageTimestamp() + assert res.isOk(), res.error + + ## We give certain margin of error. The oldest timestamp is obtained from + ## the oldest partition timestamp and there might be at most one second of difference + ## between the time created in the test and the oldest-partition-timestamp created within + ## the driver logic. + assert abs(res.get() - oldestPartitionTimestamp) < (2 * 1_000_000_000), + fmt"Failed to retrieve the latest timestamp {res.get()} != {oldestPartitionTimestamp}" + + res = await driver.getNewestMessageTimestamp() + assert res.isOk(), res.error + assert res.get() == newestTime, "Failed to retrieve the newest timestamp" + + xasyncTest "Delete messages older than certain timestamp": + ## This test no longer makes sense because that will always be controlled by the newDriver + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let targetTime = ts(40, timeOrigin) + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = targetTime), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + var res = await 
driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 7, "Failed to retrieve the initial number of messages" + + let deleteRes = await driver.deleteMessagesOlderThanTimestamp(targetTime) + assert deleteRes.isOk(), deleteRes.error + + res = await driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 3, "Failed to retrieve the # of messages after deletion" + + xasyncTest "Keep last n messages": + ## This test no longer makes sense because that will always be controlled by the newDriver + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + var res = await driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 7, "Failed to retrieve the initial number of messages" + + let deleteRes = await driver.deleteOldestMessagesNotWithinLimit(2) + assert deleteRes.isOk(), deleteRes.error + + res = await driver.getMessagesCount() + assert res.isOk(), res.error + assert res.get() == 2, "Failed to retrieve the # of messages after deletion" + + asyncTest "Exists table": + 
var existsRes = await driver.existsTable("version") + assert existsRes.isOk(), existsRes.error + check existsRes.get() == true + + asyncTest "Query by message hash only - legacy": + const contentTopic = "test-content-topic" + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + ] + var messages = expected + + var hashes = newSeq[WakuMessageHash](0) + for msg in messages: + let hash = computeMessageHash(DefaultPubsubTopic, msg) + hashes.add(hash) + require ( + await driver.put( + DefaultPubsubTopic, msg, computeDigest(msg), hash, msg.timestamp + ) + ).isOk() + + let ret = (await driver.getMessages(hashes = hashes)).valueOr: + assert false, $error + return + + check: + ret.len == 3 + ## (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash) + ret[2][4] == hashes[0] + ret[1][4] == hashes[1] + ret[0][4] == hashes[2] diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue.nim new file mode 100644 index 0000000..aec9ad6 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue.nim @@ -0,0 +1,182 @@ +{.used.} + +import std/options, results, testutils/unittests +import + waku/waku_archive_legacy, + waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.}, + waku/waku_archive_legacy/driver/queue_driver/index, + waku/waku_core + +# Helper functions + +proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) = + ## Use i to generate an Index WakuMessage + var data {.noinit.}: array[32, byte] + for x in data.mitems: + x = i.byte + + let + message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i)) + topic = "test-pubsub-topic" + cursor = Index( + receiverTime: Timestamp(i), + senderTime: 
Timestamp(i), + digest: MessageDigest(data: data), + pubsubTopic: topic, + hash: computeMessageHash(topic, message), + ) + + (cursor, message) + +proc getPrepopulatedTestQueue(unsortedSet: auto, capacity: int): QueueDriver = + let driver = QueueDriver.new(capacity) + + for i in unsortedSet: + let (index, message) = genIndexedWakuMessage(i.int8) + discard driver.add(index, message) + + driver + +procSuite "Sorted driver queue": + test "queue capacity - add a message over the limit": + ## Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + # Fill up the queue + for i in 1 .. capacity: + let (index, message) = genIndexedWakuMessage(i.int8) + require(driver.add(index, message).isOk()) + + # Add one more. Capacity should not be exceeded + let (index, message) = genIndexedWakuMessage(capacity.int8 + 1) + require(driver.add(index, message).isOk()) + + ## Then + check: + driver.len == capacity + + test "queue capacity - add message older than oldest in the queue": + ## Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + # Fill up the queue + for i in 1 .. 
capacity: + let (index, message) = genIndexedWakuMessage(i.int8) + require(driver.add(index, message).isOk()) + + # Attempt to add message with older value than oldest in queue should fail + let + oldestTimestamp = driver.first().get().senderTime + (index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1) + addRes = driver.add(index, message) + + ## Then + check: + addRes.isErr() + addRes.error() == "too_old" + + check: + driver.len == capacity + + test "queue sort-on-insert": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + # Walk forward through the set and verify ascending order + var (prevSmaller, _) = genIndexedWakuMessage(min(unsortedSet).int8 - 1) + for i in driver.fwdIterator: + let (index, _) = i + check cmp(index, prevSmaller) > 0 + prevSmaller = index + + # Walk backward through the set and verify descending order + var (prevLarger, _) = genIndexedWakuMessage(max(unsortedSet).int8 + 1) + for i in driver.bwdIterator: + let (index, _) = i + check cmp(index, prevLarger) < 0 + prevLarger = index + + test "access first item from queue": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + ## When + let firstRes = driver.first() + + ## Then + check: + firstRes.isOk() + + let first = firstRes.tryGet() + check: + first.senderTime == Timestamp(1) + + test "get first item from empty queue should fail": + ## Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + let firstRes = driver.first() + + ## Then + check: + firstRes.isErr() + firstRes.error() == "Not found" + + test "access last item from queue": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + ## When + let lastRes = driver.last() + + ## Then + check: + lastRes.isOk() + + let last = lastRes.tryGet() + check: + last.senderTime == 
Timestamp(5) + + test "get last item from empty queue should fail": + ## Given + let capacity = 5 + let driver = QueueDriver.new(capacity) + + ## When + let lastRes = driver.last() + + ## Then + check: + lastRes.isErr() + lastRes.error() == "Not found" + + test "verify if queue contains an index": + ## Given + let + capacity = 5 + unsortedSet = [5, 1, 3, 2, 4] + let driver = getPrepopulatedTestQueue(unsortedSet, capacity) + + let + (existingIndex, _) = genIndexedWakuMessage(4) + (nonExistingIndex, _) = genIndexedWakuMessage(99) + + ## Then + check: + driver.contains(existingIndex) == true + driver.contains(nonExistingIndex) == false diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_index.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_index.nim new file mode 100644 index 0000000..404dca8 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_index.nim @@ -0,0 +1,219 @@ +{.used.} + +import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto +import waku/waku_core, waku/waku_archive_legacy/driver/queue_driver/index + +var rng = initRand() + +## Helpers + +proc getTestTimestamp(offset = 0): Timestamp = + let now = getNanosecondTime(epochTime() + float(offset)) + Timestamp(now) + +proc hashFromStr(input: string): MDigest[256] = + var ctx: sha256 + + ctx.init() + ctx.update(input.toBytes()) + let hashed = ctx.finish() + ctx.clear() + + return hashed + +proc randomHash(): WakuMessageHash = + var hash: WakuMessageHash + + for i in 0 ..< hash.len: + let numb: byte = byte(rng.next()) + hash[i] = numb + + hash + +suite "Queue Driver - index": + ## Test vars + let + smallIndex1 = Index( + digest: hashFromStr("1234"), + receiverTime: getNanosecondTime(0), + senderTime: getNanosecondTime(1000), + hash: randomHash(), + ) + smallIndex2 = Index( + digest: hashFromStr("1234567"), # digest is less significant than senderTime + receiverTime: getNanosecondTime(0), + senderTime: 
getNanosecondTime(1000), + hash: randomHash(), + ) + largeIndex1 = Index( + digest: hashFromStr("1234"), + receiverTime: getNanosecondTime(0), + senderTime: getNanosecondTime(9000), + hash: randomHash(), + ) # only senderTime differ from smallIndex1 + largeIndex2 = Index( + digest: hashFromStr("12345"), # only digest differs from smallIndex1 + receiverTime: getNanosecondTime(0), + senderTime: getNanosecondTime(1000), + hash: randomHash(), + ) + eqIndex1 = Index( + digest: hashFromStr("0003"), + receiverTime: getNanosecondTime(0), + senderTime: getNanosecondTime(54321), + hash: randomHash(), + ) + eqIndex2 = Index( + digest: hashFromStr("0003"), + receiverTime: getNanosecondTime(0), + senderTime: getNanosecondTime(54321), + hash: randomHash(), + ) + eqIndex3 = Index( + digest: hashFromStr("0003"), + receiverTime: getNanosecondTime(9999), + # receiverTime difference should have no effect on comparisons + senderTime: getNanosecondTime(54321), + hash: randomHash(), + ) + diffPsTopic = Index( + digest: hashFromStr("1234"), + receiverTime: getNanosecondTime(0), + senderTime: getNanosecondTime(1000), + pubsubTopic: "zzzz", + hash: randomHash(), + ) + noSenderTime1 = Index( + digest: hashFromStr("1234"), + receiverTime: getNanosecondTime(1100), + senderTime: getNanosecondTime(0), + pubsubTopic: "zzzz", + hash: randomHash(), + ) + noSenderTime2 = Index( + digest: hashFromStr("1234"), + receiverTime: getNanosecondTime(10000), + senderTime: getNanosecondTime(0), + pubsubTopic: "zzzz", + hash: randomHash(), + ) + noSenderTime3 = Index( + digest: hashFromStr("1234"), + receiverTime: getNanosecondTime(1200), + senderTime: getNanosecondTime(0), + pubsubTopic: "aaaa", + hash: randomHash(), + ) + noSenderTime4 = Index( + digest: hashFromStr("0"), + receiverTime: getNanosecondTime(1200), + senderTime: getNanosecondTime(0), + pubsubTopic: "zzzz", + hash: randomHash(), + ) + + test "Index comparison": + # Index comparison with senderTime diff + check: + cmp(smallIndex1, largeIndex1) < 
0 + cmp(smallIndex2, largeIndex1) < 0 + + # Index comparison with digest diff + check: + cmp(smallIndex1, smallIndex2) < 0 + cmp(smallIndex1, largeIndex2) < 0 + cmp(smallIndex2, largeIndex2) > 0 + cmp(largeIndex1, largeIndex2) > 0 + + # Index comparison when equal + check: + cmp(eqIndex1, eqIndex2) == 0 + + # pubsubTopic difference + check: + cmp(smallIndex1, diffPsTopic) < 0 + + # receiverTime diff plays no role when senderTime set + check: + cmp(eqIndex1, eqIndex3) == 0 + + # receiverTime diff plays no role when digest/pubsubTopic equal + check: + cmp(noSenderTime1, noSenderTime2) == 0 + + # sort on receiverTime with no senderTimestamp and unequal pubsubTopic + check: + cmp(noSenderTime1, noSenderTime3) < 0 + + # sort on receiverTime with no senderTimestamp and unequal digest + check: + cmp(noSenderTime1, noSenderTime4) < 0 + + # sort on receiverTime if no senderTimestamp on only one side + check: + cmp(smallIndex1, noSenderTime1) < 0 + cmp(noSenderTime1, smallIndex1) > 0 # Test symmetry + cmp(noSenderTime2, eqIndex3) < 0 + cmp(eqIndex3, noSenderTime2) > 0 # Test symmetry + + test "Index equality": + # Exactly equal + check: + eqIndex1 == eqIndex2 + + # Receiver time plays no role, even without sender time + check: + eqIndex1 == eqIndex3 + noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal + noSenderTime1 != noSenderTime3 # pubsubTopics differ + noSenderTime1 != noSenderTime4 # digests differ + + # Unequal sender time + check: + smallIndex1 != largeIndex1 + + # Unequal digest + check: + smallIndex1 != smallIndex2 + + # Unequal hash and digest + check: + smallIndex1 != eqIndex1 + + # Unequal pubsubTopic + check: + smallIndex1 != diffPsTopic + + test "Index computation should not be empty": + ## Given + let ts = getTestTimestamp() + let wm = WakuMessage(payload: @[byte 1, 2, 3], timestamp: ts) + + ## When + let ts2 = getTestTimestamp() + 10 + let index = Index.compute(wm, ts2, DefaultContentTopic) + + ## Then + check: + 
index.digest.data.len != 0 + index.digest.data.len == 32 # sha2 output length in bytes + index.receiverTime == ts2 # the receiver timestamp should be a non-zero value + index.senderTime == ts + index.pubsubTopic == DefaultContentTopic + + test "Index digest of two identical messsage should be the same": + ## Given + let topic = ContentTopic("test-content-topic") + let + wm1 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic) + wm2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic) + + ## When + let ts = getTestTimestamp() + let + index1 = Index.compute(wm1, ts, DefaultPubsubTopic) + index2 = Index.compute(wm2, ts, DefaultPubsubTopic) + + ## Then + check: + index1.digest == index2.digest diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_pagination.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_pagination.nim new file mode 100644 index 0000000..05d9759 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_pagination.nim @@ -0,0 +1,405 @@ +{.used.} + +import + std/[options, sequtils, algorithm], testutils/unittests, libp2p/protobuf/minprotobuf +import + waku/waku_archive_legacy, + waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.}, + waku/waku_archive_legacy/driver/queue_driver/index, + waku/waku_core, + ../testlib/wakucore + +proc getTestQueueDriver(numMessages: int): QueueDriver = + let testQueueDriver = QueueDriver.new(numMessages) + + var data {.noinit.}: array[32, byte] + for x in data.mitems: + x = 1 + + for i in 0 ..< numMessages: + let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i)) + + let index = Index( + receiverTime: Timestamp(i), + senderTime: Timestamp(i), + digest: MessageDigest(data: data), + hash: computeMessageHash(DefaultPubsubTopic, msg), + ) + + discard testQueueDriver.add(index, msg) + + return testQueueDriver + +procSuite "Queue driver - pagination": + let driver = getTestQueueDriver(10) + let + indexList: seq[Index] = 
toSeq(driver.fwdIterator()).mapIt(it[0]) + msgList: seq[WakuMessage] = toSeq(driver.fwdIterator()).mapIt(it[1]) + + test "Forward pagination - normal pagination": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = some(indexList[3]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 2 + data == msgList[4 .. 5] + + test "Forward pagination - initial pagination request with an empty cursor": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 2 + data == msgList[0 .. 1] + + test "Forward pagination - initial pagination request with an empty cursor to fetch the entire history": + ## Given + let + pageSize: uint = 13 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 10 + data == msgList[0 .. 9] + + test "Forward pagination - empty msgList": + ## Given + let driver = getTestQueueDriver(0) + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 0 + + test "Forward pagination - page size larger than the remaining messages": + ## Given + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[3]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 6 + data == msgList[4 .. 
9] + + test "Forward pagination - page size larger than the maximum allowed page size": + ## Given + let + pageSize: uint = MaxPageSize + 1 + cursor: Option[Index] = some(indexList[3]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + uint(data.len) <= MaxPageSize + + test "Forward pagination - cursor pointing to the end of the message list": + ## Given + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[9]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 0 + + test "Forward pagination - invalid cursor": + ## Given + let msg = fakeWakuMessage(payload = @[byte 10]) + let index = ArchiveCursor( + pubsubTopic: DefaultPubsubTopic, + senderTime: msg.timestamp, + storeTime: msg.timestamp, + digest: computeDigest(msg), + ).toIndex() + + let + pageSize: uint = 10 + cursor: Option[Index] = some(index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let error = page.tryError() + check: + error == QueueDriverErrorKind.INVALID_CURSOR + + test "Forward pagination - initial paging query over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = none(Index) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 1 + + test "Forward pagination - pagination over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[0]) + forward: bool = true + + ## When + let page = driver.getPage(pageSize = 
pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 0 + + test "Forward pagination - with pradicate": + ## Given + let + pageSize: uint = 3 + cursor: Option[Index] = none(Index) + forward = true + + proc onlyEvenTimes(index: Index, msg: WakuMessage): bool = + msg.timestamp.int64 mod 2 == 0 + + ## When + let page = driver.getPage( + pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyEvenTimes + ) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.mapIt(it.timestamp.int) == @[0, 2, 4] + + test "Backward pagination - normal pagination": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = some(indexList[3]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data == msgList[1 .. 2].reversed + + test "Backward pagination - empty msgList": + ## Given + let driver = getTestQueueDriver(0) + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 0 + + test "Backward pagination - initial pagination request with an empty cursor": + ## Given + let + pageSize: uint = 2 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 2 + data == msgList[8 .. 
9].reversed + + test "Backward pagination - initial pagination request with an empty cursor to fetch the entire history": + ## Given + let + pageSize: uint = 13 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 10 + data == msgList[0 .. 9].reversed + + test "Backward pagination - page size larger than the remaining messages": + ## Given + let + pageSize: uint = 5 + cursor: Option[Index] = some(indexList[3]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data == msgList[0 .. 2].reversed + + test "Backward pagination - page size larger than the Maximum allowed page size": + ## Given + let + pageSize: uint = MaxPageSize + 1 + cursor: Option[Index] = some(indexList[3]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + uint(data.len) <= MaxPageSize + + test "Backward pagination - cursor pointing to the begining of the message list": + ## Given + let + pageSize: uint = 5 + cursor: Option[Index] = some(indexList[0]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 0 + + test "Backward pagination - invalid cursor": + ## Given + let msg = fakeWakuMessage(payload = @[byte 10]) + let index = ArchiveCursor( + pubsubTopic: DefaultPubsubTopic, + senderTime: msg.timestamp, + storeTime: msg.timestamp, + digest: computeDigest(msg), + ).toIndex() + + let + pageSize: uint = 2 + cursor: Option[Index] = some(index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = 
pageSize, forward = forward, cursor = cursor) + + ## Then + let error = page.tryError() + check: + error == QueueDriverErrorKind.INVALID_CURSOR + + test "Backward pagination - initial paging query over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = none(Index) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 1 + + test "Backward pagination - paging query over a message list with one message": + ## Given + let driver = getTestQueueDriver(1) + let + pageSize: uint = 10 + cursor: Option[Index] = some(indexList[0]) + forward: bool = false + + ## When + let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.len == 0 + + test "Backward pagination - with predicate": + ## Given + let + pageSize: uint = 3 + cursor: Option[Index] = none(Index) + forward = false + + proc onlyOddTimes(index: Index, msg: WakuMessage): bool = + msg.timestamp.int64 mod 2 != 0 + + ## When + let page = driver.getPage( + pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyOddTimes + ) + + ## Then + let data = page.tryGet().mapIt(it[1]) + check: + data.mapIt(it.timestamp.int) == @[5, 7, 9].reversed diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_query.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_query.nim new file mode 100644 index 0000000..6bd4405 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_queue_query.nim @@ -0,0 +1,1795 @@ +{.used.} + +import + std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles +import + waku/waku_archive_legacy, + waku/waku_archive_legacy/driver/queue_driver, + waku/waku_core, + waku/waku_core/message/digest, + 
../testlib/common, + ../testlib/wakucore + +logScope: + topics = "test archive queue_driver" + +# Initialize the random number generator +common.randomize() + +proc newTestSqliteDriver(): ArchiveDriver = + QueueDriver.new(capacity = 50) + +proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor = + ArchiveCursor( + pubsubTopic: pubsubTopic, + senderTime: message.timestamp, + storeTime: message.timestamp, + digest: computeDigest(message), + hash: computeMessageHash(pubsubTopic, message), + ) + +suite "Queue driver - query by content topic": + test "no content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages(maxPageSize = 5, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[0 .. 
4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "single content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "single content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[6 .. 
7].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "multiple content topic": + ## Given + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const contentTopic3 = "test-content-topic-3" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic1, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic2, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic3, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic1, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic2, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic3, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic1, contentTopic2], + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "single content topic - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic, ts = ts(40)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and max page size - not enough messages stored": + ## Given + const pageSize: uint = 50 + + let driver = newTestSqliteDriver() + + for t in 0 ..< 40: + let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t)) + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[DefaultContentTopic], + maxPageSize = pageSize, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 40 + + ## 
Cleanup + (waitFor driver.close()).expect("driver to close") + +suite "SQLite driver - query by pubsub topic": + test "pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "no pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages(maxPageSize = 2, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[0 .. 
1] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + +suite "Queue driver - query by cursor": + test "only cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = waitFor driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[5 .. 
6] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "only cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = waitFor driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "only cursor - invalid": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + var messages = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) + let cursor = ArchiveCursor(hash: fakeCursor) + + ## When + let res = waitFor driver.getMessages( + includeData = true, + contentTopic = @[DefaultContentTopic], + pubsubTopic = none(PubsubTopic), + cursor = some(cursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = @[], + maxPageSize = 5, + ascendingOrder = true, + ) + + ## Then + check: + res.isErr() + res.error == "invalid_cursor" + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, 
ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[5 .. 6] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + 
computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[6]) + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 5].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = 
messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + let cursor = computeTestCursor(expected[5][0], expected[5][1]) + + ## When + let res = waitFor driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[6 .. 7] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], 
contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + let cursor = computeTestCursor(expected[6][0], expected[6][1]) + + ## When + let res = waitFor driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + +suite "Queue driver - query by time range": + test "start time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + 
computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 6] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "end time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + endTime = some(ts(45, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[0 .. 
4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "start time and end time": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # start_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + startTime = some(ts(15, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = 
expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[2 .. 4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "invalid time range - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(45, timeOrigin)), + endTime = some(ts(15, timeOrigin)), + maxPageSize = 2, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + asynctest "time range start and content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = 
ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + test "time range start and content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
6].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + asynctest "time range start, single content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[3]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[4 .. 
9] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asynctest "time range start, single content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + let retFut = await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + require retFut.isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[3 .. 
4].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + # start_time + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + let cursor = computeTestCursor(DefaultPubsubTopic, expected[1][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(0, timeOrigin)), + endTime = some(ts(45, timeOrigin)), 
+ maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[3 .. 4] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + let cursor = computeTestCursor(expected[7][0], 
expected[7][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = 
messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + let cursor = computeTestCursor(expected[1][0], expected[1][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5] + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range, descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newTestSqliteDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts 
= ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + let retFut = waitFor driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + require retFut.isOk() + + let cursor = computeTestCursor(expected[1][0], expected[1][1]) + + ## When + let res = waitFor driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + ## Cleanup + (waitFor driver.close()).expect("driver to close") diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_sqlite.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_sqlite.nim new file mode 100644 index 0000000..9d8c4d1 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_sqlite.nim @@ -0,0 +1,58 @@ +{.used.} + +import std/sequtils, testutils/unittests, chronos +import + waku/waku_archive_legacy, + waku/waku_archive_legacy/driver/sqlite_driver, + waku/waku_core, + ../waku_archive_legacy/archive_utils, + ../testlib/wakucore + +suite "SQLite driver": + test "init driver and database": + ## Given + let database = newSqliteDatabase() + + ## When + let driverRes = SqliteDriver.new(database) + + ## Then + check: + driverRes.isOk() + + let driver: ArchiveDriver = driverRes.tryGet() + check: + not driver.isNil() + + ## Cleanup + (waitFor driver.close()).expect("driver to close") + + test "insert a message": + ## Given + const contentTopic = "test-content-topic" 
+ const meta = "test meta" + + let driver = newSqliteArchiveDriver() + + let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta) + let msgHash = computeMessageHash(DefaultPubsubTopic, msg) + + ## When + let putRes = waitFor driver.put( + DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp + ) + + ## Then + check: + putRes.isOk() + + let storedMsg = (waitFor driver.getAllMessages()).tryGet() + check: + storedMsg.len == 1 + storedMsg.all do(item: auto) -> bool: + let (pubsubTopic, actualMsg, _, _, hash) = item + actualMsg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and + hash == msgHash and msg.meta == actualMsg.meta + + ## Cleanup + (waitFor driver.close()).expect("driver to close") diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_driver_sqlite_query.nim b/third-party/nwaku/tests/waku_archive_legacy/test_driver_sqlite_query.nim new file mode 100644 index 0000000..42f3948 --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_driver_sqlite_query.nim @@ -0,0 +1,1873 @@ +{.used.} + +import + std/[options, sequtils, random, algorithm], testutils/unittests, chronos, chronicles + +import + waku/waku_archive_legacy, + waku/waku_core, + waku/waku_core/message/digest, + ../testlib/common, + ../testlib/wakucore, + ../waku_archive_legacy/archive_utils + +logScope: + topics = "test archive _driver" + +# Initialize the random number generator +common.randomize() + +suite "SQLite driver - query by content topic": + asyncTest "no content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = 
contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages(maxPageSize = 5, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[0 .. 4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## 
Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 3] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic with meta field": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00), meta = "meta-0"), + fakeWakuMessage(@[byte 1], ts = ts(10), meta = "meta-1"), + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20), meta = "meta-2" + ), + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30), meta = "meta-3" + ), + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40), meta = "meta-4" + ), + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50), meta = "meta-5" + ), + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60), meta = "meta-6" + ), + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70), meta = "meta-7" + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[6 .. 
7].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "multiple content topic": + ## Given + const contentTopic1 = "test-content-topic-1" + const contentTopic2 = "test-content-topic-2" + const contentTopic3 = "test-content-topic-3" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic1, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic2, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic3, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic1, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic2, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic3, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic1, contentTopic2], + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "single content topic - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = DefaultContentTopic, ts = ts(00)), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic, ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic, ts = ts(40)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and max page size - not enough messages stored": + ## Given + const pageSize: uint = 50 + + let driver = newSqliteArchiveDriver() + + for t in 0 ..< 40: + let msg = fakeWakuMessage(@[byte t], DefaultContentTopic, ts = ts(t)) + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[DefaultContentTopic], + maxPageSize = pageSize, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 40 + + ## Cleanup + (await 
driver.close()).expect("driver to close") + +suite "SQLite driver - query by pubsub topic": + asyncTest "pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "no pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages(maxPageSize = 2, ascendingOrder = true) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[0 .. 
1] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and pubsub topic": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10))), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + ), + ( + pubsubTopic, + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + maxPageSize = 2, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 
5] + + ## Cleanup + (await driver.close()).expect("driver to close") + +suite "SQLite driver - query by cursor": + asyncTest "only cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[5 .. 
6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "only cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + cursor = some(cursor), maxPageSize = 2, ascendingOrder = false + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
3].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "only cursor - invalid": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + var messages = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) + let cursor = ArchiveCursor(hash: fakeCursor) + + ## When + let res = await driver.getMessages( + includeData = true, + contentTopic = @[DefaultContentTopic], + pubsubTopic = none(PubsubTopic), + cursor = some(cursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = @[], + maxPageSize = 5, + ascendingOrder = true, + ) + + ## Then + check: + res.isErr() + res.error == "cursor not found" + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = 
ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + # << cursor + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + fakeWakuMessage(@[byte 7], ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[4]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[5 .. 6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let expected = + @[ + fakeWakuMessage(@[byte 0], ts = ts(00)), + fakeWakuMessage(@[byte 1], ts = ts(10)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), 
+ computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 5].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) 
+ + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeArchiveCursor(expected[5][0], expected[5][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[6 .. 7] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), # << cursor + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, 
timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeArchiveCursor(expected[6][0], expected[6][1]) + + ## When + let res = await driver.getMessages( + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + +suite "SQLite driver - query by time range": + asyncTest "start time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + 
## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "end time only": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + endTime = some(ts(45, timeOrigin)), maxPageSize = 10, ascendingOrder = true + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[0 .. 
4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "start time and end time": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # start_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + ## When + let res = await driver.getMessages( + startTime = some(ts(15, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = 
expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[2 .. 4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "invalid time range - no results": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + # end_time + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(45, timeOrigin)), + endTime = some(ts(15, timeOrigin)), + maxPageSize = 2, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start and content topic": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, 
timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
6] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start and content topic - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[2 .. 
6].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start, single content topic and cursor": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[3]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[4 .. 
9] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range start, single content topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + fakeWakuMessage(@[byte 0], contentTopic = contentTopic, ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 1], contentTopic = contentTopic, ts = ts(10, timeOrigin)), + # start_time + fakeWakuMessage(@[byte 2], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 5], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 6], contentTopic = contentTopic, ts = ts(60, timeOrigin)), + # << cursor + fakeWakuMessage(@[byte 7], contentTopic = contentTopic, ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin)), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", sequence = messages.mapIt(it.payload) + + for msg in messages: + require ( + await driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[6]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + cursor = some(cursor), + startTime = some(ts(15, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expected[3 .. 
4].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + # start_time + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + # end_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeArchiveCursor(DefaultPubsubTopic, expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(0, timeOrigin)), + endTime = some(ts(45, timeOrigin)), + 
maxPageSize = 10, + ascendingOrder = true, + ) + + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[3 .. 4] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor - descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeArchiveCursor(expected[7][0], expected[7][1]) + 
+ ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5].reversed() + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = 
messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeArchiveCursor(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = true, + ) + + ## Then + check: + res.isOk() + + let expectedMessages = expected.mapIt(it[1]) + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages == expectedMessages[4 .. 5] + + ## Cleanup + (await driver.close()).expect("driver to close") + + asyncTest "time range, content topic, pubsub topic and cursor - cursor timestamp out of time range, descending order": + ## Given + const contentTopic = "test-content-topic" + const pubsubTopic = "test-pubsub-topic" + + let driver = newSqliteArchiveDriver() + + let timeOrigin = now() + let expected = + @[ + (DefaultPubsubTopic, fakeWakuMessage(@[byte 0], ts = ts(00, timeOrigin))), + (DefaultPubsubTopic, fakeWakuMessage(@[byte 1], ts = ts(10, timeOrigin))), + # << cursor + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 2], contentTopic = contentTopic, ts = ts(20, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 3], contentTopic = contentTopic, ts = ts(30, timeOrigin) + ), + ), + # start_time + ( + pubsubTopic, + fakeWakuMessage( + @[byte 4], contentTopic = contentTopic, ts = ts(40, timeOrigin) + ), + ), + ( + pubsubTopic, + fakeWakuMessage( + @[byte 5], contentTopic = contentTopic, ts = ts(50, timeOrigin) + ), + ), + (pubsubTopic, fakeWakuMessage(@[byte 6], ts = ts(60, timeOrigin))), + (pubsubTopic, fakeWakuMessage(@[byte 7], ts = ts(70, timeOrigin))), + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 8], contentTopic = contentTopic, ts = ts(80, 
timeOrigin) + ), + ), + # end_time + ( + DefaultPubsubTopic, + fakeWakuMessage( + @[byte 9], contentTopic = contentTopic, ts = ts(90, timeOrigin) + ), + ), + ] + var messages = expected + + shuffle(messages) + debug "randomized message insertion sequence", + sequence = messages.mapIt(it[1].payload) + + for row in messages: + let (topic, msg) = row + require ( + await driver.put( + topic, msg, computeDigest(msg), computeMessageHash(topic, msg), msg.timestamp + ) + ).isOk() + + let cursor = computeArchiveCursor(expected[1][0], expected[1][1]) + + ## When + let res = await driver.getMessages( + contentTopic = @[contentTopic], + pubsubTopic = some(pubsubTopic), + cursor = some(cursor), + startTime = some(ts(35, timeOrigin)), + endTime = some(ts(85, timeOrigin)), + maxPageSize = 10, + ascendingOrder = false, + ) + + ## Then + check: + res.isOk() + + let filteredMessages = res.tryGet().mapIt(it[1]) + check: + filteredMessages.len == 0 + + ## Cleanup + (await driver.close()).expect("driver to close") diff --git a/third-party/nwaku/tests/waku_archive_legacy/test_waku_archive.nim b/third-party/nwaku/tests/waku_archive_legacy/test_waku_archive.nim new file mode 100644 index 0000000..e58b2cf --- /dev/null +++ b/third-party/nwaku/tests/waku_archive_legacy/test_waku_archive.nim @@ -0,0 +1,535 @@ +{.used.} + +import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/common/paging, + waku/waku_core, + waku/waku_core/message/digest, + waku/waku_archive_legacy, + ../waku_archive_legacy/archive_utils, + ../testlib/wakucore + +suite "Waku Archive - message handling": + test "it should archive a valid and non-ephemeral message": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let validSenderTime = now() + let message = fakeWakuMessage(ephemeral = false, ts = validSenderTime) + + ## When + waitFor archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor 
driver.getMessagesCount()).tryGet() == 1 + + test "it should not archive ephemeral messages": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let msgList = + @[ + fakeWakuMessage(ephemeral = false, payload = "1"), + fakeWakuMessage(ephemeral = true, payload = "2"), + fakeWakuMessage(ephemeral = true, payload = "3"), + fakeWakuMessage(ephemeral = true, payload = "4"), + fakeWakuMessage(ephemeral = false, payload = "5"), + ] + + ## When + for msg in msgList: + waitFor archive.handleMessage(DefaultPubsubTopic, msg) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 2 + + test "it should archive a message with no sender timestamp": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let invalidSenderTime = 0 + let message = fakeWakuMessage(ts = invalidSenderTime) + + ## When + waitFor archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 1 + + test "it should not archive a message with a sender time variance greater than max time variance (future)": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let + now = now() + invalidSenderTime = now + MaxMessageTimestampVariance + 1_000_000_000 + # 1 second over the max variance + + let message = fakeWakuMessage(ts = invalidSenderTime) + + ## When + waitFor archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 0 + + test "it should not archive a message with a sender time variance greater than max time variance (past)": + ## Setup + let driver = newSqliteArchiveDriver() + let archive = newWakuArchive(driver) + + ## Given + let + now = now() + invalidSenderTime = now - MaxMessageTimestampVariance - 1 + + let message = fakeWakuMessage(ts = invalidSenderTime) + + ## When + waitFor 
archive.handleMessage(DefaultPubSubTopic, message) + + ## Then + check: + (waitFor driver.getMessagesCount()).tryGet() == 0 + +procSuite "Waku Archive - find messages": + ## Fixtures + let timeOrigin = now() + let msgListA = + @[ + fakeWakuMessage( + @[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin) + ), + fakeWakuMessage( + @[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin) + ), + fakeWakuMessage( + @[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin) + ), + fakeWakuMessage( + @[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin) + ), + fakeWakuMessage( + @[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin) + ), + fakeWakuMessage( + @[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin) + ), + fakeWakuMessage( + @[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin) + ), + fakeWakuMessage( + @[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin) + ), + fakeWakuMessage( + @[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin) + ), + fakeWakuMessage( + @[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin) + ), + ] + + let archiveA = block: + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + for msg in msgListA: + require ( + waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + archive + + test "handle query": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let topic = ContentTopic("1") + let + msg1 = fakeWakuMessage(contentTopic = topic) + msg2 = fakeWakuMessage() + + waitFor archive.handleMessage("foo", msg1) + waitFor archive.handleMessage("foo", msg2) + + ## Given + let req = ArchiveQuery(includeData: true, contentTopics: @[topic]) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + check: + 
queryRes.isOk() + + let response = queryRes.tryGet() + check: + response.messages.len == 1 + response.messages == @[msg1] + + test "handle query with multiple content filters": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let + topic1 = ContentTopic("1") + topic2 = ContentTopic("2") + topic3 = ContentTopic("3") + + let + msg1 = fakeWakuMessage(contentTopic = topic1) + msg2 = fakeWakuMessage(contentTopic = topic2) + msg3 = fakeWakuMessage(contentTopic = topic3) + + waitFor archive.handleMessage("foo", msg1) + waitFor archive.handleMessage("foo", msg2) + waitFor archive.handleMessage("foo", msg3) + + ## Given + let req = ArchiveQuery(includeData: true, contentTopics: @[topic1, topic3]) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + check: + queryRes.isOk() + + let response = queryRes.tryGet() + check: + response.messages.len() == 2 + response.messages.anyIt(it == msg1) + response.messages.anyIt(it == msg3) + + test "handle query with more than 10 content filters": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let queryTopics = toSeq(1 .. 
15).mapIt(ContentTopic($it)) + + ## Given + let req = ArchiveQuery(contentTopics: queryTopics) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + check: + queryRes.isErr() + + let error = queryRes.tryError() + check: + error.kind == ArchiveErrorKind.INVALID_QUERY + error.cause == "too many content topics" + + test "handle query with pubsub topic filter": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let + pubsubTopic1 = "queried-topic" + pubsubTopic2 = "non-queried-topic" + + let + contentTopic1 = ContentTopic("1") + contentTopic2 = ContentTopic("2") + contentTopic3 = ContentTopic("3") + + let + msg1 = fakeWakuMessage(contentTopic = contentTopic1) + msg2 = fakeWakuMessage(contentTopic = contentTopic2) + msg3 = fakeWakuMessage(contentTopic = contentTopic3) + + waitFor archive.handleMessage(pubsubtopic1, msg1) + waitFor archive.handleMessage(pubsubtopic2, msg2) + waitFor archive.handleMessage(pubsubtopic2, msg3) + + ## Given + # This query targets: pubsubtopic1 AND (contentTopic1 OR contentTopic3) + let req = ArchiveQuery( + includeData: true, + pubsubTopic: some(pubsubTopic1), + contentTopics: @[contentTopic1, contentTopic3], + ) + + ## When + let queryRes = waitFor archive.findMessages(req) + + ## Then + check: + queryRes.isOk() + + let response = queryRes.tryGet() + check: + response.messages.len() == 1 + response.messages.anyIt(it == msg1) + + test "handle query with pubsub topic filter - no match": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let + pubsubtopic1 = "queried-topic" + pubsubtopic2 = "non-queried-topic" + + let + msg1 = fakeWakuMessage() + msg2 = fakeWakuMessage() + msg3 = fakeWakuMessage() + + waitFor archive.handleMessage(pubsubtopic2, msg1) + waitFor archive.handleMessage(pubsubtopic2, msg2) + waitFor archive.handleMessage(pubsubtopic2, msg3) + + ## Given + let req = ArchiveQuery(pubsubTopic: some(pubsubTopic1)) + + ## When + let 
res = waitFor archive.findMessages(req) + + ## Then + check: + res.isOk() + + let response = res.tryGet() + check: + response.messages.len() == 0 + + test "handle query with pubsub topic filter - match the entire stored messages": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let pubsubTopic = "queried-topic" + + let + msg1 = fakeWakuMessage(payload = "TEST-1") + msg2 = fakeWakuMessage(payload = "TEST-2") + msg3 = fakeWakuMessage(payload = "TEST-3") + + waitFor archive.handleMessage(pubsubTopic, msg1) + waitFor archive.handleMessage(pubsubTopic, msg2) + waitFor archive.handleMessage(pubsubTopic, msg3) + + ## Given + let req = ArchiveQuery(includeData: true, pubsubTopic: some(pubsubTopic)) + + ## When + let res = waitFor archive.findMessages(req) + + ## Then + check: + res.isOk() + + let response = res.tryGet() + check: + response.messages.len() == 3 + response.messages.anyIt(it == msg1) + response.messages.anyIt(it == msg2) + response.messages.anyIt(it == msg3) + + test "handle query with forward pagination": + ## Given + let req = + ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.FORWARD) + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](3) + var cursors = newSeq[Option[ArchiveCursor]](3) + + for i in 0 ..< 3: + let res = waitFor archiveA.findMessages(nextReq) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[3])) + cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[7])) + cursors[2] == none(ArchiveCursor) + + check: + pages[0] == msgListA[0 .. 3] + pages[1] == msgListA[4 .. 7] + pages[2] == msgListA[8 .. 
9] + + test "handle query with backward pagination": + ## Given + let req = + ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.BACKWARD) + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](3) + var cursors = newSeq[Option[ArchiveCursor]](3) + + for i in 0 ..< 3: + let res = waitFor archiveA.findMessages(nextReq) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[6])) + cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[2])) + cursors[2] == none(ArchiveCursor) + + check: + pages[0] == msgListA[6 .. 9] + pages[1] == msgListA[2 .. 5] + pages[2] == msgListA[0 .. 1] + + test "handle query with no paging info - auto-pagination": + ## Setup + let + driver = newSqliteArchiveDriver() + archive = newWakuArchive(driver) + + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2")), + fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 5], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 6], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 7], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 8], contentTopic = DefaultContentTopic), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2")), + ] + + for msg in msgList: + require ( + waitFor driver.put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + ).isOk() + + ## Given + let req = ArchiveQuery(includeData: true, contentTopics: 
@[DefaultContentTopic]) + + ## When + let res = waitFor archive.findMessages(req) + + ## Then + check: + res.isOk() + + let response = res.tryGet() + check: + ## No pagination specified. Response will be auto-paginated with + ## up to MaxPageSize messages per page. + response.messages.len() == 8 + response.cursor.isNone() + + test "handle temporal history query with a valid time window": + ## Given + let req = ArchiveQuery( + includeData: true, + contentTopics: @[ContentTopic("1")], + startTime: some(ts(15, timeOrigin)), + endTime: some(ts(55, timeOrigin)), + direction: PagingDirection.FORWARD, + ) + + ## When + let res = waitFor archiveA.findMessages(req) + + ## Then + check res.isOk() + + let response = res.tryGet() + check: + response.messages.len() == 2 + response.messages.mapIt(it.timestamp) == @[ts(30, timeOrigin), ts(50, timeOrigin)] + + test "handle temporal history query with a zero-size time window": + ## A zero-size window results in an empty list of history messages + ## Given + let req = ArchiveQuery( + contentTopics: @[ContentTopic("1")], + startTime: some(Timestamp(2)), + endTime: some(Timestamp(2)), + ) + + ## When + let res = waitFor archiveA.findMessages(req) + + ## Then + check res.isOk() + + let response = res.tryGet() + check: + response.messages.len == 0 + + test "handle temporal history query with an invalid time window": + ## A history query with an invalid time range results in an empty list of history messages + ## Given + let req = ArchiveQuery( + contentTopics: @[ContentTopic("1")], + startTime: some(Timestamp(5)), + endTime: some(Timestamp(2)), + ) + + ## When + let res = waitFor archiveA.findMessages(req) + + ## Then + check res.isOk() + + let response = res.tryGet() + check: + response.messages.len == 0 diff --git a/third-party/nwaku/tests/waku_core/test_all.nim b/third-party/nwaku/tests/waku_core/test_all.nim new file mode 100644 index 0000000..f7f4fad --- /dev/null +++ b/third-party/nwaku/tests/waku_core/test_all.nim @@ -0,0 +1,9 @@ 
+{.used.} + +import + ./test_message_digest, + ./test_namespaced_topics, + ./test_peers, + ./test_published_address, + ./test_sharding, + ./test_time diff --git a/third-party/nwaku/tests/waku_core/test_message_digest.nim b/third-party/nwaku/tests/waku_core/test_message_digest.nim new file mode 100644 index 0000000..1d1f712 --- /dev/null +++ b/third-party/nwaku/tests/waku_core/test_message_digest.nim @@ -0,0 +1,151 @@ +{.used.} + +import std/sequtils, stew/byteutils, stew/endians2, testutils/unittests +import waku/waku_core, ../testlib/wakucore + +suite "Waku Message - Deterministic hashing": + test "digest computation - empty meta field": + ## Test vector: + ## + ## pubsub_topic = 2f77616b752f322f72732f302f30 + ## waku_message.payload = 0x010203045445535405060708 + ## waku_message.content_topic = 0x2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f + ## waku_message.meta = + ## waku_message.ts = 0x175789bfa23f8400 + ## + ## message_hash = 0xcccab07fed94181c83937c8ca8340c9108492b7ede354a6d95421ad34141fd37 + + ## Given + let pubsubTopic = DefaultPubsubTopic # /waku/2/rs/0/0 + let message = fakeWakuMessage( + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), + meta = newSeq[byte](), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 + ) + + ## When + let messageHash = computeMessageHash(pubsubTopic, message) + + ## Then + check: + byteutils.toHex(pubsubTopic.toBytes()) == "2f77616b752f322f72732f302f30" + byteutils.toHex(message.contentTopic.toBytes()) == + "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" + byteutils.toHex(message.payload) == "010203045445535405060708" + byteutils.toHex(message.meta) == "" + byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" + messageHash.toHex() == + "cccab07fed94181c83937c8ca8340c9108492b7ede354a6d95421ad34141fd37" + + test "digest computation - meta field (12 bytes)": + ## Test vector: + ## + ## 
pubsub_topic = 0x2f77616b752f322f72732f302f30 + ## waku_message.payload = 0x010203045445535405060708 + ## waku_message.content_topic = 0x2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f + ## waku_message.meta = 0x73757065722d736563726574 + ## waku_message.ts = 0x175789bfa23f8400 + ## + ## message_hash = 0xb9b4852f9d8c489846e8bfc6c5ca6a1a8d460a40d28832a966e029eb39619199 + + ## Given + let pubsubTopic = DefaultPubsubTopic # /waku/2/rs/0/0 + let message = fakeWakuMessage( + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), + meta = "\x73\x75\x70\x65\x72\x2d\x73\x65\x63\x72\x65\x74".toBytes(), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 + ) + + ## When + let messageHash = computeMessageHash(pubsubTopic, message) + + ## Then + check: + byteutils.toHex(pubsubTopic.toBytes()) == "2f77616b752f322f72732f302f30" + byteutils.toHex(message.contentTopic.toBytes()) == + "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" + byteutils.toHex(message.payload) == "010203045445535405060708" + byteutils.toHex(message.meta) == "73757065722d736563726574" + byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" + messageHash.toHex() == + "b9b4852f9d8c489846e8bfc6c5ca6a1a8d460a40d28832a966e029eb39619199" + + test "digest computation - meta field (64 bytes)": + ## Test vector: + ## + ## pubsub_topic = 0x2f77616b752f322f72732f302f30 + ## waku_message.payload = 0x010203045445535405060708 + ## waku_message.content_topic = 0x2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f + ## waku_message.meta = 0x000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f + ## waku_message.ts = 0x175789bfa23f8400 + ## + ## message_hash = 0x653460d04f66c5b11814d235152f4f246e6f03ef80a305a825913636fbafd0ba + + ## Given + let pubsubTopic = DefaultPubsubTopic # /waku/2/rs/0/0 + let message = 
fakeWakuMessage( + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), + meta = toSeq(0.byte .. 63.byte), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 + ) + + ## When + let messageHash = computeMessageHash(pubsubTopic, message) + + ## Then + check: + byteutils.toHex(pubsubTopic.toBytes()) == "2f77616b752f322f72732f302f30" + byteutils.toHex(message.contentTopic.toBytes()) == + "2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f" + byteutils.toHex(message.payload) == "010203045445535405060708" + byteutils.toHex(message.meta) == + "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f" + byteutils.toHex(toBytesBE(uint64(message.timestamp))) == "175789bfa23f8400" + messageHash.toHex() == + "653460d04f66c5b11814d235152f4f246e6f03ef80a305a825913636fbafd0ba" + + test "digest computation - zero length payload": + ## Test vector: + ## + ## pubsub_topic = 0x2f77616b752f322f72732f302f30 + ## waku_message.payload = [] + ## waku_message.content_topic = 0x2f77616b752f322f64656661756c742d636f6e74656e742f70726f746f + ## waku_message.meta = 0x73757065722d736563726574 + ## waku_message.ts = 0x175789bfa23f8400 + ## + ## message_hash = 0x0f6448cc23b2db6c696aa6ab4b693eff4cf3549ff346fe1dbeb281697396a09f + + ## Given + let pubsubTopic = DefaultPubsubTopic # /waku/2/rs/0/0 + let message = fakeWakuMessage( + contentTopic = DefaultContentTopic, # /waku/2/default-content/proto + payload = newSeq[byte](), + meta = "\x73\x75\x70\x65\x72\x2d\x73\x65\x63\x72\x65\x74".toBytes(), + ts = getNanosecondTime(1681964442), # Apr 20 2023 04:20:42 + ) + + ## When + let messageHash = computeMessageHash(pubsubTopic, message) + + ## Then + check: + messageHash.toHex() == + "0f6448cc23b2db6c696aa6ab4b693eff4cf3549ff346fe1dbeb281697396a09f" + + test "waku message - check meta size is enforced": + # create message with meta size > 64 
bytes (invalid) + let message = fakeWakuMessage( + contentTopic = DefaultContentTopic, + payload = "\x01\x02\x03\x04TEST\x05\x06\x07\x08".toBytes(), + meta = toSeq(0.byte .. 66.byte), # 67 bytes + ) + + let encodedInvalidMsg = message.encode + let decoded = WakuMessage.decode(encodedInvalidMsg.buffer) + + check: + decoded.isErr == true + $decoded.error == "(kind: InvalidLengthField, field: \"meta\")" diff --git a/third-party/nwaku/tests/waku_core/test_namespaced_topics.nim b/third-party/nwaku/tests/waku_core/test_namespaced_topics.nim new file mode 100644 index 0000000..8c3cee1 --- /dev/null +++ b/third-party/nwaku/tests/waku_core/test_namespaced_topics.nim @@ -0,0 +1,214 @@ +{.used.} + +import std/options, results, testutils/unittests +import waku/waku_core/topics + +suite "Waku Message - Content topics namespacing": + test "Stringify namespaced content topic": + ## Given + var ns = NsContentTopic() + ns.generation = none(int) + ns.application = "toychat" + ns.version = "2" + ns.name = "huilong" + ns.encoding = "proto" + + ## When + let topic = $ns + + ## Then + check: + topic == "/toychat/2/huilong/proto" + + test "Parse content topic string - Valid string": + ## Given + let topic = "/toychat/2/huilong/proto" + + ## When + let nsRes = NsContentTopic.parse(topic) + + ## Then + assert nsRes.isOk(), $nsRes.error + + let ns = nsRes.get() + check: + ns.generation == none(int) + ns.application == "toychat" + ns.version == "2" + ns.name == "huilong" + ns.encoding == "proto" + + test "Parse content topic string - Valid string with sharding": + ## Given + let topic = "/0/toychat/2/huilong/proto" + + ## When + let nsRes = NsContentTopic.parse(topic) + + ## Then + assert nsRes.isOk(), $nsRes.error + + let ns = nsRes.get() + check: + ns.generation == some(0) + ns.application == "toychat" + ns.version == "2" + ns.name == "huilong" + ns.encoding == "proto" + + test "Parse content topic string - Invalid string: missing leading slash": + ## Given + let topic = 
"toychat/2/huilong/proto" + + ## When + let ns = NsContentTopic.parse(topic) + + ## Then + assert ns.isErr(), $ns.get() + + let err = ns.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat + err.cause == "content-topic '" & topic & "' must start with slash" + + test "Parse content topic string - Invalid string: not namespaced": + ## Given + let topic = "/this-is-not-namespaced" + + ## When + let ns = NsContentTopic.parse(topic) + + ## Then + assert ns.isErr(), $ns.get() + + let err = ns.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat + err.cause == + "Invalid content topic structure. Expected either //// or /////" + + test "Parse content topic string - Invalid string: missing encoding part": + ## Given + let topic = "/toychat/2/huilong" + + ## When + let ns = NsContentTopic.parse(topic) + + ## Then + assert ns.isErr(), $ns.get() + + let err = ns.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat + err.cause == + "Invalid content topic structure. 
Expected either //// or /////" + + test "Parse content topic string - Invalid string: wrong extra parts": + ## Given + let topic = "/toychat/2/huilong/proto/33" + + ## When + let ns = NsContentTopic.parse(topic) + + ## Then + assert ns.isErr(), $ns.get() + + let err = ns.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat + err.cause == "generation should be a numeric value" + + test "Parse content topic string - Invalid string: non numeric generation": + ## Given + let topic = "/first/toychat/2/huilong/proto" + + ## When + let ns = NsContentTopic.parse(topic) + + ## Then + assert ns.isErr(), $ns.get() + + let err = ns.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat + err.cause == "generation should be a numeric value" + +suite "Waku Message - Pub-sub topics namespacing": + test "Stringify static sharding pub-sub topic": + ## Given + var shard = RelayShard(clusterId: 0, shardId: 2) + + ## When + let topic = $shard + + ## Then + check: + topic == "/waku/2/rs/0/2" + + test "Parse invalid pub-sub topic string": + ## Given + let topic = "/waku/2/waku-dev" + + ## When + let shardRes = RelayShard.parse(topic) + + ## Then + check shardRes.isErr() + let err = shardRes.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat + + test "Parse static sharding pub-sub topic string - Valid string": + ## Given + let topic = "/waku/2/rs/16/42" + + ## When + let shardRes = RelayShard.parse(topic) + + ## Then + check shardRes.isOk() + + let shard = shardRes.get() + check: + shard.clusterId == 16 + shard.shardId == 42 + + test "Parse pub-sub topic string - Invalid string: invalid protocol version": + ## Given + let topic = "/waku/1/rs/16/42" + + ## When + let shard = RelayShard.parse(topic) + + ## Then + check shard.isErr() + let err = shard.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat + + test "Parse static sharding pub-sub topic string - Invalid string: empty cluster id value": + ## Given + let topic = "/waku/2/rs//02" + + 
## When + let shard = RelayShard.parse(topic) + + ## Then + check shard.isErr() + let err = shard.tryError() + check: + err.kind == ParsingErrorKind.MissingPart + err.part == "cluster_id" + + test "Parse static sharding pub-sub topic string - Invalid string: cluster id value": + ## Given + let topic = "/waku/2/rs/xx/77" + + ## When + let shard = RelayShard.parse(topic) + + ## Then + check shard.isErr() + let err = shard.tryError() + check: + err.kind == ParsingErrorKind.InvalidFormat diff --git a/third-party/nwaku/tests/waku_core/test_peers.nim b/third-party/nwaku/tests/waku_core/test_peers.nim new file mode 100644 index 0000000..59ae2e2 --- /dev/null +++ b/third-party/nwaku/tests/waku_core/test_peers.nim @@ -0,0 +1,179 @@ +{.used.} + +import + results, + testutils/unittests, + libp2p/multiaddress, + libp2p/peerid, + libp2p/errors, + confutils/toml/std/net +import waku/[waku_core, waku_enr], ../testlib/wakucore + +suite "Waku Core - Peers": + test "Peer info parses correctly": + ## Given + let address = + "/ip4/127.0.0.1/tcp/65002/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## When + let remotePeerInfoRes = parsePeerInfo(address) + require remotePeerInfoRes.isOk() + + let remotePeerInfo = remotePeerInfoRes.value + + ## Then + check: + $(remotePeerInfo.peerId) == "16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + $(remotePeerInfo.addrs[0][0].tryGet()) == "/ip4/127.0.0.1" + $(remotePeerInfo.addrs[0][1].tryGet()) == "/tcp/65002" + + test "DNS multiaddrs parsing - dns peer": + ## Given + let address = + "/dns/localhost/tcp/65012/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## When + let dnsPeerRes = parsePeerInfo(address) + require dnsPeerRes.isOk() + + let dnsPeer = dnsPeerRes.value + + ## Then + check: + $(dnsPeer.peerId) == "16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + $(dnsPeer.addrs[0][0].tryGet()) == "/dns/localhost" + $(dnsPeer.addrs[0][1].tryGet()) == "/tcp/65012" + + test "DNS multiaddrs parsing - dnsaddr 
peer": + ## Given + let address = + "/dnsaddr/localhost/tcp/65022/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## When + let dnsAddrPeerRes = parsePeerInfo(address) + require dnsAddrPeerRes.isOk() + + let dnsAddrPeer = dnsAddrPeerRes.value + + ## Then + check: + $(dnsAddrPeer.peerId) == "16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + $(dnsAddrPeer.addrs[0][0].tryGet()) == "/dnsaddr/localhost" + $(dnsAddrPeer.addrs[0][1].tryGet()) == "/tcp/65022" + + test "DNS multiaddrs parsing - dns4 peer": + ## Given + let address = + "/dns4/localhost/tcp/65032/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## When + let dns4PeerRes = parsePeerInfo(address) + require dns4PeerRes.isOk() + + let dns4Peer = dns4PeerRes.value + + # Then + check: + $(dns4Peer.peerId) == "16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + $(dns4Peer.addrs[0][0].tryGet()) == "/dns4/localhost" + $(dns4Peer.addrs[0][1].tryGet()) == "/tcp/65032" + + test "DNS multiaddrs parsing - dns6 peer": + ## Given + let address = + "/dns6/localhost/tcp/65042/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## When + let dns6PeerRes = parsePeerInfo(address) + require dns6PeerRes.isOk() + + let dns6Peer = dns6PeerRes.value + + ## Then + check: + $(dns6Peer.peerId) == "16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + $(dns6Peer.addrs[0][0].tryGet()) == "/dns6/localhost" + $(dns6Peer.addrs[0][1].tryGet()) == "/tcp/65042" + + test "Multiaddr parsing should fail with invalid address": + ## Given + let address = "/p2p/$UCH GIBBER!SH" + + ## Then + check: + parsePeerInfo(address).isErr() + + test "Multiaddr parsing should fail with leading whitespace": + ## Given + let address = + " /ip4/127.0.0.1/tcp/65062/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## Then + check: + parsePeerInfo(address).isErr() + + test "Multiaddr parsing should fail with trailing whitespace": + ## Given + let address = + 
"/ip4/127.0.0.1/tcp/65072/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc " + + ## Then + check: + parsePeerInfo(address).isErr() + + test "Multiaddress parsing should fail with invalid IP address": + ## Given + let address = + "/ip4/127.0.0.0.1/tcp/65082/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## Then + check: + parsePeerInfo(address).isErr() + + test "Multiaddress parsing should fail with no peer ID": + ## Given + let address = "/ip4/127.0.0.1/tcp/65092" + + # Then + check: + parsePeerInfo(address).isErr() + + test "Multiaddress parsing should fail with unsupported transport": + ## Given + let address = + "/ip4/127.0.0.1/udp/65102/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + + ## Then + check: + parsePeerInfo(address).isErr() + + test "ENRs capabilities are filled when creating RemotePeerInfo": + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + ## When + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + builder.withIpAddressAndPorts( + ipAddr = some(parseIpAddress("127.0.0.1")), + tcpPort = some(Port(0)), + udpPort = some(Port(0)), + ) + builder.withWakuCapabilities(Capabilities.Relay, Capabilities.Store) + + let recordRes = builder.build() + + ## Then + assert recordRes.isOk(), $recordRes.error + let record = recordRes.tryGet() + + let remotePeerInfoRes = record.toRemotePeerInfo() + assert remotePeerInfoRes.isOk(), + "failed creating RemotePeerInfo: " & $remotePeerInfoRes.error() + + let remotePeerInfo = remotePeerInfoRes.get() + + check: + remotePeerInfo.protocols.len == 2 + remotePeerInfo.protocols.contains(WakuRelayCodec) + remotePeerInfo.protocols.contains(WakuStoreCodec) diff --git a/third-party/nwaku/tests/waku_core/test_published_address.nim b/third-party/nwaku/tests/waku_core/test_published_address.nim new file mode 100644 index 0000000..9d6201a --- /dev/null +++ b/third-party/nwaku/tests/waku_core/test_published_address.nim @@ -0,0 +1,19 @@ +{.used.} + +import std/[strutils, 
net], testutils/unittests +import ../testlib/wakucore, ../testlib/wakunode + +suite "Waku Core - Published Address": + test "Test IP 0.0.0.0": + let node = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + check: + ($node.announcedAddresses).contains("127.0.0.1") + + test "Test custom IP": + let node = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("8.8.8.8"), Port(0)) + + check: + ($node.announcedAddresses).contains("8.8.8.8") diff --git a/third-party/nwaku/tests/waku_core/test_time.nim b/third-party/nwaku/tests/waku_core/test_time.nim new file mode 100644 index 0000000..c54afeb --- /dev/null +++ b/third-party/nwaku/tests/waku_core/test_time.nim @@ -0,0 +1,30 @@ +{.used.} + +import testutils/unittests +import waku/waku_core/time + +suite "Waku Core - Time": + test "Test timestamp conversion": + ## Given + let + nanoseconds = 1676562429123456789.int64 + secondsPart = nanoseconds div 1_000_000_000 + nanosecondsPart = nanoseconds mod 1_000_000_000 + secondsFloat = + secondsPart.float64 + (nanosecondsPart.float64 / 1_000_000_000.float64) + lowResTimestamp = Timestamp(secondsPart.int64 * 1_000_000_000.int64) + # 1676562429000000000 + highResTimestamp = Timestamp(secondsFloat * 1_000_000_000.float64) + # 1676562429123456789 + + require highResTimestamp > lowResTimestamp # Sanity check + + ## When + let + timeInSecondsInt64 = secondsPart.int64 + timeInSecondsFloat64 = float64(secondsFloat) + + ## Then + check: + getNanosecondTime(timeInSecondsInt64) == lowResTimestamp + getNanosecondTime(timeInSecondsFloat64) == highResTimestamp diff --git a/third-party/nwaku/tests/waku_core/topics/test_pubsub_topic.nim b/third-party/nwaku/tests/waku_core/topics/test_pubsub_topic.nim new file mode 100644 index 0000000..4807d30 --- /dev/null +++ b/third-party/nwaku/tests/waku_core/topics/test_pubsub_topic.nim @@ -0,0 +1,20 @@ +{.used.} + +import std/[options], testutils/unittests, results + +import waku/waku_core/topics/pubsub_topic, 
../../testlib/[wakucore] + +suite "Static Sharding Functionality": + test "Shard Cluster Identification": + let shard = RelayShard.parseStaticSharding("/waku/2/rs/0/1").get() + check: + shard.clusterId == 0 + shard.shardId == 1 + shard == RelayShard(clusterId: 0, shardId: 1) + + test "Pubsub Topic Naming Compliance": + let shard = RelayShard(clusterId: 0, shardId: 1) + check: + shard.clusterId == 0 + shard.shardId == 1 + shard == "/waku/2/rs/0/1" diff --git a/third-party/nwaku/tests/waku_core/topics/test_sharding.nim b/third-party/nwaku/tests/waku_core/topics/test_sharding.nim new file mode 100644 index 0000000..33c38b4 --- /dev/null +++ b/third-party/nwaku/tests/waku_core/topics/test_sharding.nim @@ -0,0 +1,209 @@ +import std/[options, tables], testutils/unittests + +import waku/waku_core/topics, ../../testlib/[wakucore, tables, testutils] + +const GenerationZeroShardsCount = 8 +const ClusterId = 1 + +suite "Autosharding": + const + pubsubTopic04 = "/waku/2/rs/0/4" + pubsubTopic13 = "/waku/2/rs/1/3" + contentTopicShort = "/toychat/2/huilong/proto" + contentTopicFull = "/0/toychat/2/huilong/proto" + contentTopicShort2 = "/toychat2/2/huilong/proto" + contentTopicFull2 = "/0/toychat2/2/huilong/proto" + contentTopicShort3 = "/toychat/2/huilong/proto2" + contentTopicFull3 = "/0/toychat/2/huilong/proto2" + contentTopicShort4 = "/toychat/4/huilong/proto2" + contentTopicFull4 = "/0/toychat/4/huilong/proto2" + contentTopicFull5 = "/1/toychat/2/huilong/proto" + contentTopicFull6 = "/1/toychat2/2/huilong/proto" + contentTopicInvalid = "/1/toychat/2/huilong/proto" + + suite "getGenZeroShard": + test "Generate Gen0 Shard": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + + # Given two valid topics + let + nsContentTopic1 = NsContentTopic.parse(contentTopicShort).value() + nsContentTopic2 = NsContentTopic.parse(contentTopicFull).value() + nsContentTopic3 = NsContentTopic.parse(contentTopicShort2).value() + nsContentTopic4 = 
NsContentTopic.parse(contentTopicFull2).value() + nsContentTopic5 = NsContentTopic.parse(contentTopicShort3).value() + nsContentTopic6 = NsContentTopic.parse(contentTopicFull3).value() + nsContentTopic7 = NsContentTopic.parse(contentTopicShort3).value() + nsContentTopic8 = NsContentTopic.parse(contentTopicFull3).value() + nsContentTopic9 = NsContentTopic.parse(contentTopicFull4).value() + nsContentTopic10 = NsContentTopic.parse(contentTopicFull5).value() + + # When we generate a gen0 shard from them + let + shard1 = sharding.getGenZeroShard(nsContentTopic1, GenerationZeroShardsCount) + shard2 = sharding.getGenZeroShard(nsContentTopic2, GenerationZeroShardsCount) + shard3 = sharding.getGenZeroShard(nsContentTopic3, GenerationZeroShardsCount) + shard4 = sharding.getGenZeroShard(nsContentTopic4, GenerationZeroShardsCount) + shard5 = sharding.getGenZeroShard(nsContentTopic5, GenerationZeroShardsCount) + shard6 = sharding.getGenZeroShard(nsContentTopic6, GenerationZeroShardsCount) + shard7 = sharding.getGenZeroShard(nsContentTopic7, GenerationZeroShardsCount) + shard8 = sharding.getGenZeroShard(nsContentTopic8, GenerationZeroShardsCount) + shard9 = sharding.getGenZeroShard(nsContentTopic9, GenerationZeroShardsCount) + shard10 = sharding.getGenZeroShard(nsContentTopic10, GenerationZeroShardsCount) + + # Then the generated shards are valid + check: + shard1 == RelayShard(clusterId: ClusterId, shardId: 3) + shard2 == RelayShard(clusterId: ClusterId, shardId: 3) + shard3 == RelayShard(clusterId: ClusterId, shardId: 6) + shard4 == RelayShard(clusterId: ClusterId, shardId: 6) + shard5 == RelayShard(clusterId: ClusterId, shardId: 3) + shard6 == RelayShard(clusterId: ClusterId, shardId: 3) + shard7 == RelayShard(clusterId: ClusterId, shardId: 3) + shard8 == RelayShard(clusterId: ClusterId, shardId: 3) + shard9 == RelayShard(clusterId: ClusterId, shardId: 7) + shard10 == RelayShard(clusterId: ClusterId, shardId: 3) + + suite "getShard from NsContentTopic": + test "Generate Gen0 
Shard with topic.generation==none": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + + # When we get a shard from a topic without generation + let shard = sharding.getShard(contentTopicShort) + + # Then the generated shard is valid + check: + shard.value() == RelayShard(clusterId: ClusterId, shardId: 3) + + test "Generate Gen0 Shard with topic.generation==0": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When we get a shard from a gen0 topic + let shard = sharding.getShard(contentTopicFull) + + # Then the generated shard is valid + check: + shard.value() == RelayShard(clusterId: ClusterId, shardId: 3) + + test "Generate Gen0 Shard with topic.generation==other": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When we get a shard from ain invalid content topic + let shard = sharding.getShard(contentTopicInvalid) + + # Then the generated shard is valid + check: + shard.error() == "Generation > 0 are not supported yet" + + suite "getShard from ContentTopic": + test "Generate Gen0 Shard with topic.generation==none": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When we get a shard from it + let shard = sharding.getShard(contentTopicShort) + + # Then the generated shard is valid + check: + shard.value() == RelayShard(clusterId: ClusterId, shardId: 3) + + test "Generate Gen0 Shard with topic.generation==0": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When we get a shard from it + let shard = sharding.getShard(contentTopicFull) + + # Then the generated shard is valid + check: + shard.value() == RelayShard(clusterId: ClusterId, shardId: 3) + + test "Generate Gen0 Shard with topic.generation==other": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When we get a 
shard from it + let shard = sharding.getShard(contentTopicInvalid) + + # Then the generated shard is valid + check: + shard.error() == "Generation > 0 are not supported yet" + + test "Generate Gen0 Shard invalid topic": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When we get a shard from it + let shard = sharding.getShard("invalid") + + # Then the generated shard is valid + check: + shard.error() == "invalid format: topic must start with slash" + + suite "parseSharding": + test "contentTopics is ContentTopic": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When calling with contentTopic as string + let topicMap = sharding.parseSharding(some(pubsubTopic04), contentTopicShort) + + # Then the topicMap is valid + check: + topicMap.value() == {pubsubTopic04: @[contentTopicShort]} + + test "contentTopics is seq[ContentTopic]": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When calling with contentTopic as string seq + let topicMap = sharding.parseSharding( + some(pubsubTopic04), @[contentTopicShort, "/0/foo/1/bar/proto"] + ) + + # Then the topicMap is valid + check: + topicMap.value() == {pubsubTopic04: @[contentTopicShort, "/0/foo/1/bar/proto"]} + + test "pubsubTopic is none": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When calling with pubsubTopic as none + let topicMap = sharding.parseSharding(PubsubTopic.none(), contentTopicShort) + + # Then the topicMap is valid + check: + topicMap.value() == {pubsubTopic13: @[contentTopicShort]} + + test "content parse error": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When calling with pubsubTopic as none with invalid content + let topicMap = sharding.parseSharding(PubsubTopic.none(), "invalid") + + # Then the topicMap is valid + check: + topicMap.error() == 
+ "Cannot parse content topic: invalid format: topic must start with slash" + + test "pubsubTopic parse error": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When calling with pubsubTopic as none with invalid content + let topicMap = sharding.parseSharding(some("invalid"), contentTopicShort) + + # Then the topicMap is valid + check: + topicMap.error() == + "Cannot parse pubsub topic: invalid format: must start with /waku/2" + + test "pubsubTopic getShard error": + let sharding = + Sharding(clusterId: ClusterId, shardCountGenZero: GenerationZeroShardsCount) + # When calling with pubsubTopic as none with invalid content + let topicMap = sharding.parseSharding(PubsubTopic.none(), contentTopicInvalid) + + # Then the topicMap is valid + check: + topicMap.error() == + "Cannot autoshard content topic: Generation > 0 are not supported yet" + + xtest "catchable error on add to topicMap": + # TODO: Trigger a CatchableError or mock + discard diff --git a/third-party/nwaku/tests/waku_discv5/test_waku_discv5.nim b/third-party/nwaku/tests/waku_discv5/test_waku_discv5.nim new file mode 100644 index 0000000..abdf096 --- /dev/null +++ b/third-party/nwaku/tests/waku_discv5/test_waku_discv5.nim @@ -0,0 +1,542 @@ +{.used.} + +import + std/[sequtils, algorithm, options, net], + results, + chronos, + chronicles, + testutils/unittests, + libp2p/crypto/crypto as libp2p_keys, + eth/keys as eth_keys, + eth/p2p/discoveryv5/enr as ethEnr, + libp2p/crypto/secp, + libp2p/protocols/rendezvous + +import + waku/[ + waku_core/topics, + waku_core/codecs, + waku_enr, + discovery/waku_discv5, + waku_enr/capabilities, + factory/conf_builder/conf_builder, + factory/waku, + node/waku_node, + node/peer_manager, + ], + ../testlib/[wakucore, testasync, assertions, futures, wakunode, testutils], + ../waku_enr/utils, + ./utils as discv5_utils + +suite "Waku Discovery v5": + const validEnr = + 
"enr:-K64QGAvsATunmvMT5c3LFjKS0tG39zlQ1195Z2pWu6RoB5fWP3EXz9QPlRXN" & + "wOtDoRLgm4bATUB53AC8uml-ZtUE_kBgmlkgnY0gmlwhApkZgOKbXVsdGlhZGRyc4" & + "CCcnOTAAAIAAAAAQACAAMABAAFAAYAB4lzZWNwMjU2azGhAwG-CMmXpAPj84f6dCt" & + "MZ6xVYOa6bdmgAiKYG6LKGQlbg3RjcILqYIV3YWt1MgE" + + let + rng = eth_keys.newRng() + pk1 = eth_keys.PrivateKey.random(rng[]) + pk2 = eth_keys.PrivateKey.random(rng[]) + + suite "shardingPredicate": + var + recordCluster21 {.threadvar.}: Record + recordCluster22Indices1 {.threadvar.}: Record + recordCluster22Indices2 {.threadvar.}: Record + + asyncSetup: + recordCluster21 = block: + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let + clusterId: uint16 = 21 + shardIds: seq[uint16] = @[1u16, 2u16, 5u16, 7u16, 9u16, 11u16] + + let shardsTopics = + RelayShards.init(clusterId, shardIds).expect("Valid shardIds") + + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + builder.withWakuCapabilities(Capabilities.Relay) + + let recordRes = builder.build() + require recordRes.isOk() + recordRes.tryGet() + + recordCluster22Indices1 = block: + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let + clusterId: uint16 = 22 + shardIds: seq[uint16] = @[2u16, 4u16, 5u16, 8u16, 10u16, 12u16] + + let shardsTopics = + RelayShards.init(clusterId, shardIds).expect("Valid shardIds") + + var builder = EnrBuilder.init(enrPrivKey, seqNum = enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + builder.withWakuCapabilities(Capabilities.Relay) + + let recordRes = builder.build() + require recordRes.isOk() + recordRes.tryGet() + + recordCluster22Indices2 = block: + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + let + clusterId: uint16 = 22 + shardIds: seq[uint16] = @[1u16, 3u16, 6u16, 7u16, 9u16, 11u16] + + let shardsTopics = + RelayShards.init(clusterId, shardIds).expect("Valid shardIds") + + var builder = EnrBuilder.init(enrPrivKey, seqNum = 
enrSeqNum) + require builder.withWakuRelaySharding(shardsTopics).isOk() + builder.withWakuCapabilities(Capabilities.Relay) + + let recordRes = builder.build() + require recordRes.isOk() + recordRes.tryGet() + + asyncTest "filter peer per contained shard": + # When + let predicateCluster21Op = shardingPredicate(recordCluster21) + require predicateCluster21Op.isSome() + let predicateCluster21 = predicateCluster21Op.get() + + let predicateCluster22Op = shardingPredicate(recordCluster22Indices1) + require predicateCluster22Op.isSome() + let predicateCluster22 = predicateCluster22Op.get() + + # Then + check: + predicateCluster21(recordCluster21) == true + predicateCluster21(recordCluster22Indices1) == false + predicateCluster21(recordCluster22Indices2) == false + predicateCluster22(recordCluster21) == false + predicateCluster22(recordCluster22Indices1) == true + predicateCluster22(recordCluster22Indices2) == false + + asyncTest "filter peer per bootnode": + let + enrRelay = initRecord( + 1, + pk2, + {"waku2": @[1.byte], "rs": @[0.byte, 1.byte, 1.byte, 0.byte, 1.byte]}, + ) + .value() + enrNoCapabilities = + initRecord(1, pk1, {"rs": @[0.byte, 0.byte, 1.byte, 0.byte, 0.byte]}).value() + predicateNoCapabilities = + shardingPredicate(enrNoCapabilities, @[enrNoCapabilities]).get() + predicateNoCapabilitiesWithBoth = + shardingPredicate(enrNoCapabilities, @[enrNoCapabilities, enrRelay]).get() + + check: + predicateNoCapabilities(enrNoCapabilities) == true + predicateNoCapabilities(enrRelay) == false + predicateNoCapabilitiesWithBoth(enrNoCapabilities) == true + predicateNoCapabilitiesWithBoth(enrRelay) == true + + let + predicateRelay = shardingPredicate(enrRelay, @[enrRelay]).get() + predicateRelayWithBoth = + shardingPredicate(enrRelay, @[enrRelay, enrNoCapabilities]).get() + + check: + predicateRelay(enrNoCapabilities) == false + predicateRelay(enrRelay) == true + predicateRelayWithBoth(enrNoCapabilities) == true + predicateRelayWithBoth(enrRelay) == true + + asyncTest 
"does not conform to typed record": + let + record = ethEnr.Record(raw: @[]) + predicateRecord = shardingPredicate(record, @[]) + + check: + predicateRecord.isNone() + + asyncTest "no relay sharding info": + let + enrNoShardingInfo = initRecord(1, pk1, {"waku2": @[1.byte]}).value() + predicateNoShardingInfo = + shardingPredicate(enrNoShardingInfo, @[enrNoShardingInfo]) + + check: + predicateNoShardingInfo.isNone() + + suite "findRandomPeers": + proc buildNode( + tcpPort: uint16, + udpPort: uint16, + bindIp: string = "0.0.0.0", + extIp: string = "127.0.0.1", + indices: seq[uint64] = @[], + recordFlags: Option[CapabilitiesBitfield] = none(CapabilitiesBitfield), + bootstrapRecords: seq[waku_enr.Record] = @[], + ): (WakuDiscoveryV5, Record) {.raises: [ValueError, LPError].} = + let + privKey = generateSecp256k1Key() + record = newTestEnrRecord( + privKey = privKey, + extIp = extIp, + tcpPort = tcpPort, + udpPort = udpPort, + indices = indices, + flags = recordFlags, + ) + node = discv5_utils.newTestDiscv5( + privKey = privKey, + bindIp = bindIp, + tcpPort = tcpPort, + udpPort = udpPort, + record = record, + bootstrapRecords = bootstrapRecords, + ) + + (node, record) + + asyncTest "find random peers without predicate": + # Given 3 nodes + let + (node1, record1) = buildNode(tcpPort = 61500u16, udpPort = 9000u16) + (node2, record2) = buildNode(tcpPort = 61502u16, udpPort = 9002u16) + (node3, record3) = buildNode( + tcpPort = 61504u16, udpPort = 9004u16, bootstrapRecords = @[record1, record2] + ) + + let res1 = await node1.start() + assertResultOk res1 + + let res2 = await node2.start() + assertResultOk res2 + + let res3 = await node3.start() + assertResultOk res3 + + await sleepAsync(FUTURE_TIMEOUT) + + ## When we find random peers + let res = await node3.findRandomPeers() + + var tcpPortList = res.mapIt(it.toTypedRecord().value().tcp.get()) + tcpPortList.sort() + + ## Then + check: + res.len == 2 + tcpPortList == @[61500, 61502] + + ## Cleanup + await 
allFutures(node1.stop(), node2.stop(), node3.stop()) + + asyncTest "find random peers with parameter predicate": + let filterForStore: WakuDiscv5Predicate = proc(record: waku_enr.Record): bool = + let typedRecord = record.toTyped() + if typedRecord.isErr(): + return false + + let capabilities = typedRecord.value.waku2 + if capabilities.isNone(): + return false + + return capabilities.get().supportsCapability(Capabilities.Store) + + # Given 4 nodes + let + (node3, record3) = buildNode( + tcpPort = 61504u16, + udpPort = 9004u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Filter)), + ) + (node4, record4) = buildNode( + tcpPort = 61506u16, + udpPort = 9006u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), + ) + (node2, record2) = buildNode( + tcpPort = 61502u16, + udpPort = 9002u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), + bootstrapRecords = @[record3, record4], + ) + (node1, record1) = buildNode( + tcpPort = 61500u16, + udpPort = 9000u16, + recordFlags = some(CapabilitiesBitfield.init(Capabilities.Relay)), + bootstrapRecords = @[record2], + ) + + # Start nodes' discoveryV5 protocols + let res1 = await node1.start() + assertResultOk res1 + + let res2 = await node2.start() + assertResultOk res2 + + let res3 = await node3.start() + assertResultOk res3 + + let res4 = await node4.start() + assertResultOk res4 + + await sleepAsync(FUTURE_TIMEOUT) + + ## When + let peers = await node1.findRandomPeers(some(filterForStore)) + + ## Then + check: + peers.len >= 1 + peers.allIt(it.supportsCapability(Capabilities.Store)) + + # Cleanup + await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop()) + + xasyncTest "find random peers with instance predicate": + ## This is skipped because is flaky and made CI randomly fail but is useful to run manually + + ## Setup + # Records + let + (node3, record3) = buildNode( + tcpPort = 61504u16, + 
udpPort = 9004u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Filter)), + ) + (node4, record4) = buildNode( + tcpPort = 61506u16, + udpPort = 9006u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), + ) + (node2, record2) = buildNode( + tcpPort = 61502u16, + udpPort = 9002u16, + recordFlags = + some(CapabilitiesBitfield.init(Capabilities.Relay, Capabilities.Store)), + bootstrapRecords = @[record3, record4], + ) + let (node1, record1) = buildNode( + tcpPort = 61500u16, + udpPort = 9000u16, + recordFlags = some(CapabilitiesBitfield.init(Capabilities.Relay)), + indices = @[0u64, 0u64, 1u64, 0u64, 0u64], + bootstrapRecords = @[record2], + ) + + # Start nodes' discoveryV5 protocols + let res1 = await node1.start() + assertResultOk res1 + + let res2 = await node2.start() + assertResultOk res2 + + let res3 = await node3.start() + assertResultOk res3 + + let res4 = await node4.start() + assertResultOk res4 + + ## leave some time for discv5 to act + await sleepAsync(chronos.seconds(10)) + + ## When + let peers = await node1.findRandomPeers() + + ## Then + check: + peers.len >= 1 + peers.allIt(it.supportsCapability(Capabilities.Store)) + + # Cleanup + await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop()) + + suite "addBootstrapNode": + asyncTest "address is valid": + # Given an empty list of enrs + var enrs: seq[Record] = @[] + + # When adding a valid enr + addBootstrapNode(validEnr, enrs) + var r: Record + echo r.fromURI(validEnr) + echo r + + # Then the enr is added to the list + check: + enrs.len == 1 + enrs[0].toBase64() == validEnr[4 ..^ 1] + + asyncTest "address is empty": + # Given an empty list of enrs + var enrs: seq[Record] = @[] + + # When adding an empty enr + addBootstrapNode("", enrs) + + # Then the enr is not added to the list + check: + enrs.len == 0 + + asyncTest "address is valid but starts with #": + # Given an empty list of enrs + var enrs: seq[Record] = 
@[] + + # When adding any enr that starts with # + let enr = "#" & validEnr + addBootstrapNode(enr, enrs) + + # Then the enr is not added to the list + check: + enrs.len == 0 + + asyncTest "address is not valid": + # Given an empty list of enrs + var enrs: seq[Record] = @[] + + # When adding an invalid enr + let enr = "enr:invalid" + addBootstrapNode(enr, enrs) + + # Then the enr is not added to the list + check: + enrs.len == 0 + + suite "waku discv5 initialization": + asyncTest "Start waku and check discv5 discovered peers": + let myRng = libp2p_keys.newRng() + var confBuilder = defaultTestWakuConfBuilder() + + confBuilder.withNodeKey(libp2p_keys.PrivateKey.random(Secp256k1, myRng[])[]) + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9000.Port) + + let conf = confBuilder.build().valueOr: + raiseAssert error + + let waku0 = (await Waku.new(conf)).valueOr: + raiseAssert error + (waitFor startWaku(addr waku0)).isOkOr: + raiseAssert error + + confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + confBuilder.discv5Conf.withBootstrapNodes(@[waku0.node.enr.toURI()]) + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9001.Port) + confBuilder.withP2pTcpPort(60001.Port) + + let conf1 = confBuilder.build().valueOr: + raiseAssert error + + let waku1 = (await Waku.new(conf1)).valueOr: + raiseAssert error + (waitFor startWaku(addr waku1)).isOkOr: + raiseAssert error + + await waku1.node.mountPeerExchange() + await waku1.node.mountRendezvous(conf.clusterId) + + confBuilder.discv5Conf.withBootstrapNodes(@[waku1.node.enr.toURI()]) + confBuilder.withP2pTcpPort(60003.Port) + confBuilder.discv5Conf.withUdpPort(9003.Port) + confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[]) + + let conf2 = confBuilder.build().valueOr: + raiseAssert error + + let waku2 = (await Waku.new(conf2)).valueOr: + raiseAssert error + (waitFor startWaku(addr waku2)).isOkOr: + raiseAssert error + + # leave some 
time for discv5 to act + await sleepAsync(chronos.seconds(10)) + + var r = waku0.node.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" + + r = waku1.node.peerManager.selectPeer(WakuRelayCodec) + assert r.isSome(), "could not retrieve peer mounting WakuRelayCodec" + + r = waku1.node.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isNone(), "should not retrieve peer mounting WakuPeerExchangeCodec" + + r = waku2.node.peerManager.selectPeer(WakuPeerExchangeCodec) + assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec" + + r = waku2.node.peerManager.selectPeer(RendezVousCodec) + assert r.isSome(), "could not retrieve peer mounting RendezVousCodec" + + asyncTest "Discv5 bootstrap nodes should be added to the peer store": + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9003.Port) + confBuilder.discv5Conf.withBootstrapNodes(@[validEnr]) + let conf = confBuilder.build().valueOr: + raiseAssert error + + let waku = (await Waku.new(conf)).valueOr: + raiseAssert error + + discard setupDiscoveryV5( + waku.node.enr, + waku.node.peerManager, + waku.node.topicSubscriptionQueue, + waku.conf.discv5Conf.get(), + waku.dynamicBootstrapNodes, + waku.rng, + waku.conf.nodeKey, + waku.conf.endpointConf.p2pListenAddress, + waku.conf.portsShift, + ) + + check: + waku.node.peerManager.switch.peerStore.peers().anyIt( + it.enr.isSome() and it.enr.get().toUri() == validEnr + ) + + asyncTest "Invalid discv5 bootstrap node ENRs are ignored": + var confBuilder = defaultTestWakuConfBuilder() + confBuilder.discv5Conf.withEnabled(true) + confBuilder.discv5Conf.withUdpPort(9004.Port) + + let invalidEnr = "invalid-enr" + + confBuilder.discv5Conf.withBootstrapNodes(@[invalidEnr]) + let conf = confBuilder.build().valueOr: + raiseAssert error + + let waku = (await Waku.new(conf)).valueOr: + raiseAssert error + + discard 
setupDiscoveryV5( + waku.node.enr, + waku.node.peerManager, + waku.node.topicSubscriptionQueue, + conf.discv5Conf.get(), + waku.dynamicBootstrapNodes, + waku.rng, + waku.conf.nodeKey, + waku.conf.endpointConf.p2pListenAddress, + waku.conf.portsShift, + ) + + check: + not waku.node.peerManager.switch.peerStore.peers().anyIt( + it.enr.isSome() and it.enr.get().toUri() == invalidEnr + ) diff --git a/third-party/nwaku/tests/waku_discv5/utils.nim b/third-party/nwaku/tests/waku_discv5/utils.nim new file mode 100644 index 0000000..5a69108 --- /dev/null +++ b/third-party/nwaku/tests/waku_discv5/utils.nim @@ -0,0 +1,33 @@ +import std/options, chronos, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys + +import + waku/ + [waku_core/topics, waku_enr, discovery/waku_discv5, node/peer_manager/peer_manager], + ../testlib/[common, wakucore] + +proc newTestDiscv5*( + privKey: libp2p_keys.PrivateKey, + bindIp: string, + tcpPort: uint16, + udpPort: uint16, + record: waku_enr.Record, + bootstrapRecords = newSeq[waku_enr.Record](), + queue = newAsyncEventQueue[SubscriptionEvent](30), + peerManager: Option[PeerManager] = none(PeerManager), +): WakuDiscoveryV5 = + let config = WakuDiscoveryV5Config( + privateKey: eth_keys.PrivateKey(privKey.skkey), + address: parseIpAddress(bindIp), + port: Port(udpPort), + bootstrapRecords: bootstrapRecords, + ) + + let discv5 = WakuDiscoveryV5.new( + rng = rng(), + conf = config, + record = some(record), + queue = queue, + peerManager = peerManager, + ) + + return discv5 diff --git a/third-party/nwaku/tests/waku_enr/test_all.nim b/third-party/nwaku/tests/waku_enr/test_all.nim new file mode 100644 index 0000000..13ae1c4 --- /dev/null +++ b/third-party/nwaku/tests/waku_enr/test_all.nim @@ -0,0 +1 @@ +import ./test_sharding diff --git a/third-party/nwaku/tests/waku_enr/test_sharding.nim b/third-party/nwaku/tests/waku_enr/test_sharding.nim new file mode 100644 index 0000000..0984b7d --- /dev/null +++ 
b/third-party/nwaku/tests/waku_enr/test_sharding.nim @@ -0,0 +1,167 @@ +{.used.} + +import + stew/results, + chronos, + testutils/unittests, + libp2p/crypto/crypto as libp2p_keys, + eth/keys as eth_keys + +import + waku/[waku_enr, discovery/waku_discv5, waku_core, common/enr], + ../testlib/wakucore, + ../waku_discv5/utils, + ./utils + +suite "Sharding": + suite "topicsToRelayShards": + asyncTest "get shards from topics": + ## Given + let mixedTopics = @["/waku/2/thisisatest", "/waku/2/rs/0/2", "/waku/2/rs/0/8"] + let shardedTopics = @["/waku/2/rs/0/2", "/waku/2/rs/0/4", "/waku/2/rs/0/8"] + let namedTopics = + @["/waku/2/thisisatest", "/waku/2/atestthisis", "/waku/2/isthisatest"] + let gibberish = + @["aedyttydcb/uioasduyio", "jhdfsjhlsdfjhk/sadjhk", "khfsd/hjfdsgjh/dfs"] + let empty: seq[string] = @[] + + let shardsTopics = + RelayShards.init(0, @[uint16(2), uint16(4), uint16(8)]).expect("Valid shardIds") + + ## When + + let mixedRes = topicsToRelayShards(mixedTopics) + let shardedRes = topicsToRelayShards(shardedTopics) + let namedRes = topicsToRelayShards(namedTopics) + let gibberishRes = topicsToRelayShards(gibberish) + let emptyRes = topicsToRelayShards(empty) + + ## Then + assert mixedRes.isErr(), $mixedRes.value + assert shardedRes.isOk(), shardedRes.error + assert shardedRes.value.isSome() + assert shardedRes.value.get() == shardsTopics, $shardedRes.value.get() + assert namedRes.isOk(), namedRes.error + assert namedRes.value.isNone(), $namedRes.value + assert gibberishRes.isErr(), $gibberishRes.value + assert emptyRes.isOk(), emptyRes.error + assert emptyRes.value.isNone(), $emptyRes.value + + suite "containsShard": + asyncTest "update ENR from subscriptions": + ## Given + let + shard1 = "/waku/2/rs/0/1" + shard2 = "/waku/2/rs/0/2" + shard3 = "/waku/2/rs/0/3" + privKey = generateSecp256k1Key() + bindIp = "0.0.0.0" + extIp = "127.0.0.1" + tcpPort = 61500u16 + udpPort = 9000u16 + + let record = newTestEnrRecord( + privKey = privKey, extIp = extIp, tcpPort = 
tcpPort, udpPort = udpPort + ) + + let queue = newAsyncEventQueue[SubscriptionEvent](30) + + let node = newTestDiscv5( + privKey = privKey, + bindIp = bindIp, + tcpPort = tcpPort, + udpPort = udpPort, + record = record, + queue = queue, + ) + + let res = await node.start() + assert res.isOk(), res.error + + ## Then + queue.emit((kind: PubsubSub, topic: shard1)) + queue.emit((kind: PubsubSub, topic: shard2)) + queue.emit((kind: PubsubSub, topic: shard3)) + + await sleepAsync(1.seconds) + + check: + node.protocol.localNode.record.containsShard(shard1) == true + node.protocol.localNode.record.containsShard(shard2) == true + node.protocol.localNode.record.containsShard(shard3) == true + + queue.emit((kind: PubsubSub, topic: shard1)) + queue.emit((kind: PubsubSub, topic: shard2)) + queue.emit((kind: PubsubSub, topic: shard3)) + + await sleepAsync(1.seconds) + + check: + node.protocol.localNode.record.containsShard(shard1) == true + node.protocol.localNode.record.containsShard(shard2) == true + node.protocol.localNode.record.containsShard(shard3) == true + + queue.emit((kind: PubsubUnsub, topic: shard1)) + queue.emit((kind: PubsubUnsub, topic: shard2)) + + await sleepAsync(1.seconds) + + check: + node.protocol.localNode.record.containsShard(shard1) == false + node.protocol.localNode.record.containsShard(shard2) == false + node.protocol.localNode.record.containsShard(shard3) == true + + ## Cleanup + await node.stop() + +suite "Discovery Mechanisms for Shards": + test "Index List Representation": + # Given a valid index list and its representation + let + indicesList: seq[uint8] = @[0, 73, 2, 0, 1, 0, 10] + clusterId: uint16 = 73 # bitVector's clusterId + shardIds: seq[uint16] = @[1u16, 10u16] # bitVector's shardIds + + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + # When building an ENR with the index list + var builder = EnrBuilder.init(enrPrivKey, enrSeqNum) + builder.addFieldPair(ShardingIndicesListEnrField, indicesList) + let + record = 
builder.build().tryGet() + relayShards = record.toTyped().tryGet().relayShardingIndicesList().get() + + # Then the ENR should be correctly parsed + check: + relayShards == RelayShards.init(clusterId, shardIds).expect("Valid Shards") + + test "Bit Vector Representation": + # Given a valid bit vector and its representation + let + bitVector: seq[byte] = + @[ + 0, 73, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + ] + clusterId: uint16 = 73 # bitVector's clusterId + shardIds: seq[uint16] = @[1u16, 10u16] # bitVector's shardIds + + let + enrSeqNum = 1u64 + enrPrivKey = generatesecp256k1key() + + # When building an ENR with the bit vector + var builder = EnrBuilder.init(enrPrivKey, enrSeqNum) + builder.addFieldPair(ShardingBitVectorEnrField, bitVector) + let + record = builder.build().tryGet() + relayShards = record.toTyped().tryGet().relayShardingBitVector().get() + + # Then the ENR should be correctly parsed + check: + relayShards == RelayShards.init(clusterId, shardIds).expect("Valid Shards") diff --git a/third-party/nwaku/tests/waku_enr/utils.nim b/third-party/nwaku/tests/waku_enr/utils.nim new file mode 100644 index 0000000..7302c21 --- /dev/null +++ b/third-party/nwaku/tests/waku_enr/utils.nim @@ -0,0 +1,35 @@ +import + std/options, + sequtils, + results, + chronos, + libp2p/crypto/crypto as libp2p_keys, + eth/keys as eth_keys + +import waku/[waku_enr, discovery/waku_discv5, waku_enr/sharding], ../testlib/wakucore + +proc newTestEnrRecord*( + privKey: libp2p_keys.PrivateKey, + extIp: string, + tcpPort: uint16, + udpPort: uint16, + indices: seq[uint64] = @[], + flags = none(CapabilitiesBitfield), +): waku_enr.Record = + var 
builder = EnrBuilder.init(privKey) + builder.withIpAddressAndPorts( + ipAddr = some(parseIpAddress(extIp)), + tcpPort = some(Port(tcpPort)), + udpPort = some(Port(udpPort)), + ) + + if indices.len > 0: + let + byteSeq: seq[byte] = indices.mapIt(cast[byte](it)) + relayShards = fromIndicesList(byteSeq).get() + discard builder.withWakuRelayShardingIndicesList(relayShards) + + if flags.isSome(): + builder.withWakuCapabilities(flags.get()) + + builder.build().tryGet() diff --git a/third-party/nwaku/tests/waku_filter_v2/test_all.nim b/third-party/nwaku/tests/waku_filter_v2/test_all.nim new file mode 100644 index 0000000..8777951 --- /dev/null +++ b/third-party/nwaku/tests/waku_filter_v2/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_waku_client, ./test_waku_filter_dos_protection diff --git a/third-party/nwaku/tests/waku_filter_v2/test_waku_client.nim b/third-party/nwaku/tests/waku_filter_v2/test_waku_client.nim new file mode 100644 index 0000000..2c3e2f4 --- /dev/null +++ b/third-party/nwaku/tests/waku_filter_v2/test_waku_client.nim @@ -0,0 +1,2556 @@ +{.used.} + +import std/[options, sequtils, json], testutils/unittests, results, chronos + +import + waku/node/[peer_manager, waku_node], + waku/waku_core, + waku/waku_filter_v2/[common, client, subscriptions, protocol, rpc_codec], + ../testlib/[wakucore, testasync, testutils, futures, sequtils, wakunode], + ./waku_filter_utils, + ../resources/payloads + +suite "Waku Filter - End to End": + suite "MessagePushHandler - Void": + var serverSwitch {.threadvar.}: Switch + var clientSwitch {.threadvar.}: Switch + var wakuFilter {.threadvar.}: WakuFilter + var wakuFilterClient {.threadvar.}: WakuFilterClient + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + var clientPeerId {.threadvar.}: PeerId + var messagePushHandler {.threadvar.}: FilterPushHandler + var msgSeq 
{.threadvar.}: seq[(PubsubTopic, WakuMessage)] + var pushHandlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + + asyncSetup: + msgSeq = @[] + pushHandlerFuture = newPushHandlerFuture() + messagePushHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + msgSeq.add((pubsubTopic, message)) + pushHandlerFuture.complete((pubsubTopic, message)) + + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + serverSwitch = newStandardSwitch() + clientSwitch = newStandardSwitch() + wakuFilter = await newTestWakuFilter(serverSwitch) + wakuFilterClient = await newTestWakuFilterClient(clientSwitch) + + await allFutures(serverSwitch.start(), clientSwitch.start()) + wakuFilterClient.registerPushHandler(messagePushHandler) + serverRemotePeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerId = clientSwitch.peerInfo.toRemotePeerInfo().peerId + + asyncTeardown: + await allFutures( + wakuFilter.stop(), + wakuFilterClient.stop(), + serverSwitch.stop(), + clientSwitch.stop(), + ) + + suite "Subscriber Ping": + asyncTest "Active Subscription Identification": + # Given + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When + let subscribedPingResponse = await wakuFilterClient.ping(serverRemotePeerInfo) + # Then + assert subscribedPingResponse.isOk(), $subscribedPingResponse.error + check: + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + asyncTest "No Active Subscription Identification": + # When + let unsubscribedPingResponse = await wakuFilterClient.ping(serverRemotePeerInfo) + + # Then + check: + unsubscribedPingResponse.isErr() # Not subscribed + unsubscribedPingResponse.error().kind == FilterSubscribeErrorKind.NOT_FOUND + asyncTest "After Unsubscription": 
+ # Given + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + assert subscribeResponse.isOk(), $subscribeResponse.error + check wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check not wakuFilter.subscriptions.isSubscribed(clientPeerId) + + let unsubscribedPingResponse = await wakuFilterClient.ping(serverRemotePeerInfo) + # Then + check: + unsubscribedPingResponse.isErr() # Not subscribed + unsubscribedPingResponse.error().kind == FilterSubscribeErrorKind.NOT_FOUND + + suite "Subscribe": + asyncTest "Server remote peer info doesn't match an online server": + # Given an offline service node + let offlineServerSwitch = newStandardSwitch() + let offlineServerRemotePeerInfo = + offlineServerSwitch.peerInfo.toRemotePeerInfo() + + # When subscribing to the offline service node + let subscribeResponse = await wakuFilterClient.subscribe( + offlineServerRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is not successful + check: + subscribeResponse.isErr() # Not subscribed + subscribeResponse.error().kind == FilterSubscribeErrorKind.PEER_DIAL_FAILURE + + asyncTest "Subscribing to an empty content topic": + # When subscribing to an empty content topic + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, newSeq[ContentTopic]() + ) + + # Then the subscription is not successful + check: + subscribeResponse.isErr() # Not subscribed + subscribeResponse.error().kind == FilterSubscribeErrorKind.BAD_REQUEST + + asyncTest "PubSub Topic with Single Content Topic": + # Given + let nonExistentContentTopic = "non-existent-content-topic" + + # When subscribing to a content topic + let subscribeResponse = await wakuFilterClient.subscribe( + 
serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is successful + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When sending a message to the subscribed content topic + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic, pushedMsg) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic == pubsubTopic + pushedMsg == msg1 + + # When sending a message to a non-subscribed content topic (before unsubscription) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = nonExistentContentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg2) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # Given a valid unsubscription to an existing subscription + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + # When sending a message to the previously unsubscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg3 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg3) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # When sending a message to a non-subscribed content topic (after unsubscription) + pushHandlerFuture = newPushHandlerFuture() # Clear 
previous future + let msg4 = fakeWakuMessage(contentTopic = nonExistentContentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg4) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "PubSub Topic with Multiple Content Topics": + # Given + let nonExistentContentTopic = "non-existent-content-topic" + let otherContentTopic = "other-content-topic" + let contentTopicsSeq = @[contentTopic, otherContentTopic] + + # Given a valid subscription to multiple content topics + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicsSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicsSeq + ) + + # When sending a message to the one of the subscribed content topics + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # When sending a message to the other subscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + # When sending a message to a non-subscribed content topic (before unsubscription) + pushHandlerFuture = newPushHandlerFuture() 
# Clear previous future + let msg3 = fakeWakuMessage(contentTopic = nonExistentContentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg3) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # Given a valid unsubscription to an existing subscription + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicsSeq + ) + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check wakuFilter.subscriptions.subscribedPeerCount() == 0 + + # When sending a message to the previously unsubscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg4 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg4) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # When sending a message to the other previously unsubscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg5 = fakeWakuMessage(contentTopic = otherContentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg5) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # When sending a message to a non-subscribed content topic (after unsubscription) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg6 = fakeWakuMessage(contentTopic = nonExistentContentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg6) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "Different PubSub Topics with Different Content Topics, Unsubscribe One By One": + # Given + let otherPubsubTopic = "other-pubsub-topic" + let otherContentTopic = "other-content-topic" + let otherContentTopicSeq = @[otherContentTopic] + + # When subscribing to a 
pubsub topic + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is successful + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When subscribing to a different pubsub topic + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, otherPubsubTopic, otherContentTopicSeq + ) + + # Then the subscription is successful + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + contentTopicSeq & otherContentTopicSeq, + ) + + # When sending a message to one of the subscribed content topics + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # When sending a message to the other subscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic) + await wakuFilter.handleMessage(otherPubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == otherPubsubTopic + pushedMsg2 == msg2 + + # When sending a message to a non-subscribed content topic (before unsubscription) + 
pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg3 = fakeWakuMessage(contentTopic = "non-existent-content-topic") + await wakuFilter.handleMessage(pubsubTopic, msg3) + + # Then + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), otherContentTopicSeq + ) + + # When sending a message to the previously subscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg4 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg4) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # When sending a message to the still subscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg5 = fakeWakuMessage(contentTopic = otherContentTopic) + await wakuFilter.handleMessage(otherPubsubTopic, msg5) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic3, pushedMsg3) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic3 == otherPubsubTopic + pushedMsg3 == msg5 + + # When unsubscribing from the other subscription + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, otherPubsubTopic, otherContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + 
wakuFilter.subscriptions.subscribedPeerCount() == 0 + + # When sending a message to the previously unsubscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg6 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg6) + + # Then the message is not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "Different PubSub Topics with Different Content Topics, Unsubscribe All": + # Given + let otherPubsubTopic = "other-pubsub-topic" + let otherContentTopic = "other-content-topic" + let otherContentTopicSeq = @[otherContentTopic] + + # When subscribing to a content topic + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When subscribing to a different content topic + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, otherPubsubTopic, otherContentTopicSeq + ) + + # Then + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + contentTopicSeq & otherContentTopicSeq, + ) + + # When sending a message to one of the subscribed content topics + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == 
pubsubTopic + pushedMsg1 == msg1 + + # When sending a message to the other subscribed content topic + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic) + await wakuFilter.handleMessage(otherPubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == otherPubsubTopic + pushedMsg2 == msg2 + + # When sending a message to a non-subscribed content topic (before unsubscription) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg3 = fakeWakuMessage(contentTopic = "non-existent-content-topic") + await wakuFilter.handleMessage(pubsubTopic, msg3) + + # Then + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse = + await wakuFilterClient.unsubscribeAll(serverRemotePeerInfo) + + # Then the unsubscription is successful + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + # When sending a message the previously subscribed content topics + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg4 = fakeWakuMessage(contentTopic = contentTopic) + let msg5 = fakeWakuMessage(contentTopic = otherContentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg4) + await wakuFilter.handleMessage(otherPubsubTopic, msg5) + + # Then the messages are not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "Different PubSub Topics with Same Content Topics, Unsubscribe Selectively": + # Given + let otherPubsubTopic = "other-pubsub-topic" + let otherContentTopic1 = "other-content-topic1" + let otherContentTopic2 = "other-content-topic2" + let contentTopicsSeq1 = @[contentTopic, 
otherContentTopic1] + let contentTopicsSeq2 = @[contentTopic, otherContentTopic2] + + # When subscribing to a pubsub topic + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicsSeq1 + ) + + # Then the subscription is successful + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicsSeq1 + ) + + # When subscribing to a different pubsub topic + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, otherPubsubTopic, contentTopicsSeq2 + ) + + # Then the subscription is successful + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + contentTopicsSeq1 & contentTopicsSeq2, + ) + + # When sending a message to (pubsubTopic, contentTopic) + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # When sending a message to (pubsubTopic, otherContentTopic1) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = otherContentTopic1) + await wakuFilter.handleMessage(pubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + # When 
sending a message to (otherPubsubTopic, contentTopic) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg3 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(otherPubsubTopic, msg3) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic3, pushedMsg3) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic3 == otherPubsubTopic + pushedMsg3 == msg3 + + # When sending a message to (otherPubsubTopic, otherContentTopic2) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg4 = fakeWakuMessage(contentTopic = otherContentTopic2) + await wakuFilter.handleMessage(otherPubsubTopic, msg4) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic4, pushedMsg4) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic4 == otherPubsubTopic + pushedMsg4 == msg4 + + # When selectively unsubscribing from (pubsubTopic, otherContentTopic1) and (otherPubsubTopic, contentTopic) + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[otherContentTopic1] + ) + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, otherPubsubTopic, @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + @[contentTopic, otherContentTopic2], + ) + + # When sending a message to (pubsubTopic, contentTopic) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg5 = fakeWakuMessage(contentTopic = contentTopic) + await 
wakuFilter.handleMessage(pubsubTopic, msg5) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic5, pushedMsg5) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic5 == pubsubTopic + pushedMsg5 == msg5 + + # When sending a message to (otherPubsubTopic, otherContentTopic2) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg6 = fakeWakuMessage(contentTopic = otherContentTopic2) + await wakuFilter.handleMessage(otherPubsubTopic, msg6) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic6, pushedMsg6) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic6 == otherPubsubTopic + pushedMsg6 == msg6 + + # When sending a message to (pubsubTopic, otherContentTopic1) and (otherPubsubTopic, contentTopic) + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg7 = fakeWakuMessage(contentTopic = otherContentTopic1) + await wakuFilter.handleMessage(pubsubTopic, msg7) + let msg8 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(otherPubsubTopic, msg8) + + # Then the messages are not pushed to the client + check: + not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "Max Topic Size": + # Given a topic list of 100 topics + var topicSeq: seq[string] = + toSeq(0 ..< MaxContentTopicsPerRequest).mapIt("topic" & $it) + + # When subscribing to that topic list + let subscribeResponse1 = + await wakuFilterClient.subscribe(serverRemotePeerInfo, pubsubTopic, topicSeq) + + # Then the subscription is successful + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == + MaxContentTopicsPerRequest + + # When refreshing the subscription with a topic 
list of 100 topics + let subscribeResponse2 = + await wakuFilterClient.subscribe(serverRemotePeerInfo, pubsubTopic, topicSeq) + + # Then the subscription is successful + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == + MaxContentTopicsPerRequest + + # When creating a subscription with a topic list of 31 topics + let subscribeResponse3 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, topicSeq & @["topic30"] + ) + + # Then the subscription is not successful + check: + subscribeResponse3.isErr() # Not subscribed + subscribeResponse3.error().kind == FilterSubscribeErrorKind.BAD_REQUEST + + # And the previous subscription is still active + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == + MaxContentTopicsPerRequest + + asyncTest "Max Criteria Per Subscription": + # Given a topic list of size MaxFilterCriteriaPerPeer + var topicSeq: seq[string] = + toSeq(0 ..< MaxFilterCriteriaPerPeer).mapIt("topic" & $it) + + # When client service node subscribes to the topic list of size MaxFilterCriteriaPerPeer + var subscribedTopics: seq[string] = @[] + while topicSeq.len > 0: + let takeNumber = min(topicSeq.len, MaxContentTopicsPerRequest) + let topicSeqBatch = topicSeq[0 ..< takeNumber] + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, topicSeqBatch + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + subscribedTopics.add(topicSeqBatch) + topicSeq.delete(0 ..< takeNumber) + + # Then the subscription is successful + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len 
== + MaxFilterCriteriaPerPeer + + # When subscribing to a number of topics that exceeds MaxFilterCriteriaPerPeer + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, @["topic1000"] + ) + + # Then the subscription is not successful + check: + subscribeResponse.isErr() # Not subscribed + subscribeResponse.error().kind == FilterSubscribeErrorKind.SERVICE_UNAVAILABLE + + # And the previous subscription is still active + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 1000 + + # SKIPPED due to it takes a long while because it instances a lot of clients. + xasyncTest "Max Total Subscriptions": + ## TODO: Revise this, due to the nature of peer management, IP colocation + ## and number of allowed connection may not match Filter allowed service peers number. + ## - Rework: as of now timeout/max peers/max subscriptions are configurable, limit the WakuFilter service to lower numbers + ## - Adapt this test to the new limits + + # Given a WakuFilterClient list of size MaxFilterPeers + var clients: seq[(WakuFilterClient, Switch)] = @[] + for i in 0 ..< MaxFilterPeers: + let standardSwitch = newStandardSwitch() + let wakuFilterClient = await newTestWakuFilterClient(standardSwitch) + clients.add((wakuFilterClient, standardSwitch)) + + # When initialising all of them and subscribing them to the same service + for (wakuFilterClient, standardSwitch) in clients: + await standardSwitch.start() + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + + # Then the service node should have MaxFilterPeers subscriptions + check: + wakuFilter.subscriptions.subscribedPeerCount() == MaxFilterPeers + + # When initialising a new WakuFilterClient and subscribing it to the same service + let standardSwitch = 
newStandardSwitch() + let wakuFilterClient = await newTestWakuFilterClient(standardSwitch) + await standardSwitch.start() + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is not successful + check: + subscribeResponse.isErr() # Not subscribed + subscribeResponse.error().kind == FilterSubscribeErrorKind.SERVICE_UNAVAILABLE + + asyncTest "Multiple Subscriptions": + # Given a second service node + let serverSwitch2 = newStandardSwitch() + let wakuFilter2 = await newTestWakuFilter(serverSwitch2) + await allFutures(serverSwitch2.start()) + let serverRemotePeerInfo2 = serverSwitch2.peerInfo.toRemotePeerInfo() + + # And a subscription to the first service node + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When subscribing to the second service node + let subscriptionResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo2, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is successful + assert subscriptionResponse2.isOk(), $subscriptionResponse2.error + check: + wakuFilter2.subscriptions.subscribedPeerCount() == 1 + wakuFilter2.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter2.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # And the first service node is still subscribed + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When sending a message to the subscribed content topic on the first service node 
+ let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # When sending a message to the subscribed content topic on the second service node + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter2.handleMessage(pubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + asyncTest "Refreshing Subscription": + # Given a valid subscription + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When refreshing the subscription + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is successful + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When sending a message to the refreshed subscription + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is 
pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # And the message is not duplicated + check: + msgSeq.len == 1 + msgSeq[0][0] == pubsubTopic + msgSeq[0][1] == msg1 + + asyncTest "Overlapping Topic Subscription": + # Given a set of overlapping subscriptions + let + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, @["other-content-topic"] + ) + subscribeResponse3 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", contentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + assert subscribeResponse3.isOk(), $subscribeResponse3.error + check: + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When sending a message to the overlapping subscription 1 + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # And the message is not duplicated + check: + msgSeq.len == 1 + msgSeq[0][0] == pubsubTopic + msgSeq[0][1] == msg1 + + # When sending a message to the overlapping subscription 2 + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + check (not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT)) + # Check there're no duplicate messages + pushHandlerFuture = newPushHandlerFuture() # Reset future due to timeout + + let msg2 = fakeWakuMessage(contentTopic = "other-content-topic") + await 
wakuFilter.handleMessage(pubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + # And the message is not duplicated + check: + msgSeq.len == 2 + msgSeq[1][0] == pubsubTopic + msgSeq[1][1] == msg2 + + # When sending a message to the overlapping subscription 3 + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + check (not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT)) + # Check there're no duplicate messages + pushHandlerFuture = newPushHandlerFuture() # Reset future due to timeout + + let msg3 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage("other-pubsub-topic", msg3) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic3, pushedMsg3) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic3 == "other-pubsub-topic" + pushedMsg3 == msg3 + + # And the message is not duplicated + check: + msgSeq.len == 3 + msgSeq[2][0] == "other-pubsub-topic" + msgSeq[2][1] == msg3 + + suite "Unsubscribe": + ### + # One PubSub Topic + ### + + asyncTest "PubSub Topic with Single Content Topic": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When unsubscribing from the subscription + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the unsubscription is successful + assert 
unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a subscription with Single Content Topic": + # Given a valid subscription + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When refreshing the subscription + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is successful + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), contentTopicSeq + ) + + # When unsubscribing from the subscription + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "PubSub Topic with Multiple Content Topics, One By One": + # Given a valid subscription + let multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), 
multipleContentTopicSeq + ) + + # When unsubscribing from one of the content topics + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + @["other-content-topic"], + ) + + # When unsubscribing from the other content topic + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "PubSub Topic with Multiple Content Topics, All At Once": + # Given a valid subscription + let multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # When unsubscribing from all content topics + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a complete subscription with Multiple Content Topics, One By One": + # Given a valid subscription + let multipleContentTopicSeq = 
@[contentTopic, "other-content-topic"] + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # And a successful complete refresh of the subscription + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # When unsubscribing from one of the content topics + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + @["other-content-topic"], + ) + + # When unsubscribing from the other content topic + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a complete subscription with Multiple Content Topics, All At Once": + # Given a valid subscription + let multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + let 
subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # And a successful complete refresh of the subscription + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # When unsubscribing from all content topics + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a partial subscription with Multiple Content Topics, One By One": + # Given a valid subscription + let multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # Unsubscribing from one content topic + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + 
assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + @["other-content-topic"], + ) + + # And a successful refresh of the partial subscription + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # When unsubscribing from one of the content topics + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + @["other-content-topic"], + ) + + # When unsubscribing from the other content topic + let unsubscribeResponse3 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse3.isOk(), $unsubscribeResponse3.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a partial subscription with Multiple Content Topics, All At Once": + # Given a valid subscription + let multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse1.isOk(), 
$subscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # Unsubscribing from one content topic + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), + @["other-content-topic"], + ) + + # And a successful refresh of the partial subscription + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + unorderedCompare( + wakuFilter.getSubscribedContentTopics(clientPeerId), multipleContentTopicSeq + ) + + # When unsubscribing from all content topics + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + ### + # Multiple PubSub Topics + ### + + asyncTest "Different PubSub Topics with Single (Same) Content Topic": + # Given two valid subscriptions with the same content topic + let + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", contentTopicSeq + ) + + assert subscribeResponse1.isOk(), 
$subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 1 + + # When unsubscribing from the other subscription + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", contentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "Different PubSub Topics with Multiple (Same) Content Topics, One By One": + # Given two valid subscriptions with the same content topics + let + multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + 
serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 3 + + # When unsubscribing from another of the subscriptions + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from another of the subscriptions + let unsubscribeResponse3 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse3.isOk(), $unsubscribeResponse3.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 1 + + # When unsubscribing from the last subscription + let unsubscribeResponse4 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse4.isOk(), $unsubscribeResponse4.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "Different PubSub Topics with Multiple (Same) Content Topics, All At Once": + # Given two valid subscriptions with the same content topics + let + multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, 
multipleContentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from the other subscription + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a complete subscription with different PubSub Topics and Single (Same) Content Topic": + # Given two valid subscriptions with the same content topic + let + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", contentTopicSeq + ) + + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + 
wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # And a successful complete refresh of the subscription + let + subscribeResponse3 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + subscribeResponse4 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", contentTopicSeq + ) + + assert subscribeResponse3.isOk(), $subscribeResponse3.error + assert subscribeResponse4.isOk(), $subscribeResponse4.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 1 + + # When unsubscribing from the other subscription + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", contentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a complete subscription with different PubSub Topics and Multiple (Same) Content Topics, One By One": + # Given two valid subscriptions with the same content topics + let + multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", 
multipleContentTopicSeq + ) + + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # And a successful complete refresh of the subscription + let + subscribeResponse3 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + subscribeResponse4 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert subscribeResponse3.isOk(), $subscribeResponse3.error + assert subscribeResponse4.isOk(), $subscribeResponse4.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 3 + + # When unsubscribing from another of the subscriptions + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from another of the subscriptions + let 
unsubscribeResponse3 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse3.isOk(), $unsubscribeResponse3.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 1 + + # When unsubscribing from the last subscription + let unsubscribeResponse4 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse4.isOk(), $unsubscribeResponse4.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a complete subscription with different PubSub Topics and Multiple (Same) Content Topics, All At Once": + # Given two valid subscriptions with the same content topics + let + multipleContentTopicSeq = @[contentTopic, "other-content-topic"] + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # And a successful complete refresh of the subscription + let + subscribeResponse3 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + subscribeResponse4 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert subscribeResponse3.isOk(), $subscribeResponse3.error + assert 
subscribeResponse4.isOk(), $subscribeResponse4.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from the other subscription + let unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a partial subscription with different PubSub Topics and Multiple (Same) Content Topics, One By One": + # Given two valid subscriptions with the same content topics + let + multipleContentTopicSeq = contentTopicSeq & @["other-content-topic"] + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # Unsubscribing from one of the content topics of each subscription + let + 
unsubscribeResponse1 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @["other-content-topic"] + ) + + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # And a successful refresh of the partial subscription + let + refreshSubscriptionResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + refreshSubscriptionResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert refreshSubscriptionResponse1.isOk(), $refreshSubscriptionResponse1.error + assert refreshSubscriptionResponse2.isOk(), $refreshSubscriptionResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse3 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse3.isOk(), $unsubscribeResponse3.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 3 + + # When unsubscribing from another of the subscriptions + let unsubscribeResponse4 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse4.isOk(), 
$unsubscribeResponse4.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from another of the subscriptions + let unsubscribeResponse5 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @["other-content-topic"] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse5.isOk(), $unsubscribeResponse5.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 1 + + # When unsubscribing from the last subscription + let unsubscribeResponse6 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @[contentTopic] + ) + + # Then the unsubscription is successful + assert unsubscribeResponse6.isOk(), $unsubscribeResponse6.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "After refreshing a partial subscription with different PubSub Topics and Multiple (Same) Content Topics, All At Once": + # Given two valid subscriptions with the same content topics + let + multipleContentTopicSeq = contentTopicSeq & @["other-content-topic"] + subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # Unsubscribing from one of the content topics of each subscription + let + unsubscribeResponse1 = await 
wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @[contentTopic] + ) + unsubscribeResponse2 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", @["other-content-topic"] + ) + + assert unsubscribeResponse1.isOk(), $unsubscribeResponse1.error + assert unsubscribeResponse2.isOk(), $unsubscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # And a successful refresh of the partial subscription + let + refreshSubscriptionResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + refreshSubscriptionResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + assert refreshSubscriptionResponse1.isOk(), $refreshSubscriptionResponse1.error + assert refreshSubscriptionResponse2.isOk(), $refreshSubscriptionResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 4 + + # When unsubscribing from one of the subscriptions + let unsubscribeResponse3 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse3.isOk(), $unsubscribeResponse3.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + wakuFilter.getSubscribedContentTopics(clientPeerId).len == 2 + + # When unsubscribing from the other subscription + let unsubscribeResponse4 = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "other-pubsub-topic", multipleContentTopicSeq + ) + + # Then the unsubscription is successful + assert unsubscribeResponse4.isOk(), $unsubscribeResponse4.error + 
check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "Without existing subscription": + # When unsubscribing from a non-existent subscription + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the unsubscription is not successful + check: + unsubscribeResponse.isErr() # Not subscribed + unsubscribeResponse.error().kind == FilterSubscribeErrorKind.NOT_FOUND + + asyncTest "With non existent pubsub topic": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "pubsub-topic", contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When unsubscribing from a pubsub topic that does not exist + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, "non-existent-pubsub-topic", contentTopicSeq + ) + + # Then the unsubscription is not successful + check: + unsubscribeResponse.isErr() # Not subscribed + unsubscribeResponse.error().kind == FilterSubscribeErrorKind.NOT_FOUND + + asyncTest "With non existent content topic": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When unsubscribing from a content topic that does not exist + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, @["non-existent-content-topic"] + ) + + # Then the unsubscription is not successful + check: + unsubscribeResponse.isErr() # Not subscribed + unsubscribeResponse.error().kind == FilterSubscribeErrorKind.NOT_FOUND + + asyncTest "Empty content 
topic": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When unsubscribing from an empty content topic + let unsubscribeResponse = await wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, newSeq[ContentTopic]() + ) + + # Then the unsubscription is not successful + check: + unsubscribeResponse.isErr() # Not subscribed + unsubscribeResponse.error().kind == FilterSubscribeErrorKind.BAD_REQUEST + + suite "Unsubscribe All": + asyncTest "Unsubscribe from All Topics, One PubSub Topic": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When unsubscribing from all topics + let unsubscribeResponse = + await wakuFilterClient.unsubscribeAll(serverRemotePeerInfo) + + # Then the unsubscription is successful + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "Unsubscribe from All Topics, Multiple PubSub Topics": + # Given a valid subscription + let subscribeResponse1 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + let subscribeResponse2 = await wakuFilterClient.subscribe( + serverRemotePeerInfo, "other-pubsub-topic", contentTopicSeq + ) + assert subscribeResponse1.isOk(), $subscribeResponse1.error + assert subscribeResponse2.isOk(), $subscribeResponse2.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + 
# When unsubscribing from all topics + let unsubscribeResponse = + await wakuFilterClient.unsubscribeAll(serverRemotePeerInfo) + + # Then the unsubscription is successful + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + asyncTest "Unsubscribe from All Topics from a non-subscribed Service": + # Given the client is not subscribed to a service + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + # When unsubscribing from all topics for that client + let unsubscribeResponse = + await wakuFilterClient.unsubscribeAll(serverRemotePeerInfo) + + # Then the unsubscription is not successful + check: + unsubscribeResponse.isErr() # Not subscribed + unsubscribeResponse.error().kind == FilterSubscribeErrorKind.NOT_FOUND + + suite "Filter-Push": + asyncTest "Valid Payload Types": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # And some extra payloads + let + JSON_DICTIONARY = getSampleJsonDictionary() + JSON_LIST = getSampleJsonList() + + # And some valid messages + let + msg1 = fakeWakuMessage(contentTopic = contentTopic, payload = ALPHABETIC) + msg2 = fakeWakuMessage(contentTopic = contentTopic, payload = ALPHANUMERIC) + msg3 = + fakeWakuMessage(contentTopic = contentTopic, payload = ALPHANUMERIC_SPECIAL) + msg4 = fakeWakuMessage(contentTopic = contentTopic, payload = EMOJI) + msg5 = fakeWakuMessage(contentTopic = contentTopic, payload = CODE) + msg6 = fakeWakuMessage(contentTopic = contentTopic, payload = QUERY) + msg7 = + fakeWakuMessage(contentTopic = contentTopic, payload = $JSON_DICTIONARY) + msg8 = fakeWakuMessage(contentTopic = contentTopic, payload = $JSON_LIST) + msg9 = 
fakeWakuMessage(contentTopic = contentTopic, payload = TEXT_SMALL) + msg10 = fakeWakuMessage(contentTopic = contentTopic, payload = TEXT_LARGE) + + # When sending the alphabetic message + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + msg1.payload.toString() == ALPHABETIC + + # When sending the alphanumeric message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + msg2.payload.toString() == ALPHANUMERIC + + # When sending the alphanumeric special message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg3) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic3, pushedMsg3) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic3 == pubsubTopic + pushedMsg3 == msg3 + msg3.payload.toString() == ALPHANUMERIC_SPECIAL + + # When sending the emoji message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg4) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic4, pushedMsg4) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic4 == pubsubTopic + pushedMsg4 == msg4 + msg4.payload.toString() == EMOJI + + # When sending the code message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await 
wakuFilter.handleMessage(pubsubTopic, msg5) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic5, pushedMsg5) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic5 == pubsubTopic + pushedMsg5 == msg5 + msg5.payload.toString() == CODE + + # When sending the query message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg6) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic6, pushedMsg6) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic6 == pubsubTopic + pushedMsg6 == msg6 + msg6.payload.toString() == QUERY + + # When sending the table message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg7) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic7, pushedMsg7) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic7 == pubsubTopic + pushedMsg7 == msg7 + msg7.payload.toString() == $JSON_DICTIONARY + + # When sending the list message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg8) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic8, pushedMsg8) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic8 == pubsubTopic + pushedMsg8 == msg8 + msg8.payload.toString() == $JSON_LIST + + # When sending the small text message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg9) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic9, pushedMsg9) = pushHandlerFuture.read() + check: 
+ pushedMsgPubsubTopic9 == pubsubTopic + pushedMsg9 == msg9 + msg9.payload.toString() == TEXT_SMALL + + # When sending the large text message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg10) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic10, pushedMsg10) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic10 == pubsubTopic + pushedMsg10 == msg10 + msg10.payload.toString() == TEXT_LARGE + + asyncTest "Valid Payload Sizes": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # Given some valid payloads + let + msg1 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(1024) + ) # 1KiB + msg2 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(10 * 1024) + ) # 10KiB + msg3 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(100 * 1024) + ) # 100KiB + msg4 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxPushSize - 1024), + ) # Max Size (Inclusive Limit) + msg5 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(DefaultMaxPushSize) + ) # Max Size (Exclusive Limit) + + # When sending the 1KiB message + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + # When sending the 10KiB message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await 
wakuFilter.handleMessage(pubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + # When sending the 100KiB message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg3) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic3, pushedMsg3) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic3 == pubsubTopic + pushedMsg3 == msg3 + + # When sending the MaxPushSize - 1024B message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg4) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic4, pushedMsg4) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic4 == pubsubTopic + pushedMsg4 == msg4 + + # When sending the MaxPushSize message + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + await wakuFilter.handleMessage(pubsubTopic, msg5) + + # Then the message is not pushed to the client + check not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + suite "Security and Privacy": + asyncTest "Filter Client can receive messages after Client and Server reboot": + # Given a clean client and server + check: + wakuFilter.subscriptions.subscribedPeerCount() == 0 + + # When subscribing to a topic + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the subscription is successful + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When both are 
stopped and started + await allFutures(wakuFilter.stop(), wakuFilterClient.stop()) + await allFutures(wakuFilter.start(), wakuFilterClient.start()) + + # Then the suscription is maintained + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + + # When sending a message to the subscription + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg1) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic, pushedMsg) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic == pubsubTopic + pushedMsg == msg1 + + # When refreshing the subscription after reboot + let refreshSubscriptionResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + # Then the refreshment is successful + assert refreshSubscriptionResponse.isOk(), $refreshSubscriptionResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When sending a message to the refreshed subscription + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg2) + + # Then the message is pushed to the client + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + asyncTest "Filter Client can receive messages after subscribing and stopping without unsubscribing": + # Given a valid subscription + let subscribeResponse = await wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When the 
client is stopped + await wakuFilterClient.stop() + + # Then the subscription is not removed + check: + wakuFilter.subscriptions.subscribedPeerCount() == 1 + wakuFilter.subscriptions.isSubscribed(clientPeerId) + + # When the server receives a message + let msg = fakeWakuMessage(contentTopic = contentTopic) + await wakuFilter.handleMessage(pubsubTopic, msg) + + # Then the client receives the message + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic, pushedMsg) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic == pubsubTopic + pushedMsg == msg + + suite "Subscription timeout": + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + var client2nd {.threadvar.}: WakuNode + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + var clientPeerId {.threadvar.}: PeerId + var clientPeerId2nd {.threadvar.}: PeerId + var messagePushHandler {.threadvar.}: FilterPushHandler + var messagePushHandler2nd {.threadvar.}: FilterPushHandler + var msgSeq {.threadvar.}: seq[(PubsubTopic, WakuMessage)] + var msgSeq2nd {.threadvar.}: seq[(PubsubTopic, WakuMessage)] + var pushHandlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + var pushHandlerFuture2nd {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + + asyncSetup: + msgSeq = @[] + pushHandlerFuture = newPushHandlerFuture() + messagePushHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure.} = + msgSeq.add((pubsubTopic, message)) + pushHandlerFuture.complete((pubsubTopic, message)) + + msgSeq2nd = @[] + pushHandlerFuture2nd = newPushHandlerFuture() + messagePushHandler2nd = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + msgSeq2nd.add((pubsubTopic, message)) + pushHandlerFuture2nd.complete((pubsubTopic, message)) 
+ + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + + client = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(23450)) + server = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(23451)) + client2nd = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(23452)) + + await allFutures(server.start(), client.start(), client2nd.start()) + + await client.mountFilterClient() + await client2nd.mountFilterClient() + await server.mountFilter() + + client.wakuFilterClient.registerPushHandler(messagePushHandler) + client2nd.wakuFilterClient.registerPushHandler(messagePushHandler2nd) + clientPeerId = client.switch.peerInfo.peerId + clientPeerId2nd = client2nd.switch.peerInfo.peerId + serverRemotePeerInfo = server.switch.peerInfo + + asyncTeardown: + await allFutures(client2nd.stop(), client.stop(), server.stop()) + + asyncTest "client unsubscribe by timeout": + server.wakuFilter.setSubscriptionTimeout(1.seconds) + + # Given + let subscribeResponse = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg1) + + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + await sleepAsync(1500) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg2) + + check: + server.wakuFilter.subscriptions.isSubscribed(clientPeerId) == false + not 
await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "client reset subscription timeout with ping": + server.wakuFilter.setSubscriptionTimeout(1.seconds) + # Given + let subscribeResponse = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + + assert server.wakuFilter.subscriptions.subscribedPeerCount() == 1, + "wrong num of subscribed peers" + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + var msg1 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg1) + + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + var (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + await sleepAsync(500) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + msg1 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg1) + + # the ping restarts the timeout counting. 
We will have 1 sec from now + let pingResponse = await client.wakuFilterClient.ping(serverRemotePeerInfo) + + assert pingResponse.isOk(), $pingResponse.error + + # wait more in sum of the timeout + await sleepAsync(700) + + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg2) + + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + asyncTest "client reset subscription timeout with subscribe": + # Given + let subscribeResponse = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg1 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg1) + + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + await sleepAsync(1000) + + let contentTopic2nd = "content-topic-2nd" + contentTopicSeq = @[contentTopic2nd] + let subscribeResponse2nd = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + assert subscribeResponse2nd.isOk(), $subscribeResponse2nd.error + + # wait more in sum of the timeout + await sleepAsync(1200) + + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic2nd) + await 
server.wakuFilter.handleMessage(pubsubTopic, msg2) + + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + asyncTest "client reset subscription timeout with unsubscribe": + # Given + let contentTopic2nd = "content-topic-2nd" + contentTopicSeq.add(contentTopic2nd) + let subscribeResponse = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg1 = fakeWakuMessage(contentTopic = contentTopic2nd) + await server.wakuFilter.handleMessage(pubsubTopic, msg1) + + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + await sleepAsync(1000) + + contentTopicSeq = @[contentTopic2nd] + let unsubscribeResponse = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + assert unsubscribeResponse.isOk(), $unsubscribeResponse.error + + # wait more in sum of the timeout + await sleepAsync(1200) + + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg2) + + # shall still receive message on default content topic + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + asyncTest "two clients shifted subscription and timeout": + 
server.wakuFilter.setSubscriptionTimeout(1.seconds) + # Given + let contentTopic2nd = "content-topic-2nd" + contentTopicSeq.add(contentTopic2nd) + let subscribeResponse = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + await sleepAsync(500) + + let subscribeResponse2nd = await client2nd.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + + assert subscribeResponse2nd.isOk(), $subscribeResponse2nd.error + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId2nd) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + pushHandlerFuture2nd = newPushHandlerFuture() # Clear previous future + let msg1 = fakeWakuMessage(contentTopic = contentTopic2nd) + await server.wakuFilter.handleMessage(pubsubTopic, msg1) + + # both clients get messages + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + check await pushHandlerFuture2nd.withTimeout(FUTURE_TIMEOUT) + block: + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + block: + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture2nd.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + await sleepAsync(700) + + check not server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + pushHandlerFuture2nd = newPushHandlerFuture() # Clear previous future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg2) + + # shall still receive message on default content topic + check not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + check await pushHandlerFuture2nd.withTimeout(FUTURE_TIMEOUT) + let (pushedMsgPubsubTopic2, pushedMsg2) = 
pushHandlerFuture2nd.read() + check: + pushedMsgPubsubTopic2 == pubsubTopic + pushedMsg2 == msg2 + + await sleepAsync(500) + + check not server.wakuFilter.subscriptions.isSubscribed(clientPeerId2nd) + + asyncTest "two clients timeout maintenance": + server.wakuFilter.setSubscriptionTimeout(500.milliseconds) + # Given + let contentTopic2nd = "content-topic-2nd" + contentTopicSeq.add(contentTopic2nd) + let subscribeResponse = await client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse.isOk(), $subscribeResponse.error + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + + let subscribeResponse2nd = await client2nd.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + assert subscribeResponse2nd.isOk(), $subscribeResponse2nd.error + check server.wakuFilter.subscriptions.isSubscribed(clientPeerId2nd) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + pushHandlerFuture2nd = newPushHandlerFuture() # Clear previous future + let msg1 = fakeWakuMessage(contentTopic = contentTopic2nd) + await server.wakuFilter.handleMessage(pubsubTopic, msg1) + + # both clients get messages + check await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + check await pushHandlerFuture2nd.withTimeout(FUTURE_TIMEOUT) + block: + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + block: + let (pushedMsgPubsubTopic1, pushedMsg1) = pushHandlerFuture2nd.read() + check: + pushedMsgPubsubTopic1 == pubsubTopic + pushedMsg1 == msg1 + + await sleepAsync(700) + + await server.wakuFilter.maintainSubscriptions() + + check not server.wakuFilter.subscriptions.isSubscribed(clientPeerId) + check not server.wakuFilter.subscriptions.isSubscribed(clientPeerId2nd) + + pushHandlerFuture = newPushHandlerFuture() # Clear previous future + pushHandlerFuture2nd = newPushHandlerFuture() # Clear previous 
future + let msg2 = fakeWakuMessage(contentTopic = contentTopic) + await server.wakuFilter.handleMessage(pubsubTopic, msg2) + + # shall still receive message on default content topic + check not await pushHandlerFuture.withTimeout(FUTURE_TIMEOUT) + check not await pushHandlerFuture2nd.withTimeout(FUTURE_TIMEOUT) diff --git a/third-party/nwaku/tests/waku_filter_v2/test_waku_filter_dos_protection.nim b/third-party/nwaku/tests/waku_filter_v2/test_waku_filter_dos_protection.nim new file mode 100644 index 0000000..7c8c640 --- /dev/null +++ b/third-party/nwaku/tests/waku_filter_v2/test_waku_filter_dos_protection.nim @@ -0,0 +1,176 @@ +{.used.} + +import + std/[options, tables, json], + testutils/unittests, + results, + chronos, + chronicles, + libp2p/peerstore + +import + waku/[node/peer_manager, waku_core], + waku/waku_filter_v2/[common, client, subscriptions, protocol], + ../testlib/[wakucore, testasync, futures], + ./waku_filter_utils + +type AFilterClient = ref object of RootObj + clientSwitch*: Switch + wakuFilterClient*: WakuFilterClient + clientPeerId*: PeerId + messagePushHandler*: FilterPushHandler + msgSeq*: seq[(PubsubTopic, WakuMessage)] + pushHandlerFuture*: Future[(PubsubTopic, WakuMessage)] + +proc init(T: type[AFilterClient]): T = + var r = T( + clientSwitch: newStandardSwitch(), + msgSeq: @[], + pushHandlerFuture: newPushHandlerFuture(), + ) + r.wakuFilterClient = waitFor newTestWakuFilterClient(r.clientSwitch) + r.messagePushHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + r.msgSeq.add((pubsubTopic, message)) + r.pushHandlerFuture.complete((pubsubTopic, message)) + + r.clientPeerId = r.clientSwitch.peerInfo.toRemotePeerInfo().peerId + r.wakuFilterClient.registerPushHandler(r.messagePushHandler) + return r + +proc subscribe( + client: AFilterClient, + serverRemotePeerInfo: RemotePeerInfo, + pubsubTopic: PubsubTopic, + contentTopicSeq: seq[ContentTopic], +): Option[FilterSubscribeErrorKind] 
= + let subscribeResponse = waitFor client.wakuFilterClient.subscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + if subscribeResponse.isOk(): + return none[FilterSubscribeErrorKind]() + + return some(subscribeResponse.error().kind) + +proc unsubscribe( + client: AFilterClient, + serverRemotePeerInfo: RemotePeerInfo, + pubsubTopic: PubsubTopic, + contentTopicSeq: seq[ContentTopic], +): Option[FilterSubscribeErrorKind] = + let unsubscribeResponse = waitFor client.wakuFilterClient.unsubscribe( + serverRemotePeerInfo, pubsubTopic, contentTopicSeq + ) + if unsubscribeResponse.isOk(): + return none[FilterSubscribeErrorKind]() + + return some(unsubscribeResponse.error().kind) + +proc ping( + client: AFilterClient, serverRemotePeerInfo: RemotePeerInfo +): Option[FilterSubscribeErrorKind] = + let pingResponse = waitFor client.wakuFilterClient.ping(serverRemotePeerInfo) + if pingResponse.isOk(): + return none[FilterSubscribeErrorKind]() + + return some(pingResponse.error().kind) + +suite "Waku Filter - DOS protection": + var serverSwitch {.threadvar.}: Switch + var client1 {.threadvar.}: AFilterClient + var client2 {.threadvar.}: AFilterClient + var wakuFilter {.threadvar.}: WakuFilter + var serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + var pubsubTopic {.threadvar.}: PubsubTopic + var contentTopic {.threadvar.}: ContentTopic + var contentTopicSeq {.threadvar.}: seq[ContentTopic] + + asyncSetup: + client1 = AFilterClient.init() + client2 = AFilterClient.init() + + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + contentTopicSeq = @[contentTopic] + serverSwitch = newStandardSwitch() + wakuFilter = await newTestWakuFilter( + serverSwitch, rateLimitSetting = some((3, 1000.milliseconds)) + ) + + await allFutures( + serverSwitch.start(), client1.clientSwitch.start(), client2.clientSwitch.start() + ) + serverRemotePeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + client1.clientPeerId = 
client1.clientSwitch.peerInfo.toRemotePeerInfo().peerId + client2.clientPeerId = client2.clientSwitch.peerInfo.toRemotePeerInfo().peerId + + asyncTeardown: + await allFutures( + wakuFilter.stop(), + client1.wakuFilterClient.stop(), + client2.wakuFilterClient.stop(), + serverSwitch.stop(), + client1.clientSwitch.stop(), + client2.clientSwitch.stop(), + ) + + asyncTest "Limit number of subscriptions requests": + # Given + check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + + await sleepAsync(20.milliseconds) + check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + await sleepAsync(20.milliseconds) + check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + await sleepAsync(20.milliseconds) + check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + some(FilterSubscribeErrorKind.TOO_MANY_REQUESTS) + check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + some(FilterSubscribeErrorKind.TOO_MANY_REQUESTS) + + # ensure period of time has passed and clients can again use the service + await sleepAsync(1000.milliseconds) + check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + + asyncTest "Ensure normal usage allowed": + # Given + check client1.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + check 
wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) + + await sleepAsync(500.milliseconds) + check client1.ping(serverRemotePeerInfo) == none(FilterSubscribeErrorKind) + check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) + + await sleepAsync(500.milliseconds) + check client1.ping(serverRemotePeerInfo) == none(FilterSubscribeErrorKind) + check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) + + await sleepAsync(50.milliseconds) + check client1.unsubscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + check wakuFilter.subscriptions.isSubscribed(client1.clientPeerId) == false + + await sleepAsync(50.milliseconds) + check client1.ping(serverRemotePeerInfo) == some(FilterSubscribeErrorKind.NOT_FOUND) + check client1.ping(serverRemotePeerInfo) == some(FilterSubscribeErrorKind.NOT_FOUND) + await sleepAsync(50.milliseconds) + check client1.ping(serverRemotePeerInfo) == + some(FilterSubscribeErrorKind.TOO_MANY_REQUESTS) + + check client2.subscribe(serverRemotePeerInfo, pubsubTopic, contentTopicSeq) == + none(FilterSubscribeErrorKind) + check wakuFilter.subscriptions.isSubscribed(client2.clientPeerId) == true diff --git a/third-party/nwaku/tests/waku_filter_v2/waku_filter_utils.nim b/third-party/nwaku/tests/waku_filter_v2/waku_filter_utils.nim new file mode 100644 index 0000000..2f04ceb --- /dev/null +++ b/third-party/nwaku/tests/waku_filter_v2/waku_filter_utils.nim @@ -0,0 +1,66 @@ +import std/[options, tables, sets, algorithm], chronos, chronicles, os + +import + waku/[ + node/peer_manager, + waku_filter_v2, + waku_filter_v2/client, + waku_filter_v2/subscriptions, + waku_core, + common/rate_limit/setting, + ], + ../testlib/[common, wakucore] + +proc newTestWakuFilter*( + switch: Switch, + subscriptionTimeout: Duration = DefaultSubscriptionTimeToLiveSec, + maxFilterPeers: uint32 = MaxFilterPeers, + maxFilterCriteriaPerPeer: uint32 = MaxFilterCriteriaPerPeer, + rateLimitSetting: 
Option[RateLimitSetting] = none[RateLimitSetting](), +): Future[WakuFilter] {.async.} = + let + peerManager = PeerManager.new(switch) + proto = WakuFilter.new( + peerManager, + subscriptionTimeout, + maxFilterPeers, + maxFilterCriteriaPerPeer, + rateLimitSetting = rateLimitSetting, + ) + + await proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuFilterClient*(switch: Switch): Future[WakuFilterClient] {.async.} = + let + peerManager = PeerManager.new(switch) + proto = WakuFilterClient.new(peerManager, rng) + + await proto.start() + switch.mount(proto) + + return proto + +proc getSubscribedContentTopics*( + wakuFilter: WakuFilter, peerId: PeerId +): seq[ContentTopic] = + var contentTopics: seq[ContentTopic] = @[] + let peersCriteria = wakuFilter.subscriptions.getPeerSubscriptions(peerId) + + for filterCriterion in peersCriteria: + contentTopics.add(filterCriterion.contentTopic) + + return contentTopics + +proc unorderedCompare*[T](a, b: seq[T]): bool = + if a == b: + return true + + var aSorted = a + var bSorted = b + aSorted.sort() + bSorted.sort() + + return aSorted == bSorted diff --git a/third-party/nwaku/tests/waku_keystore/utils.nim b/third-party/nwaku/tests/waku_keystore/utils.nim new file mode 100644 index 0000000..8af2d2a --- /dev/null +++ b/third-party/nwaku/tests/waku_keystore/utils.nim @@ -0,0 +1,29 @@ +{.used.} +{.push raises: [].} + +import stint + +import + waku/[waku_keystore/protocol_types, waku_rln_relay, waku_rln_relay/protocol_types] + +func fromStrToBytesLe*(v: string): seq[byte] = + try: + return @(hexToUint[256](v).toBytesLE()) + except ValueError: + # this should never happen + return @[] + +func defaultIdentityCredential*(): IdentityCredential = + # zero out the values we don't need + return IdentityCredential( + idTrapdoor: default(IdentityTrapdoor), + idNullifier: default(IdentityNullifier), + idSecretHash: fromStrToBytesLe( + "7984f7c054ad7793d9f31a1e9f29eaa8d05966511e546bced89961eb8874ab9" + ), + idCommitment: 
fromStrToBytesLe( + "51c31de3bff7e52dc7b2eb34fc96813bacf38bde92d27fe326ce5d8296322a7" + ), + ) + +{.pop.} diff --git a/third-party/nwaku/tests/waku_lightpush/lightpush_utils.nim b/third-party/nwaku/tests/waku_lightpush/lightpush_utils.nim new file mode 100644 index 0000000..7bd44a3 --- /dev/null +++ b/third-party/nwaku/tests/waku_lightpush/lightpush_utils.nim @@ -0,0 +1,33 @@ +{.used.} + +import std/options, chronos, chronicles, libp2p/crypto/crypto + +import + waku/node/peer_manager, + waku/waku_core, + waku/waku_core/topics/sharding, + waku/waku_lightpush, + waku/waku_lightpush/[client, common], + waku/common/rate_limit/setting, + ../testlib/[common, wakucore] + +proc newTestWakuLightpushNode*( + switch: Switch, + handler: PushMessageHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): Future[WakuLightPush] {.async.} = + let + peerManager = PeerManager.new(switch) + wakuAutoSharding = Sharding(clusterId: 1, shardCountGenZero: 8) + proto = WakuLightPush.new( + peerManager, rng, handler, some(wakuAutoSharding), rateLimitSetting + ) + + await proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuLightpushClient*(switch: Switch): WakuLightPushClient = + let peerManager = PeerManager.new(switch) + WakuLightPushClient.new(peerManager, rng) diff --git a/third-party/nwaku/tests/waku_lightpush/test_all.nim b/third-party/nwaku/tests/waku_lightpush/test_all.nim new file mode 100644 index 0000000..b5edd72 --- /dev/null +++ b/third-party/nwaku/tests/waku_lightpush/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_client, ./test_ratelimit diff --git a/third-party/nwaku/tests/waku_lightpush/test_client.nim b/third-party/nwaku/tests/waku_lightpush/test_client.nim new file mode 100644 index 0000000..af22ffa --- /dev/null +++ b/third-party/nwaku/tests/waku_lightpush/test_client.nim @@ -0,0 +1,373 @@ +{.used.} + +import + std/[options, strscans], + testutils/unittests, + chronos, + chronicles, + libp2p/crypto/crypto + +import 
+ waku/[ + node/peer_manager, + waku_core, + waku_lightpush, + waku_lightpush/client, + waku_lightpush/protocol_metrics, + ], + ../testlib/[assertions, wakucore, testasync, futures], + ./lightpush_utils, + ../resources/[pubsub_topics, content_topics, payloads] + +suite "Waku Lightpush Client": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + serverSwitch {.threadvar.}: Switch + clientSwitch {.threadvar.}: Switch + server {.threadvar.}: WakuLightPush + client {.threadvar.}: WakuLightPushClient + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + clientPeerId {.threadvar.}: PeerId + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + let msgLen = message.encode().buffer.len + if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024: + return lighpushErrorResult( + LightPushErrorCode.PAYLOAD_TOO_LARGE, "length greater than maxMessageSize" + ) + handlerFuture.complete((pubsubTopic, message)) + # return that we published the message to 1 peer. 
+ return ok(1) + + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + server = await newTestWakuLightpushNode(serverSwitch, handler) + client = newTestWakuLightpushClient(clientSwitch) + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + serverRemotePeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerId = clientSwitch.peerInfo.peerId + pubsubTopic = DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await allFutures(clientSwitch.stop(), serverSwitch.stop()) + + suite "Verification of PushRequest Payload": + asyncTest "Valid Payload Types": + # Given the following payloads + let + message2 = fakeWakuMessage(payloads.ALPHABETIC, content_topics.CURRENT) + message3 = fakeWakuMessage(payloads.ALPHANUMERIC, content_topics.TESTNET) + message4 = fakeWakuMessage(payloads.ALPHANUMERIC_SPECIAL, content_topics.PLAIN) + message5 = fakeWakuMessage(payloads.EMOJI, content_topics.CURRENT) + message6 = fakeWakuMessage(payloads.CODE, content_topics.TESTNET) + message7 = fakeWakuMessage(payloads.QUERY, content_topics.PLAIN) + message8 = fakeWakuMessage(payloads.TEXT_SMALL, content_topics.CURRENT) + message9 = fakeWakuMessage(payloads.TEXT_LARGE, content_topics.TESTNET) + + # When publishing a valid payload + let publishResponse = + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsubTopic, message) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse2 = await client.publish( + some(pubsub_topics.CURRENT), message2, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) 
+ assertResultOk publishResponse2 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.CURRENT, message2) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse3 = await client.publish( + some(pubsub_topics.CURRENT_NESTED), message3, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse3 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.CURRENT_NESTED, message3) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse4 = await client.publish( + some(pubsub_topics.SHARDING), message4, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse4 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.SHARDING, message4) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse5 = + await client.publish(some(pubsub_topics.PLAIN), message5, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse5 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.PLAIN, message5) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse6 = + await client.publish(some(pubsub_topics.LEGACY), message6, serverRemotePeerInfo) + + # Then the message is received by the server + discard await 
handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse6 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY, message6) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse7 = await client.publish( + some(pubsub_topics.LEGACY_NESTED), message7, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse7 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY_NESTED, message7) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse8 = await client.publish( + some(pubsub_topics.LEGACY_ENCODING), message8, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse8 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY_ENCODING, message8) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse9 = + await client.publish(some(pubsubTopic), message9, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse9 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsubTopic, message9) == handlerFuture.read() + + asyncTest "Valid Payload Sizes": + # Given some valid payloads + let + overheadBytes: uint64 = 112 + message1 = + fakeWakuMessage(contentTopic = contentTopic, payload = getByteSequence(1024)) + # 1KiB + message2 = fakeWakuMessage( + contentTopic 
= contentTopic, payload = getByteSequence(10 * 1024) + ) # 10KiB + message3 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(100 * 1024) + ) # 100KiB + message4 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize - overheadBytes - 1), + ) # Inclusive Limit + message5 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize + 64 * 1024), + ) # Exclusive Limit + + # When publishing the 1KiB payload + let publishResponse1 = + await client.publish(some(pubsubTopic), message1, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse1 + check (pubsubTopic, message1) == (await handlerFuture.waitForResult()).value() + + # When publishing the 10KiB payload + handlerFuture = newPushHandlerFuture() + let publishResponse2 = + await client.publish(some(pubsubTopic), message2, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse2 + check (pubsubTopic, message2) == (await handlerFuture.waitForResult()).value() + + # When publishing the 100KiB payload + handlerFuture = newPushHandlerFuture() + let publishResponse3 = + await client.publish(some(pubsubTopic), message3, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse3 + check (pubsubTopic, message3) == (await handlerFuture.waitForResult()).value() + + # When publishing the 1MiB + 63KiB + 911B payload (1113999B) + handlerFuture = newPushHandlerFuture() + let publishResponse4 = + await client.publish(some(pubsubTopic), message4, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse4 + check (pubsubTopic, message4) == (await handlerFuture.waitForResult()).value() + + # When publishing the 1MiB + 63KiB + 912B payload (1114000B) + handlerFuture = newPushHandlerFuture() + let publishResponse5 = + await 
client.publish(some(pubsubTopic), message5, serverRemotePeerInfo) + + # Then the message is not received by the server + check: + publishResponse5.isErr() + publishResponse5.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE + (await handlerFuture.waitForResult()).isErr() + + asyncTest "Invalid Encoding Payload": + # Given a payload with an invalid encoding + let fakeBuffer = @[byte(42)] + + # When publishing the payload + let publishResponse = await server.handleRequest(clientPeerId, fakeBuffer) + + # And the error is returned + check: + publishResponse.requestId == "N/A" + publishResponse.statusCode == LightPushErrorCode.BAD_REQUEST + publishResponse.statusDesc.isSome() + scanf(publishResponse.statusDesc.get(), decodeRpcFailure) + + asyncTest "Handle Error": + # Given a lightpush server that fails + let + handlerError = "handler-error" + handlerFuture2 = newFuture[void]() + handler2 = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + handlerFuture2.complete() + return lighpushErrorResult(LightPushErrorCode.PAYLOAD_TOO_LARGE, handlerError) + + let + serverSwitch2 = newTestSwitch() + server2 = await newTestWakuLightpushNode(serverSwitch2, handler2) + + await serverSwitch2.start() + + let serverRemotePeerInfo2 = serverSwitch2.peerInfo.toRemotePeerInfo() + + # When publishing a payload + let publishResponse = + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo2) + + # Then the response is negative + check: + publishResponse.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE + publishResponse.error.desc == some(handlerError) + (await handlerFuture2.waitForResult()).isOk() + + # Cleanup + await serverSwitch2.stop() + + asyncTest "Check timestamp is not zero": + ## This test validates that, even the generated message has a timestamp of 0, + ## the node will eventually set a timestamp when publishing the message. 
+ let + zeroTimestamp = 0 + meta = "TEST-META" + message = fakeWakuMessage( + payloads.ALPHABETIC, content_topics.CURRENT, meta, zeroTimestamp + ) + + # When publishing a valid payload + let publishResponse = + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + let (readPubsubTopic, readMessage) = handlerFuture.read() + + check: + pubsubTopic == readPubsubTopic + message.payload == readMessage.payload + message.contentTopic == readMessage.contentTopic + message.meta == readMessage.meta + message.timestamp != readMessage.timestamp + message.ephemeral == readMessage.ephemeral + message.proof == readMessage.proof + message.version == readMessage.version + readMessage.timestamp > 0 + + suite "Verification of PushResponse Payload": + asyncTest "Positive Responses": + # When sending a valid PushRequest + let publishResponse = + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo) + + # Then the response is positive + assertResultOk publishResponse + + # TODO: Improve: Add more negative responses variations + asyncTest "Negative Responses": + # Given a server that does not support Waku Lightpush + let + serverSwitch2 = newTestSwitch() + serverRemotePeerInfo2 = serverSwitch2.peerInfo.toRemotePeerInfo() + + await serverSwitch2.start() + + # When sending an invalid PushRequest + let publishResponse = + await client.publish(some(pubsubTopic), message, serverRemotePeerInfo2) + + # Then the response is negative + check not publishResponse.isOk() + check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY diff --git a/third-party/nwaku/tests/waku_lightpush/test_ratelimit.nim b/third-party/nwaku/tests/waku_lightpush/test_ratelimit.nim new file mode 100644 index 0000000..b2dcdc7 --- 
/dev/null +++ b/third-party/nwaku/tests/waku_lightpush/test_ratelimit.nim @@ -0,0 +1,137 @@ +{.used.} + +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[node/peer_manager, waku_core, waku_lightpush, waku_lightpush/client], + ../testlib/wakucore, + ./lightpush_utils + +suite "Rate limited push service": + asyncTest "push message with rate limit not violated": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + var handlerFuture = newFuture[(string, WakuMessage)]() + let handler: PushMessageHandler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return lightpushSuccessResult(1) # succeed to publish to 1 peer. + + let + tokenPeriod = 500.millis + server = + await newTestWakuLightpushNode(serverSwitch, handler, some((3, tokenPeriod))) + client = newTestWakuLightpushClient(clientSwitch) + + let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() + + let sendMsgProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) + + check await handlerFuture.withTimeout(50.millis) + + check: + requestRes.isOk() + handlerFuture.finished() + + let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() + + check: + handledMessagePubsubTopic == DefaultPubsubTopic + handledMessage == message + + let waitInBetweenFor = 20.millis + + # Test cannot be too explicit about the time when the TokenBucket resets + # the internal timer, although in normal use there is no use case to care about it. 
+ var firstWaitExtend = 300.millis + + for runCnt in 0 ..< 3: + let startTime = Moment.now() + for testCnt in 0 ..< 3: + await sendMsgProc() + await sleepAsync(20.millis) + + var endTime = Moment.now() + var elapsed: Duration = (endTime - startTime) + await sleepAsync(tokenPeriod - elapsed + firstWaitExtend) + firstWaitExtend = 100.millis + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitch.stop()) + + asyncTest "push message with rate limit reject": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + var handlerFuture = newFuture[(string, WakuMessage)]() + let handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return lightpushSuccessResult(1) + + let + server = + await newTestWakuLightpushNode(serverSwitch, handler, some((3, 500.millis))) + client = newTestWakuLightpushClient(clientSwitch) + + let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() + let topic = DefaultPubsubTopic + + let successProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) + discard await handlerFuture.withTimeout(10.millis) + + check: + requestRes.isOk() + handlerFuture.finished() + let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() + check: + handledMessagePubsubTopic == DefaultPubsubTopic + handledMessage == message + + let rejectProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(some(DefaultPubsubTopic), message, peer = serverPeerId) + discard await handlerFuture.withTimeout(10.millis) + + check: + requestRes.isErr() + 
requestRes.error.code == LightPushErrorCode.TOO_MANY_REQUESTS + requestRes.error.desc == some(TooManyRequestsMessage) + + for testCnt in 0 .. 2: + await successProc() + await sleepAsync(20.millis) + + await rejectProc() + + await sleepAsync(500.millis) + + ## next one shall succeed due to the rate limit time window has passed + await successProc() + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitch.stop()) diff --git a/third-party/nwaku/tests/waku_lightpush_legacy/lightpush_utils.nim b/third-party/nwaku/tests/waku_lightpush_legacy/lightpush_utils.nim new file mode 100644 index 0000000..11c4bf9 --- /dev/null +++ b/third-party/nwaku/tests/waku_lightpush_legacy/lightpush_utils.nim @@ -0,0 +1,28 @@ +{.used.} + +import std/options, chronos, libp2p/crypto/crypto + +import + waku/node/peer_manager, + waku/waku_lightpush_legacy, + waku/waku_lightpush_legacy/[client, common], + waku/common/rate_limit/setting, + ../testlib/[common, wakucore] + +proc newTestWakuLegacyLightpushNode*( + switch: Switch, + handler: PushMessageHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): Future[WakuLegacyLightPush] {.async.} = + let + peerManager = PeerManager.new(switch) + proto = WakuLegacyLightPush.new(peerManager, rng, handler, rateLimitSetting) + + await proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuLegacyLightpushClient*(switch: Switch): WakuLegacyLightPushClient = + let peerManager = PeerManager.new(switch) + WakuLegacyLightPushClient.new(peerManager, rng) diff --git a/third-party/nwaku/tests/waku_lightpush_legacy/test_all.nim b/third-party/nwaku/tests/waku_lightpush_legacy/test_all.nim new file mode 100644 index 0000000..b5edd72 --- /dev/null +++ b/third-party/nwaku/tests/waku_lightpush_legacy/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_client, ./test_ratelimit diff --git a/third-party/nwaku/tests/waku_lightpush_legacy/test_client.nim 
b/third-party/nwaku/tests/waku_lightpush_legacy/test_client.nim new file mode 100644 index 0000000..1dcb466 --- /dev/null +++ b/third-party/nwaku/tests/waku_lightpush_legacy/test_client.nim @@ -0,0 +1,333 @@ +{.used.} + +import std/[options, strscans], testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + node/peer_manager, + waku_core, + waku_lightpush_legacy, + waku_lightpush_legacy/client, + waku_lightpush_legacy/common, + waku_lightpush_legacy/protocol_metrics, + waku_lightpush_legacy/rpc, + ], + ../testlib/[assertions, wakucore, testasync, futures], + ./lightpush_utils, + ../resources/[pubsub_topics, content_topics, payloads] + +suite "Waku Legacy Lightpush Client": + var + handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + handler {.threadvar.}: PushMessageHandler + + serverSwitch {.threadvar.}: Switch + clientSwitch {.threadvar.}: Switch + server {.threadvar.}: WakuLegacyLightPush + client {.threadvar.}: WakuLegacyLightPushClient + + serverRemotePeerInfo {.threadvar.}: RemotePeerInfo + clientPeerId {.threadvar.}: PeerId + pubsubTopic {.threadvar.}: PubsubTopic + contentTopic {.threadvar.}: ContentTopic + message {.threadvar.}: WakuMessage + + asyncSetup: + handlerFuture = newPushHandlerFuture() + handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + let msgLen = message.encode().buffer.len + if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024: + return err("length greater than maxMessageSize") + handlerFuture.complete((pubsubTopic, message)) + return ok() + + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + server = await newTestWakuLegacyLightpushNode(serverSwitch, handler) + client = newTestWakuLegacyLightpushClient(clientSwitch) + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + serverRemotePeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerId = clientSwitch.peerInfo.peerId + pubsubTopic = 
DefaultPubsubTopic + contentTopic = DefaultContentTopic + message = fakeWakuMessage() + + asyncTeardown: + await allFutures(clientSwitch.stop(), serverSwitch.stop()) + + suite "Verification of PushRequest Payload": + asyncTest "Valid Payload Types": + # Given the following payloads + let + message2 = fakeWakuMessage(payloads.ALPHABETIC, content_topics.CURRENT) + message3 = fakeWakuMessage(payloads.ALPHANUMERIC, content_topics.TESTNET) + message4 = fakeWakuMessage(payloads.ALPHANUMERIC_SPECIAL, content_topics.PLAIN) + message5 = fakeWakuMessage(payloads.EMOJI, content_topics.CURRENT) + message6 = fakeWakuMessage(payloads.CODE, content_topics.TESTNET) + message7 = fakeWakuMessage(payloads.QUERY, content_topics.PLAIN) + message8 = fakeWakuMessage(payloads.TEXT_SMALL, content_topics.CURRENT) + message9 = fakeWakuMessage(payloads.TEXT_LARGE, content_topics.TESTNET) + + # When publishing a valid payload + let publishResponse = + await client.publish(pubsubTopic, message, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsubTopic, message) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse2 = + await client.publish(pubsub_topics.CURRENT, message2, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse2 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.CURRENT, message2) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse3 = await client.publish( + pubsub_topics.CURRENT_NESTED, message3, serverRemotePeerInfo + ) + + # Then the 
message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse3 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.CURRENT_NESTED, message3) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse4 = + await client.publish(pubsub_topics.SHARDING, message4, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse4 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.SHARDING, message4) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse5 = + await client.publish(pubsub_topics.PLAIN, message5, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse5 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.PLAIN, message5) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse6 = + await client.publish(pubsub_topics.LEGACY, message6, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse6 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY, message6) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse7 = await client.publish( + pubsub_topics.LEGACY_NESTED, message7, serverRemotePeerInfo + ) + + # Then the message is received by 
the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse7 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY_NESTED, message7) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse8 = await client.publish( + pubsub_topics.LEGACY_ENCODING, message8, serverRemotePeerInfo + ) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse8 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsub_topics.LEGACY_ENCODING, message8) == handlerFuture.read() + + # When publishing a valid payload + handlerFuture = newPushHandlerFuture() + let publishResponse9 = + await client.publish(pubsubTopic, message9, serverRemotePeerInfo) + + # Then the message is received by the server + discard await handlerFuture.withTimeout(FUTURE_TIMEOUT) + assertResultOk publishResponse9 + check handlerFuture.finished() + + # And the message is received with the correct topic and payload + check (pubsubTopic, message9) == handlerFuture.read() + + asyncTest "Valid Payload Sizes": + # Given some valid payloads + let + overheadBytes: uint64 = 112 + message1 = + fakeWakuMessage(contentTopic = contentTopic, payload = getByteSequence(1024)) + # 1KiB + message2 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(10 * 1024) + ) # 10KiB + message3 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(100 * 1024) + ) # 100KiB + message4 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize - overheadBytes - 1), + ) # Inclusive Limit + message5 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize + 64 * 1024), + ) # Exclusive Limit + + 
# When publishing the 1KiB payload + let publishResponse1 = + await client.publish(pubsubTopic, message1, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse1 + check (pubsubTopic, message1) == (await handlerFuture.waitForResult()).value() + + # When publishing the 10KiB payload + handlerFuture = newPushHandlerFuture() + let publishResponse2 = + await client.publish(pubsubTopic, message2, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse2 + check (pubsubTopic, message2) == (await handlerFuture.waitForResult()).value() + + # When publishing the 100KiB payload + handlerFuture = newPushHandlerFuture() + let publishResponse3 = + await client.publish(pubsubTopic, message3, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse3 + check (pubsubTopic, message3) == (await handlerFuture.waitForResult()).value() + + # When publishing the 1MiB + 63KiB + 911B payload (1113999B) + handlerFuture = newPushHandlerFuture() + let publishResponse4 = + await client.publish(pubsubTopic, message4, serverRemotePeerInfo) + + # Then the message is received by the server + assertResultOk publishResponse4 + check (pubsubTopic, message4) == (await handlerFuture.waitForResult()).value() + + # When publishing the 1MiB + 63KiB + 912B payload (1114000B) + handlerFuture = newPushHandlerFuture() + let publishResponse5 = + await client.publish(pubsubTopic, message5, serverRemotePeerInfo) + + # Then the message is not received by the server + check: + not publishResponse5.isOk() + (await handlerFuture.waitForResult()).isErr() + + asyncTest "Invalid Encoding Payload": + # Given a payload with an invalid encoding + let fakeBuffer = @[byte(42)] + + # When publishing the payload + let publishResponse = await server.handleRequest(clientPeerId, fakeBuffer) + + # Then the response is negative + check: + publishResponse.requestId == "" + + # And the error 
is returned + let response = publishResponse.response.get() + check: + response.isSuccess == false + response.info.isSome() + scanf(response.info.get(), decodeRpcFailure) + + asyncTest "Handle Error": + # Given a lightpush server that fails + let + handlerError = "handler-error" + handlerFuture2 = newFuture[void]() + handler2 = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture2.complete() + return err(handlerError) + + let + serverSwitch2 = newTestSwitch() + server2 = await newTestWakuLegacyLightpushNode(serverSwitch2, handler2) + + await serverSwitch2.start() + + let serverRemotePeerInfo2 = serverSwitch2.peerInfo.toRemotePeerInfo() + + # When publishing a payload + let publishResponse = + await client.publish(pubsubTopic, message, serverRemotePeerInfo2) + + # Then the response is negative + check: + publishResponse.error() == handlerError + (await handlerFuture2.waitForResult()).isOk() + + # Cleanup + await serverSwitch2.stop() + + suite "Verification of PushResponse Payload": + asyncTest "Positive Responses": + # When sending a valid PushRequest + let publishResponse = + await client.publish(pubsubTopic, message, serverRemotePeerInfo) + + # Then the response is positive + assertResultOk publishResponse + + # TODO: Improve: Add more negative responses variations + asyncTest "Negative Responses": + # Given a server that does not support Waku Lightpush + let + serverSwitch2 = newTestSwitch() + serverRemotePeerInfo2 = serverSwitch2.peerInfo.toRemotePeerInfo() + + await serverSwitch2.start() + + # When sending an invalid PushRequest + let publishResponse = + await client.publish(pubsubTopic, message, serverRemotePeerInfo2) + + # Then the response is negative + check not publishResponse.isOk() diff --git a/third-party/nwaku/tests/waku_lightpush_legacy/test_ratelimit.nim b/third-party/nwaku/tests/waku_lightpush_legacy/test_ratelimit.nim new file mode 100644 index 0000000..3df8d36 --- 
/dev/null +++ b/third-party/nwaku/tests/waku_lightpush_legacy/test_ratelimit.nim @@ -0,0 +1,143 @@ +{.used.} + +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + node/peer_manager, + waku_core, + waku_lightpush_legacy, + waku_lightpush_legacy/client, + waku_lightpush_legacy/common, + ], + ../testlib/wakucore, + ./lightpush_utils + +suite "Rate limited push service": + asyncTest "push message with rate limit not violated": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + var handlerFuture = newFuture[(string, WakuMessage)]() + let handler: PushMessageHandler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + tokenPeriod = 500.millis + server = await newTestWakuLegacyLightpushNode( + serverSwitch, handler, some((3, tokenPeriod)) + ) + client = newTestWakuLegacyLightpushClient(clientSwitch) + + let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() + + let sendMsgProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + + check await handlerFuture.withTimeout(50.millis) + + assert requestRes.isOk(), requestRes.error + check handlerFuture.finished() + + let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() + + check: + handledMessagePubsubTopic == DefaultPubsubTopic + handledMessage == message + + let waitInBetweenFor = 20.millis + + # Test cannot be too explicit about the time when the TokenBucket resets + # the internal timer, although in normal use there is no use case to care about it. 
+ var firstWaitExtend = 300.millis + + for runCnt in 0 ..< 3: + let startTime = Moment.now() + for testCnt in 0 ..< 3: + await sendMsgProc() + await sleepAsync(20.millis) + + var endTime = Moment.now() + var elapsed: Duration = (endTime - startTime) + await sleepAsync(tokenPeriod - elapsed + firstWaitExtend) + firstWaitEXtend = 100.millis + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitch.stop()) + + asyncTest "push message with rate limit reject": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + var handlerFuture = newFuture[(string, WakuMessage)]() + let handler = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + handlerFuture.complete((pubsubTopic, message)) + return ok() + + let + server = await newTestWakuLegacyLightpushNode( + serverSwitch, handler, some((3, 500.millis)) + ) + client = newTestWakuLegacyLightpushClient(clientSwitch) + + let serverPeerId = serverSwitch.peerInfo.toRemotePeerInfo() + let topic = DefaultPubsubTopic + + let successProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + discard await handlerFuture.withTimeout(10.millis) + + check: + requestRes.isOk() + handlerFuture.finished() + let (handledMessagePubsubTopic, handledMessage) = handlerFuture.read() + check: + handledMessagePubsubTopic == DefaultPubsubTopic + handledMessage == message + + let rejectProc = proc(): Future[void] {.async.} = + let message = fakeWakuMessage() + handlerFuture = newFuture[(string, WakuMessage)]() + let requestRes = + await client.publish(DefaultPubsubTopic, message, peer = serverPeerId) + discard await handlerFuture.withTimeout(10.millis) + + check: + requestRes.isErr() + requestRes.error == 
"TOO_MANY_REQUESTS" + + for testCnt in 0 .. 2: + await successProc() + await sleepAsync(20.millis) + + await rejectProc() + + await sleepAsync(500.millis) + + ## next one shall succeed due to the rate limit time window has passed + await successProc() + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitch.stop()) diff --git a/third-party/nwaku/tests/waku_peer_exchange/test_all.nim b/third-party/nwaku/tests/waku_peer_exchange/test_all.nim new file mode 100644 index 0000000..903b47c --- /dev/null +++ b/third-party/nwaku/tests/waku_peer_exchange/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_protocol, ./test_rpc_codec diff --git a/third-party/nwaku/tests/waku_peer_exchange/test_protocol.nim b/third-party/nwaku/tests/waku_peer_exchange/test_protocol.nim new file mode 100644 index 0000000..1d10cf2 --- /dev/null +++ b/third-party/nwaku/tests/waku_peer_exchange/test_protocol.nim @@ -0,0 +1,456 @@ +{.used.} + +import + std/[options, sequtils, net], + testutils/unittests, + chronos, + libp2p/[switch, peerId, crypto/crypto], + eth/[keys, p2p/discoveryv5/enr] + +import + waku/[ + waku_node, + node/peer_manager, + discovery/waku_discv5, + waku_peer_exchange, + waku_peer_exchange/rpc, + waku_peer_exchange/rpc_codec, + waku_peer_exchange/protocol, + waku_peer_exchange/client, + node/peer_manager, + waku_core, + common/enr/builder, + waku_enr/sharding, + ], + ../testlib/[wakucore, wakunode, assertions], + ./utils.nim + +suite "Waku Peer Exchange": + # Some of this tests use node.wakuPeerExchange instead of just a standalone PeerExchange. + # This is because attempts to connect the switches for two standalones PeerExchanges failed. 
+ # TODO: Try to make the tests work with standalone PeerExchanges + + suite "request": + asyncTest "Retrieve and provide peer exchange peers from discv5": + ## Given (copied from test_waku_discv5.nim) + let + # todo: px flag + flags = CapabilitiesBitfield.init( + lightpush = false, filter = false, store = false, relay = true + ) + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + + nodeKey1 = generateSecp256k1Key() + nodeTcpPort1 = Port(64010) + nodeUdpPort1 = Port(9000) + node1 = newTestWakuNode( + nodeKey1, + bindIp, + nodeTcpPort1, + some(extIp), + wakuFlags = some(flags), + discv5UdpPort = some(nodeUdpPort1), + ) + + nodeKey2 = generateSecp256k1Key() + nodeTcpPort2 = Port(64012) + nodeUdpPort2 = Port(9002) + node2 = newTestWakuNode( + nodeKey2, + bindIp, + nodeTcpPort2, + some(extIp), + wakuFlags = some(flags), + discv5UdpPort = some(nodeUdpPort2), + ) + + nodeKey3 = generateSecp256k1Key() + nodeTcpPort3 = Port(64014) + nodeUdpPort3 = Port(9004) + node3 = newTestWakuNode( + nodeKey3, + bindIp, + nodeTcpPort3, + some(extIp), + wakuFlags = some(flags), + discv5UdpPort = some(nodeUdpPort3), + ) + + # discv5 + let conf1 = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: bindIp, + port: nodeUdpPort1, + privateKey: keys.PrivateKey(nodeKey1.skkey), + bootstrapRecords: @[], + autoupdateRecord: true, + ) + + let disc1 = + WakuDiscoveryV5.new(node1.rng, conf1, some(node1.enr), some(node1.peerManager)) + + let conf2 = WakuDiscoveryV5Config( + discv5Config: none(DiscoveryConfig), + address: bindIp, + port: nodeUdpPort2, + privateKey: keys.PrivateKey(nodeKey2.skkey), + bootstrapRecords: @[disc1.protocol.getRecord()], + autoupdateRecord: true, + ) + + let disc2 = + WakuDiscoveryV5.new(node2.rng, conf2, some(node2.enr), some(node2.peerManager)) + + await allFutures(node1.start(), node2.start(), node3.start()) + let resultDisc1StartRes = await disc1.start() + assert resultDisc1StartRes.isOk(), resultDisc1StartRes.error + let 
resultDisc2StartRes = await disc2.start() + assert resultDisc2StartRes.isOk(), resultDisc2StartRes.error + + ## When + var attempts = 10 + while (disc1.protocol.nodesDiscovered < 1 or disc2.protocol.nodesDiscovered < 1) and + attempts > 0: + await sleepAsync(1.seconds) + attempts -= 1 + + # node2 can be connected, so will be returned by peer exchange + require ( + await node1.peerManager.connectPeer(node2.switch.peerInfo.toRemotePeerInfo()) + ) + + # Mount peer exchange + await node1.mountPeerExchange() + await node3.mountPeerExchange() + + let dialResponse = + await node3.dialForPeerExchange(node1.switch.peerInfo.toRemotePeerInfo()) + let response = dialResponse.get() + + ## Then + check: + response.get().peerInfos.len == 1 + response.get().peerInfos[0].enr == disc2.protocol.localNode.record.raw + + await allFutures( + [node1.stop(), node2.stop(), node3.stop(), disc1.stop(), disc2.stop()] + ) + + asyncTest "Request returns some discovered peers": + let + node1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + # Start and mount peer exchange + await allFutures([node1.start(), node2.start()]) + await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()]) + + # Create connection + let connOpt = await node2.peerManager.dialPeer( + node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec + ) + require: + connOpt.isSome + + # Create some enr and add to peer exchange (simulating disv5) + var enr1, enr2 = enr.Record() + check enr1.fromUri( + "enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" + ) + check enr2.fromUri( + 
"enr:-Iu4QGJllOWlviPIh_SGR-VVm55nhnBIU5L-s3ran7ARz_4oDdtJPtUs3Bc5aqZHCiPQX6qzNYF2ARHER0JPX97TFbEBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQP3ULycvday4EkvtVu0VqbBdmOkbfVLJx8fPe0lE_dRkIN0Y3CC6mCFd2FrdTIB" + ) + + # Mock that we have discovered these enrs + node1.wakuPeerExchange.enrCache.add(enr1) + node1.wakuPeerExchange.enrCache.add(enr2) + + # Request 2 peer from px. Test all request variants + let response1 = await node2.wakuPeerExchangeClient.request(2) + let response2 = + await node2.wakuPeerExchangeClient.request(2, node1.peerInfo.toRemotePeerInfo()) + let response3 = await node2.wakuPeerExchangeClient.request(2, connOpt.get()) + + # Check the response or dont even continue + require: + response1.isOk + response2.isOk + response3.isOk + + check: + response1.get().peerInfos.len == 2 + response2.get().peerInfos.len == 2 + response3.get().peerInfos.len == 2 + + # Since it can return duplicates test that at least one of the enrs is in the response + response1.get().peerInfos.anyIt(it.enr == enr1.raw) or + response1.get().peerInfos.anyIt(it.enr == enr2.raw) + response2.get().peerInfos.anyIt(it.enr == enr1.raw) or + response2.get().peerInfos.anyIt(it.enr == enr2.raw) + response3.get().peerInfos.anyIt(it.enr == enr1.raw) or + response3.get().peerInfos.anyIt(it.enr == enr2.raw) + + asyncTest "Request fails gracefully": + let + node1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + # Start and mount peer exchange + await allFutures([node1.start(), node2.start()]) + await allFutures([node1.mountPeerExchange(), node2.mountPeerExchange()]) + + # Create connection + let connOpt = await node2.peerManager.dialPeer( + node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec + ) + require connOpt.isSome + + # Force closing the connection to simulate a failed peer + await connOpt.get().close() + + # Request 2 peer from px + let response = await 
node1.wakuPeerExchangeClient.request(2, connOpt.get()) + + # Check that it failed gracefully + check: + response.isErr + response.error.status_code == PeerExchangeResponseStatusCode.SERVICE_UNAVAILABLE + + asyncTest "Request 0 peers, with 0 peers in PeerExchange": + # Given a disconnected PeerExchange + let + switch = newTestSwitch() + peerManager = PeerManager.new(switch) + peerExchangeClient = WakuPeerExchangeClient.new(peerManager) + + # When requesting 0 peers + let response = await peerExchangeClient.request(0) + + # Then the response should be an error + check: + response.isErr + response.error.status_code == PeerExchangeResponseStatusCode.SERVICE_UNAVAILABLE + + asyncTest "Pool filtering": + let + key1 = generateSecp256k1Key() + key2 = generateSecp256k1Key() + cluster: Option[uint16] = some(uint16(16)) + bindIp = parseIpAddress("0.0.0.0") + nodeTcpPort = Port(64010) + nodeUdpPort = Port(9000) + + var + builder1 = EnrBuilder.init(key1) + builder2 = EnrBuilder.init(key2) + + builder1.withIpAddressAndPorts(some(bindIp), some(nodeTcpPort), some(nodeUdpPort)) + builder2.withIpAddressAndPorts(some(bindIp), some(nodeTcpPort), some(nodeUdpPort)) + builder1.withShardedTopics(@["/waku/2/rs/1/7"]).expect("valid topic") + builder2.withShardedTopics(@["/waku/2/rs/16/32"]).expect("valid topic") + + let + enr1 = builder1.build().expect("valid ENR") + enr2 = builder2.build().expect("valid ENR") + + var + peerInfo1 = enr1.toRemotePeerInfo().expect("valid PeerInfo") + peerInfo2 = enr2.toRemotePeerInfo().expect("valid PeerInfo") + + peerInfo1.origin = PeerOrigin.Discv5 + peerInfo2.origin = PeerOrigin.Discv5 + + check: + not poolFilter(cluster, peerInfo1) + poolFilter(cluster, peerInfo2) + + asyncTest "Request 0 peers, with 1 peer in PeerExchange": + # Given two valid nodes with PeerExchange + let + node1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + # 
Start and mount peer exchange + await allFutures([node1.start(), node2.start()]) + await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()]) + + # Connect the nodes + let dialResponse = await node2.peerManager.dialPeer( + node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec + ) + assert dialResponse.isSome + + # Mock that we have discovered one enr + var record = enr.Record() + check record.fromUri( + "enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" + ) + node1.wakuPeerExchange.enrCache.add(record) + + # When requesting 0 peers + let response = await node2.wakuPeerExchangeClient.request(0) + + # Then the response should be empty + assertResultOk(response) + check response.get().peerInfos.len == 0 + + asyncTest "Request with invalid peer info": + # Given two valid nodes with PeerExchange + let + node1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + # Start and mount peer exchange + await allFutures([node1.start(), node2.start()]) + await allFutures([node1.mountPeerExchangeClient(), node2.mountPeerExchange()]) + + # Mock that we have discovered one enr + var record = enr.Record() + check record.fromUri( + "enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" + ) + node2.wakuPeerExchange.enrCache.add(record) + + # When making any request with an invalid peer info + var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo() + remotePeerInfo2.peerId.data.add(255.byte) + let response = await node1.wakuPeerExchangeClient.request(1, remotePeerInfo2) + + # Then the response should be an error + check: + response.isErr + 
response.error.status_code == PeerExchangeResponseStatusCode.DIAL_FAILURE + + asyncTest "Connections are closed after response is sent": + # Create 3 nodes + let nodes = toSeq(0 ..< 3).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountPeerExchange())) + await allFutures(nodes.mapIt(it.mountPeerExchangeClient())) + + # Multiple nodes request to node 0 + for i in 1 ..< 3: + let resp = await nodes[i].wakuPeerExchangeClient.request( + 2, nodes[0].switch.peerInfo.toRemotePeerInfo() + ) + require resp.isOk + + # Wait for streams to be closed + await sleepAsync(1.seconds) + + # Check that all streams are closed for px + check: + nodes[0].peerManager.getNumStreams(WakuPeerExchangeCodec) == (0, 0) + nodes[1].peerManager.getNumStreams(WakuPeerExchangeCodec) == (0, 0) + nodes[2].peerManager.getNumStreams(WakuPeerExchangeCodec) == (0, 0) + + suite "Protocol Handler": + asyncTest "Works as expected": + let + node1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + # Start and mount peer exchange + await allFutures([node1.start(), node2.start()]) + await allFutures([node1.mountPeerExchange(), node2.mountPeerExchange()]) + + # Mock that we have discovered these enrs + var enr1 = enr.Record() + check enr1.fromUri( + "enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" + ) + node1.wakuPeerExchange.enrCache.add(enr1) + + # Create connection + let connOpt = await node2.peerManager.dialPeer( + node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec + ) + require connOpt.isSome + let conn = connOpt.get() + + # Send bytes so that they directly hit the handler + let rpc = 
PeerExchangeRpc.makeRequest(1) + + var buffer: seq[byte] + await conn.writeLP(rpc.encode().buffer) + buffer = await conn.readLp(DefaultMaxRpcSize.int) + + # Decode the response + let decodedBuff = PeerExchangeRpc.decode(buffer) + require decodedBuff.isOk + + # Check we got back the enr we mocked + check: + decodedBuff.get().response.status_code == PeerExchangeResponseStatusCode.SUCCESS + decodedBuff.get().response.peerInfos.len == 1 + decodedBuff.get().response.peerInfos[0].enr == enr1.raw + + asyncTest "RateLimit as expected": + let + node1 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + node2 = + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + + # Start and mount peer exchange + await allFutures([node1.start(), node2.start()]) + await allFutures( + [ + node1.mountPeerExchange(rateLimit = (1, 150.milliseconds)), + node2.mountPeerExchangeClient(), + ] + ) + + # Create connection + let connOpt = await node2.peerManager.dialPeer( + node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec + ) + require: + connOpt.isSome + + # Create some enr and add to peer exchange (simulating disv5) + var enr1, enr2 = enr.Record() + check enr1.fromUri( + "enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" + ) + check enr2.fromUri( + "enr:-Iu4QGJllOWlviPIh_SGR-VVm55nhnBIU5L-s3ran7ARz_4oDdtJPtUs3Bc5aqZHCiPQX6qzNYF2ARHER0JPX97TFbEBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQP3ULycvday4EkvtVu0VqbBdmOkbfVLJx8fPe0lE_dRkIN0Y3CC6mCFd2FrdTIB" + ) + + # Mock that we have discovered these enrs + node1.wakuPeerExchange.enrCache.add(enr1) + node1.wakuPeerExchange.enrCache.add(enr2) + + await sleepAsync(150.milliseconds) + + # Request 2 peer from px. 
Test all request variants + let response1 = await node2.wakuPeerExchangeClient.request(1) + check: + response1.isOk + response1.get().peerInfos.len == 1 + + let response2 = + await node2.wakuPeerExchangeClient.request(1, node1.peerInfo.toRemotePeerInfo()) + check: + response2.isErr + response2.error().status_code == PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS + + await sleepAsync(150.milliseconds) + let response3 = await node2.wakuPeerExchangeClient.request(1, connOpt.get()) + check: + response3.isOk + response3.get().peerInfos.len == 1 diff --git a/third-party/nwaku/tests/waku_peer_exchange/test_rpc_codec.nim b/third-party/nwaku/tests/waku_peer_exchange/test_rpc_codec.nim new file mode 100644 index 0000000..84aec7e --- /dev/null +++ b/third-party/nwaku/tests/waku_peer_exchange/test_rpc_codec.nim @@ -0,0 +1,73 @@ +{.used.} + +import + std/[options, net], + testutils/unittests, + chronos, + libp2p/switch, + libp2p/peerId, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr + +import + waku/[ + node/peer_manager, + discovery/waku_discv5, + waku_peer_exchange/rpc, + waku_peer_exchange/rpc_codec, + ], + ../testlib/[wakucore] + +suite "Peer Exchange RPC": + asyncTest "Encode - Decode": + # Setup + let rpcReq = PeerExchangeRpc.makeRequest(2) + let rpcReqBuffer: seq[byte] = rpcReq.encode().buffer + let resReq = PeerExchangeRpc.decode(rpcReqBuffer) + + check: + resReq.isOk + resReq.get().request.numPeers == 2 + + var + enr1 = enr.Record(seqNum: 0, raw: @[]) + enr2 = enr.Record(seqNum: 0, raw: @[]) + + check: + enr1.fromUri( + "enr:-JK4QPmO-sE2ELiWr8qVFs1kaY4jQZQpNaHvSPRmKiKcaDoqYRdki2c1BKSliImsxFeOD_UHnkddNL2l0XT9wlsP0WEBgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQIMwKqlOl3zpwnrsKRKHuWPSuFzit1Cl6IZvL2uzBRe8oN0Y3CC6mKDdWRwgiMqhXdha3UyDw" + ) + enr2.fromUri( + "enr:-Iu4QK_T7kzAmewG92u1pr7o6St3sBqXaiIaWIsFNW53_maJEaOtGLSN2FUbm6LmVxSfb1WfC7Eyk-nFYI7Gs3SlchwBgmlkgnY0gmlwhI5d6VKJc2VjcDI1NmsxoQLPYQDvrrFdCrhqw3JuFaGD71I8PtPfk6e7TJ3pg_vFQYN0Y3CC6mKDdWRwgiMq" + ) + + let 
peerInfos = + @[PeerExchangePeerInfo(enr: enr1.raw), PeerExchangePeerInfo(enr: enr2.raw)] + let rpc = PeerExchangeRpc.makeResponse(peerInfos) + + # When encoding and decoding + let rpcBuffer: seq[byte] = rpc.encode().buffer + let res = PeerExchangeRpc.decode(rpcBuffer) + + # Then the peerInfos match the originals + check: + res.isOk + res.get().response.status_code == PeerExchangeResponseStatusCode.SUCCESS + res.get().response.peerInfos == peerInfos + + # When using the decoded responses to create new enrs + var + resEnr1 = enr.Record(seqNum: 0, raw: @[]) + resEnr2 = enr.Record(seqNum: 0, raw: @[]) + + check: + res.get().response.status_code == PeerExchangeResponseStatusCode.SUCCESS + + discard resEnr1.fromBytes(res.get().response.peerInfos[0].enr) + discard resEnr2.fromBytes(res.get().response.peerInfos[1].enr) + + # Then they match the original enrs + check: + resEnr1 == enr1 + resEnr2 == enr2 diff --git a/third-party/nwaku/tests/waku_peer_exchange/utils.nim b/third-party/nwaku/tests/waku_peer_exchange/utils.nim new file mode 100644 index 0000000..ce7660b --- /dev/null +++ b/third-party/nwaku/tests/waku_peer_exchange/utils.nim @@ -0,0 +1,53 @@ +{.used.} + +import + std/options, + testutils/unittests, + chronos, + libp2p/switch, + libp2p/peerId, + libp2p/crypto/crypto, + eth/keys, + eth/p2p/discoveryv5/enr + +import + waku/[ + waku_node, + discovery/waku_discv5, + waku_peer_exchange, + waku_peer_exchange/rpc, + waku_peer_exchange/protocol, + waku_peer_exchange/client, + node/peer_manager, + waku_core, + ], + ../testlib/[futures, wakucore, assertions] + +proc dialForPeerExchange*( + client: WakuNode, + peerInfo: RemotePeerInfo, + requestedPeers: uint64 = 1, + minimumPeers: uint64 = 0, + attempts: uint64 = 100, +): Future[Result[WakuPeerExchangeResult[PeerExchangeResponse], string]] {.async.} = + # Dials a peer and awaits until it's able to receive a peer exchange response + # For the test, the relevant part is the dialPeer call. 
+ # But because the test needs peers, and due to the asynchronous nature of the dialing, + # we await until we receive peers from the peer exchange protocol. + var attempts = attempts + + while attempts > 0: + let connOpt = await client.peerManager.dialPeer(peerInfo, WakuPeerExchangeCodec) + require connOpt.isSome() + await sleepAsync(FUTURE_TIMEOUT_SHORT) + + let response = + await client.wakuPeerExchangeClient.request(requestedPeers, connOpt.get()) + assertResultOk(response) + + if uint64(response.get().peerInfos.len) > minimumPeers: + return ok(response) + + attempts -= 1 + + return err("Attempts exhausted.") diff --git a/third-party/nwaku/tests/waku_relay/crypto_utils.nim b/third-party/nwaku/tests/waku_relay/crypto_utils.nim new file mode 100644 index 0000000..ef3f3a8 --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/crypto_utils.nim @@ -0,0 +1,44 @@ +# Source: nimcrypto/examples/cfb.nim + +import nimcrypto + +proc cfbEncode*(key: string, iv: string, data: string): seq[byte] = + var context: CFB[aes256] + var pKey: array[aes256.sizeKey, byte] + var pIv: array[aes256.sizeBlock, byte] + var pData = newSeq[byte](len(data)) + var encodedData = newSeq[byte](len(data)) + + copyMem(addr pData[0], unsafeAddr data[0], len(data)) + # WARNING! Do not use 0 byte padding in applications, this is done as example. + copyMem(addr pKey[0], unsafeAddr key[0], len(key)) + copyMem(addr pIv[0], unsafeAddr iv[0], len(iv)) + + # Initialization of CFB[aes256] context with encryption key + context.init(pKey, pIv) + # Encryption process + context.encrypt(pData, encodedData) + # Clear context of CFB[aes256] + context.clear() + + return encodedData + +proc cfbDecode*(key: string, iv: string, encodedData: seq[byte]): seq[byte] = + var context: CFB[aes256] + var pKey: array[aes256.sizeKey, byte] + var pIv: array[aes256.sizeBlock, byte] + var decodedData = newSeq[byte](len(encodedData)) + + # copyMem(addr _data[0], addr data[0], len(data)) + # WARNING! 
Do not use 0 byte padding in applications, this is done as example. + copyMem(addr pKey[0], unsafeAddr key[0], len(key)) + copyMem(addr pIv[0], unsafeAddr iv[0], len(iv)) + + # Initialization of CFB[aes256] context with encryption key + context.init(pKey, pIv) + # Decryption process + context.decrypt(encodedData, decodedData) + # Clear context of CFB[aes256] + context.clear() + + return decodedData diff --git a/third-party/nwaku/tests/waku_relay/resources/test_cert.pem b/third-party/nwaku/tests/waku_relay/resources/test_cert.pem new file mode 100644 index 0000000..f267ee9 --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/resources/test_cert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIUUdcusjDd3XQi3FPM8urdFG3qI+8wDQYJKoZIhvcNAQEL +BQAwXzELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQz +ODA4MB4XDTIwMTAxMjIxNDUwMVoXDTMwMTAxMDIxNDUwMVowXzELMAkGA1UEBhMC +QVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoMGEludGVybmV0IFdpZGdp +dHMgUHR5IEx0ZDEYMBYGA1UEAwwPMTI3LjAuMC4xOjQzODA4MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+7VxiyjCDMzhrVc1IsWsdf37ZUi3KRAJGTD +kboggp2I7SCYRmXc0RWQvHwZXwCWaSyADn19i9n9SWbsKcA5DiLzaijAa5eTL5Je +Wsz09K7Z47sK3KjeTRbW5mTyhWL243sdmlxqo5eTFp0CLP2QPvg1RF5mjfaA8XGb +L0TAyn6xkH/Yi3qFS+OxsfSqajag/ySyJ+C6YnrplcmInZurpEzqh0b61pt+sulb +yluExPe0x2nFC5pHmobU3/MwyW24eBDvoIn4MICdlgVEtPuYTHFrvNddZl5mp5Tl +ZXKFvHx/EZj4y9XitWd490lRxH+6FmbSFVuYBgNI0J3wOCudUQIDAQABo1MwUTAd +BgNVHQ4EFgQUBKha84woY5WkFxKw7qx1cONg1H8wHwYDVR0jBBgwFoAUBKha84wo +Y5WkFxKw7qx1cONg1H8wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AQEAHZMYt9Ry+Xj3vTbzpGFQzYQVTJlfJWSN6eWNOivRFQE5io9kOBEe5noa8aLo +dLkw6ztxRP2QRJmlhGCO9/HwS17ckrkgZp3EC2LFnzxcBmoZu+owfxOT1KqpO52O +IKOl8eVohi1pEicE4dtTJVcpI7VCMovnXUhzx1Ci4Vibns4a6H+BQa19a1JSpifN +tO8U5jkjJ8Jprs/VPFhJj2O3di53oDHaYSE5eOrm2ZO14KFHSk9cGcOGmcYkUv8B +nV5vnGadH5Lvfxb/BCpuONabeRdOxMt9u9yQ89vNpxFtRdZDCpGKZBCfmUP+5m3m +N8r5CwGcIX/XPC3lKazzbZ8baA== +-----END 
CERTIFICATE----- diff --git a/third-party/nwaku/tests/waku_relay/resources/test_key.pem b/third-party/nwaku/tests/waku_relay/resources/test_key.pem new file mode 100644 index 0000000..7aa77f6 --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/resources/test_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCn7tXGLKMIMzOG +tVzUixax1/ftlSLcpEAkZMORuiCCnYjtIJhGZdzRFZC8fBlfAJZpLIAOfX2L2f1J +ZuwpwDkOIvNqKMBrl5Mvkl5azPT0rtnjuwrcqN5NFtbmZPKFYvbjex2aXGqjl5MW +nQIs/ZA++DVEXmaN9oDxcZsvRMDKfrGQf9iLeoVL47Gx9KpqNqD/JLIn4LpieumV +yYidm6ukTOqHRvrWm36y6VvKW4TE97THacULmkeahtTf8zDJbbh4EO+gifgwgJ2W +BUS0+5hMcWu8111mXmanlOVlcoW8fH8RmPjL1eK1Z3j3SVHEf7oWZtIVW5gGA0jQ +nfA4K51RAgMBAAECggEANZ7/R13tWKrwouy6DWuz/WlWUtgx333atUQvZhKmWs5u +cDjeJmxUC7b1FhoSB9GqNT7uTLIpKkSaqZthgRtNnIPwcU890Zz+dEwqMJgNByvl +it+oYjjRco/+YmaNQaYN6yjelPE5Y678WlYb4b29Fz4t0/zIhj/VgEKkKH2tiXpS +TIicoM7pSOscEUfaW3yp5bS5QwNU6/AaF1wws0feBACd19ZkcdPvr52jopbhxlXw +h3XTV/vXIJd5zWGp0h/Jbd4xcD4MVo2GjfkeORKY6SjDaNzt8OGtePcKnnbUVu8b +2XlDxukhDQXqJ3g0sHz47mhvo4JeIM+FgymRm+3QmQKBgQDTawrEA3Zy9WvucaC7 +Zah02oE9nuvpF12lZ7WJh7+tZ/1ss+Fm7YspEKaUiEk7nn1CAVFtem4X4YCXTBiC +Oqq/o+ipv1yTur0ae6m4pwLm5wcMWBh3H5zjfQTfrClNN8yjWv8u3/sq8KesHPnT +R92/sMAptAChPgTzQphWbxFiYwKBgQDLWFaBqXfZYVnTyUvKX8GorS6jGWc6Eh4l +lAFA+2EBWDICrUxsDPoZjEXrWCixdqLhyehaI3KEFIx2bcPv6X2c7yx3IG5lA/Gx +TZiKlY74c6jOTstkdLW9RJbg1VUHUVZMf/Owt802YmEfUI5S5v7jFmKW6VG+io+K ++5KYeHD1uwKBgQDMf53KPA82422jFwYCPjLT1QduM2q97HwIomhWv5gIg63+l4BP +rzYMYq6+vZUYthUy41OAMgyLzPQ1ZMXQMi83b7R9fTxvKRIBq9xfYCzObGnE5vHD +SDDZWvR75muM5Yxr9nkfPkgVIPMO6Hg+hiVYZf96V0LEtNjU9HWmJYkLQQKBgQCQ +ULGUdGHKtXy7AjH3/t3CiKaAupa4cANVSCVbqQy/l4hmvfdu+AbH+vXkgTzgNgKD +nHh7AI1Vj//gTSayLlQn/Nbh9PJkXtg5rYiFUn+VdQBo6yMOuIYDPZqXFtCx0Nge +kvCwisHpxwiG4PUhgS+Em259DDonsM8PJFx2OYRx4QKBgEQpGhg71Oi9MhPJshN7 +dYTowaMS5eLTk2264ARaY+hAIV7fgvUa+5bgTVaWL+Cfs33hi4sMRqlEwsmfds2T +cnQiJ4cU20Euldfwa5FLnk6LaWdOyzYt/ICBJnKFRwfCUbS4Bu5rtMEM+3t0wxnJ +IgaD04WhoL9EX0Qo3DC1+0kG 
+-----END PRIVATE KEY----- diff --git a/third-party/nwaku/tests/waku_relay/test_all.nim b/third-party/nwaku/tests/waku_relay/test_all.nim new file mode 100644 index 0000000..37ecd6b --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_message_id, ./test_protocol, ./test_wakunode_relay diff --git a/third-party/nwaku/tests/waku_relay/test_message_id.nim b/third-party/nwaku/tests/waku_relay/test_message_id.nim new file mode 100644 index 0000000..6dcd72a --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/test_message_id.nim @@ -0,0 +1,39 @@ +import + unittest, + results, + stew/byteutils, + nimcrypto/sha2, + libp2p/protocols/pubsub/rpc/messages + +import waku/waku_relay/message_id + +suite "Message ID Provider": + test "Non-empty string": + let message = Message(data: "Hello, world!".toBytes()) + let result = defaultMessageIdProvider(message) + let expected = MDigest[256].fromHex( + "315f5bdb76d078c43b8ac0064e4a0164612b1fce77c869345bfc94c75894edd3" + ) + check: + result.isOk() + result.get() == expected.data + + test "Empty string": + let message = Message(data: "".toBytes()) + let result = defaultMessageIdProvider(message) + let expected = MDigest[256].fromHex( + "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855" + ) + check: + result.isOk() + result.get() == expected.data + + test "Empty array": + let message = Message(data: @[]) + let result = defaultMessageIdProvider(message) + let expected = MDigest[256].fromHex( + "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855" + ) + check: + result.isOk() + result.get() == expected.data diff --git a/third-party/nwaku/tests/waku_relay/test_protocol.nim b/third-party/nwaku/tests/waku_relay/test_protocol.nim new file mode 100644 index 0000000..46032b6 --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/test_protocol.nim @@ -0,0 +1,1364 @@ +{.used.} + +import + std/[options, strformat], + testutils/unittests, + chronos, + 
libp2p/protocols/pubsub/[pubsub, gossipsub], + libp2p/[stream/connection, switch], + ./crypto_utils, + std/json + +import + waku/[ + node/peer_manager, + waku_relay/protocol, + waku_relay, + waku_core, + waku_core/message/codec, + ], + ../testlib/[wakucore, testasync, futures, sequtils], + ./utils, + ../resources/payloads + +suite "Waku Relay": + var messageSeq {.threadvar.}: seq[(PubsubTopic, WakuMessage)] + var handlerFuture {.threadvar.}: Future[(PubsubTopic, WakuMessage)] + var simpleFutureHandler {.threadvar.}: WakuRelayHandler + + var switch {.threadvar.}: Switch + var peerManager {.threadvar.}: PeerManager + var node {.threadvar.}: WakuRelay + + var remotePeerInfo {.threadvar.}: RemotePeerInfo + var peerId {.threadvar.}: PeerId + + var contentTopic {.threadvar.}: ContentTopic + var pubsubTopic {.threadvar.}: PubsubTopic + var pubsubTopicSeq {.threadvar.}: seq[PubsubTopic] + var testMessage {.threadvar.}: string + var wakuMessage {.threadvar.}: WakuMessage + + asyncSetup: + messageSeq = @[] + handlerFuture = newPushHandlerFuture() + simpleFutureHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, closure, gcsafe.} = + messageSeq.add((topic, msg)) + handlerFuture.complete((topic, msg)) + + switch = newTestSwitch() + peerManager = PeerManager.new(switch) + node = await newTestWakuRelay(switch) + + testMessage = "test-message" + contentTopic = DefaultContentTopic + pubsubTopic = DefaultPubsubTopic + pubsubTopicSeq = @[pubsubTopic] + wakuMessage = fakeWakuMessage(testMessage, pubsubTopic) + + await allFutures(switch.start()) + + remotePeerInfo = switch.peerInfo.toRemotePeerInfo() + peerId = remotePeerInfo.peerId + + asyncTeardown: + await allFutures(switch.stop()) + + suite "Subscribe": + asyncTest "Publish without Subscription": + # When publishing a message without being subscribed + discard await node.publish(pubsubTopic, wakuMessage) + + # Then the message is not published + check: + not await 
handlerFuture.withTimeout(FUTURE_TIMEOUT) + + asyncTest "Publish with Subscription (Network Size: 1)": + # When subscribing to a Pubsub Topic + + node.subscribe(pubsubTopic, simpleFutureHandler) + + # Then the node is subscribed + check: + node.isSubscribed(pubsubTopic) + node.subscribedTopics == pubsubTopicSeq + + # When publishing a message + discard await node.publish(pubsubTopic, wakuMessage) + + # Then the message is published + assert (await handlerFuture.withTimeout(FUTURE_TIMEOUT)) + let (topic, msg) = handlerFuture.read() + check: + topic == pubsubTopic + msg == wakuMessage + + asyncTest "Pubsub Topic Subscription (Network Size: 2, only one subscribed)": + # Given a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + + await allFutures(otherSwitch.start(), otherNode.start()) + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + check await peerManager.connectPeer(otherRemotePeerInfo) + + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + # When subscribing the second node to the Pubsub Topic + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + + # Then the second node is subscribed, but not the first one + check: + not node.isSubscribed(pubsubTopic) + node.subscribedTopics != pubsubTopicSeq + otherNode.isSubscribed(pubsubTopic) + otherNode.subscribedTopics == pubsubTopicSeq + + await sleepAsync(500.millis) + + # When publishing a message in the subscribed node + let fromOtherWakuMessage = fakeWakuMessage("fromOther") + discard await otherNode.publish(pubsubTopic, fromOtherWakuMessage) + + # Then the message is published only in the subscribed node + check: + not await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + let (otherTopic1, otherMessage1) = 
otherHandlerFuture.read() + check: + otherTopic1 == pubsubTopic + otherMessage1 == fromOtherWakuMessage + + # When publishing a message in the other node + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + let fromNodeWakuMessage = fakeWakuMessage("fromNode") + discard await node.publish(pubsubTopic, fromNodeWakuMessage) + + # Then the message is published only in the subscribed node + check: + not await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + let (otherTopic2, otherMessage2) = otherHandlerFuture.read() + check: + otherTopic2 == pubsubTopic + otherMessage2 == fromNodeWakuMessage + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "Pubsub Topic Subscription (Network Size: 2, both subscribed to same pubsub topic)": + # Given a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + + await allFutures(otherSwitch.start(), otherNode.start()) + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + check await peerManager.connectPeer(otherRemotePeerInfo) + + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + # When subscribing both nodes to the same Pubsub Topic + node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + + # Then both nodes are subscribed + check: + node.isSubscribed(pubsubTopic) + node.subscribedTopics == pubsubTopicSeq + otherNode.isSubscribed(pubsubTopic) + otherNode.subscribedTopics == pubsubTopicSeq + + await sleepAsync(500.millis) + + # When publishing a message in node + let fromOtherWakuMessage = fakeWakuMessage("fromOther") + discard await node.publish(pubsubTopic, fromOtherWakuMessage) + + # Then the message is 
published in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + let + (topic1, message1) = handlerFuture.read() + (otherTopic1, otherMessage1) = otherHandlerFuture.read() + check: + topic1 == pubsubTopic + message1 == fromOtherWakuMessage + otherTopic1 == pubsubTopic + otherMessage1 == fromOtherWakuMessage + + # When publishing a message in the other node + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + let fromNodeWakuMessage = fakeWakuMessage("fromNode") + discard await node.publish(pubsubTopic, fromNodeWakuMessage) + discard await otherNode.publish(pubsubTopic, fromNodeWakuMessage) + + # Then the message is published in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + + let + (topic2, message2) = handlerFuture.read() + (otherTopic2, otherMessage2) = otherHandlerFuture.read() + check: + topic2 == pubsubTopic + message2 == fromNodeWakuMessage + otherTopic2 == pubsubTopic + otherMessage2 == fromNodeWakuMessage + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "Refreshing subscription": + # Given a subscribed node + node.subscribe(pubsubTopic, simpleFutureHandler) + check: + node.isSubscribed(pubsubTopic) + node.subscribedTopics == pubsubTopicSeq + let otherWakuMessage = fakeWakuMessage("fromOther") + discard await node.publish(pubsubTopic, otherWakuMessage) + check: + messageSeq == @[(pubsubTopic, otherWakuMessage)] + + # Given the subscription is refreshed + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + node.subscribe(pubsubTopic, otherSimpleFutureHandler) + check: + node.isSubscribed(pubsubTopic) + node.subscribedTopics == pubsubTopicSeq + messageSeq == 
@[(pubsubTopic, otherWakuMessage)] + + # When publishing a message with the refreshed subscription + handlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, wakuMessage) + + # Then the message is published + check (await handlerFuture.withTimeout(FUTURE_TIMEOUT)) + let (topic, msg) = handlerFuture.read() + check: + topic == pubsubTopic + msg == wakuMessage + messageSeq == @[(pubsubTopic, otherWakuMessage), (pubsubTopic, wakuMessage)] + + asyncTest "With additional validator": + # Given a simple validator + var validatorFuture = newBoolFuture() + let len4Validator = proc( + pubsubTopic: string, message: WakuMessage + ): Future[ValidationResult] {.async.} = + if message.payload.len() == 8: + validatorFuture.complete(true) + return ValidationResult.Accept + else: + validatorFuture.complete(false) + return ValidationResult.Reject + + # And a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + + await allFutures(otherSwitch.start(), otherNode.start()) + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + check await peerManager.connectPeer(otherRemotePeerInfo) + + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + otherNode.addValidator(len4Validator) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + await sleepAsync(500.millis) + check: + otherNode.isSubscribed(pubsubTopic) + + # Given a subscribed node with a validator + node.addValidator(len4Validator) + node.subscribe(pubsubTopic, simpleFutureHandler) + await sleepAsync(500.millis) + check: + node.isSubscribed(pubsubTopic) + node.subscribedTopics == pubsubTopicSeq + otherNode.isSubscribed(pubsubTopic) + otherNode.subscribedTopics == pubsubTopicSeq + + # When publishing a message that doesn't match the validator + discard await 
node.publish(pubsubTopic, wakuMessage) + + # Then the validator is ran in the other node, and fails + # Not run in the self node + check: + await validatorFuture.withTimeout(FUTURE_TIMEOUT) + validatorFuture.read() == false + + # And the message is published in the self node, but not in the other node, + # because it doesn't pass the validator check. + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + not await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (topic1, msg1) = handlerFuture.read() + # let (otherTopic1, otherMsg1) = otherHandlerFuture.read() + check: + topic1 == pubsubTopic + msg1 == wakuMessage + # otherTopic1 == pubsubTopic + # otherMsg1 == wakuMessage + + # When publishing a message that matches the validator + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + validatorFuture = newBoolFuture() + let wakuMessage2 = fakeWakuMessage("12345678", pubsubTopic) + discard await node.publish(pubsubTopic, wakuMessage2) + + # Then the validator is ran in the other node, and succeeds + # Not run in the self node + check: + await validatorFuture.withTimeout(FUTURE_TIMEOUT) + validatorFuture.read() == true + + # And the message is published in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (topic2, msg2) = handlerFuture.read() + let (otherTopic2, otherMsg2) = otherHandlerFuture.read() + check: + topic2 == pubsubTopic + msg2 == wakuMessage2 + otherTopic2 == pubsubTopic + otherMsg2 == wakuMessage2 + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "Max Topic Size": + # NOT FOUND + discard + + asyncTest "Max subscriptions": + # NOT FOUND + discard + + asyncTest "Message encryption/decryption": + # Given a second node connected to the first one, both subscribed to the same Pubsub Topic + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + + 
await allFutures(otherSwitch.start(), otherNode.start()) + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + check await peerManager.connectPeer(otherRemotePeerInfo) + + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + node.subscribe(pubsubTopic, simpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + check: + node.isSubscribed(pubsubTopic) + node.subscribedTopics == pubsubTopicSeq + otherNode.isSubscribed(pubsubTopic) + otherNode.subscribedTopics == pubsubTopicSeq + + await sleepAsync(500.millis) + + # Given some crypto info + var key = "My fancy key" + var data = "Hello, Crypto!" + var iv = "0123456789ABCDEF" + + # When publishing an encrypted message + let encodedText = cfbEncode(key, iv, data) + let encodedWakuMessage = fakeWakuMessage(encodedText, pubsubTopic) + discard await node.publish(pubsubTopic, encodedWakuMessage) + + # Then the message is published in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + let (topic1, msg1) = handlerFuture.read() + let (otherTopic1, otherMsg1) = otherHandlerFuture.read() + check: + topic1 == pubsubTopic + msg1 == encodedWakuMessage + otherTopic1 == pubsubTopic + otherMsg1 == encodedWakuMessage + + # When decoding the message + let + decodedText = cfbDecode(key, iv, msg1.payload) + otherDecodedText = cfbDecode(key, iv, otherMsg1.payload) + + # Then the message is decrypted in both nodes + check: + decodedText.toString() == data + otherDecodedText.toString() == data + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "How multiple interconnected nodes work": + # Given two other pubsub topics + let + pubsubTopicB = "/waku/2/rs/0/1" + pubsubTopicC = "/waku/2/rs/0/2" + + # Given two other nodes connected to 
the first one + let + otherSwitch = newTestSwitch() + otherPeerManager = PeerManager.new(otherSwitch) + otherNode = await newTestWakuRelay(otherSwitch) + anotherSwitch = newTestSwitch() + anotherPeerManager = PeerManager.new(anotherSwitch) + anotherNode = await newTestWakuRelay(anotherSwitch) + + await allFutures( + otherSwitch.start(), + otherNode.start(), + anotherSwitch.start(), + anotherNode.start(), + ) + + let + otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + otherPeerId = otherRemotePeerInfo.peerId + anotherRemotePeerInfo = anotherSwitch.peerInfo.toRemotePeerInfo() + anotherPeerId = anotherRemotePeerInfo.peerId + + check: + await peerManager.connectPeer(otherRemotePeerInfo) + await peerManager.connectPeer(anotherRemotePeerInfo) + + # Given the first node is subscribed to two pubsub topics + var handlerFuture2 = newPushHandlerFuture() + proc simpleFutureHandler2( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + handlerFuture2.complete((topic, message)) + + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopicB, simpleFutureHandler2) + + # Given the other nodes are subscribed to two pubsub topics + var otherHandlerFuture1 = newPushHandlerFuture() + proc otherSimpleFutureHandler1( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture1.complete((topic, message)) + + var otherHandlerFuture2 = newPushHandlerFuture() + proc otherSimpleFutureHandler2( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture2.complete((topic, message)) + + var anotherHandlerFuture1 = newPushHandlerFuture() + proc anotherSimpleFutureHandler1( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + anotherHandlerFuture1.complete((topic, message)) + + var anotherHandlerFuture2 = newPushHandlerFuture() + proc anotherSimpleFutureHandler2( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + anotherHandlerFuture2.complete((topic, 
message)) + + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler1) + otherNode.subscribe(pubsubTopicC, otherSimpleFutureHandler2) + anotherNode.subscribe(pubsubTopicB, anotherSimpleFutureHandler1) + anotherNode.subscribe(pubsubTopicC, anotherSimpleFutureHandler2) + await sleepAsync(500.millis) + + # When publishing a message in node for each of the pubsub topics + let + fromNodeWakuMessage1 = fakeWakuMessage("fromNode1") + fromNodeWakuMessage2 = fakeWakuMessage("fromNode2") + fromNodeWakuMessage3 = fakeWakuMessage("fromNode3") + + discard await node.publish(pubsubTopic, fromNodeWakuMessage1) + discard await node.publish(pubsubTopicB, fromNodeWakuMessage2) + discard await node.publish(pubsubTopicC, fromNodeWakuMessage3) + + # Then the messages are published in all nodes (because it's published in the center node) + # Center meaning that all other nodes are connected to this one + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await handlerFuture2.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + await anotherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + await anotherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + + let + (topic1, msg1) = handlerFuture.read() + (topic2, msg2) = handlerFuture2.read() + (otherTopic1, otherMsg1) = otherHandlerFuture1.read() + (otherTopic2, otherMsg2) = otherHandlerFuture2.read() + (anotherTopic1, anotherMsg1) = anotherHandlerFuture1.read() + (anotherTopic2, anotherMsg2) = anotherHandlerFuture2.read() + + check: + topic1 == pubsubTopic + msg1 == fromNodeWakuMessage1 + topic2 == pubsubTopicB + msg2 == fromNodeWakuMessage2 + otherTopic1 == pubsubTopic + otherMsg1 == fromNodeWakuMessage1 + otherTopic2 == pubsubTopicC + otherMsg2 == fromNodeWakuMessage3 + anotherTopic1 == pubsubTopicB + anotherMsg1 == fromNodeWakuMessage2 + anotherTopic2 == pubsubTopicC + anotherMsg2 == fromNodeWakuMessage3 + + # Given anotherNode is completely 
disconnected from the first one + await anotherPeerManager.switch.disconnect(peerId) + await peerManager.switch.disconnect(anotherPeerId) + check: + not anotherPeerManager.switch.isConnected(peerId) + not peerManager.switch.isConnected(anotherPeerId) + + # When publishing a message in node for each of the pubsub topics + handlerFuture = newPushHandlerFuture() + handlerFuture2 = newPushHandlerFuture() + otherHandlerFuture1 = newPushHandlerFuture() + otherHandlerFuture2 = newPushHandlerFuture() + anotherHandlerFuture1 = newPushHandlerFuture() + anotherHandlerFuture2 = newPushHandlerFuture() + + let + fromNodeWakuMessage4 = fakeWakuMessage("fromNode4") + fromNodeWakuMessage5 = fakeWakuMessage("fromNode5") + fromNodeWakuMessage6 = fakeWakuMessage("fromNode6") + + discard await node.publish(pubsubTopic, fromNodeWakuMessage4) + discard await node.publish(pubsubTopicB, fromNodeWakuMessage5) + discard await node.publish(pubsubTopicC, fromNodeWakuMessage6) + + # Then the message is published in node and otherNode, + # but not in anotherNode because it is not connected anymore + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await handlerFuture2.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + not await anotherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + not await anotherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + + let + (topic3, msg3) = handlerFuture.read() + (topic4, msg4) = handlerFuture2.read() + (otherTopic3, otherMsg3) = otherHandlerFuture1.read() + (otherTopic4, otherMsg4) = otherHandlerFuture2.read() + + check: + topic3 == pubsubTopic + msg3 == fromNodeWakuMessage4 + topic4 == pubsubTopicB + msg4 == fromNodeWakuMessage5 + otherTopic3 == pubsubTopic + otherMsg3 == fromNodeWakuMessage4 + otherTopic4 == pubsubTopicC + otherMsg4 == fromNodeWakuMessage6 + + # When publishing a message in anotherNode for each of the pubsub topics + handlerFuture = 
newPushHandlerFuture() + handlerFuture2 = newPushHandlerFuture() + otherHandlerFuture1 = newPushHandlerFuture() + otherHandlerFuture2 = newPushHandlerFuture() + anotherHandlerFuture1 = newPushHandlerFuture() + anotherHandlerFuture2 = newPushHandlerFuture() + + let + fromAnotherNodeWakuMessage1 = fakeWakuMessage("fromAnotherNode1") + fromAnotherNodeWakuMessage2 = fakeWakuMessage("fromAnotherNode2") + fromAnotherNodeWakuMessage3 = fakeWakuMessage("fromAnotherNode3") + + discard await anotherNode.publish(pubsubTopic, fromAnotherNodeWakuMessage1) + discard await anotherNode.publish(pubsubTopicB, fromAnotherNodeWakuMessage2) + discard await anotherNode.publish(pubsubTopicC, fromAnotherNodeWakuMessage3) + + # Then the messages are only published in anotherNode because it's disconnected from + # the rest of the network + check: + not await handlerFuture.withTimeout(FUTURE_TIMEOUT) + not await handlerFuture2.withTimeout(FUTURE_TIMEOUT) + not await otherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + not await otherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + await anotherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + await anotherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + + let + (anotherTopic3, anotherMsg3) = anotherHandlerFuture1.read() + (anotherTopic4, anotherMsg4) = anotherHandlerFuture2.read() + + check: + anotherTopic3 == pubsubTopicB + anotherMsg3 == fromAnotherNodeWakuMessage2 + anotherTopic4 == pubsubTopicC + anotherMsg4 == fromAnotherNodeWakuMessage3 + + # When publishing a message in otherNode for each of the pubsub topics + handlerFuture = newPushHandlerFuture() + handlerFuture2 = newPushHandlerFuture() + otherHandlerFuture1 = newPushHandlerFuture() + otherHandlerFuture2 = newPushHandlerFuture() + anotherHandlerFuture1 = newPushHandlerFuture() + anotherHandlerFuture2 = newPushHandlerFuture() + + let + fromOtherNodeWakuMessage1 = fakeWakuMessage("fromOtherNode1") + fromOtherNodeWakuMessage2 = fakeWakuMessage("fromOtherNode2") + fromOtherNodeWakuMessage3 = 
fakeWakuMessage("fromOtherNode3") + + discard await otherNode.publish(pubsubTopic, fromOtherNodeWakuMessage1) + discard await otherNode.publish(pubsubTopicB, fromOtherNodeWakuMessage2) + discard await otherNode.publish(pubsubTopicC, fromOtherNodeWakuMessage3) + + # Then the messages are only published in otherNode and node, but not in anotherNode + # because it's disconnected from the rest of the network + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await handlerFuture2.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + not await anotherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + not await anotherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + + let + (topic5, msg5) = handlerFuture.read() + (topic6, msg6) = handlerFuture2.read() + (otherTopic5, otherMsg5) = otherHandlerFuture1.read() + (otherTopic6, otherMsg6) = otherHandlerFuture2.read() + + check: + topic5 == pubsubTopic + msg5 == fromOtherNodeWakuMessage1 + topic6 == pubsubTopicB + msg6 == fromOtherNodeWakuMessage2 + otherTopic5 == pubsubTopic + otherMsg5 == fromOtherNodeWakuMessage1 + otherTopic6 == pubsubTopicC + otherMsg6 == fromOtherNodeWakuMessage3 + + # Given anotherNode is reconnected, but to otherNode + check await anotherPeerManager.connectPeer(otherRemotePeerInfo) + check: + anotherPeerManager.switch.isConnected(otherPeerId) + otherPeerManager.switch.isConnected(anotherPeerId) + + # When publishing a message in anotherNode for each of the pubsub topics + handlerFuture = newPushHandlerFuture() + handlerFuture2 = newPushHandlerFuture() + otherHandlerFuture1 = newPushHandlerFuture() + otherHandlerFuture2 = newPushHandlerFuture() + anotherHandlerFuture1 = newPushHandlerFuture() + anotherHandlerFuture2 = newPushHandlerFuture() + + let + fromAnotherNodeWakuMessage4 = fakeWakuMessage("fromAnotherNode4") + fromAnotherNodeWakuMessage5 = fakeWakuMessage("fromAnotherNode5") + fromAnotherNodeWakuMessage6 = 
fakeWakuMessage("fromAnotherNode6") + + discard await anotherNode.publish(pubsubTopic, fromAnotherNodeWakuMessage4) + discard await anotherNode.publish(pubsubTopicB, fromAnotherNodeWakuMessage5) + discard await anotherNode.publish(pubsubTopicC, fromAnotherNodeWakuMessage6) + + # Then the messages are published in all nodes except in node's B topic, because + # even if they're connected like so AnotherNode <-> OtherNode <-> Node, + # otherNode doesn't broadcast B topic messages because it's not subscribed to it + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + not await handlerFuture2.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + await anotherHandlerFuture1.withTimeout(FUTURE_TIMEOUT) + await anotherHandlerFuture2.withTimeout(FUTURE_TIMEOUT) + + let + (topic7, msg7) = handlerFuture.read() + (otherTopic7, otherMsg7) = otherHandlerFuture1.read() + (otherTopic8, otherMsg8) = otherHandlerFuture2.read() + (anotherTopic7, anotherMsg7) = anotherHandlerFuture1.read() + (anotherTopic8, anotherMsg8) = anotherHandlerFuture2.read() + + check: + topic7 == pubsubTopic + msg7 == fromAnotherNodeWakuMessage4 + otherTopic7 == pubsubTopic + otherMsg7 == fromAnotherNodeWakuMessage4 + otherTopic8 == pubsubTopicC + otherMsg8 == fromAnotherNodeWakuMessage6 + anotherTopic7 == pubsubTopicB + anotherMsg7 == fromAnotherNodeWakuMessage5 + anotherTopic8 == pubsubTopicC + anotherMsg8 == fromAnotherNodeWakuMessage6 + + # Finally stop the other nodes + await allFutures( + otherSwitch.stop(), otherNode.stop(), anotherSwitch.stop(), anotherNode.stop() + ) + + suite "Unsubscribe": + asyncTest "Without Subscription": + # Given an external topic handler + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + await allFutures(otherSwitch.start(), otherNode.start()) + otherNode.subscribe(pubsubTopic, simpleFutureHandler) + + # Given a node without a subscription + 
check: + node.subscribedTopics == [] + + node.unsubscribe(pubsubTopic) + + # Then the node is still not subscribed + check: + node.subscribedTopics == [] + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "Single Node with Single Pubsub Topic": + # Given a node subscribed to a pubsub topic + node.subscribe(pubsubTopic, simpleFutureHandler) + check node.subscribedTopics == pubsubTopicSeq + + # When unsubscribing from the pubsub topic + node.unsubscribe(pubsubTopic) + + # Then the node is not subscribed anymore + check node.subscribedTopics == [] + + asyncTest "Single Node with Multiple Pubsub Topics": + # Given other pubsub topic + let pubsubTopicB = "/waku/2/rs/0/1" + + # Given a node subscribed to multiple pubsub topics + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopicB, simpleFutureHandler) + + assert pubsubTopic in node.subscribedTopics, + fmt"Node is not subscribed to {pubsubTopic}" + assert pubsubTopicB in node.subscribedTopics, + fmt"Node is not subscribed to {pubsubTopicB}" + + # When unsubscribing from one of the pubsub topics + node.unsubscribe(pubsubTopic) + + # Then the node is still subscribed to the other pubsub topic + check node.subscribedTopics == @[pubsubTopicB] + + # When unsubscribing from the other pubsub topic + node.unsubscribe(pubsubTopicB) + + # Then the node is not subscribed anymore + check node.subscribedTopics == [] + + suite "Unsubscribe All": + asyncTest "Without subscriptions": + # Given a node without subscriptions + check node.subscribedTopics == [] + + # When unsubscribing from all pubsub topics + node.unsubscribeAll(pubsubTopic) + + # Then the node is still not subscribed + check node.subscribedTopics == [] + + asyncTest "Single Node with Single Pubsub Topic": + # Given a node subscribed to a pubsub topic + node.subscribe(pubsubTopic, simpleFutureHandler) + check node.subscribedTopics == pubsubTopicSeq + + # When unsubscribing from all pubsub 
topics + node.unsubscribeAll(pubsubTopic) + + # Then the node is not subscribed anymore + check node.subscribedTopics == [] + + asyncTest "Single Node with Multiple Pubsub Topics": + # Given other pubsub topic + let pubsubTopicB = "/waku/2/rs/0/1" + + # Given a node subscribed to multiple pubsub topics + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + node.subscribe(pubsubTopicB, simpleFutureHandler) + + assert pubsubTopic in node.subscribedTopics, + fmt"Node is not subscribed to {pubsubTopic}" + assert pubsubTopicB in node.subscribedTopics, + fmt"Node is not subscribed to {pubsubTopicB}" + + # When unsubscribing all handlers from pubsubTopic + node.unsubscribeAll(pubsubTopic) + + # Then the node doesn't have pubsubTopic handlers + check node.subscribedTopics == @[pubsubTopicB] + + # When unsubscribing all handlers from pubsubTopicB + node.unsubscribeAll(pubsubTopicB) + + # Then the node is not subscribed to anything + check node.subscribedTopics == [] + + suite "Send & Retrieve Messages": + asyncTest "Valid Payload Types": + # Given a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + + await allFutures(otherSwitch.start(), otherNode.start()) + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + check await peerManager.connectPeer(otherRemotePeerInfo) + + # Given both are subscribed to the same pubsub topic + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + check: + node.subscribedTopics == pubsubTopicSeq + otherNode.subscribedTopics == pubsubTopicSeq + + await sleepAsync(500.millis) + + # Given some payloads + let + JSON_DICTIONARY = getSampleJsonDictionary() 
+ JSON_LIST = getSampleJsonList() + + # Given some valid messages + let + msg1 = fakeWakuMessage(contentTopic = contentTopic, payload = ALPHABETIC) + msg2 = fakeWakuMessage(contentTopic = contentTopic, payload = ALPHANUMERIC) + msg3 = + fakeWakuMessage(contentTopic = contentTopic, payload = ALPHANUMERIC_SPECIAL) + msg4 = fakeWakuMessage(contentTopic = contentTopic, payload = EMOJI) + msg5 = fakeWakuMessage(contentTopic = contentTopic, payload = CODE) + msg6 = fakeWakuMessage(contentTopic = contentTopic, payload = QUERY) + msg7 = + fakeWakuMessage(contentTopic = contentTopic, payload = ($JSON_DICTIONARY)) + msg8 = fakeWakuMessage(contentTopic = contentTopic, payload = ($JSON_LIST)) + msg9 = fakeWakuMessage(contentTopic = contentTopic, payload = TEXT_SMALL) + msg10 = fakeWakuMessage(contentTopic = contentTopic, payload = TEXT_LARGE) + + # When sending the alphabetic message + discard await node.publish(pubsubTopic, msg1) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg1) == handlerFuture.read() + (pubsubTopic, msg1) == otherHandlerFuture.read() + + # When sending the alphanumeric message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg2) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg2) == handlerFuture.read() + (pubsubTopic, msg2) == otherHandlerFuture.read() + + # When sending the alphanumeric special message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg3) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, 
msg3) == handlerFuture.read() + (pubsubTopic, msg3) == otherHandlerFuture.read() + + # When sending the emoji message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg4) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg4) == handlerFuture.read() + (pubsubTopic, msg4) == otherHandlerFuture.read() + + # When sending the code message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg5) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg5) == handlerFuture.read() + (pubsubTopic, msg5) == otherHandlerFuture.read() + + # When sending the query message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg6) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg6) == handlerFuture.read() + (pubsubTopic, msg6) == otherHandlerFuture.read() + + # When sending the JSON dictionary message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg7) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg7) == handlerFuture.read() + (pubsubTopic, msg7) == otherHandlerFuture.read() + + # When sending the JSON list message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg8) + + # 
Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg8) == handlerFuture.read() + (pubsubTopic, msg8) == otherHandlerFuture.read() + + # When sending the small text message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg9) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg9) == handlerFuture.read() + (pubsubTopic, msg9) == otherHandlerFuture.read() + + # When sending the large text message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg10) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg10) == handlerFuture.read() + (pubsubTopic, msg10) == otherHandlerFuture.read() + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "Valid Payload Sizes": + # Given a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + + await allFutures(otherSwitch.start(), otherNode.start()) + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + check await peerManager.connectPeer(otherRemotePeerInfo) + + # Given both are subscribed to the same pubsub topic + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + check: + node.subscribedTopics == 
pubsubTopicSeq + otherNode.subscribedTopics == pubsubTopicSeq + + await sleepAsync(500.millis) + + # Given some valid payloads + let + msgWithoutPayload = + fakeWakuMessage(contentTopic = contentTopic, payload = getByteSequence(0)) + sizeEmptyMsg = uint64(msgWithoutPayload.encode().buffer.len) + + let + msg1 = + fakeWakuMessage(contentTopic = contentTopic, payload = getByteSequence(1024)) + # 1KiB + msg2 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(10 * 1024) + ) # 10KiB + msg3 = fakeWakuMessage( + contentTopic = contentTopic, payload = getByteSequence(100 * 1024) + ) # 100KiB + msg4 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize - sizeEmptyMsg - 26), + ) # Max Size (Inclusive Limit) + msg5 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize - sizeEmptyMsg - 25), + ) # Max Size (Exclusive Limit) + msg6 = fakeWakuMessage( + contentTopic = contentTopic, + payload = getByteSequence(DefaultMaxWakuMessageSize), + ) # MaxWakuMessageSize -> Out of Max Size + + # Notice that the message is wrapped with more data in https://github.com/status-im/nim-libp2p/blob/3011ba4326fa55220a758838835797ff322619fc/libp2p/protocols/pubsub/gossipsub.nim#L627-L632 + # And therefore, we need to substract a hard-coded values above (for msg4 & msg5), obtained empirically, + # running the tests with 'TRACE' level: nim c -r -d:chronicles_log_level=DEBUG -d:release -d:postgres -d:rln --passL:librln_v0.3.4.a --passL:-lm -d:nimDebugDlOpen tests/waku_relay/test_protocol.nim test "Valid Payload Sizes" + + # When sending the 1KiB message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg1) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg1) == 
handlerFuture.read() + (pubsubTopic, msg1) == otherHandlerFuture.read() + + # When sending the 10KiB message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg2) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg2) == handlerFuture.read() + (pubsubTopic, msg2) == otherHandlerFuture.read() + + # When sending the 100KiB message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg3) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg3) == handlerFuture.read() + (pubsubTopic, msg3) == otherHandlerFuture.read() + + # When sending the 'DefaultMaxWakuMessageSize - sizeEmptyMsg - 38' message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg4) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg4) == handlerFuture.read() + (pubsubTopic, msg4) == otherHandlerFuture.read() + + # When sending the 'DefaultMaxWakuMessageSize - sizeEmptyMsg - 37' message + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg5) + + # Then the message is received in self, because there's no checking, but not in other node + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + not await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg5) == handlerFuture.read() + + # When sending the 'DefaultMaxWakuMessageSize' message + handlerFuture = newPushHandlerFuture() + 
otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg6) + + # Then the message is received in self, because there's no checking, but not in other node + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + not await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg6) == handlerFuture.read() + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "Multiple messages at once": + # Given a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherNode = await newTestWakuRelay(otherSwitch) + + await allFutures(otherSwitch.start(), otherNode.start()) + let otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + check await peerManager.connectPeer(otherRemotePeerInfo) + + # Given both are subscribed to the same pubsub topic + # Create a different handler than the default to include messages in a seq + var thisHandlerFuture = newPushHandlerFuture() + var thisMessageSeq: seq[(PubsubTopic, WakuMessage)] = @[] + proc thisSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + thisMessageSeq.add((topic, message)) + thisHandlerFuture.complete((topic, message)) + + var otherHandlerFuture = newPushHandlerFuture() + var otherMessageSeq: seq[(PubsubTopic, WakuMessage)] = @[] + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherMessageSeq.add((topic, message)) + otherHandlerFuture.complete((topic, message)) + + node.subscribe(pubsubTopic, thisSimpleFutureHandler) + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + check: + node.subscribedTopics == pubsubTopicSeq + otherNode.subscribedTopics == pubsubTopicSeq + await sleepAsync(500.millis) + + # When sending multiple messages from node + let + msg1 = fakeWakuMessage("msg1", pubsubTopic) + msg2 = fakeWakuMessage("msg2", pubsubTopic) + msg3 = fakeWakuMessage("msg3", pubsubTopic) + msg4 = 
fakeWakuMessage("msg4", pubsubTopic) + + discard await node.publish(pubsubTopic, msg1) + check await thisHandlerFuture.withTimeout(FUTURE_TIMEOUT) + check await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + thisHandlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg2) + check await thisHandlerFuture.withTimeout(FUTURE_TIMEOUT) + check await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + thisHandlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg3) + check await thisHandlerFuture.withTimeout(FUTURE_TIMEOUT) + check await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + thisHandlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + discard await node.publish(pubsubTopic, msg4) + + check: + await thisHandlerFuture.withTimeout(FUTURE_TIMEOUT) + thisMessageSeq == + @[ + (pubsubTopic, msg1), + (pubsubTopic, msg2), + (pubsubTopic, msg3), + (pubsubTopic, msg4), + ] + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + otherMessageSeq == + @[ + (pubsubTopic, msg1), + (pubsubTopic, msg2), + (pubsubTopic, msg3), + (pubsubTopic, msg4), + ] + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + suite "Security and Privacy": + asyncTest "Relay can receive messages after reboot and reconnect": + # Given a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherPeerManager = PeerManager.new(otherSwitch) + otherNode = await newTestWakuRelay(otherSwitch) + + await otherSwitch.start() + let + otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + otherPeerId = otherRemotePeerInfo.peerId + + check await peerManager.connectPeer(otherRemotePeerInfo) + + # Given both are subscribed to the same pubsub topic + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage 
+ ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + check: + node.subscribedTopics == pubsubTopicSeq + otherNode.subscribedTopics == pubsubTopicSeq + await sleepAsync(500.millis) + + # Given other node is stopped and restarted + await otherSwitch.stop() + await otherSwitch.start() + + check await peerManager.connectPeer(otherRemotePeerInfo) + + # FIXME: Once stopped and started, nodes are not considered connected, nor do they reconnect after running connectPeer, as below + # check await otherPeerManager.connectPeer(otherRemotePeerInfo) + + # When sending a message from node + let msg1 = fakeWakuMessage(testMessage, pubsubTopic) + discard await node.publish(pubsubTopic, msg1) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg1) == handlerFuture.read() + (pubsubTopic, msg1) == otherHandlerFuture.read() + + # When sending a message from other node + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + let msg2 = fakeWakuMessage(testMessage, pubsubTopic) + discard await otherNode.publish(pubsubTopic, msg2) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg2) == handlerFuture.read() + (pubsubTopic, msg2) == otherHandlerFuture.read() + + # Given node is stopped and restarted + await switch.stop() + await switch.start() + check await peerManager.connectPeer(otherRemotePeerInfo) + + # When sending a message from node + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + let msg3 = fakeWakuMessage(testMessage, pubsubTopic) + discard await node.publish(pubsubTopic, msg3) + + # Then the message is received in both 
nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg3) == handlerFuture.read() + (pubsubTopic, msg3) == otherHandlerFuture.read() + + # When sending a message from other node + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + let msg4 = fakeWakuMessage(testMessage, pubsubTopic) + discard await otherNode.publish(pubsubTopic, msg4) + + # Then the message is received in both nodes + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg4) == handlerFuture.read() + (pubsubTopic, msg4) == otherHandlerFuture.read() + + # Finally stop the other node + await allFutures(otherSwitch.stop(), otherNode.stop()) + + asyncTest "Relay can't receive messages after subscribing and stopping without unsubscribing": + # Given a second node connected to the first one + let + otherSwitch = newTestSwitch() + otherPeerManager = PeerManager.new(otherSwitch) + otherNode = await newTestWakuRelay(otherSwitch) + + await allFutures(otherSwitch.start(), otherNode.start()) + let + otherRemotePeerInfo = otherSwitch.peerInfo.toRemotePeerInfo() + otherPeerId = otherRemotePeerInfo.peerId + + check await peerManager.connectPeer(otherRemotePeerInfo) + + # Given both are subscribed to the same pubsub topic + var otherHandlerFuture = newPushHandlerFuture() + proc otherSimpleFutureHandler( + topic: PubsubTopic, message: WakuMessage + ) {.async, gcsafe.} = + otherHandlerFuture.complete((topic, message)) + + otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler) + node.subscribe(pubsubTopic, simpleFutureHandler) + check: + node.subscribedTopics == pubsubTopicSeq + otherNode.subscribedTopics == pubsubTopicSeq + + await sleepAsync(500.millis) + + # Given other node is stopped without unsubscribing + await allFutures(otherSwitch.stop(), otherNode.stop()) + + # When sending a message from node + let msg1 = 
fakeWakuMessage(testMessage, pubsubTopic) + discard await node.publish(pubsubTopic, msg1) + + # Then the message is not received in any node + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + not await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg1) == handlerFuture.read() + + # When sending a message from other node + handlerFuture = newPushHandlerFuture() + otherHandlerFuture = newPushHandlerFuture() + let msg2 = fakeWakuMessage(testMessage, pubsubTopic) + discard await otherNode.publish(pubsubTopic, msg2) + + # Then the message is received in both nodes + check: + not await handlerFuture.withTimeout(FUTURE_TIMEOUT) + await otherHandlerFuture.withTimeout(FUTURE_TIMEOUT) + (pubsubTopic, msg2) == otherHandlerFuture.read() diff --git a/third-party/nwaku/tests/waku_relay/test_wakunode_relay.nim b/third-party/nwaku/tests/waku_relay/test_wakunode_relay.nim new file mode 100644 index 0000000..2b4f326 --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/test_wakunode_relay.nim @@ -0,0 +1,704 @@ +{.used.} + +import + std/[os, sequtils, sysrand, math], + stew/byteutils, + testutils/unittests, + chronos, + libp2p/switch, + libp2p/protocols/pubsub/pubsub, + libp2p/protocols/pubsub/gossipsub +import + waku/[waku_core, node/peer_manager, waku_node, waku_relay], + ../testlib/testutils, + ../testlib/wakucore, + ../testlib/wakunode + +template sourceDir(): string = + currentSourcePath.parentDir() + +const KEY_PATH = sourceDir / "resources/test_key.pem" +const CERT_PATH = sourceDir / "resources/test_cert.pem" + +suite "WakuNode - Relay": + asyncTest "Relay protocol is started correctly": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + + # Relay protocol starts if mounted after node start + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + check: + GossipSub(node1.wakuRelay).heartbeatFut.isNil() == false + + # Relay protocol 
starts if mounted before node start + + let + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + check: + # Relay has not yet started as node has not yet started + GossipSub(node2.wakuRelay).heartbeatFut.isNil() + + await node2.start() + + check: + # Relay started on node start + GossipSub(node2.wakuRelay).heartbeatFut.isNil() == false + + await allFutures([node1.stop(), node2.stop()]) + + asyncTest "Messages are correctly relayed": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + shard = DefaultRelayShard + contentTopic = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message = WakuMessage(payload: payload, contentTopic: contentTopic) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node3.start() + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await allFutures( + node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]), + node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]), + ) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + msg.contentTopic == contentTopic + msg.payload == payload + msg.timestamp > 0 + completionFut.complete(true) + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + ## node1 and 
node2 explicitly subscribe to the same shard as node3 + node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + var res = await node1.publish(some($shard), message) + assert res.isOk(), $res.error + + ## Then + check: + (await completionFut.withTimeout(5.seconds)) == true + + ## Cleanup + await allFutures(node1.stop(), node2.stop(), node3.stop()) + + asyncTest "filtering relayed messages using topic validators": + ## test scenario: + ## node1 and node3 set node2 as their relay node + ## node3 publishes two messages with two different contentTopics but on the same pubsub topic + ## node1 is also subscribed to the same pubsub topic + ## node2 sets a validator for the same pubsub topic + ## only one of the messages gets delivered to node1 because the validator only validates one of the content topics + + let + # publisher node + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + # Relay node + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + # Subscriber + nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + + shard = DefaultRelayShard + contentTopic1 = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message1 = WakuMessage(payload: payload, contentTopic: contentTopic1) + + payload2 = "you should not see this message!".toBytes() + contentTopic2 = ContentTopic("2") + message2 = WakuMessage(payload: payload2, 
contentTopic: contentTopic2) + + # start all the nodes + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node3.start() + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFutValidatorAcc = newFuture[bool]() + var completionFutValidatorRej = newFuture[bool]() + + # set a topic validator for pubSubTopic + proc validator( + topic: string, msg: WakuMessage + ): Future[ValidationResult] {.async.} = + ## the validator that only allows messages with contentTopic1 to be relayed + check: + topic == $shard + + # only relay messages with contentTopic1 + if msg.contentTopic != contentTopic1: + completionFutValidatorRej.complete(true) + return ValidationResult.Reject + + completionFutValidatorAcc.complete(true) + return ValidationResult.Accept + + node2.wakuRelay.addValidator(validator) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + # check that only messages with contentTopic1 is relayed (but not contentTopic2) + msg.contentTopic == contentTopic1 + # relay handler is called + completionFut.complete(true) + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + ## node1 and node2 explicitly subscribe to the same shard as node3 + node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + ## Subscribe to the relay topic to add 
the custom relay handler defined above + node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + var res = await node1.publish(some($shard), message1) + assert res.isOk(), $res.error + + await sleepAsync(500.millis) + + # message2 never gets relayed because of the validator + res = await node1.publish(some($shard), message2) + assert res.isOk(), $res.error + + await sleepAsync(500.millis) + + check: + (await completionFut.withTimeout(10.seconds)) == true + # check that validator is called for message1 + (await completionFutValidatorAcc.withTimeout(10.seconds)) == true + # check that validator is called for message2 + (await completionFutValidatorRej.withTimeout(10.seconds)) == true + + await allFutures(node1.stop(), node2.stop(), node3.stop()) + + # TODO: Add a function to validate the WakuMessage integrity + xasyncTest "Stats of peer sending wrong WakuMessages are updated": + # Create 2 nodes + let nodes = toSeq(0 .. 1).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Start all the nodes and mount relay with + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + + # Connect nodes + let connOk = await nodes[0].peerManager.connectPeer( + nodes[1].switch.peerInfo.toRemotePeerInfo() + ) + require: + connOk == true + + # Node 1 subscribes to topic + nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic)).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + # Node 0 publishes 5 messages not compliant with WakuMessage (aka random bytes) + for i in 0 .. 
4: + discard + await nodes[0].wakuRelay.publish(DefaultPubsubTopic, urandom(1 * (10 ^ 2))) + + # Wait for gossip + await sleepAsync(500.millis) + + # Verify that node 1 has received 5 invalid messages from node 0 + # meaning that message validity is enforced to gossip messages + var peerStats = nodes[1].wakuRelay.peerStats + check: + peerStats[nodes[0].switch.peerInfo.peerId].topicInfos[DefaultPubsubTopic].invalidMessageDeliveries == + 5.0 + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Messages are relayed between two websocket nodes": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode( + nodeKey2, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) + shard = DefaultRelayShard + contentTopic = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message = WakuMessage(payload: payload, contentTopic: contentTopic) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + msg.contentTopic == contentTopic + msg.payload == payload + msg.timestamp > 0 + completionFut.complete(true) + + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + let res = await node2.publish(some($shard), message) + assert res.isOk(), $res.error + + await sleepAsync(500.millis) + + check: + (await completionFut.withTimeout(5.seconds)) == true + await node1.stop() + await node2.stop() + + asyncTest "Messages are relayed between nodes with multiple transports (TCP and Websockets)": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), bindPort = Port(0)) + shard = DefaultRelayShard + contentTopic = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message = WakuMessage(payload: payload, contentTopic: contentTopic) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + msg.contentTopic == contentTopic + msg.payload == payload + msg.timestamp > 0 + completionFut.complete(true) + + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + let res = await node2.publish(some($shard), message) + assert res.isOk(), $res.error + + await sleepAsync(500.millis) + + check: + (await completionFut.withTimeout(5.seconds)) == true + await node1.stop() + await node2.stop() + + asyncTest "Messages relaying fails with non-overlapping transports (TCP or Websockets)": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), bindPort = Port(0)) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode( + nodeKey2, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) + shard = DefaultRelayShard + contentTopic = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message = WakuMessage(payload: payload, contentTopic: contentTopic) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + #delete websocket peer address + # TODO: a better way to find the index - this is too brittle + node2.switch.peerInfo.listenAddrs.delete(0) + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + msg.contentTopic == contentTopic + msg.payload == payload + msg.timestamp > 0 + completionFut.complete(true) + + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added 
when mountRelay is called. + node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + let res = await node2.publish(some($shard), message) + assert res.isOk(), $res.error + + await sleepAsync(500.millis) + + check: + (await completionFut.withTimeout(5.seconds)) == false + + await allFutures(node1.stop(), node2.stop()) + + asyncTest "Messages are relayed between nodes with multiple transports (TCP and secure Websockets)": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wssEnabled = true, + secureKey = KEY_PATH, + secureCert = CERT_PATH, + ) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), bindPort = Port(0)) + shard = DefaultRelayShard + contentTopic = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message = WakuMessage(payload: payload, contentTopic: contentTopic) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + msg.contentTopic == contentTopic + msg.payload == payload + msg.timestamp > 0 + completionFut.complete(true) + + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when mountRelay is called. 
+ node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + let res = await node2.publish(some($shard), message) + assert res.isOk(), $res.error + + await sleepAsync(500.millis) + + check: + (await completionFut.withTimeout(5.seconds)) == true + + await allFutures(node1.stop(), node2.stop()) + + asyncTest "Messages are relayed between nodes with multiple transports (websocket and secure Websockets)": + let + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode( + nodeKey1, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wssEnabled = true, + secureKey = KEY_PATH, + secureCert = CERT_PATH, + ) + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode( + nodeKey2, + parseIpAddress("0.0.0.0"), + bindPort = Port(0), + wsBindPort = Port(0), + wsEnabled = true, + ) + + let + shard = DefaultRelayShard + contentTopic = ContentTopic("/waku/2/default-content/proto") + payload = "hello world".toBytes() + message = WakuMessage(payload: payload, contentTopic: contentTopic) + + await node1.start() + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node2.start() + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + check: + topic == $shard + msg.contentTopic == contentTopic + msg.payload == payload + msg.timestamp > 0 + completionFut.complete(true) + + ## The following unsubscription is necessary to remove the default relay handler, which is + ## added when 
mountRelay is called. + node1.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr: + assert false, "Failed to unsubscribe from topic: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + let res = await node2.publish(some($shard), message) + assert res.isOk(), $res.error + + await sleepAsync(500.millis) + + check: + (await completionFut.withTimeout(5.seconds)) == true + await node1.stop() + await node2.stop() + + asyncTest "Bad peers with low reputation are disconnected": + # Create 5 nodes + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + await allFutures(nodes.mapIt(it.start())) + await allFutures(nodes.mapIt(it.mountRelay())) + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + # subscribe all nodes to a topic + let topic = "topic" + for node in nodes: + node.wakuRelay.subscribe(topic, simpleHandler) + await sleepAsync(500.millis) + + # connect nodes in full mesh + for i in 0 ..< 5: + for j in 0 ..< 5: + if i == j: + continue + let connOk = await nodes[i].peerManager.connectPeer( + nodes[j].switch.peerInfo.toRemotePeerInfo() + ) + require connOk + + # connection triggers different actions, wait for them + await sleepAsync(1.seconds) + + # all peers are connected in a mesh, 4 conns each + for i in 0 ..< 5: + check: + nodes[i].peerManager.switch.connManager.getConnections().len == 4 + + # node[0] publishes wrong messages (random bytes not decoding into WakuMessage) + for j in 0 ..< 50: + discard await nodes[0].wakuRelay.publish(topic, urandom(1 * (10 ^ 3))) + + # long wait, must be higher than the configured decayInterval (how often score is updated) + await sleepAsync(20.seconds) + + # all nodes lower 
the score of nodes[0] (will change if gossipsub params or amount of msg changes) + for i in 1 ..< 5: + check: + nodes[i].wakuRelay.peerStats[nodes[0].switch.peerInfo.peerId].score == -249999.9 + + # nodes[0] was blacklisted from all other peers, no connections + check: + nodes[0].peerManager.switch.connManager.getConnections().len == 0 + + # the rest of the nodes now have 1 conn less (kicked nodes[0] out) + for i in 1 ..< 5: + check: + nodes[i].peerManager.switch.connManager.getConnections().len == 3 + + # Stop all nodes + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Multiple subscription calls are allowed for contenttopics that generate the same shard": + ## Setup + let + nodeKey = generateSecp256k1Key() + node = newTestWakuNode(nodeKey, parseIpAddress("0.0.0.0"), Port(0)) + + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 1).isOk + + ## Given + let + shard = "/waku/2/rs/1/0" + contentTopicA = DefaultContentTopic + contentTopicB = ContentTopic("/waku/2/default-content1/proto") + contentTopicC = ContentTopic("/waku/2/default-content2/proto") + handler: WakuRelayHandler = proc( + pubsubTopic: PubsubTopic, message: WakuMessage + ): Future[void] {.gcsafe, raises: [Defect].} = + discard pubsubTopic + discard message + assert shard == + node.wakuAutoSharding.get().getShard(contentTopicA).expect("Valid Topic"), + "topic must use the same shard" + assert shard == + node.wakuAutoSharding.get().getShard(contentTopicB).expect("Valid Topic"), + "topic must use the same shard" + assert shard == + node.wakuAutoSharding.get().getShard(contentTopicC).expect("Valid Topic"), + "topic must use the same shard" + + ## When + node.subscribe((kind: ContentSub, topic: contentTopicA), handler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node.subscribe((kind: ContentSub, topic: contentTopicB), handler).isOkOr: + assert false, + "The subscription call shouldn't error even 
though it's already subscribed to that shard" + node.subscribe((kind: ContentSub, topic: contentTopicC), handler).isOkOr: + assert false, + "The subscription call shouldn't error even though it's already subscribed to that shard" + + ## The node should be subscribed to the shard + check node.wakuRelay.isSubscribed(shard) + + ## Then + node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)).isOkOr: + assert false, "Failed to unsubscribe to topic: " & $error + + ## After unsubcription, the node should not be subscribed to the shard anymore + check not node.wakuRelay.isSubscribed(shard) + + ## Cleanup + await node.stop() diff --git a/third-party/nwaku/tests/waku_relay/utils.nim b/third-party/nwaku/tests/waku_relay/utils.nim new file mode 100644 index 0000000..d5703d4 --- /dev/null +++ b/third-party/nwaku/tests/waku_relay/utils.nim @@ -0,0 +1,110 @@ +{.used.} + +import + std/[strutils, sequtils, tempfiles], + stew/byteutils, + chronos, + chronicles, + libp2p/switch, + libp2p/protocols/pubsub/pubsub + +from std/times import epochTime + +import + waku/ + [ + waku_relay, + node/waku_node, + node/peer_manager, + waku_core, + waku_node, + waku_rln_relay, + ], + ../waku_store/store_utils, + ../waku_archive/archive_utils, + ../testlib/[wakucore, futures] + +proc noopRawHandler*(): WakuRelayHandler = + var handler: WakuRelayHandler + handler = proc(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} = + discard + handler + +proc newTestWakuRelay*(switch = newTestSwitch()): Future[WakuRelay] {.async.} = + let proto = WakuRelay.new(switch).tryGet() + + let protocolMatcher = proc(proto: string): bool {.gcsafe.} = + return proto.startsWith(WakuRelayCodec) + + switch.mount(proto, protocolMatcher) + + return proto + +proc setupRln*(node: WakuNode, identifier: uint) {.async.} = + await node.mountRlnRelay( + WakuRlnConfig(dynamic: false, credIndex: some(identifier), epochSizeSec: 1) + ) + +proc subscribeToContentTopicWithHandler*( + node: WakuNode, contentTopic: 
string +): Future[bool] = + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + if topic == topic: + completionFut.complete(true) + + (node.subscribe((kind: ContentSub, topic: contentTopic), relayHandler)).isOkOr: + error "Failed to subscribe to content topic", error + completionFut.complete(true) + return completionFut + +proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bool] = + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + if topic == pubsubTopic: + completionFut.complete(true) + + (node.subscribe((kind: PubsubSub, topic: pubsubTopic), relayHandler)).isOkOr: + error "Failed to subscribe to pubsub topic", error + completionFut.complete(false) + return completionFut + +proc sendRlnMessage*( + client: WakuNode, + pubsubTopic: string, + contentTopic: string, + completionFuture: Future[bool], + payload: seq[byte] = "Hello".toBytes(), +): Future[bool] {.async.} = + var message = WakuMessage(payload: payload, contentTopic: contentTopic) + doAssert(client.wakuRlnRelay.appendRLNProof(message, epochTime()).isOk()) + discard await client.publish(some(pubsubTopic), message) + let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT) + return isCompleted + +proc sendRlnMessageWithInvalidProof*( + client: WakuNode, + pubsubTopic: string, + contentTopic: string, + completionFuture: Future[bool], + payload: seq[byte] = "Hello".toBytes(), +): Future[bool] {.async.} = + let + extraBytes: seq[byte] = @[byte(1), 2, 3] + rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof( + concat(payload, extraBytes), + # we add extra bytes to invalidate proof verification against original payload + client.wakuRlnRelay.getCurrentEpoch(), + messageId = MessageId(0), + ) + rateLimitProof = rateLimitProofRes.get().encode().buffer + message = + WakuMessage(payload: 
@payload, contentTopic: contentTopic, proof: rateLimitProof) + + discard await client.publish(some(pubsubTopic), message) + let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT) + return isCompleted diff --git a/third-party/nwaku/tests/waku_rln_relay/rln/buffer_utils.nim b/third-party/nwaku/tests/waku_rln_relay/rln/buffer_utils.nim new file mode 100644 index 0000000..e38cc5c --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/rln/buffer_utils.nim @@ -0,0 +1,11 @@ +import waku/waku_rln_relay/rln/rln_interface + +proc `==`*(a: Buffer, b: seq[uint8]): bool = + if a.len != uint(b.len): + return false + + let bufferArray = cast[ptr UncheckedArray[uint8]](a.ptr) + for i in 0 ..< b.len: + if bufferArray[i] != b[i]: + return false + return true diff --git a/third-party/nwaku/tests/waku_rln_relay/rln/test_rln_interface.nim b/third-party/nwaku/tests/waku_rln_relay/rln/test_rln_interface.nim new file mode 100644 index 0000000..7aedf58 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/rln/test_rln_interface.nim @@ -0,0 +1,17 @@ +import testutils/unittests + +import waku/waku_rln_relay/rln/rln_interface, ./buffer_utils + +suite "Buffer": + suite "toBuffer": + test "valid": + # Given + let bytes: seq[byte] = @[0x01, 0x02, 0x03] + + # When + let buffer = bytes.toBuffer() + + # Then + let expectedBuffer: seq[uint8] = @[1, 2, 3] + check: + buffer == expectedBuffer diff --git a/third-party/nwaku/tests/waku_rln_relay/rln/test_wrappers.nim b/third-party/nwaku/tests/waku_rln_relay/rln/test_wrappers.nim new file mode 100644 index 0000000..3652657 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/rln/test_wrappers.nim @@ -0,0 +1,136 @@ +import + std/options, + testutils/unittests, + chronicles, + chronos, + eth/keys, + bearssl, + stew/[results], + metrics, + metrics/chronos_httpserver + +import + waku/waku_rln_relay, + waku/waku_rln_relay/rln, + waku/waku_rln_relay/rln/wrappers, + ./waku_rln_relay_utils, + ../../testlib/[simple_mock, assertions], + 
../../waku_keystore/utils, + ../../testlib/testutils + +from std/times import epochTime + +const Empty32Array = default(array[32, byte]) + +proc valid(x: seq[byte]): bool = + if x.len != 32: + error "Length should be 32", length = x.len + return false + + if x == Empty32Array: + error "Should not be empty array", array = x + return false + + return true + +suite "membershipKeyGen": + var rlnRes {.threadvar.}: RLNResult + + setup: + rlnRes = createRLNInstanceWrapper() + + test "ok": + # Given we generate valid membership keys + let identityCredentialsRes = membershipKeyGen(rlnRes.get()) + + # Then it contains valid identity credentials + let identityCredentials = identityCredentialsRes.get() + + check: + identityCredentials.idTrapdoor.valid() + identityCredentials.idNullifier.valid() + identityCredentials.idSecretHash.valid() + identityCredentials.idCommitment.valid() + + test "done is false": + # Given the key_gen function fails + let backup = key_gen + mock(key_gen): + proc keyGenMock(ctx: ptr RLN, output_buffer: ptr Buffer): bool = + return false + + keyGenMock + + # When we generate the membership keys + let identityCredentialsRes = membershipKeyGen(rlnRes.get()) + + # Then it fails + check: + identityCredentialsRes.error() == "error in key generation" + + # Cleanup + mock(key_gen): + backup + + test "generatedKeys length is not 128": + # Given the key_gen function succeeds with wrong values + let backup = key_gen + mock(key_gen): + proc keyGenMock(ctx: ptr RLN, output_buffer: ptr Buffer): bool = + echo "# RUNNING MOCK" + output_buffer.len = 0 + output_buffer.ptr = cast[ptr uint8](newSeq[byte](0)) + return true + + keyGenMock + + # When we generate the membership keys + let identityCredentialsRes = membershipKeyGen(rlnRes.get()) + + # Then it fails + check: + identityCredentialsRes.error() == "keysBuffer is of invalid length" + + # Cleanup + mock(key_gen): + backup + +suite "RlnConfig": + suite "createRLNInstance": + test "ok": + # When we create the RLN 
instance + let rlnRes: RLNResult = createRLNInstance(15) + + # Then it succeeds + check: + rlnRes.isOk() + + test "default": + # When we create the RLN instance + let rlnRes: RLNResult = createRLNInstance() + + # Then it succeeds + check: + rlnRes.isOk() + + test "new_circuit fails": + # Given the new_circuit function fails + let backup = new_circuit + mock(new_circuit): + proc newCircuitMock( + tree_height: uint, input_buffer: ptr Buffer, ctx: ptr (ptr RLN) + ): bool = + return false + + newCircuitMock + + # When we create the RLN instance + let rlnRes: RLNResult = createRLNInstance(15) + + # Then it fails + check: + rlnRes.error() == "error in parameters generation" + + # Cleanup + mock(new_circuit): + backup diff --git a/third-party/nwaku/tests/waku_rln_relay/rln/waku_rln_relay_utils.nim b/third-party/nwaku/tests/waku_rln_relay/rln/waku_rln_relay_utils.nim new file mode 100644 index 0000000..4bdcbbd --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/rln/waku_rln_relay_utils.nim @@ -0,0 +1,57 @@ +import std/tempfiles + +import + waku/waku_rln_relay, + waku/waku_rln_relay/[ + group_manager, rln, conversion_utils, constants, protocol_types, protocol_metrics, + nonce_manager, + ] + +proc createRLNInstanceWrapper*(): RLNResult = + return createRlnInstance() + +proc unsafeAppendRLNProof*( + rlnPeer: WakuRLNRelay, msg: var WakuMessage, epoch: Epoch, messageId: MessageId +): RlnRelayResult[void] = + ## Test helper derived from `appendRLNProof`. + ## - Skips nonce validation to intentionally allow generating "bad" message IDs for tests. + ## - Forces a real-time on-chain Merkle root refresh via `updateRoots()` and fetches Merkle + ## proof elements, updating `merkleProofCache` (bypasses `trackRootsChanges`). + ## WARNING: For testing only + + let manager = cast[OnchainGroupManager](rlnPeer.groupManager) + let rootUpdated = waitFor manager.updateRoots() + + # Fetch Merkle proof either when a new root was detected *or* when the cache is empty. 
+ if rootUpdated or manager.merkleProofCache.len == 0: + let proofResult = waitFor manager.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager.merkleProofCache = proofResult.get() + + let proof = manager.generateProof(msg.toRLNSignal(), epoch, messageId).valueOr: + return err("could not generate rln-v2 proof: " & $error) + + msg.proof = proof.encode().buffer + return ok() + +proc getWakuRlnConfig*( + manager: OnchainGroupManager, + userMessageLimit: uint64 = 1, + epochSizeSec: uint64 = 1, + index: MembershipIndex = MembershipIndex(0), +): WakuRlnConfig = + let wakuRlnConfig = WakuRlnConfig( + dynamic: true, + ethClientUrls: @[EthClient], + ethContractAddress: manager.ethContractAddress, + chainId: manager.chainId, + credIndex: some(index), + userMessageLimit: userMessageLimit, + epochSizeSec: epochSizeSec, + ethPrivateKey: some(manager.ethPrivateKey.get()), + onFatalErrorAction: proc(errStr: string) = + warn "non-fatal onchain test error", errStr + , + ) + return wakuRlnConfig diff --git a/third-party/nwaku/tests/waku_rln_relay/test_all.nim b/third-party/nwaku/tests/waku_rln_relay/test_all.nim new file mode 100644 index 0000000..706fff4 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/test_all.nim @@ -0,0 +1,7 @@ +{.used.} + +import + ./test_rln_group_manager_onchain, + ./test_waku_rln_relay, + ./test_wakunode_rln_relay, + ./test_rln_nonce_manager diff --git a/third-party/nwaku/tests/waku_rln_relay/test_rln_group_manager_onchain.nim b/third-party/nwaku/tests/waku_rln_relay/test_rln_group_manager_onchain.nim new file mode 100644 index 0000000..5d77318 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/test_rln_group_manager_onchain.nim @@ -0,0 +1,486 @@ +{.used.} + +{.push raises: [].} + +import + std/[options, sequtils, deques, random, locks, osproc], + results, + stew/byteutils, + testutils/unittests, + chronos, + chronicles, + stint, + web3, + libp2p/crypto/crypto, + 
eth/keys, + tests/testlib/testasync, + tests/testlib/testutils + +import + waku/[ + waku_rln_relay, + waku_rln_relay/protocol_types, + waku_rln_relay/constants, + waku_rln_relay/rln, + waku_rln_relay/conversion_utils, + waku_rln_relay/group_manager/on_chain/group_manager, + ], + ../testlib/wakucore, + ./utils_onchain + +suite "Onchain group manager": + var anvilProc {.threadVar.}: Process + var manager {.threadVar.}: OnchainGroupManager + + setup: + anvilProc = runAnvil() + manager = waitFor setupOnchainGroupManager() + + teardown: + stopAnvil(anvilProc) + + test "should initialize successfully": + (waitFor manager.init()).isOkOr: + raiseAssert $error + + check: + manager.ethRpc.isSome() + manager.wakuRlnContract.isSome() + manager.initialized + manager.rlnRelayMaxMessageLimit == 600 + + test "should error on initialization when chainId does not match": + manager.chainId = utils_onchain.CHAIN_ID + 1 + + (waitFor manager.init()).isErrOr: + raiseAssert "Expected error when chainId does not match" + + test "should initialize when chainId is set to 0": + manager.chainId = 0x0'u256 + (waitFor manager.init()).isOkOr: + raiseAssert $error + + test "should error on initialization when loaded metadata does not match": + (waitFor manager.init()).isOkOr: + assert false, $error + let metadataSetRes = manager.setMetadata() + assert metadataSetRes.isOk(), metadataSetRes.error + let metadataOpt = manager.rlnInstance.getMetadata().valueOr: + assert false, $error + return + assert metadataOpt.isSome(), "metadata is not set" + let metadata = metadataOpt.get() + assert metadata.chainId == 1234, "chainId is not equal to 1234" + assert metadata.contractAddress == manager.ethContractAddress, + "contractAddress is not equal to " & manager.ethContractAddress + let web3 = manager.ethRpc.get() + let accounts = waitFor web3.provider.eth_accounts() + web3.defaultAccount = accounts[2] + let (privateKey, acc) = createEthAccount(web3) + let tokenAddress = (waitFor deployTestToken(privateKey, 
acc, web3)).valueOr: + assert false, "Failed to deploy test token contract: " & $error + return + let differentContractAddress = ( + waitFor executeForgeContractDeployScripts(privateKey, acc, web3) + ).valueOr: + assert false, "Failed to deploy RLN contract: " & $error + return + # simulating a change in the contractAddress + let manager2 = OnchainGroupManager( + ethClientUrls: @[EthClient], + ethContractAddress: $differentContractAddress, + rlnInstance: manager.rlnInstance, + onFatalErrorAction: proc(errStr: string) = + assert false, errStr + , + ) + let e = waitFor manager2.init() + (e).isErrOr: + assert false, "Expected error when contract address doesn't match" + + test "should error if contract does not exist": + manager.ethContractAddress = "0x0000000000000000000000000000000000000000" + + (waitFor manager.init()).isErrOr: + raiseAssert "Expected error when contract address doesn't exist" + + test "should error when keystore path and password are provided but file doesn't exist": + manager.keystorePath = some("/inexistent/file") + manager.keystorePassword = some("password") + + (waitFor manager.init()).isErrOr: + raiseAssert "Expected error when keystore file doesn't exist" + + test "trackRootChanges: should guard against uninitialized state": + try: + discard manager.trackRootChanges() + except CatchableError: + check getCurrentExceptionMsg().len == 38 + + test "trackRootChanges: should sync to the state of the group": + let credentials = generateCredentials(manager.rlnInstance) + (waitFor manager.init()).isOkOr: + raiseAssert $error + + let merkleRootBefore = waitFor manager.fetchMerkleRoot() + + try: + waitFor manager.register(credentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + discard waitFor withTimeout(trackRootChanges(manager), 15.seconds) + + let merkleRootAfter = waitFor manager.fetchMerkleRoot() + + let metadataSetRes = manager.setMetadata() + assert 
metadataSetRes.isOk(), metadataSetRes.error + + let metadataOpt = getMetadata(manager.rlnInstance).valueOr: + raiseAssert $error + + assert metadataOpt.isSome(), "metadata is not set" + let metadata = metadataOpt.get() + + check: + metadata.validRoots == manager.validRoots.toSeq() + merkleRootBefore != merkleRootAfter + + test "trackRootChanges: should fetch history correctly": + # TODO: We can't use `trackRootChanges()` directly in this test because its current implementation + # relies on a busy loop rather than event-based monitoring. but that busy loop fetch root every 5 seconds + # so we can't use it in this test. + + const credentialCount = 6 + let credentials = generateCredentials(manager.rlnInstance, credentialCount) + (waitFor manager.init()).isOkOr: + raiseAssert $error + + let merkleRootBefore = waitFor manager.fetchMerkleRoot() + + try: + for i in 0 ..< credentials.len(): + debug "Registering credential", index = i, credential = credentials[i] + waitFor manager.register(credentials[i], UserMessageLimit(20)) + discard waitFor manager.updateRoots() + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + let merkleRootAfter = waitFor manager.fetchMerkleRoot() + + check: + merkleRootBefore != merkleRootAfter + manager.validRoots.len() == credentialCount + + test "register: should guard against uninitialized state": + let dummyCommitment = default(IDCommitment) + + try: + waitFor manager.register( + RateCommitment( + idCommitment: dummyCommitment, userMessageLimit: UserMessageLimit(20) + ) + ) + except CatchableError: + assert true + except Exception: + assert false, "exception raised: " & getCurrentExceptionMsg() + + test "register: should register successfully": + # TODO :- similar to ```trackRootChanges: should fetch history correctly``` + (waitFor manager.init()).isOkOr: + raiseAssert $error + + let idCredentials = generateCredentials(manager.rlnInstance) + let merkleRootBefore = waitFor 
manager.fetchMerkleRoot() + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let merkleRootAfter = waitFor manager.fetchMerkleRoot() + + check: + merkleRootAfter != merkleRootBefore + manager.latestIndex == 1 + + test "register: callback is called": + let idCredentials = generateCredentials(manager.rlnInstance) + let idCommitment = idCredentials.idCommitment + + let fut = newFuture[void]() + + proc callback(registrations: seq[Membership]): Future[void] {.async.} = + let rateCommitment = getRateCommitment(idCredentials, UserMessageLimit(20)).get() + check: + registrations.len == 1 + registrations[0].rateCommitment == rateCommitment + registrations[0].index == 0 + fut.complete() + + (waitFor manager.init()).isOkOr: + raiseAssert $error + + manager.onRegister(callback) + + try: + waitFor manager.register( + RateCommitment( + idCommitment: idCommitment, userMessageLimit: UserMessageLimit(20) + ) + ) + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + waitFor fut + + test "withdraw: should guard against uninitialized state": + let idSecretHash = generateCredentials(manager.rlnInstance).idSecretHash + + try: + waitFor manager.withdraw(idSecretHash) + except CatchableError: + assert true + except Exception: + assert false, "exception raised: " & getCurrentExceptionMsg() + + test "validateRoot: should validate good root": + let idCredentials = generateCredentials(manager.rlnInstance) + let idCommitment = idCredentials.idCommitment + + let fut = newFuture[void]() + + proc callback(registrations: seq[Membership]): Future[void] {.async.} = + if registrations.len == 1 and + registrations[0].rateCommitment == + getRateCommitment(idCredentials, UserMessageLimit(20)).get() and + registrations[0].index == 0: + manager.idCredentials = some(idCredentials) + fut.complete() + + 
manager.onRegister(callback) + + (waitFor manager.init()).isOkOr: + raiseAssert $error + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + waitFor fut + + let rootUpdated = waitFor manager.updateRoots() + + if rootUpdated: + let proofResult = waitFor manager.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager.merkleProofCache = proofResult.get() + let messageBytes = "Hello".toBytes() + + let epoch = default(Epoch) + debug "epoch in bytes", epochHex = epoch.inHex() + + let validProofRes = manager.generateProof( + data = messageBytes, epoch = epoch, messageId = MessageId(1) + ) + + check: + validProofRes.isOk() + let validProof = validProofRes.get() + + let validated = manager.validateRoot(validProof.merkleRoot) + + check: + validated + + test "validateRoot: should reject bad root": + let idCredentials = generateCredentials(manager.rlnInstance) + let idCommitment = idCredentials.idCommitment + + (waitFor manager.init()).isOkOr: + raiseAssert $error + + manager.userMessageLimit = some(UserMessageLimit(20)) + manager.membershipIndex = some(MembershipIndex(0)) + manager.idCredentials = some(idCredentials) + + manager.merkleProofCache = newSeq[byte](640) + for i in 0 ..< 640: + manager.merkleProofCache[i] = byte(rand(255)) + + let messageBytes = "Hello".toBytes() + + let epoch = default(Epoch) + debug "epoch in bytes", epochHex = epoch.inHex() + + let validProofRes = manager.generateProof( + data = messageBytes, epoch = epoch, messageId = MessageId(1) + ) + + check: + validProofRes.isOk() + let validProof = validProofRes.get() + + let validated = manager.validateRoot(validProof.merkleRoot) + + check: + validated == false + + test "verifyProof: should verify valid proof": + let credentials = generateCredentials(manager.rlnInstance) + (waitFor manager.init()).isOkOr: + 
raiseAssert $error + + let fut = newFuture[void]() + + proc callback(registrations: seq[Membership]): Future[void] {.async.} = + if registrations.len == 1 and + registrations[0].rateCommitment == + getRateCommitment(credentials, UserMessageLimit(20)).get() and + registrations[0].index == 0: + manager.idCredentials = some(credentials) + fut.complete() + + manager.onRegister(callback) + + try: + waitFor manager.register(credentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + waitFor fut + + let rootUpdated = waitFor manager.updateRoots() + + if rootUpdated: + let proofResult = waitFor manager.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + manager.merkleProofCache = proofResult.get() + + let messageBytes = "Hello".toBytes() + + # prepare the epoch + let epoch = default(Epoch) + debug "epoch in bytes", epochHex = epoch.inHex() + + # generate proof + let validProof = manager.generateProof( + data = messageBytes, epoch = epoch, messageId = MessageId(0) + ).valueOr: + raiseAssert $error + + let verified = manager.verifyProof(messageBytes, validProof).valueOr: + raiseAssert $error + + check: + verified + + test "verifyProof: should reject invalid proof": + (waitFor manager.init()).isOkOr: + raiseAssert $error + + let idCredential = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredential, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling startGroupSync: " & getCurrentExceptionMsg() + + let messageBytes = "Hello".toBytes() + + let rootUpdated = waitFor manager.updateRoots() + + manager.merkleProofCache = newSeq[byte](640) + for i in 0 ..< 640: + manager.merkleProofCache[i] = byte(rand(255)) + + let epoch = default(Epoch) + debug "epoch in bytes", epochHex = epoch.inHex() + + # generate proof + let invalidProofRes = 
manager.generateProof( + data = messageBytes, epoch = epoch, messageId = MessageId(0) + ) + + check: + invalidProofRes.isOk() + let invalidProof = invalidProofRes.get() + + # verify the proof (should be false) + let verified = manager.verifyProof(messageBytes, invalidProof).valueOr: + raiseAssert $error + + check: + verified == false + + test "root queue should be updated correctly": + const credentialCount = 12 + let credentials = generateCredentials(manager.rlnInstance, credentialCount) + (waitFor manager.init()).isOkOr: + raiseAssert $error + + type TestBackfillFuts = array[0 .. credentialCount - 1, Future[void]] + var futures: TestBackfillFuts + for i in 0 ..< futures.len(): + futures[i] = newFuture[void]() + + proc generateCallback( + futs: TestBackfillFuts, credentials: seq[IdentityCredential] + ): OnRegisterCallback = + var futureIndex = 0 + proc callback(registrations: seq[Membership]): Future[void] {.async.} = + if registrations.len == 1 and + registrations[0].rateCommitment == + getRateCommitment(credentials[futureIndex], UserMessageLimit(20)).get() and + registrations[0].index == MembershipIndex(futureIndex): + futs[futureIndex].complete() + futureIndex += 1 + + return callback + + try: + manager.onRegister(generateCallback(futures, credentials)) + + for i in 0 ..< credentials.len(): + waitFor manager.register(credentials[i], UserMessageLimit(20)) + discard waitFor manager.updateRoots() + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + waitFor allFutures(futures) + + check: + manager.validRoots.len() == credentialCount + + test "isReady should return false if ethRpc is none": + (waitFor manager.init()).isOkOr: + raiseAssert $error + + manager.ethRpc = none(Web3) + + var isReady = true + try: + isReady = waitFor manager.isReady() + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + check: + isReady == false + + test "isReady should return true if 
ethRpc is ready": + (waitFor manager.init()).isOkOr: + raiseAssert $error + + var isReady = false + try: + isReady = waitFor manager.isReady() + except Exception, CatchableError: + assert false, "exception raised: " & getCurrentExceptionMsg() + + check: + isReady == true diff --git a/third-party/nwaku/tests/waku_rln_relay/test_rln_nonce_manager.nim b/third-party/nwaku/tests/waku_rln_relay/test_rln_nonce_manager.nim new file mode 100644 index 0000000..3a473f1 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/test_rln_nonce_manager.nim @@ -0,0 +1,43 @@ +{.used.} + +import testutils/unittests, chronos, os +import waku/waku_rln_relay/nonce_manager + +suite "Nonce manager": + test "should initialize successfully": + let nm = NonceManager.init(nonceLimit = 100.uint) + + check: + nm.nonceLimit == 100.uint + nm.nextNonce == 0.uint + + test "should generate a new nonce": + let nm = NonceManager.init(nonceLimit = 100.uint) + let nonce = nm.getNonce().valueOr: + raiseAssert $error + + check: + nonce == 0.uint + nm.nextNonce == 1.uint + + test "should fail to generate a new nonce if limit is reached": + let nm = NonceManager.init(nonceLimit = 1.uint) + let nonce = nm.getNonce().valueOr: + raiseAssert $error + let failedNonceRes = nm.getNonce() + + check: + failedNonceRes.isErr() + failedNonceRes.error.kind == NonceManagerErrorKind.NonceLimitReached + + test "should generate a new nonce if epoch is crossed": + let nm = NonceManager.init(nonceLimit = 1.uint, epoch = float(0.000001)) + let nonce = nm.getNonce().valueOr: + raiseAssert $error + sleep(1) + let nonce2 = nm.getNonce().valueOr: + raiseAssert $error + + check: + nonce == 0.uint + nonce2 == 0.uint diff --git a/third-party/nwaku/tests/waku_rln_relay/test_waku_rln_relay.nim b/third-party/nwaku/tests/waku_rln_relay/test_waku_rln_relay.nim new file mode 100644 index 0000000..94a2315 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/test_waku_rln_relay.nim @@ -0,0 +1,640 @@ +{.used.} + +import + 
std/[options, os, sequtils, tempfiles, strutils, osproc], + stew/byteutils, + testutils/unittests, + chronos, + chronicles, + stint, + libp2p/crypto/crypto +import + waku/[ + waku_core, + waku_rln_relay, + waku_rln_relay/rln, + waku_rln_relay/protocol_metrics, + waku_keystore, + ], + ./rln/waku_rln_relay_utils, + ./utils_onchain, + ../testlib/[wakucore, futures, wakunode, testutils] + +from std/times import epochTime + +suite "Waku rln relay": + var anvilProc {.threadVar.}: Process + var manager {.threadVar.}: OnchainGroupManager + + setup: + anvilProc = runAnvil() + manager = waitFor setupOnchainGroupManager() + + teardown: + stopAnvil(anvilProc) + + test "key_gen Nim Wrappers": + let merkleDepth: csize_t = 20 + + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + + # keysBufferPtr will hold the generated identity credential i.e., id trapdoor, nullifier, secret hash and commitment + var keysBuffer: Buffer + let + keysBufferPtr = addr(keysBuffer) + done = key_gen(rlnInstance.get(), keysBufferPtr) + require: + # check whether the keys are generated successfully + done + + let generatedKeys = cast[ptr array[4 * 32, byte]](keysBufferPtr.`ptr`)[] + check: + # the id trapdoor, nullifier, secert hash and commitment together are 4*32 bytes + generatedKeys.len == 4 * 32 + debug "generated keys: ", generatedKeys + + test "membership Key Generation": + # create an RLN instance + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + + let idCredentialsRes = membershipKeyGen(rlnInstance.get()) + require: + idCredentialsRes.isOk() + + let idCredential = idCredentialsRes.get() + let empty = default(array[32, byte]) + check: + idCredential.idTrapdoor.len == 32 + idCredential.idNullifier.len == 32 + idCredential.idSecretHash.len == 32 + idCredential.idCommitment.len == 32 + idCredential.idTrapdoor != empty + idCredential.idNullifier != empty + idCredential.idSecretHash != empty + idCredential.idCommitment != empty + + debug "the 
generated identity credential: ", idCredential + + test "setMetadata rln utils": + # create an RLN instance which also includes an empty Merkle tree + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + let rln = rlnInstance.get() + check: + rln + .setMetadata( + RlnMetadata( + lastProcessedBlock: 128, + chainId: 1155511'u256, + contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155", + ) + ) + .isOk() + + test "getMetadata rln utils": + # create an RLN instance which also includes an empty Merkle tree + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + let rln = rlnInstance.get() + + require: + rln + .setMetadata( + RlnMetadata( + lastProcessedBlock: 128, + chainId: 1155511'u256, + contractAddress: "0x9c09146844c1326c2dbc41c451766c7138f88155", + ) + ) + .isOk() + + let metadataOpt = rln.getMetadata().valueOr: + raiseAssert $error + + assert metadataOpt.isSome(), "metadata is not set" + let metadata = metadataOpt.get() + check: + metadata.lastProcessedBlock == 128 + metadata.chainId == 1155511'u256 + metadata.contractAddress == "0x9c09146844c1326c2dbc41c451766c7138f88155" + + test "getMetadata: empty rln metadata": + # create an RLN instance which also includes an empty Merkle tree + let rln = createRLNInstanceWrapper().valueOr: + raiseAssert $error + let metadata = rln.getMetadata().valueOr: + raiseAssert $error + + check: + metadata.isNone() + + test "hash Nim Wrappers": + # create an RLN instance + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + + # prepare the input + let + msg = "Hello".toBytes() + hashInput = encodeLengthPrefix(msg) + hashInputBuffer = toBuffer(hashInput) + + # prepare other inputs to the hash function + let outputBuffer = default(Buffer) + + let hashSuccess = sha256(unsafeAddr hashInputBuffer, unsafeAddr outputBuffer) + require: + hashSuccess + let outputArr = cast[ptr array[32, byte]](outputBuffer.`ptr`)[] + + check: + 
"1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" == + outputArr.inHex() + + let + hashOutput = cast[ptr array[32, byte]](outputBuffer.`ptr`)[] + hashOutputHex = hashOutput.toHex() + + debug "hash output", hashOutputHex + + test "sha256 hash utils": + # create an RLN instance + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + let rln = rlnInstance.get() + + # prepare the input + let msg = "Hello".toBytes() + + let hashRes = sha256(msg) + + check: + hashRes.isOk() + "1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" == + hashRes.get().inHex() + + test "poseidon hash utils": + # create an RLN instance + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + let rln = rlnInstance.get() + + # prepare the input + let msg = + @[ + "126f4c026cd731979365f79bd345a46d673c5a3f6f588bdc718e6356d02b6fdc".toBytes(), + "1f0e5db2b69d599166ab16219a97b82b662085c93220382b39f9f911d3b943b1".toBytes(), + ] + + let hashRes = poseidon(msg) + + # Value taken from zerokit + check: + hashRes.isOk() + "28a15a991fe3d2a014485c7fa905074bfb55c0909112f865ded2be0a26a932c3" == + hashRes.get().inHex() + + test "RateLimitProof Protobuf encode/init test": + var + proof: ZKSNARK + merkleRoot: MerkleNode + epoch: Epoch + shareX: MerkleNode + shareY: MerkleNode + nullifier: Nullifier + rlnIdentifier: RlnIdentifier + + # populate fields with dummy values + for x in proof.mitems: + x = 1 + for x in merkleRoot.mitems: + x = 2 + for x in epoch.mitems: + x = 3 + for x in shareX.mitems: + x = 4 + for x in shareY.mitems: + x = 5 + for x in nullifier.mitems: + x = 6 + for x in rlnIdentifier.mitems: + x = 7 + + let + rateLimitProof = RateLimitProof( + proof: proof, + merkleRoot: merkleRoot, + epoch: epoch, + shareX: shareX, + shareY: shareY, + nullifier: nullifier, + rlnIdentifier: rlnIdentifier, + ) + protobuf = rateLimitProof.encode() + decodednsp = RateLimitProof.init(protobuf.buffer) + + require: + decodednsp.isOk() + check: 
+ decodednsp.value == rateLimitProof + + test "toEpoch and fromEpoch consistency check": + # check edge cases + let + epoch = uint64.high # rln epoch + epochBytes = epoch.toEpoch() + decodedEpoch = epochBytes.fromEpoch() + check: + epoch == decodedEpoch + debug "encoded and decode time", + epoch = epoch, epochBytes = epochBytes, decodedEpoch = decodedEpoch + + test "Epoch comparison, epoch1 > epoch2": + # check edge cases + let + time1 = uint64.high + time2 = uint64.high - 1 + epoch1 = time1.toEpoch() + epoch2 = time2.toEpoch() + check: + absDiff(epoch1, epoch2) == uint64(1) + absDiff(epoch2, epoch1) == uint64(1) + + test "updateLog and hasDuplicate tests": + let + wakuRlnRelay = WakuRLNRelay() + epoch = wakuRlnRelay.getCurrentEpoch() + + # create some dummy nullifiers and secret shares + var nullifier1: Nullifier + for index, x in nullifier1.mpairs: + nullifier1[index] = 1 + var shareX1: MerkleNode + for index, x in shareX1.mpairs: + shareX1[index] = 1 + let shareY1 = shareX1 + + var nullifier2: Nullifier + for index, x in nullifier2.mpairs: + nullifier2[index] = 2 + var shareX2: MerkleNode + for index, x in shareX2.mpairs: + shareX2[index] = 2 + let shareY2 = shareX2 + + let nullifier3 = nullifier1 + var shareX3: MerkleNode + for index, x in shareX3.mpairs: + shareX3[index] = 3 + let shareY3 = shareX3 + + proc encodeAndGetBuf(proof: RateLimitProof): seq[byte] = + return proof.encode().buffer + + let + proof1 = RateLimitProof( + epoch: epoch, nullifier: nullifier1, shareX: shareX1, shareY: shareY1 + ) + wm1 = WakuMessage(proof: proof1.encodeAndGetBuf()) + proof2 = RateLimitProof( + epoch: epoch, nullifier: nullifier2, shareX: shareX2, shareY: shareY2 + ) + wm2 = WakuMessage(proof: proof2.encodeAndGetBuf()) + proof3 = RateLimitProof( + epoch: epoch, nullifier: nullifier3, shareX: shareX3, shareY: shareY3 + ) + wm3 = WakuMessage(proof: proof3.encodeAndGetBuf()) + + # check whether hasDuplicate correctly finds records with the same nullifiers but different secret 
shares + # no duplicate for proof1 should be found, since the log is empty + let proofMetadata1 = proof1.extractMetadata().tryGet() + let isDuplicate1 = wakuRlnRelay.hasDuplicate(epoch, proofMetadata1).valueOr: + raiseAssert $error + assert isDuplicate1 == false, "no duplicate should be found" + # add it to the log + discard wakuRlnRelay.updateLog(epoch, proofMetadata1) + + # no duplicate for proof2 should be found, its nullifier differs from proof1 + let proofMetadata2 = proof2.extractMetadata().tryGet() + let isDuplicate2 = wakuRlnRelay.hasDuplicate(epoch, proofMetadata2).valueOr: + raiseAssert $error + # no duplicate is found + assert isDuplicate2 == false, "no duplicate should be found" + # add it to the log + discard wakuRlnRelay.updateLog(epoch, proofMetadata2) + + # proof3 has the same nullifier as proof1 but different secret shares, it should be detected as duplicate + let isDuplicate3 = wakuRlnRelay.hasDuplicate( + epoch, proof3.extractMetadata().tryGet() + ).valueOr: + raiseAssert $error + # it is a duplicate + assert isDuplicate3, "duplicate should be found" + + asyncTest "validateMessageAndUpdateLog: against epoch gap": + let index = MembershipIndex(5) + + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index) + let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: + raiseAssert $error + + let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let epoch1 = wakuRlnRelay.getCurrentEpoch() + + # Create messages from the same peer and append RLN proof to them (except wm4) + var + wm1 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now()) + # Another message in the same epoch as wm1, expected to break the rate limit + wm2 = WakuMessage(payload: 
"Spam message".toBytes(), timestamp: now()) + + await sleepAsync(1.seconds) + let epoch2 = wakuRlnRelay.getCurrentEpoch() + + var + # wm3 points to the next epoch due to the sleep + wm3 = WakuMessage(payload: "Valid message".toBytes(), timestamp: now()) + wm4 = WakuMessage(payload: "Invalid message".toBytes(), timestamp: now()) + + # Append RLN proofs + wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch1, MessageId(1)).isOkOr: + raiseAssert $error + wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch1, MessageId(1)).isOkOr: + raiseAssert $error + wakuRlnRelay.unsafeAppendRLNProof(wm3, epoch2, MessageId(3)).isOkOr: + raiseAssert $error + + # Validate messages + let + msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1) + # wm2 is within the same epoch as wm1 → should be spam + msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2) + # wm3 is in the next epoch → should be valid + msgValidate3 = wakuRlnRelay.validateMessageAndUpdateLog(wm3) + # wm4 has no RLN proof → should be invalid + msgValidate4 = wakuRlnRelay.validateMessageAndUpdateLog(wm4) + + check: + msgValidate1 == MessageValidationResult.Valid + msgValidate2 == MessageValidationResult.Spam + msgValidate3 == MessageValidationResult.Valid + msgValidate4 == MessageValidationResult.Invalid + + asyncTest "validateMessageAndUpdateLog: against timestamp gap": + let index = MembershipIndex(5) + + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = index) + + let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: + raiseAssert $error + + let manager = cast[OnchainGroupManager](wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + # usually it's 20 seconds but we set it to 1 for testing purposes which make the test faster + wakuRlnRelay.rlnMaxTimestampGap = 1 
+ + var epoch = wakuRlnRelay.getCurrentEpoch() + + var + wm1 = WakuMessage( + payload: "timestamp message".toBytes(), + contentTopic: DefaultPubsubTopic, + timestamp: now(), + ) + wm2 = WakuMessage( + payload: "timestamp message".toBytes(), + contentTopic: DefaultPubsubTopic, + timestamp: now(), + ) + + wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch, MessageId(1)).isOkOr: + raiseAssert $error + + wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch, MessageId(2)).isOkOr: + raiseAssert $error + + # validate the first message because it's timestamp is the same as the generated timestamp + let msgValidate1 = wakuRlnRelay.validateMessageAndUpdateLog(wm1) + + # wait for 2 seconds to make the timestamp different from generated timestamp + await sleepAsync(2.seconds) + + let msgValidate2 = wakuRlnRelay.validateMessageAndUpdateLog(wm2) + + check: + msgValidate1 == MessageValidationResult.Valid + msgValidate2 == MessageValidationResult.Invalid + + asyncTest "multiple senders with same external nullifier": + let index1 = MembershipIndex(5) + let rlnConf1 = getWakuRlnConfig(manager = manager, index = index1) + let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr: + raiseAssert "failed to create waku rln relay: " & $error + + let manager1 = cast[OnchainGroupManager](wakuRlnRelay1.groupManager) + let idCredentials1 = generateCredentials(manager1.rlnInstance) + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let index2 = MembershipIndex(6) + let rlnConf2 = getWakuRlnConfig(manager = manager, index = index2) + let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr: + raiseAssert "failed to create waku rln relay: " & $error + + let manager2 = cast[OnchainGroupManager](wakuRlnRelay2.groupManager) + let idCredentials2 = generateCredentials(manager2.rlnInstance) + + try: + waitFor manager2.register(idCredentials2, 
UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + # get the current epoch time + let epoch = wakuRlnRelay1.getCurrentEpoch() + + # create messages from different peers and append rln proofs to them + var + wm1 = + WakuMessage(payload: "Valid message from sender 1".toBytes(), timestamp: now()) + # another message in the same epoch as wm1, it will break the messaging rate limit + wm2 = + WakuMessage(payload: "Valid message from sender 2".toBytes(), timestamp: now()) + + wakuRlnRelay1.unsafeAppendRLNProof(wm1, epoch, MessageId(1)).isOkOr: + raiseAssert $error + wakuRlnRelay2.unsafeAppendRLNProof(wm2, epoch, MessageId(1)).isOkOr: + raiseAssert $error + + let + msgValidate1 = wakuRlnRelay1.validateMessageAndUpdateLog(wm1) + msgValidate2 = wakuRlnRelay1.validateMessageAndUpdateLog(wm2) + + check: + msgValidate1 == MessageValidationResult.Valid + msgValidate2 == MessageValidationResult.Valid + + test "toIDCommitment and toUInt256": + # create an instance of rln + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + + let rln = rlnInstance.get() + + # create an idendity credential + let idCredentialRes = rln.membershipKeyGen() + require: + idCredentialRes.isOk() + + let idCredential = idCredentialRes.get() + + # convert the idCommitment to UInt256 + let idCUInt = idCredential.idCommitment.toUInt256() + # convert the UInt256 back to ICommitment + let idCommitment = toIDCommitment(idCUInt) + + # check that the conversion has not distorted the original value + check: + idCredential.idCommitment == idCommitment + + test "Read/Write RLN credentials": + # create an RLN instance + let rlnInstance = createRLNInstanceWrapper() + require: + rlnInstance.isOk() + + let idCredentialRes = membershipKeyGen(rlnInstance.get()) + require: + idCredentialRes.isOk() + + let idCredential = idCredentialRes.get() + let empty = default(array[32, byte]) + require: + 
idCredential.idTrapdoor.len == 32 + idCredential.idNullifier.len == 32 + idCredential.idSecretHash.len == 32 + idCredential.idCommitment.len == 32 + idCredential.idTrapdoor != empty + idCredential.idNullifier != empty + idCredential.idSecretHash != empty + idCredential.idCommitment != empty + + debug "the generated identity credential: ", idCredential + + let index = MembershipIndex(1) + + let keystoreMembership = KeystoreMembership( + membershipContract: MembershipContract( + chainId: "5", address: "0x0123456789012345678901234567890123456789" + ), + treeIndex: index, + identityCredential: idCredential, + ) + let password = "%m0um0ucoW%" + + let filepath = "./testRLNCredentials.txt" + defer: + removeFile(filepath) + + # Write RLN credentials + require: + addMembershipCredentials( + path = filepath, + membership = keystoreMembership, + password = password, + appInfo = RLNAppInfo, + ) + .isOk() + + let readKeystoreRes = getMembershipCredentials( + path = filepath, + password = password, + # here the query would not include + # the identityCredential, + # since it is not part of the query + # but have used the same value + # to avoid re-declaration + query = keystoreMembership, + appInfo = RLNAppInfo, + ) + assert readKeystoreRes.isOk(), $readKeystoreRes.error + + # getMembershipCredentials returns the credential in the keystore which matches + # the query, in this case the query is = + # chainId = "5" and + # address = "0x0123456789012345678901234567890123456789" and + # treeIndex = 1 + let readKeystoreMembership = readKeystoreRes.get() + check: + readKeystoreMembership == keystoreMembership + + test "histogram static bucket generation": + let buckets = generateBucketsForHistogram(10) + + check: + buckets.len == 5 + buckets == [2.0, 4.0, 6.0, 8.0, 10.0] + + asyncTest "nullifierLog clearing only after epoch has passed": + let index = MembershipIndex(0) + + proc runTestForEpochSizeSec(rlnEpochSizeSec: uint) {.async.} = + let wakuRlnConfig = getWakuRlnConfig( + manager 
= manager, index = index, epochSizeSec = rlnEpochSizeSec.uint64 + ) + + let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr: + raiseAssert $error + + let rlnMaxEpochGap = wakuRlnRelay.rlnMaxEpochGap + let testProofMetadata = default(ProofMetadata) + let testProofMetadataTable = + {testProofMetadata.nullifier: testProofMetadata}.toTable() + + for i in 0 .. rlnMaxEpochGap: + # we add epochs to the nullifierLog + let testEpoch = wakuRlnRelay.calcEpoch(epochTime() + float(rlnEpochSizeSec * i)) + wakuRlnRelay.nullifierLog[testEpoch] = testProofMetadataTable + check: + wakuRlnRelay.nullifierLog.len().uint == i + 1 + + check: + wakuRlnRelay.nullifierLog.len().uint == rlnMaxEpochGap + 1 + + # clearing it now will remove 1 epoch + wakuRlnRelay.clearNullifierLog() + + check: + wakuRlnRelay.nullifierLog.len().uint == rlnMaxEpochGap + + var testEpochSizes: seq[uint] = @[1, 5, 10, 30, 60, 600] + for i in testEpochSizes: + await runTestForEpochSizeSec(i) diff --git a/third-party/nwaku/tests/waku_rln_relay/test_wakunode_rln_relay.nim b/third-party/nwaku/tests/waku_rln_relay/test_wakunode_rln_relay.nim new file mode 100644 index 0000000..3a13fc3 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/test_wakunode_rln_relay.nim @@ -0,0 +1,742 @@ +{.used.} + +import + std/[options, os, sequtils, tempfiles, strutils, osproc], + stew/byteutils, + testutils/unittests, + chronicles, + chronos, + libp2p/switch, + libp2p/protocols/pubsub/pubsub +import + waku/[waku_core, waku_node, waku_rln_relay], + ../testlib/[wakucore, futures, wakunode, testutils], + ./utils_onchain, + ./rln/waku_rln_relay_utils + +from std/times import epochTime + +proc waitForNullifierLog(node: WakuNode, expectedLen: int): Future[bool] {.async.} = + ## Helper function + for i in 0 .. 
100: # Try for up to 50 seconds (100 * 500ms) + if node.wakuRlnRelay.nullifierLog.len() == expectedLen: + return true + await sleepAsync(500.millis) + return false + +procSuite "WakuNode - RLN relay": + # NOTE: we set the rlnRelayUserMessageLimit to 1 to make the tests easier to reason about + var anvilProc {.threadVar.}: Process + var manager {.threadVar.}: OnchainGroupManager + + setup: + anvilProc = runAnvil() + manager = waitFor setupOnchainGroupManager() + + teardown: + stopAnvil(anvilProc) + + asyncTest "testing rln-relay with valid proof": + let + # publisher node + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + # Relay node + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + # Subscriber + nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + + contentTopic = ContentTopic("/waku/2/default-content/proto") + + # set up three nodes + # node1 + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + + # Registration is mandatory before sending messages with rln-relay + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials(manager1.rlnInstance) + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + debug "Updated root for node1", rootUpdated1 + + # node 2 + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + # mount rlnrelay in off-chain mode + let wakuRlnConfig2 = getWakuRlnConfig(manager = 
manager, index = MembershipIndex(2)) + + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + debug "Updated root for node2", rootUpdated2 + + # node 3 + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) + + await node3.mountRlnRelay(wakuRlnConfig3) + await node3.start() + + let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) + let rootUpdated3 = waitFor manager3.updateRoots() + debug "Updated root for node3", rootUpdated3 + + # connect them together + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + debug "The received topic:", topic + if topic == DefaultPubsubTopic: + completionFut.complete(true) + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error + + ## Subscribe to the relay topic to add the custom relay handler defined above + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + await sleepAsync(2000.millis) + + # prepare the message payload + let payload = "Hello".toBytes() + + # prepare the epoch + var message = + WakuMessage(payload: @payload, 
contentTopic: contentTopic, timestamp: now()) + doAssert( + node1.wakuRlnRelay + .unsafeAppendRLNProof(message, node1.wakuRlnRelay.getCurrentEpoch(), MessageId(0)) + .isOk() + ) + + debug " Nodes participating in the test", + node1 = shortLog(node1.switch.peerInfo.peerId), + node2 = shortLog(node2.switch.peerInfo.peerId), + node3 = shortLog(node3.switch.peerInfo.peerId) + + ## node1 publishes a message with a rate limit proof, the message is then relayed to node2 which in turn + ## verifies the rate limit proof of the message and relays the message to node3 + ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc + discard await node1.publish(some(DefaultPubsubTopic), message) + assert (await completionFut.withTimeout(15.seconds)), "completionFut timed out" + + await node1.stop() + await node2.stop() + await node3.stop() + + asyncTest "testing rln-relay is applied in all rln shards/content topics": + # create 3 nodes + let nodes = toSeq(0 ..< 3).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + await allFutures(nodes.mapIt(it.start())) + + let shards = + @[RelayShard(clusterId: 0, shardId: 0), RelayShard(clusterId: 0, shardId: 1)] + let contentTopics = + @[ + ContentTopic("/waku/2/content-topic-a/proto"), + ContentTopic("/waku/2/content-topic-b/proto"), + ] + + # set up three nodes + await allFutures(nodes.mapIt(it.mountRelay())) + + # mount rlnrelay in off-chain mode + for index, node in nodes: + let wakuRlnConfig = + getWakuRlnConfig(manager = manager, index = MembershipIndex(index + 1)) + + await node.mountRlnRelay(wakuRlnConfig) + await node.start() + let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & 
getCurrentExceptionMsg() + + let rootUpdated = waitFor manager.updateRoots() + debug "Updated root for node", node = index + 1, rootUpdated = rootUpdated + + # connect them together + await nodes[0].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()]) + await nodes[2].connectToNodes(@[nodes[1].switch.peerInfo.toRemotePeerInfo()]) + + var rxMessagesTopic1 = 0 + var rxMessagesTopic2 = 0 + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + info "relayHandler. The received topic:", topic + if topic == $shards[0]: + rxMessagesTopic1 = rxMessagesTopic1 + 1 + elif topic == $shards[1]: + rxMessagesTopic2 = rxMessagesTopic2 + 1 + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + nodes[0].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in nodes[0]: " & $error + nodes[1].subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in nodes[1]: " & $error + + # mount the relay handlers + nodes[2].subscribe((kind: PubsubSub, topic: $shards[0]), relayHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + nodes[2].subscribe((kind: PubsubSub, topic: $shards[1]), relayHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + await sleepAsync(1000.millis) + + # generate some messages with rln proofs first. 
generating + # the proof takes some time, so this is done before publishing + # to avoid blocking the test + var messages1: seq[WakuMessage] = @[] + var messages2: seq[WakuMessage] = @[] + + for i in 0 ..< 3: + var message = WakuMessage( + payload: ("Payload_" & $i).toBytes(), + timestamp: now(), + contentTopic: contentTopics[0], + ) + + nodes[0].wakuRlnRelay.unsafeAppendRLNProof( + message, nodes[0].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8) + ).isOkOr: + raiseAssert $error + messages1.add(message) + + for i in 0 ..< 3: + var message = WakuMessage( + payload: ("Payload_" & $i).toBytes(), + timestamp: now(), + contentTopic: contentTopics[1], + ) + + nodes[1].wakuRlnRelay.unsafeAppendRLNProof( + message, nodes[1].wakuRlnRelay.getCurrentEpoch(), MessageId(i.uint8) + ).isOkOr: + raiseAssert $error + messages2.add(message) + + # publish 3 messages from node[0] (last 2 are spam, window is 10 secs) + # publish 3 messages from node[1] (last 2 are spam, window is 10 secs) + for msg in messages1: + discard await nodes[0].publish(some($shards[0]), msg) + for msg in messages2: + discard await nodes[1].publish(some($shards[1]), msg) + + # wait for gossip to propagate + await sleepAsync(5000.millis) + + # check that node[2] got messages from both topics + # and that rln was applied (just 1 msg is rx, rest are spam) + check: + rxMessagesTopic1 == 3 + rxMessagesTopic2 == 3 + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "testing rln-relay with invalid proof": + let + # publisher node + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + # Relay node + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + # Subscriber + nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + + contentTopic = ContentTopic("/waku/2/default-content/proto") + + # set up three nodes + # node1 + (await 
node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials(manager1.rlnInstance) + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + debug "Updated root for node1", rootUpdated1 + + # node 2 + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + # mount rlnrelay in off-chain mode + let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + debug "Updated root for node2", rootUpdated2 + + # node 3 + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) + + await node3.mountRlnRelay(wakuRlnConfig3) + await node3.start() + + let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) + let rootUpdated3 = waitFor manager3.updateRoots() + debug "Updated root for node3", rootUpdated3 + + # connect them together + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + # define a custom relay handler + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + debug "The received topic:", topic + if topic == 
DefaultPubsubTopic: + completionFut.complete(true) + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error + + # mount the relay handler + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + await sleepAsync(2000.millis) + + # prepare the message payload + let payload = "valid".toBytes() + # prepare the epoch + let epoch = node1.wakuRlnRelay.getCurrentEpoch() + + var message = + WakuMessage(payload: @payload, contentTopic: DefaultPubsubTopic, timestamp: now()) + + node1.wakuRlnRelay.unsafeAppendRLNProof(message, epoch, MessageId(0)).isOkOr: + assert false, "Failed to append rln proof: " & $error + + # message.payload = "Invalid".toBytes() + message.proof[0] = message.proof[0] xor 0x01 + + discard await node1.publish(some(DefaultPubsubTopic), message) + await sleepAsync(2000.millis) + + check: + # the relayHandler of node3 never gets called + (await completionFut.withTimeout(10.seconds)) == false + + await node1.stop() + await node2.stop() + await node3.stop() + + asyncTest "testing rln-relay double-signaling detection": + let + # publisher node + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + # Relay node + nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + # Subscriber + nodeKey3 = generateSecp256k1Key() + node3 = newTestWakuNode(nodeKey3, parseIpAddress("0.0.0.0"), Port(0)) + + contentTopic = ContentTopic("/waku/2/default-content/proto") + + # set up 
three nodes + # node1 + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + + # Registration is mandatory before sending messages with rln-relay + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials(manager1.rlnInstance) + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + debug "Updated root for node1", rootUpdated1 + + # node 2 + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + + # Registration is mandatory before sending messages with rln-relay + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + debug "Updated root for node2", rootUpdated2 + + # node 3 + (await node3.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + # mount rlnrelay in off-chain mode + let wakuRlnConfig3 = getWakuRlnConfig(manager = manager, index = MembershipIndex(3)) + + await node3.mountRlnRelay(wakuRlnConfig3) + await node3.start() + + # Registration is mandatory before sending messages with rln-relay + let manager3 = cast[OnchainGroupManager](node3.wakuRlnRelay.groupManager) + let rootUpdated3 = waitFor manager3.updateRoots() + debug "Updated root for node3", rootUpdated3 + + # connect the nodes together node1 <-> node2 <-> node3 + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + 
await node3.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + # get the current epoch time + let epoch_1 = node1.wakuRlnRelay.getCurrentEpoch() + + # create some messages with rate limit proofs + var + wm1 = WakuMessage( + payload: "message 1".toBytes(), + timestamp: now(), + contentTopic: DefaultPubsubTopic, + ) + # another message in the same epoch as wm1, it will break the messaging rate limit + wm2 = WakuMessage( + payload: "message 2".toBytes(), + timestamp: now(), + contentTopic: DefaultPubsubTopic, + ) + # wm3 points to the next epoch + + await sleepAsync(1000.millis) + let epoch_2 = node1.wakuRlnRelay.getCurrentEpoch() + + var + wm3 = WakuMessage( + payload: "message 3".toBytes(), + timestamp: now(), + contentTopic: DefaultPubsubTopic, + ) + wm4 = WakuMessage( + payload: "message 4".toBytes(), + timestamp: now(), + contentTopic: DefaultPubsubTopic, + ) + + node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch_1, MessageId(0)).isOkOr: + raiseAssert $error + node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch_1, MessageId(0)).isOkOr: + raiseAssert $error + + node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, epoch_2, MessageId(2)).isOkOr: + raiseAssert $error + + # relay handler for node3 + var completionFut1 = newFuture[bool]() + var completionFut2 = newFuture[bool]() + var completionFut3 = newFuture[bool]() + var completionFut4 = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + debug "The received topic:", topic + if topic == DefaultPubsubTopic: + if msg == wm1: + completionFut1.complete(true) + if msg == wm2: + completionFut2.complete(true) + if msg.payload == wm3.payload: + completionFut3.complete(true) + if msg.payload == wm4.payload: + completionFut4.complete(true) + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node1.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), 
simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node1: " & $error + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic in node2: " & $error + + # mount the relay handler for node3 + node3.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + await sleepAsync(2000.millis) + + ## node1 publishes and relays 4 messages to node2 + ## verification at node2 occurs inside a topic validator which is installed as part of the waku-rln-relay mount proc + ## node2 relays either of wm1 or wm2 to node3, depending on which message arrives at node2 first + ## node2 should detect either of wm1 or wm2 as spam and not relay it + ## node2 should relay wm3 to node3 + ## node2 should not relay wm4 because it has no valid rln proof + discard await node1.publish(some(DefaultPubsubTopic), wm1) + discard await node1.publish(some(DefaultPubsubTopic), wm2) + discard await node1.publish(some(DefaultPubsubTopic), wm3) + discard await node1.publish(some(DefaultPubsubTopic), wm4) + await sleepAsync(2000.millis) + + let + res1 = await completionFut1.withTimeout(10.seconds) + res2 = await completionFut2.withTimeout(10.seconds) + + check: + (res1 and res2) == false + # either of the wm1 and wm2 is found as spam hence not relayed + (await completionFut3.withTimeout(10.seconds)) == true + (await completionFut4.withTimeout(10.seconds)) == false + + await node1.stop() + await node2.stop() + await node3.stop() + + xasyncTest "clearNullifierLog: should clear epochs > MaxEpochGap": + ## This is skipped because is flaky and made CI randomly fail but is useful to run manually + # Given two nodes + let + contentTopic = ContentTopic("/waku/2/default-content/proto") + shardSeq = @[DefaultRelayShard] + nodeKey1 = generateSecp256k1Key() + node1 = newTestWakuNode(nodeKey1, parseIpAddress("0.0.0.0"), Port(0)) + 
nodeKey2 = generateSecp256k1Key() + node2 = newTestWakuNode(nodeKey2, parseIpAddress("0.0.0.0"), Port(0)) + epochSizeSec: uint64 = 5 # This means rlnMaxEpochGap = 4 + + # Given both nodes mount relay and rlnrelay + (await node1.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig1 = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + await node1.mountRlnRelay(wakuRlnConfig1) + await node1.start() + + # Registration is mandatory before sending messages with rln-relay + let manager1 = cast[OnchainGroupManager](node1.wakuRlnRelay.groupManager) + let idCredentials1 = generateCredentials(manager1.rlnInstance) + + try: + waitFor manager1.register(idCredentials1, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated1 = waitFor manager1.updateRoots() + debug "Updated root for node1", rootUpdated1 + + # Mount rlnrelay in node2 in off-chain mode + (await node2.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig2 = getWakuRlnConfig(manager = manager, index = MembershipIndex(2)) + await node2.mountRlnRelay(wakuRlnConfig2) + await node2.start() + + # Registration is mandatory before sending messages with rln-relay + let manager2 = cast[OnchainGroupManager](node2.wakuRlnRelay.groupManager) + let rootUpdated2 = waitFor manager2.updateRoots() + debug "Updated root for node2", rootUpdated2 + + # Given the two nodes are started and connected + waitFor allFutures(node1.start(), node2.start()) + await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()]) + + # Given some messages + var + wm1 = + WakuMessage(payload: "message 1".toBytes(), contentTopic: DefaultPubsubTopic) + wm2 = + WakuMessage(payload: "message 2".toBytes(), contentTopic: DefaultPubsubTopic) + wm3 = + WakuMessage(payload: "message 3".toBytes(), contentTopic: DefaultPubsubTopic) + wm4 = + WakuMessage(payload: "message 
4".toBytes(), contentTopic: DefaultPubsubTopic) + wm5 = + WakuMessage(payload: "message 5".toBytes(), contentTopic: DefaultPubsubTopic) + wm6 = + WakuMessage(payload: "message 6".toBytes(), contentTopic: DefaultPubsubTopic) + + # And node2 mounts a relay handler that completes the respective future when a message is received + var + completionFut1 = newFuture[bool]() + completionFut2 = newFuture[bool]() + completionFut3 = newFuture[bool]() + completionFut4 = newFuture[bool]() + completionFut5 = newFuture[bool]() + completionFut6 = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + debug "The received topic:", topic + if topic == DefaultPubsubTopic: + if msg == wm1: + completionFut1.complete(true) + if msg == wm2: + completionFut2.complete(true) + if msg == wm3: + completionFut3.complete(true) + if msg == wm4: + completionFut4.complete(true) + if msg == wm5: + completionFut5.complete(true) + if msg == wm6: + completionFut6.complete(true) + + node2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + + # Given all messages have an rln proof and are published by the node 1 + let publishSleepDuration: Duration = 5000.millis + let epoch_1 = node1.wakuRlnRelay.calcEpoch(epochTime().float64) + let epoch_2 = node1.wakuRlnRelay.calcEpoch( + epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 1 + ) + let epoch_3 = node1.wakuRlnRelay.calcEpoch( + epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 2 + ) + let epoch_4 = node1.wakuRlnRelay.calcEpoch( + epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 3 + ) + let epoch_5 = node1.wakuRlnRelay.calcEpoch( + epochTime().float64 + node1.wakuRlnRelay.rlnEpochSizeSec.float64 * 4 + ) + + # Epoch 1 + node1.wakuRlnRelay.unsafeAppendRLNProof(wm1, epoch_1, MessageId(0)).isOkOr: + raiseAssert $error + + # Message wm2 is published in the 
same epoch as wm1, so it'll be considered spam + node1.wakuRlnRelay.unsafeAppendRLNProof(wm2, epoch_1, MessageId(0)).isOkOr: + raiseAssert $error + + discard await node1.publish(some(DefaultPubsubTopic), wm1) + discard await node1.publish(some(DefaultPubsubTopic), wm2) + await sleepAsync(publishSleepDuration) + check: + await node1.waitForNullifierLog(0) + await node2.waitForNullifierLog(1) + + # Epoch 2 + + node1.wakuRlnRelay.unsafeAppendRLNProof(wm3, epoch_2, MessageId(0)).isOkOr: + raiseAssert $error + + discard await node1.publish(some(DefaultPubsubTopic), wm3) + + await sleepAsync(publishSleepDuration) + + check: + await node1.waitForNullifierLog(0) + await node2.waitForNullifierLog(2) + + # Epoch 3 + node1.wakuRlnRelay.unsafeAppendRLNProof(wm4, epoch_3, MessageId(0)).isOkOr: + raiseAssert $error + + discard await node1.publish(some(DefaultPubsubTopic), wm4) + await sleepAsync(publishSleepDuration) + check: + await node1.waitForNullifierLog(0) + await node2.waitForNullifierLog(3) + + # Epoch 4 + node1.wakuRlnRelay.unsafeAppendRLNProof(wm5, epoch_4, MessageId(0)).isOkOr: + raiseAssert $error + + discard await node1.publish(some(DefaultPubsubTopic), wm5) + await sleepAsync(publishSleepDuration) + check: + await node1.waitForNullifierLog(0) + await node2.waitForNullifierLog(4) + + # Epoch 5 + node1.wakuRlnRelay.unsafeAppendRLNProof(wm6, epoch_5, MessageId(0)).isOkOr: + raiseAssert $error + + discard await node1.publish(some(DefaultPubsubTopic), wm6) + await sleepAsync(publishSleepDuration) + check: + await node1.waitForNullifierLog(0) + await node2.waitForNullifierLog(4) + + # Then the node 2 should have cleared the nullifier log for epochs > MaxEpochGap + # Therefore, with 4 max epochs, the first 4 messages will be published (except wm2, which shares epoch with wm1) + check: + (await completionFut1.waitForResult()).value() == true + (await completionFut2.waitForResult()).isErr() + (await completionFut3.waitForResult()).value() == true + (await 
completionFut4.waitForResult()).value() == true + (await completionFut5.waitForResult()).value() == true + (await completionFut6.waitForResult()).value() == true + + # Cleanup + waitFor allFutures(node1.stop(), node2.stop()) diff --git a/third-party/nwaku/tests/waku_rln_relay/utils.nim b/third-party/nwaku/tests/waku_rln_relay/utils.nim new file mode 100644 index 0000000..a4247ab --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/utils.nim @@ -0,0 +1,32 @@ +import web3, chronos, stew/byteutils + +proc deployContract*( + web3: Web3, code: string, gasPrice = 0, contractInput = "" +): Future[ReceiptObject] {.async.} = + # the contract input is the encoded version of contract constructor's input + # use nim-web3/encoding.nim module to find the appropriate encoding procedure for different argument types + # e.g., consider the following contract constructor in solidity + # constructor(uint256 x, uint256 y) + # + # the contractInput can be calculated as follows + # let + # x = 1.u256 + # y = 5.u256 + # contractInput = encode(x).data & encode(y).data + # Note that the order of encoded inputs should match the order of the constructor inputs + let provider = web3.provider + let accounts = await provider.eth_accounts() + + var code = code + if code[1] notin {'x', 'X'}: + code = "0x" & code + var tr: TransactionArgs + tr.`from` = Opt.some(web3.defaultAccount) + let sData = code & contractInput + tr.data = Opt.some(hexToSeqByte(sData)) + tr.gas = Opt.some(Quantity(3000000000000)) + if gasPrice != 0: + tr.gasPrice = Opt.some(gasPrice.Quantity) + + let r = await web3.send(tr) + return await web3.getMinedTransactionReceipt(r) diff --git a/third-party/nwaku/tests/waku_rln_relay/utils_offchain.nim b/third-party/nwaku/tests/waku_rln_relay/utils_offchain.nim new file mode 100644 index 0000000..e0e1bd1 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/utils_offchain.nim @@ -0,0 +1,84 @@ +{.used.} + +import + std/[sequtils, tempfiles], + stew/byteutils, + chronos, + 
chronicles, + libp2p/switch, + libp2p/protocols/pubsub/pubsub + +from std/times import epochTime + +import + ../../../waku/ + [node/waku_node, node/peer_manager, waku_core, waku_node, waku_rln_relay], + ../waku_store/store_utils, + ../waku_archive/archive_utils, + ../testlib/[wakucore, futures, assertions] + +proc setupStaticRln*( + node: WakuNode, + identifier: uint, + rlnRelayEthContractAddress: Option[string] = none(string), +) {.async.} = + await node.mountRlnRelay( + WakuRlnConfig(dynamic: false, credIndex: some(identifier), epochSizeSec: 1) + ) + +proc setupRelayWithStaticRln*( + node: WakuNode, identifier: uint, shards: seq[RelayShard] +) {.async.} = + await node.mountRelay(shards) + await setupStaticRln(node, identifier) + +proc subscribeCompletionHandler*(node: WakuNode, pubsubTopic: string): Future[bool] = + var completionFut = newFuture[bool]() + proc relayHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + if topic == pubsubTopic: + completionFut.complete(true) + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(relayHandler)).isOkOr: + error "failed to subscribe to relay", topic = pubsubTopic, error = error + completionFut.complete(false) + + return completionFut + +proc sendRlnMessage*( + client: WakuNode, + pubsubTopic: string, + contentTopic: string, + completionFuture: Future[bool], + payload: seq[byte] = "Hello".toBytes(), +): Future[bool] {.async.} = + var message = WakuMessage(payload: payload, contentTopic: contentTopic) + let appendResult = client.wakuRlnRelay.appendRLNProof(message, epochTime()) + # Assignment required or crashess + assertResultOk(appendResult) + discard await client.publish(some(pubsubTopic), message) + let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT) + return isCompleted + +proc sendRlnMessageWithInvalidProof*( + client: WakuNode, + pubsubTopic: string, + contentTopic: string, + completionFuture: Future[bool], + payload: seq[byte] = "Hello".toBytes(), +): 
Future[bool] {.async.} = + let + extraBytes: seq[byte] = @[byte(1), 2, 3] + rateLimitProofRes = client.wakuRlnRelay.groupManager.generateProof( + concat(payload, extraBytes), + # we add extra bytes to invalidate proof verification against original payload + client.wakuRlnRelay.getCurrentEpoch(), + ) + rateLimitProof = rateLimitProofRes.get().encode().buffer + message = + WakuMessage(payload: @payload, contentTopic: contentTopic, proof: rateLimitProof) + + discard await client.publish(some(pubsubTopic), message) + let isCompleted = await completionFuture.withTimeout(FUTURE_TIMEOUT) + return isCompleted diff --git a/third-party/nwaku/tests/waku_rln_relay/utils_onchain.nim b/third-party/nwaku/tests/waku_rln_relay/utils_onchain.nim new file mode 100644 index 0000000..39c9ff2 --- /dev/null +++ b/third-party/nwaku/tests/waku_rln_relay/utils_onchain.nim @@ -0,0 +1,610 @@ +{.used.} + +{.push raises: [].} + +import + std/[options, os, osproc, deques, streams, strutils, tempfiles, strformat], + results, + stew/byteutils, + testutils/unittests, + chronos, + chronicles, + stint, + web3, + web3/conversions, + web3/eth_api_types, + json_rpc/rpcclient, + json, + libp2p/crypto/crypto, + eth/keys, + results + +import + waku/[ + waku_rln_relay, + waku_rln_relay/protocol_types, + waku_rln_relay/constants, + waku_rln_relay/contract, + waku_rln_relay/rln, + ], + ../testlib/common, + ./utils + +const CHAIN_ID* = 1234'u256 + +template skip0xPrefix(hexStr: string): int = + ## Returns the index of the first meaningful char in `hexStr` by skipping + ## "0x" prefix + if hexStr.len > 1 and hexStr[0] == '0' and hexStr[1] in {'x', 'X'}: 2 else: 0 + +func strip0xPrefix(s: string): string = + let prefixLen = skip0xPrefix(s) + if prefixLen != 0: + s[prefixLen .. 
^1] + else: + s + +proc generateCredentials*(rlnInstance: ptr RLN): IdentityCredential = + let credRes = membershipKeyGen(rlnInstance) + return credRes.get() + +proc getRateCommitment*( + idCredential: IdentityCredential, userMessageLimit: UserMessageLimit +): RlnRelayResult[RawRateCommitment] = + return RateCommitment( + idCommitment: idCredential.idCommitment, userMessageLimit: userMessageLimit + ).toLeaf() + +proc generateCredentials*(rlnInstance: ptr RLN, n: int): seq[IdentityCredential] = + var credentials: seq[IdentityCredential] + for i in 0 ..< n: + credentials.add(generateCredentials(rlnInstance)) + return credentials + +proc getContractAddressFromDeployScriptOutput(output: string): Result[string, string] = + const searchStr = "Return ==\n0: address " + const addressLength = 42 # Length of an Ethereum address in hex format + let idx = output.find(searchStr) + if idx >= 0: + let startPos = idx + searchStr.len + let endPos = output.find('\n', startPos) + if (endPos - startPos) >= addressLength: + let address = output[startPos ..< endPos] + return ok(address) + return err("Unable to find contract address in deploy script output") + +proc getForgePath(): string = + var forgePath = "" + if existsEnv("XDG_CONFIG_HOME"): + forgePath = joinPath(forgePath, os.getEnv("XDG_CONFIG_HOME", "")) + else: + forgePath = joinPath(forgePath, os.getEnv("HOME", "")) + forgePath = joinPath(forgePath, ".foundry/bin/forge") + return $forgePath + +contract(ERC20Token): + proc allowance(owner: Address, spender: Address): UInt256 {.view.} + proc balanceOf(account: Address): UInt256 {.view.} + +proc getTokenBalance( + web3: Web3, tokenAddress: Address, account: Address +): Future[UInt256] {.async.} = + let token = web3.contractSender(ERC20Token, tokenAddress) + return await token.balanceOf(account).call() + +proc ethToWei(eth: UInt256): UInt256 = + eth * 1000000000000000000.u256 + +proc sendMintCall( + web3: Web3, + accountFrom: Address, + tokenAddress: Address, + recipientAddress: 
Address, + amountTokens: UInt256, + recipientBalanceBeforeExpectedTokens: Option[UInt256] = none(UInt256), +): Future[TxHash] {.async.} = + let doBalanceAssert = recipientBalanceBeforeExpectedTokens.isSome() + + if doBalanceAssert: + let balanceBeforeMint = await getTokenBalance(web3, tokenAddress, recipientAddress) + let balanceBeforeExpectedTokens = recipientBalanceBeforeExpectedTokens.get() + assert balanceBeforeMint == balanceBeforeExpectedTokens, + fmt"Balance is {balanceBeforeMint} before minting but expected {balanceBeforeExpectedTokens}" + + # Create mint transaction + # Method ID for mint(address,uint256) is 0x40c10f19 which is part of the openzeppelin ERC20 standard + # The method ID for a deployed test token can be viewed here https://sepolia.lineascan.build/address/0x185A0015aC462a0aECb81beCc0497b649a64B9ea#writeContract + let mintSelector = "0x40c10f19" + let addressHex = recipientAddress.toHex() + # Pad the address and amount to 32 bytes each + let paddedAddress = addressHex.align(64, '0') + + let amountHex = amountTokens.toHex() + let amountWithout0x = + if amountHex.toLower().startsWith("0x"): + amountHex[2 .. 
^1] + else: + amountHex + let paddedAmount = amountWithout0x.align(64, '0') + let mintCallData = mintSelector & paddedAddress & paddedAmount + let gasPrice = int(await web3.provider.eth_gasPrice()) + + # Create the transaction + var tx: TransactionArgs + tx.`from` = Opt.some(accountFrom) + tx.to = Opt.some(tokenAddress) + tx.value = Opt.some(0.u256) # No ETH is sent for token operations + tx.gasPrice = Opt.some(Quantity(gasPrice)) + tx.data = Opt.some(byteutils.hexToSeqByte(mintCallData)) + + trace "Sending mint call" + let txHash = await web3.send(tx) + + let balanceOfSelector = "0x70a08231" + let balanceCallData = balanceOfSelector & paddedAddress + + # Wait a bit for transaction to be mined + await sleepAsync(500.milliseconds) + + if doBalanceAssert: + let balanceAfterMint = await getTokenBalance(web3, tokenAddress, recipientAddress) + let balanceAfterExpectedTokens = + recipientBalanceBeforeExpectedTokens.get() + amountTokens + assert balanceAfterMint == balanceAfterExpectedTokens, + fmt"Balance is {balanceAfterMint} after transfer but expected {balanceAfterExpectedTokens}" + + return txHash + +# Check how many tokens a spender (the RLN contract) is allowed to spend on behalf of the owner (account which wishes to register a membership) +proc checkTokenAllowance( + web3: Web3, tokenAddress: Address, owner: Address, spender: Address +): Future[UInt256] {.async.} = + let token = web3.contractSender(ERC20Token, tokenAddress) + let allowance = await token.allowance(owner, spender).call() + trace "Current allowance", owner = owner, spender = spender, allowance = allowance + return allowance + +proc setupContractDeployment( + forgePath: string, submodulePath: string +): Result[void, string] = + trace "Contract deployer paths", forgePath = forgePath, submodulePath = submodulePath + # Build the Foundry project + try: + let (forgeCleanOutput, forgeCleanExitCode) = + execCmdEx(fmt"""cd {submodulePath} && {forgePath} clean""") + trace "Executed forge clean command", output 
= forgeCleanOutput + if forgeCleanExitCode != 0: + return err("forge clean command failed") + + let (forgeInstallOutput, forgeInstallExitCode) = + execCmdEx(fmt"""cd {submodulePath} && {forgePath} install""") + trace "Executed forge install command", output = forgeInstallOutput + if forgeInstallExitCode != 0: + return err("forge install command failed") + + let (pnpmInstallOutput, pnpmInstallExitCode) = + execCmdEx(fmt"""cd {submodulePath} && pnpm install""") + trace "Executed pnpm install command", output = pnpmInstallOutput + if pnpmInstallExitCode != 0: + return err("pnpm install command failed" & pnpmInstallOutput) + + let (forgeBuildOutput, forgeBuildExitCode) = + execCmdEx(fmt"""cd {submodulePath} && {forgePath} build""") + trace "Executed forge build command", output = forgeBuildOutput + if forgeBuildExitCode != 0: + return err("forge build command failed") + + # Set the environment variable API keys to anything for local testnet deployment + putEnv("API_KEY_CARDONA", "123") + putEnv("API_KEY_LINEASCAN", "123") + putEnv("API_KEY_ETHERSCAN", "123") + except OSError, IOError: + return err("Command execution failed: " & getCurrentExceptionMsg()) + return ok() + +proc deployTestToken*( + pk: keys.PrivateKey, acc: Address, web3: Web3 +): Future[Result[Address, string]] {.async.} = + ## Executes a Foundry forge script that deploys the a token contract (ERC-20) used for testing. 
This is a prerequisite to enable the contract deployment and this token contract address needs to be minted and approved for the accounts that need to register memberships with the contract + ## submodulePath: path to the submodule containing contract deploy scripts + + # All RLN related tests should be run from the root directory of the project + let submodulePath = absolutePath("./vendor/waku-rlnv2-contract") + + # Verify submodule path exists + if not dirExists(submodulePath): + error "Submodule path does not exist", submodulePath = submodulePath + return err("Submodule path does not exist: " & submodulePath) + + let forgePath = getForgePath() + + setupContractDeployment(forgePath, submodulePath).isOkOr: + error "Failed to setup contract deployment", error = $error + return err("Failed to setup contract deployment: " & $error) + + # Deploy TestToken contract + let forgeCmdTestToken = + fmt"""cd {submodulePath} && {forgePath} script test/TestToken.sol --broadcast -vvv --rpc-url http://localhost:8540 --tc TestTokenFactory --private-key {pk} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployTestToken, exitCodeDeployTestToken) = execCmdEx(forgeCmdTestToken) + trace "Executed forge command to deploy TestToken contract", + output = outputDeployTestToken + if exitCodeDeployTestToken != 0: + return error("Forge command to deploy TestToken contract failed") + + # Parse the command output to find contract address + let testTokenAddress = getContractAddressFromDeployScriptOutput(outputDeployTestToken).valueOr: + error "Failed to get TestToken contract address from deploy script output", + error = $error + return err( + "Failed to get TestToken contract address from deploy script output: " & $error + ) + debug "Address of the TestToken contract", testTokenAddress + + let testTokenAddressBytes = hexToByteArray[20](testTokenAddress) + let testTokenAddressAddress = Address(testTokenAddressBytes) + putEnv("TOKEN_ADDRESS", 
testTokenAddressAddress.toHex()) + + return ok(testTokenAddressAddress) + +# Sends an ERC20 token approval call to allow a spender to spend a certain amount of tokens on behalf of the owner +proc approveTokenAllowanceAndVerify*( + web3: Web3, + accountFrom: Address, + privateKey: keys.PrivateKey, + tokenAddress: Address, + spender: Address, + amountWei: UInt256, + expectedAllowanceBefore: Option[UInt256] = none(UInt256), +): Future[Result[TxHash, string]] {.async.} = + var allowanceBefore: UInt256 + if expectedAllowanceBefore.isSome(): + allowanceBefore = + await checkTokenAllowance(web3, tokenAddress, accountFrom, spender) + let expected = expectedAllowanceBefore.get() + if allowanceBefore != expected: + return + err(fmt"Allowance is {allowanceBefore} before approval but expected {expected}") + + # Temporarily set the private key + let oldPrivateKey = web3.privateKey + web3.privateKey = Opt.some(privateKey) + web3.lastKnownNonce = Opt.none(Quantity) + + try: + # ERC20 approve function signature: approve(address spender, uint256 amount) + # Method ID for approve(address,uint256) is 0x095ea7b3 + const APPROVE_SELECTOR = "0x095ea7b3" + let addressHex = spender.toHex().align(64, '0') + let amountHex = amountWei.toHex().align(64, '0') + let approveCallData = APPROVE_SELECTOR & addressHex & amountHex + + let gasPrice = await web3.provider.eth_gasPrice() + + var tx: TransactionArgs + tx.`from` = Opt.some(accountFrom) + tx.to = Opt.some(tokenAddress) + tx.value = Opt.some(0.u256) + tx.gasPrice = Opt.some(gasPrice) + tx.gas = Opt.some(Quantity(100000)) + tx.data = Opt.some(byteutils.hexToSeqByte(approveCallData)) + tx.chainId = Opt.some(CHAIN_ID) + + trace "Sending approve call", tx = tx + let txHash = await web3.send(tx) + let receipt = await web3.getMinedTransactionReceipt(txHash) + + if receipt.status.isNone(): + return err("Approval transaction failed receipt is none") + if receipt.status.get() != 1.Quantity: + return err("Approval transaction failed status quantity 
not 1") + + # Single verification check after mining (no extra sleep needed) + let allowanceAfter = + await checkTokenAllowance(web3, tokenAddress, accountFrom, spender) + let expectedAfter = + if expectedAllowanceBefore.isSome(): + expectedAllowanceBefore.get() + amountWei + else: + amountWei + + if allowanceAfter < expectedAfter: + return err( + fmt"Allowance is {allowanceAfter} after approval but expected at least {expectedAfter}" + ) + + return ok(txHash) + except CatchableError as e: + return err(fmt"Failed to send approve transaction: {e.msg}") + finally: + # Restore the old private key + web3.privateKey = oldPrivateKey + +proc executeForgeContractDeployScripts*( + privateKey: keys.PrivateKey, acc: Address, web3: Web3 +): Future[Result[Address, string]] {.async, gcsafe.} = + ## Executes a set of foundry forge scripts required to deploy the RLN contract and returns the deployed proxy contract address + ## submodulePath: path to the submodule containing contract deploy scripts + + # All RLN related tests should be run from the root directory of the project + let submodulePath = "./vendor/waku-rlnv2-contract" + + # Verify submodule path exists + if not dirExists(submodulePath): + error "Submodule path does not exist", submodulePath = submodulePath + return err("Submodule path does not exist: " & submodulePath) + + let forgePath = getForgePath() + debug "Forge path", forgePath + + # Verify forge executable exists + if not fileExists(forgePath): + error "Forge executable not found", forgePath = forgePath + return err("Forge executable not found: " & forgePath) + + trace "contract deployer account details", account = acc, privateKey = privateKey + let setupContractEnv = setupContractDeployment(forgePath, submodulePath) + if setupContractEnv.isErr(): + error "Failed to setup contract deployment" + return err("Failed to setup contract deployment") + + # Deploy LinearPriceCalculator contract + let forgeCmdPriceCalculator = + fmt"""cd {submodulePath} && {forgePath} 
script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployPriceCalculator --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployPriceCalculator, exitCodeDeployPriceCalculator) = + execCmdEx(forgeCmdPriceCalculator) + trace "Executed forge command to deploy LinearPriceCalculator contract", + output = outputDeployPriceCalculator + if exitCodeDeployPriceCalculator != 0: + return error("Forge command to deploy LinearPriceCalculator contract failed") + + # Parse the output to find contract address + let priceCalculatorAddressRes = + getContractAddressFromDeployScriptOutput(outputDeployPriceCalculator) + if priceCalculatorAddressRes.isErr(): + error "Failed to get LinearPriceCalculator contract address from deploy script output" + let priceCalculatorAddress = priceCalculatorAddressRes.get() + debug "Address of the LinearPriceCalculator contract", priceCalculatorAddress + putEnv("PRICE_CALCULATOR_ADDRESS", priceCalculatorAddress) + + let forgeCmdWakuRln = + fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployWakuRlnV2 --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployWakuRln, exitCodeDeployWakuRln) = execCmdEx(forgeCmdWakuRln) + trace "Executed forge command to deploy WakuRlnV2 contract", + output = outputDeployWakuRln + if exitCodeDeployWakuRln != 0: + error "Forge command to deploy WakuRlnV2 contract failed", + output = outputDeployWakuRln + + # Parse the output to find contract address + let wakuRlnV2AddressRes = + getContractAddressFromDeployScriptOutput(outputDeployWakuRln) + if wakuRlnV2AddressRes.isErr(): + error "Failed to get WakuRlnV2 contract address from deploy script output" + ##TODO: raise exception here? 
+ let wakuRlnV2Address = wakuRlnV2AddressRes.get() + debug "Address of the WakuRlnV2 contract", wakuRlnV2Address + putEnv("WAKURLNV2_ADDRESS", wakuRlnV2Address) + + # Deploy Proxy contract + let forgeCmdProxy = + fmt"""cd {submodulePath} && {forgePath} script script/Deploy.s.sol --broadcast -vvvv --rpc-url http://localhost:8540 --tc DeployProxy --private-key {privateKey} && rm -rf broadcast/*/*/run-1*.json && rm -rf cache/*/*/run-1*.json""" + let (outputDeployProxy, exitCodeDeployProxy) = execCmdEx(forgeCmdProxy) + trace "Executed forge command to deploy proxy contract", output = outputDeployProxy + if exitCodeDeployProxy != 0: + error "Forge command to deploy Proxy failed", error = outputDeployProxy + return err("Forge command to deploy Proxy failed") + + let proxyAddress = getContractAddressFromDeployScriptOutput(outputDeployProxy) + let proxyAddressBytes = hexToByteArray[20](proxyAddress.get()) + let proxyAddressAddress = Address(proxyAddressBytes) + + info "Address of the Proxy contract", proxyAddressAddress + + await web3.close() + return ok(proxyAddressAddress) + +proc sendEthTransfer*( + web3: Web3, + accountFrom: Address, + accountTo: Address, + amountWei: UInt256, + accountToBalanceBeforeExpectedWei: Option[UInt256] = none(UInt256), +): Future[TxHash] {.async.} = + let doBalanceAssert = accountToBalanceBeforeExpectedWei.isSome() + + if doBalanceAssert: + let balanceBeforeWei = await web3.provider.eth_getBalance(accountTo, "latest") + let balanceBeforeExpectedWei = accountToBalanceBeforeExpectedWei.get() + assert balanceBeforeWei == balanceBeforeExpectedWei, + fmt"Balance is {balanceBeforeWei} before transfer but expected {balanceBeforeExpectedWei}" + + let gasPrice = int(await web3.provider.eth_gasPrice()) + + var tx: TransactionArgs + tx.`from` = Opt.some(accountFrom) + tx.to = Opt.some(accountTo) + tx.value = Opt.some(amountWei) + tx.gasPrice = Opt.some(Quantity(gasPrice)) + + # TODO: handle the error if sending fails + let txHash = await web3.send(tx) + 
+ # Wait a bit for transaction to be mined + await sleepAsync(200.milliseconds) + + if doBalanceAssert: + let balanceAfterWei = await web3.provider.eth_getBalance(accountTo, "latest") + let balanceAfterExpectedWei = accountToBalanceBeforeExpectedWei.get() + amountWei + assert balanceAfterWei == balanceAfterExpectedWei, + fmt"Balance is {balanceAfterWei} after transfer but expected {balanceAfterExpectedWei}" + + return txHash + +proc createEthAccount*( + ethAmount: UInt256 = 1000.u256 +): Future[(keys.PrivateKey, Address)] {.async.} = + let web3 = await newWeb3(EthClient) + let accounts = await web3.provider.eth_accounts() + let gasPrice = Quantity(await web3.provider.eth_gasPrice()) + web3.defaultAccount = accounts[0] + + let pk = keys.PrivateKey.random(rng[]) + let acc = Address(toCanonicalAddress(pk.toPublicKey())) + + var tx: TransactionArgs + tx.`from` = Opt.some(accounts[0]) + tx.value = Opt.some(ethToWei(ethAmount)) + tx.to = Opt.some(acc) + tx.gasPrice = Opt.some(Quantity(gasPrice)) + + # Send ethAmount to acc + discard await web3.send(tx) + let balance = await web3.provider.eth_getBalance(acc, "latest") + assert balance == ethToWei(ethAmount), + fmt"Balance is {balance} but expected {ethToWei(ethAmount)}" + + return (pk, acc) + +proc createEthAccount*(web3: Web3): (keys.PrivateKey, Address) = + let pk = keys.PrivateKey.random(rng[]) + let acc = Address(toCanonicalAddress(pk.toPublicKey())) + + return (pk, acc) + +proc getAnvilPath*(): string = + var anvilPath = "" + if existsEnv("XDG_CONFIG_HOME"): + anvilPath = joinPath(anvilPath, os.getEnv("XDG_CONFIG_HOME", "")) + else: + anvilPath = joinPath(anvilPath, os.getEnv("HOME", "")) + anvilPath = joinPath(anvilPath, ".foundry/bin/anvil") + return $anvilPath + +# Runs Anvil daemon +proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process = + # Passed options are + # --port Port to listen on. + # --gas-limit Sets the block gas limit in WEI. + # --balance The default account balance, specified in ether. 
+ # --chain-id Chain ID of the network. + # See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details + try: + let anvilPath = getAnvilPath() + debug "Anvil path", anvilPath + let runAnvil = startProcess( + anvilPath, + args = [ + "--port", + $port, + "--gas-limit", + "300000000000000", + "--balance", + "1000000000", + "--chain-id", + $chainId, + ], + options = {poUsePath}, + ) + let anvilPID = runAnvil.processID + + # We read stdout from Anvil to see when daemon is ready + var anvilStartLog: string + var cmdline: string + while true: + try: + if runAnvil.outputstream.readLine(cmdline): + anvilStartLog.add(cmdline) + if cmdline.contains("Listening on 127.0.0.1:" & $port): + break + except Exception, CatchableError: + break + debug "Anvil daemon is running and ready", pid = anvilPID, startLog = anvilStartLog + return runAnvil + except: # TODO: Fix "BareExcept" warning + error "Anvil daemon run failed", err = getCurrentExceptionMsg() + +# Stops Anvil daemon +proc stopAnvil*(runAnvil: Process) {.used.} = + if runAnvil.isNil: + debug "stopAnvil called with nil Process" + return + + let anvilPID = runAnvil.processID + debug "Stopping Anvil daemon", anvilPID = anvilPID + + try: + # Send termination signals + when not defined(windows): + discard execCmdEx(fmt"kill -TERM {anvilPID}") + discard execCmdEx(fmt"kill -9 {anvilPID}") + else: + discard execCmdEx(fmt"taskkill /F /PID {anvilPID}") + + # Close Process object to release resources + close(runAnvil) + debug "Anvil daemon stopped", anvilPID = anvilPID + except Exception as e: + debug "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg + +proc setupOnchainGroupManager*( + ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256 +): Future[OnchainGroupManager] {.async.} = + let rlnInstanceRes = createRlnInstance() + check: + rlnInstanceRes.isOk() + + let rlnInstance = rlnInstanceRes.get() + + # connect to the eth client + let web3 = await newWeb3(ethClientUrl) + let accounts 
= await web3.provider.eth_accounts() + web3.defaultAccount = accounts[1] + + let (privateKey, acc) = createEthAccount(web3) + + # we just need to fund the default account + # the send procedure returns a tx hash that we don't use, hence discard + discard await sendEthTransfer( + web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256) + ) + + let testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr: + assert false, "Failed to deploy test token contract: " & $error + return + + # mint the token from the generated account + discard await sendMintCall( + web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256), some(0.u256) + ) + + let contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr: + assert false, "Failed to deploy RLN contract: " & $error + return + + # If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens + let tokenApprovalResult = await approveTokenAllowanceAndVerify( + web3, + acc, + privateKey, + testTokenAddress, + contractAddress, + ethToWei(200.u256), + some(0.u256), + ) + + assert tokenApprovalResult.isOk, tokenApprovalResult.error() + + let manager = OnchainGroupManager( + ethClientUrls: @[ethClientUrl], + ethContractAddress: $contractAddress, + chainId: CHAIN_ID, + ethPrivateKey: some($privateKey), + rlnInstance: rlnInstance, + onFatalErrorAction: proc(errStr: string) = + raiseAssert errStr + , + ) + + return manager + +{.pop.} diff --git a/third-party/nwaku/tests/waku_store/store_utils.nim b/third-party/nwaku/tests/waku_store/store_utils.nim new file mode 100644 index 0000000..4586a06 --- /dev/null +++ b/third-party/nwaku/tests/waku_store/store_utils.nim @@ -0,0 +1,22 @@ +{.used.} + +import std/options, chronos, chronicles + +import + waku/[node/peer_manager, waku_store, waku_store/client], ../testlib/[common, wakucore] + +proc newTestWakuStore*( + switch: Switch, handler: StoreQueryRequestHandler +): 
Future[WakuStore] {.async.} = + let + peerManager = PeerManager.new(switch) + proto = WakuStore.new(peerManager, rng, handler) + + await proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient {.gcsafe.} = + let peerManager = PeerManager.new(switch) + WakuStoreClient.new(peerManager, rng) diff --git a/third-party/nwaku/tests/waku_store/test_all.nim b/third-party/nwaku/tests/waku_store/test_all.nim new file mode 100644 index 0000000..d0a426e --- /dev/null +++ b/third-party/nwaku/tests/waku_store/test_all.nim @@ -0,0 +1,8 @@ +{.used.} + +import + ./test_client, + ./test_rpc_codec, + ./test_waku_store, + ./test_wakunode_store, + ./test_resume diff --git a/third-party/nwaku/tests/waku_store/test_client.nim b/third-party/nwaku/tests/waku_store/test_client.nim new file mode 100644 index 0000000..38b07bd --- /dev/null +++ b/third-party/nwaku/tests/waku_store/test_client.nim @@ -0,0 +1,226 @@ +{.used.} + +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[node/peer_manager, waku_core, waku_store, waku_store/client, common/paging], + ../testlib/[wakucore, testasync, futures], + ./store_utils + +suite "Store Client": + var message1 {.threadvar.}: WakuMessage + var message2 {.threadvar.}: WakuMessage + var message3 {.threadvar.}: WakuMessage + var hash1 {.threadvar.}: WakuMessageHash + var hash2 {.threadvar.}: WakuMessageHash + var hash3 {.threadvar.}: WakuMessageHash + var messageSeq {.threadvar.}: seq[WakuMessageKeyValue] + var handlerFuture {.threadvar.}: Future[StoreQueryRequest] + var handler {.threadvar.}: StoreQueryRequestHandler + var storeQuery {.threadvar.}: StoreQueryRequest + + var serverSwitch {.threadvar.}: Switch + var clientSwitch {.threadvar.}: Switch + + var server {.threadvar.}: WakuStore + var client {.threadvar.}: WakuStoreClient + + var serverPeerInfo {.threadvar.}: RemotePeerInfo + var clientPeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + 
message1 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message2 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message3 = fakeWakuMessage(contentTopic = DefaultContentTopic) + hash1 = computeMessageHash(DefaultPubsubTopic, message1) + hash2 = computeMessageHash(DefaultPubsubTopic, message2) + hash3 = computeMessageHash(DefaultPubsubTopic, message3) + messageSeq = + @[ + WakuMessageKeyValue( + messageHash: hash1, + message: some(message1), + pubsubTopic: some(DefaultPubsubTopic), + ), + WakuMessageKeyValue( + messageHash: hash2, + message: some(message2), + pubsubTopic: some(DefaultPubsubTopic), + ), + WakuMessageKeyValue( + messageHash: hash3, + message: some(message3), + pubsubTopic: some(DefaultPubsubTopic), + ), + ] + handlerFuture = newHistoryFuture() + handler = proc(req: StoreQueryRequest): Future[StoreQueryResult] {.async, gcsafe.} = + var request = req + request.requestId = "" + handlerFuture.complete(request) + return ok(StoreQueryResponse(messages: messageSeq)) + storeQuery = StoreQueryRequest( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + paginationForward: PagingDirection.FORWARD, + ) + + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + server = await newTestWakuStore(serverSwitch, handler = handler) + client = newTestWakuStoreClient(clientSwitch) + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## The following sleep is aimed to prevent macos failures in CI + #[ +2024-05-16T13:24:45.5106200Z INF 2024-05-16 13:24:45.509+00:00 Stopping AutonatService topics="libp2p autonatservice" tid=53712 file=service.nim:203 +2024-05-16T13:24:45.5107960Z WRN 2024-05-16 13:24:45.509+00:00 service is already stopped topics="libp2p switch" tid=53712 file=switch.nim:86 +2024-05-16T13:24:45.5109010Z . (1.68s) +2024-05-16T13:24:45.5109320Z Store Client (0.00s) +2024-05-16T13:24:45.5109870Z SIGSEGV: Illegal storage access. (Attempt to read from nil?) 
+2024-05-16T13:24:45.5111470Z stack trace: (most recent call last) + ]# + await sleepAsync(500.millis) + + serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + suite "StoreQueryRequest Creation and Execution": + asyncTest "Valid Queries": + # When a valid query is sent to the server + let queryResponse = await client.query(storeQuery, peer = serverPeerInfo) + + # Then the query is processed successfully + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == storeQuery + queryResponse.get().messages == messageSeq + + asyncTest "Invalid Queries": + # TODO: IMPROVE: We can't test "actual" invalid queries because + # it directly depends on the handler implementation, to achieve + # proper coverage we'd need an example implementation. + + # Given some invalid queries + let + invalidQuery1 = StoreQueryRequest( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[], + paginationForward: PagingDirection.FORWARD, + ) + invalidQuery2 = StoreQueryRequest( + pubsubTopic: PubsubTopic.none(), + contentTopics: @[DefaultContentTopic], + paginationForward: PagingDirection.FORWARD, + ) + invalidQuery3 = StoreQueryRequest( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + paginationLimit: some(uint64(0)), + ) + invalidQuery4 = StoreQueryRequest( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + paginationLimit: some(uint64(0)), + ) + invalidQuery5 = StoreQueryRequest( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(0.Timestamp), + endTime: some(0.Timestamp), + ) + invalidQuery6 = StoreQueryRequest( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(0.Timestamp), + endTime: some(-1.Timestamp), + ) + + # When the query is sent to 
the server + let queryResponse1 = await client.query(invalidQuery1, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery1 + queryResponse1.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newHistoryFuture() + let queryResponse2 = await client.query(invalidQuery2, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery2 + queryResponse2.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newHistoryFuture() + let queryResponse3 = await client.query(invalidQuery3, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery3 + queryResponse3.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newHistoryFuture() + let queryResponse4 = await client.query(invalidQuery4, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery4 + queryResponse4.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newHistoryFuture() + let queryResponse5 = await client.query(invalidQuery5, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery5 + queryResponse5.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newHistoryFuture() + let queryResponse6 = await client.query(invalidQuery6, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery6 + 
queryResponse6.get().messages == messageSeq + + suite "Verification of StoreQueryResponse Payload": + asyncTest "Positive Responses": + # When a valid query is sent to the server + let queryResponse = await client.query(storeQuery, peer = serverPeerInfo) + + # Then the query is processed successfully, and is of the expected type + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + type(queryResponse.get()) is StoreQueryResponse + + asyncTest "Negative Responses - PeerDialFailure": + # Given a stopped peer + let + otherServerSwitch = newTestSwitch() + otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo() + + # When a query is sent to the stopped peer + let queryResponse = await client.query(storeQuery, peer = otherServerPeerInfo) + + # Then the query is not processed + check: + not await handlerFuture.withTimeout(FUTURE_TIMEOUT) + queryResponse.isErr() + queryResponse.error.kind == ErrorCode.PEER_DIAL_FAILURE diff --git a/third-party/nwaku/tests/waku_store/test_resume.nim b/third-party/nwaku/tests/waku_store/test_resume.nim new file mode 100644 index 0000000..93e07ec --- /dev/null +++ b/third-party/nwaku/tests/waku_store/test_resume.nim @@ -0,0 +1,113 @@ +{.used.} + +import std/[options, net], testutils/unittests, chronos, results + +import + waku/[ + node/peer_manager, + node/waku_node, + waku_core, + waku_store/resume, + waku_store/common, + waku_archive/driver, + ], + ../testlib/[wakucore, testasync, wakunode], + ./store_utils, + ../waku_archive/archive_utils + +suite "Store Resume": + var resume {.threadvar.}: StoreResume + + asyncSetup: + let resumeRes: Result[StoreResume, string] = + StoreResume.new(peerManager = nil, wakuArchive = nil, wakuStoreClient = nil) + + assert resumeRes.isOk(), $resumeRes.error + + resume = resumeRes.get() + + asyncTeardown: + await resume.stopWait() + + asyncTest "get set roundtrip": + let ts = getNowInNanosecondTime() + + let setRes = resume.setLastOnlineTimestamp(ts) + assert setRes.isOk(), $setRes.error + + 
let getRes = resume.getLastOnlineTimestamp() + assert getRes.isOk(), $getRes.error + + let getTs = getRes.get() + + assert getTs == ts, "wrong timestamp" + +suite "Store Resume - End to End": + var server {.threadvar.}: WakuNode + var client {.threadvar.}: WakuNode + + var serverDriver {.threadvar.}: ArchiveDriver + var clientDriver {.threadvar.}: ArchiveDriver + + asyncSetup: + let messages = + @[ + fakeWakuMessage(@[byte 00]), + fakeWakuMessage(@[byte 01]), + fakeWakuMessage(@[byte 02]), + fakeWakuMessage(@[byte 03]), + fakeWakuMessage(@[byte 04]), + fakeWakuMessage(@[byte 05]), + fakeWakuMessage(@[byte 06]), + fakeWakuMessage(@[byte 07]), + fakeWakuMessage(@[byte 08]), + fakeWakuMessage(@[byte 09]), + ] + + let + serverKey = generateSecp256k1Key() + clientKey = generateSecp256k1Key() + + server = newTestWakuNode(serverKey, IPv4_any(), Port(0)) + client = newTestWakuNode(clientKey, IPv4_any(), Port(0)) + + serverDriver = newArchiveDriverWithMessages(DefaultPubsubTopic, messages) + clientDriver = newSqliteArchiveDriver() + + let mountServerArchiveRes = server.mountArchive(serverDriver) + let mountClientArchiveRes = client.mountArchive(clientDriver) + + assert mountServerArchiveRes.isOk() + assert mountClientArchiveRes.isOk() + + await server.mountStore() + await client.mountStore() + + client.mountStoreClient() + server.mountStoreClient() + + client.setupStoreResume() + + await server.start() + + let serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() + + client.peerManager.addServicePeer(serverRemotePeerInfo, WakuStoreCodec) + + asyncTeardown: + await allFutures(client.stop(), server.stop()) + + asyncTest "10 messages resume": + var countRes = await clientDriver.getMessagesCount() + assert countRes.isOk(), $countRes.error + + check: + countRes.get() == 0 + + await client.start() + + countRes = await clientDriver.getMessagesCount() + assert countRes.isOk(), $countRes.error + + check: + countRes.get() == 10 diff --git 
a/third-party/nwaku/tests/waku_store/test_rpc_codec.nim b/third-party/nwaku/tests/waku_store/test_rpc_codec.nim new file mode 100644 index 0000000..961e3c0 --- /dev/null +++ b/third-party/nwaku/tests/waku_store/test_rpc_codec.nim @@ -0,0 +1,95 @@ +{.used.} + +import std/options, testutils/unittests, chronos +import + waku/ + [common/protobuf, common/paging, waku_core, waku_store/common, waku_store/rpc_codec], + ../testlib/wakucore + +procSuite "Waku Store - RPC codec": + test "StoreQueryRequest protobuf codec": + ## Given + let query = StoreQueryRequest( + requestId: "0", + includeData: true, + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(Timestamp(10)), + endTime: some(Timestamp(11)), + messageHashes: @[], + paginationCursor: none(WakuMessageHash), + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(DefaultPageSize), + ) + + ## When + let pb = query.encode() + let decodedQuery = StoreQueryRequest.decode(pb.buffer) + + ## Then + check: + decodedQuery.isOk() + + check: + # the fields of decoded query decodedQuery must be the same as the original query query + decodedQuery.value == query + + test "StoreQueryRequest protobuf codec - empty history query": + ## Given + let emptyQuery = StoreQueryRequest() + + ## When + let pb = emptyQuery.encode() + let decodedEmptyQuery = StoreQueryRequest.decode(pb.buffer) + + ## Then + check: + decodedEmptyQuery.isOk() + + check: + # check the correctness of init and encode for an empty HistoryQueryRPC + decodedEmptyQuery.value == emptyQuery + + test "StoreQueryResponse protobuf codec": + ## Given + let + message = fakeWakuMessage() + hash = computeMessageHash(DefaultPubsubTopic, message) + keyValue = WakuMessageKeyValue( + messageHash: hash, message: some(message), pubsubTopic: some(DefaultPubsubTopic) + ) + res = StoreQueryResponse( + requestId: "1", + statusCode: 200, + statusDesc: "it's fine", + messages: @[keyValue], + paginationCursor: none(WakuMessageHash), 
+ ) + + ## When + let pb = res.encode() + let decodedRes = StoreQueryResponse.decode(pb.buffer) + + ## Then + check: + decodedRes.isOk() + + check: + # the fields of decoded response decodedRes must be the same as the original response res + decodedRes.value == res + + test "StoreQueryResponse protobuf codec - empty history response": + ## Given + let emptyRes = StoreQueryResponse() + + ## When + let pb = emptyRes.encode() + let decodedEmptyRes = StoreQueryResponse.decode(pb.buffer) + + ## Then + check: + decodedEmptyRes.isOk() + + check: + # check the correctness of init and encode for an empty HistoryResponseRPC + decodedEmptyRes.value == emptyRes diff --git a/third-party/nwaku/tests/waku_store/test_waku_store.nim b/third-party/nwaku/tests/waku_store/test_waku_store.nim new file mode 100644 index 0000000..815b3ac --- /dev/null +++ b/third-party/nwaku/tests/waku_store/test_waku_store.nim @@ -0,0 +1,119 @@ +{.used.} + +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + common/paging, + node/peer_manager, + waku_core, + waku_core/message/digest, + waku_store, + waku_store/client, + waku_store/common, + ], + ../testlib/wakucore, + ./store_utils + +suite "Waku Store - query handler": + asyncTest "history query handler should be called": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + + let msg = fakeWakuMessage(contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + let kv = WakuMessageKeyValue( + messageHash: hash, message: some(msg), pubsubTopic: some(DefaultPubsubTopic) + ) + + var queryHandlerFut = newFuture[(StoreQueryRequest)]() + + let queryHandler = proc( + req: StoreQueryRequest + ): Future[StoreQueryResult] {.async, gcsafe.} = + var request = req + request.requestId = "" # Must remove the id for equality 
+ queryHandlerFut.complete(request) + return ok(StoreQueryResponse(messages: @[kv])) + + let + server = await newTestWakuStore(serverSwitch, handler = queryhandler) + client = newTestWakuStoreClient(clientSwitch) + + let req = StoreQueryRequest( + contentTopics: @[DefaultContentTopic], paginationForward: PagingDirection.FORWARD + ) + + ## When + let queryRes = await client.query(req, peer = serverPeerInfo) + + ## Then + check: + not queryHandlerFut.failed() + queryRes.isOk() + + let request = queryHandlerFut.read() + check: + request == req + + let response = queryRes.tryGet() + check: + response.messages.len == 1 + response.messages == @[kv] + + ## Cleanup + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + asyncTest "history query handler should be called and return an error": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + + var queryHandlerFut = newFuture[(StoreQueryRequest)]() + let queryHandler = proc( + req: StoreQueryRequest + ): Future[StoreQueryResult] {.async, gcsafe.} = + var request = req + request.requestId = "" # Must remove the id for equality + queryHandlerFut.complete(request) + return err(StoreError(kind: ErrorCode.BAD_REQUEST)) + + let + server = await newTestWakuStore(serverSwitch, handler = queryhandler) + client = newTestWakuStoreClient(clientSwitch) + + let req = StoreQueryRequest( + contentTopics: @[DefaultContentTopic], paginationForward: PagingDirection.FORWARD + ) + + ## When + let queryRes = await client.query(req, peer = serverPeerInfo) + + ## Then + check: + not queryHandlerFut.failed() + queryRes.isErr() + + let request = queryHandlerFut.read() + check: + request == req + + let error = queryRes.tryError() + check: + error.kind == ErrorCode.BAD_REQUEST + + ## Cleanup + await allFutures(serverSwitch.stop(), clientSwitch.stop()) diff --git 
a/third-party/nwaku/tests/waku_store/test_wakunode_store.nim b/third-party/nwaku/tests/waku_store/test_wakunode_store.nim new file mode 100644 index 0000000..b203090 --- /dev/null +++ b/third-party/nwaku/tests/waku_store/test_wakunode_store.nim @@ -0,0 +1,427 @@ +{.used.} + +import + std/sequtils, + testutils/unittests, + chronicles, + chronos, + libp2p/crypto/crypto, + libp2p/peerid, + libp2p/multiaddress, + libp2p/switch, + libp2p/protocols/pubsub/rpc/messages, + libp2p/protocols/pubsub/pubsub, + libp2p/protocols/pubsub/gossipsub +import + waku/[ + common/paging, + waku_core, + waku_core/message/digest, + node/peer_manager, + waku_archive, + waku_filter_v2, + waku_filter_v2/client, + waku_store, + waku_node, + ], + ../waku_store/store_utils, + ../waku_archive/archive_utils, + ../testlib/wakucore, + ../testlib/wakunode + +procSuite "WakuNode - Store": + ## Fixtures + let timeOrigin = now() + let msgListA = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + + let hashes = msgListA.mapIt(computeMessageHash(DefaultPubsubTopic, it)) + + let kvs = zip(hashes, msgListA).mapIt( + WakuMessageKeyValue( + messageHash: it[0], message: some(it[1]), pubsubTopic: some(DefaultPubsubTopic) + ) + ) + + let archiveA = block: + let driver = newSqliteArchiveDriver() + + for kv in kvs: + let message = kv.message.get() + require (waitFor driver.put(kv.messageHash, DefaultPubsubTopic, message)).isOk() + + driver + + test "Store protocol returns expected messages": + ## Setup + 
let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountStore() + + client.mountStoreClient() + + ## Given + let req = + StoreQueryRequest(includeData: true, contentTopics: @[DefaultContentTopic]) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + let queryRes = waitFor client.query(req, peer = serverPeer) + + ## Then + check queryRes.isOk() + + let response = queryRes.get() + check: + response.messages == kvs + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store node history response - forward pagination": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountStore() + + client.mountStoreClient() + + ## Given + let req = StoreQueryRequest( + includeData: true, + contentTopics: @[DefaultContentTopic], + paginationForward: PagingDirection.FORWARD, + paginationLimit: some(uint64(7)), + ) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessageKeyValue]](2) + var cursors = newSeq[Option[WakuMessageHash]](2) + + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = 
response.paginationCursor + + # Set/update the request cursor + nextReq.paginationCursor = cursors[i] + + ## Then + check: + cursors[0] == some(kvs[6].messageHash) + cursors[1] == none(WakuMessageHash) + + check: + pages[0] == kvs[0 .. 6] + pages[1] == kvs[7 .. 9] + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store node history response - backward pagination": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountStore() + + client.mountStoreClient() + + ## Given + let req = StoreQueryRequest( + includeData: true, + contentTopics: @[DefaultContentTopic], + paginationLimit: some(uint64(7)), + paginationForward: PagingDirection.BACKWARD, + ) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessageKeyValue]](2) + var cursors = newSeq[Option[WakuMessageHash]](2) + + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.paginationCursor + + # Set/update the request cursor + nextReq.paginationCursor = cursors[i] + + ## Then + check: + cursors[0] == some(kvs[3].messageHash) + cursors[1] == none(WakuMessageHash) + + check: + pages[0] == kvs[3 .. 9] + pages[1] == kvs[0 .. 
2] + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store protocol returns expected message when relay is disabled and filter enabled": + ## See nwaku issue #937: 'Store: ability to decouple store from relay' + ## Setup + let + filterSourceKey = generateSecp256k1Key() + filterSource = + newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0)) + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start(), filterSource.start()) + + waitFor filterSource.mountFilter() + let driver = newSqliteArchiveDriver() + + let mountArchiveRes = server.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountStore() + waitFor server.mountFilterClient() + client.mountStoreClient() + + ## Given + let message = fakeWakuMessage() + let hash = computeMessageHash(DefaultPubSubTopic, message) + let + serverPeer = server.peerInfo.toRemotePeerInfo() + filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo() + + ## Then + let filterFut = newFuture[(PubsubTopic, WakuMessage)]() + proc filterHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = + await server.wakuArchive.handleMessage(pubsubTopic, msg) + filterFut.complete((pubsubTopic, msg)) + + server.wakuFilterClient.registerPushHandler(filterHandler) + let resp = waitFor server.filterSubscribe( + some(DefaultPubsubTopic), DefaultContentTopic, peer = filterSourcePeer + ) + + waitFor sleepAsync(100.millis) + + waitFor filterSource.wakuFilter.handleMessage(DefaultPubsubTopic, message) + + # Wait for the server filter to receive the push message + require waitFor filterFut.withTimeout(5.seconds) + + let req = + StoreQueryRequest(includeData: true, contentTopics: @[DefaultContentTopic]) + let res = waitFor 
client.query(req, serverPeer) + + ## Then + check res.isOk() + + let response = res.get() + check: + response.messages.len == 1 + response.messages[0] == + WakuMessageKeyValue( + messageHash: hash, + message: some(message), + pubsubTopic: some(DefaultPubSubTopic), + ) + + let (handledPubsubTopic, handledMsg) = filterFut.read() + check: + handledPubsubTopic == DefaultPubsubTopic + handledMsg == message + + ## Cleanup + waitFor allFutures(client.stop(), server.stop(), filterSource.stop()) + + test "history query should return INVALID_CURSOR if the cursor has empty data in the request": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountStore() + + client.mountStoreClient() + + ## Forcing a bad cursor with empty digest data + var cursor: WakuMessageHash = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + ] + + ## Given + let req = StoreQueryRequest( + contentTopics: @[DefaultContentTopic], paginationCursor: some(cursor) + ) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + let queryRes = waitFor client.query(req, peer = serverPeer) + + ## Then + check queryRes.isOk() + + let response = queryRes.get() + + check response.statusCode == 400 + check response.statusDesc == "BAD_REQUEST: invalid cursor" + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store protocol queries does not violate request rate limitation": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = 
newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountStore((4, 500.millis)) + + client.mountStoreClient() + + ## Given + let req = + StoreQueryRequest(includeData: true, contentTopics: @[DefaultContentTopic]) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + let requestProc = proc() {.async.} = + let queryRes = await client.query(req, peer = serverPeer) + + assert queryRes.isOk(), queryRes.error + + let response = queryRes.get() + check: + response.messages.mapIt(it.message.get()) == msgListA + + for count in 0 ..< 4: + waitFor requestProc() + waitFor sleepAsync(20.millis) + + waitFor sleepAsync(500.millis) + + for count in 0 ..< 4: + waitFor requestProc() + waitFor sleepAsync(20.millis) + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store protocol queries overrun request rate limitation": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountStore((3, 500.millis)) + + client.mountStoreClient() + + ## Given + let req = + StoreQueryRequest(includeData: true, contentTopics: @[DefaultContentTopic]) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + let successProc = proc() {.async.} = + let queryRes = await client.query(req, peer = serverPeer) + + check queryRes.isOk() + let response = queryRes.get() + check: + response.messages.mapIt(it.message.get()) == msgListA + + let failsProc = proc() {.async.} = + let queryRes = await client.query(req, peer = serverPeer) 
+ + check queryRes.isOk() + let response = queryRes.get() + + check response.statusCode == 429 + + for count in 0 ..< 3: + waitFor successProc() + waitFor sleepAsync(20.millis) + + waitFor failsProc() + + waitFor sleepAsync(500.millis) + + for count in 0 ..< 3: + waitFor successProc() + waitFor sleepAsync(20.millis) + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) diff --git a/third-party/nwaku/tests/waku_store_legacy/store_utils.nim b/third-party/nwaku/tests/waku_store_legacy/store_utils.nim new file mode 100644 index 0000000..a70ca93 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_legacy/store_utils.nim @@ -0,0 +1,33 @@ +{.used.} + +import std/options, chronos + +import + waku/[node/peer_manager, waku_core, waku_store_legacy, waku_store_legacy/client], + ../testlib/[common, wakucore] + +proc newTestWakuStore*( + switch: Switch, handler: HistoryQueryHandler +): Future[WakuStore] {.async.} = + let + peerManager = PeerManager.new(switch) + proto = WakuStore.new(peerManager, rng, handler) + + await proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient = + let peerManager = PeerManager.new(switch) + WakuStoreClient.new(peerManager, rng) + +proc computeHistoryCursor*( + pubsubTopic: PubsubTopic, message: WakuMessage +): HistoryCursor = + HistoryCursor( + pubsubTopic: pubsubTopic, + senderTime: message.timestamp, + storeTime: message.timestamp, + digest: computeDigest(message), + ) diff --git a/third-party/nwaku/tests/waku_store_legacy/test_all.nim b/third-party/nwaku/tests/waku_store_legacy/test_all.nim new file mode 100644 index 0000000..b495310 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_legacy/test_all.nim @@ -0,0 +1,8 @@ +{.used.} + +import + ./test_client, + ./test_resume, + ./test_rpc_codec, + ./test_waku_store, + ./test_wakunode_store diff --git a/third-party/nwaku/tests/waku_store_legacy/test_client.nim b/third-party/nwaku/tests/waku_store_legacy/test_client.nim 
new file mode 100644 index 0000000..2a86163 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_legacy/test_client.nim @@ -0,0 +1,214 @@ +{.used.} + +import std/options, testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + node/peer_manager, + waku_core, + waku_store_legacy, + waku_store_legacy/client, + common/paging, + ], + ../testlib/[wakucore, testasync, futures], + ./store_utils + +suite "Store Client": + var message1 {.threadvar.}: WakuMessage + var message2 {.threadvar.}: WakuMessage + var message3 {.threadvar.}: WakuMessage + var messageSeq {.threadvar.}: seq[WakuMessage] + var handlerFuture {.threadvar.}: Future[HistoryQuery] + var handler {.threadvar.}: HistoryQueryHandler + var historyQuery {.threadvar.}: HistoryQuery + + var serverSwitch {.threadvar.}: Switch + var clientSwitch {.threadvar.}: Switch + + var server {.threadvar.}: WakuStore + var client {.threadvar.}: WakuStoreClient + + var serverPeerInfo {.threadvar.}: RemotePeerInfo + var clientPeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + message1 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message2 = fakeWakuMessage(contentTopic = DefaultContentTopic) + message3 = fakeWakuMessage(contentTopic = DefaultContentTopic) + messageSeq = @[message1, message2, message3] + handlerFuture = newLegacyHistoryFuture() + handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} = + handlerFuture.complete(req) + return ok(HistoryResponse(messages: messageSeq)) + historyQuery = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + direction: PagingDirection.FORWARD, + requestId: "customRequestId", + ) + + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + server = await newTestWakuStore(serverSwitch, handler = handler) + client = newTestWakuStoreClient(clientSwitch) + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## The following sleep is aimed to prevent macos failures in CI 
+ #[ +2024-05-16T13:24:45.5106200Z INF 2024-05-16 13:24:45.509+00:00 Stopping AutonatService topics="libp2p autonatservice" tid=53712 file=service.nim:203 +2024-05-16T13:24:45.5107960Z WRN 2024-05-16 13:24:45.509+00:00 service is already stopped topics="libp2p switch" tid=53712 file=switch.nim:86 +2024-05-16T13:24:45.5109010Z . (1.68s) +2024-05-16T13:24:45.5109320Z Store Client (0.00s) +2024-05-16T13:24:45.5109870Z SIGSEGV: Illegal storage access. (Attempt to read from nil?) +2024-05-16T13:24:45.5111470Z stack trace: (most recent call last) + ]# + await sleepAsync(500.millis) + + serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo() + + asyncTeardown: + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + suite "HistoryQuery Creation and Execution": + asyncTest "Valid Queries": + # When a valid query is sent to the server + let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) + + # Then the query is processed successfully + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == historyQuery + queryResponse.get().messages == messageSeq + + asyncTest "Invalid Queries": + # TODO: IMPROVE: We can't test "actual" invalid queries because + # it directly depends on the handler implementation, to achieve + # proper coverage we'd need an example implementation. 
+ + # Given some invalid queries + let + invalidQuery1 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[], + direction: PagingDirection.FORWARD, + requestId: "reqId1", + ) + invalidQuery2 = HistoryQuery( + pubsubTopic: PubsubTopic.none(), + contentTopics: @[DefaultContentTopic], + direction: PagingDirection.FORWARD, + requestId: "reqId2", + ) + invalidQuery3 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + pageSize: 0, + requestId: "reqId3", + ) + invalidQuery4 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + pageSize: 0, + requestId: "reqId4", + ) + invalidQuery5 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(0.Timestamp), + endTime: some(0.Timestamp), + requestId: "reqId5", + ) + invalidQuery6 = HistoryQuery( + pubsubTopic: some(DefaultPubsubTopic), + contentTopics: @[DefaultContentTopic], + startTime: some(0.Timestamp), + endTime: some(-1.Timestamp), + requestId: "reqId6", + ) + + # When the query is sent to the server + let queryResponse1 = await client.query(invalidQuery1, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery1 + queryResponse1.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse2 = await client.query(invalidQuery2, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery2 + queryResponse2.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse3 = await client.query(invalidQuery3, peer = serverPeerInfo) + + # Then the query is not processed + assert await 
handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery3 + queryResponse3.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse4 = await client.query(invalidQuery4, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery4 + queryResponse4.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse5 = await client.query(invalidQuery5, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery5 + queryResponse5.get().messages == messageSeq + + # When the query is sent to the server + handlerFuture = newLegacyHistoryFuture() + let queryResponse6 = await client.query(invalidQuery6, peer = serverPeerInfo) + + # Then the query is not processed + assert await handlerFuture.withTimeout(FUTURE_TIMEOUT) + check: + handlerFuture.read() == invalidQuery6 + queryResponse6.get().messages == messageSeq + + suite "Verification of HistoryResponse Payload": + asyncTest "Positive Responses": + # When a valid query is sent to the server + let queryResponse = await client.query(historyQuery, peer = serverPeerInfo) + + # Then the query is processed successfully, and is of the expected type + check: + await handlerFuture.withTimeout(FUTURE_TIMEOUT) + type(queryResponse.get()) is HistoryResponse + + asyncTest "Negative Responses - PeerDialFailure": + # Given a stopped peer + let + otherServerSwitch = newTestSwitch() + otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo() + + # When a query is sent to the stopped peer + let queryResponse = await client.query(historyQuery, peer = otherServerPeerInfo) + + # Then the query is not processed + check: + not await 
handlerFuture.withTimeout(FUTURE_TIMEOUT) + queryResponse.isErr() + queryResponse.error.kind == HistoryErrorKind.PEER_DIAL_FAILURE diff --git a/third-party/nwaku/tests/waku_store_legacy/test_resume.nim b/third-party/nwaku/tests/waku_store_legacy/test_resume.nim new file mode 100644 index 0000000..53e4883 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_legacy/test_resume.nim @@ -0,0 +1,342 @@ +{.used.} + +when defined(waku_exp_store_resume): + # TODO: Review store resume test cases (#1282) + # Ongoing changes to test code base had ruin this test meanwhile, need to investigate and fix + + import + std/[options, tables, sets], + testutils/unittests, + chronos, + chronicles, + libp2p/crypto/crypto + import + waku/[ + common/databases/db_sqlite, + waku_archive_legacy/driver, + waku_archive_legacy/driver/sqlite_driver/sqlite_driver, + node/peer_manager, + waku_core, + waku_core/message/digest, + waku_store_legacy, + ], + ../waku_store_legacy/store_utils, + ../waku_archive_legacy/archive_utils, + ./testlib/common, + ./testlib/switch + + procSuite "Waku Store - resume store": + ## Fixtures + let storeA = block: + let store = newTestMessageStore() + let msgList = + @[ + fakeWakuMessage( + payload = @[byte 0], contentTopic = ContentTopic("2"), ts = ts(0) + ), + fakeWakuMessage( + payload = @[byte 1], contentTopic = ContentTopic("1"), ts = ts(1) + ), + fakeWakuMessage( + payload = @[byte 2], contentTopic = ContentTopic("2"), ts = ts(2) + ), + fakeWakuMessage( + payload = @[byte 3], contentTopic = ContentTopic("1"), ts = ts(3) + ), + fakeWakuMessage( + payload = @[byte 4], contentTopic = ContentTopic("2"), ts = ts(4) + ), + fakeWakuMessage( + payload = @[byte 5], contentTopic = ContentTopic("1"), ts = ts(5) + ), + fakeWakuMessage( + payload = @[byte 6], contentTopic = ContentTopic("2"), ts = ts(6) + ), + fakeWakuMessage( + payload = @[byte 7], contentTopic = ContentTopic("1"), ts = ts(7) + ), + fakeWakuMessage( + payload = @[byte 8], contentTopic = ContentTopic("2"), ts 
= ts(8) + ), + fakeWakuMessage( + payload = @[byte 9], contentTopic = ContentTopic("1"), ts = ts(9) + ), + ] + + for msg in msgList: + require store + .put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + .isOk() + + store + + let storeB = block: + let store = newTestMessageStore() + let msgList2 = + @[ + fakeWakuMessage( + payload = @[byte 0], contentTopic = ContentTopic("2"), ts = ts(0) + ), + fakeWakuMessage( + payload = @[byte 11], contentTopic = ContentTopic("1"), ts = ts(1) + ), + fakeWakuMessage( + payload = @[byte 12], contentTopic = ContentTopic("2"), ts = ts(2) + ), + fakeWakuMessage( + payload = @[byte 3], contentTopic = ContentTopic("1"), ts = ts(3) + ), + fakeWakuMessage( + payload = @[byte 4], contentTopic = ContentTopic("2"), ts = ts(4) + ), + fakeWakuMessage( + payload = @[byte 5], contentTopic = ContentTopic("1"), ts = ts(5) + ), + fakeWakuMessage( + payload = @[byte 13], contentTopic = ContentTopic("2"), ts = ts(6) + ), + fakeWakuMessage( + payload = @[byte 14], contentTopic = ContentTopic("1"), ts = ts(7) + ), + ] + + for msg in msgList2: + require store + .put( + DefaultPubsubTopic, + msg, + computeDigest(msg), + computeMessageHash(DefaultPubsubTopic, msg), + msg.timestamp, + ) + .isOk() + + store + + asyncTest "multiple query to multiple peers with pagination": + ## Setup + let + serverSwitchA = newTestSwitch() + serverSwitchB = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures( + serverSwitchA.start(), serverSwitchB.start(), clientSwitch.start() + ) + + let + serverA = await newTestWakuStoreNode(serverSwitchA, store = testStore) + serverB = await newTestWakuStoreNode(serverSwitchB, store = testStore) + client = newTestWakuStoreClient(clientSwitch) + + ## Given + let peers = + @[ + serverSwitchA.peerInfo.toRemotePeerInfo(), + serverSwitchB.peerInfo.toRemotePeerInfo(), + ] + let req = HistoryQuery(contentTopics: @[DefaultContentTopic], pageSize: 5) + + 
## When + let res = await client.queryLoop(req, peers) + + ## Then + check: + res.isOk() + + let response = res.tryGet() + check: + response.len == 10 + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitchA.stop(), serverSwitchB.stop()) + + asyncTest "resume message history": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + let + server = await newTestWakuStore(serverSwitch, store = storeA) + client = await newTestWakuStore(clientSwitch) + + client.setPeer(serverSwitch.peerInfo.toRemotePeerInfo()) + + ## When + let res = await client.resume() + + ## Then + check res.isOk() + + let resumedMessagesCount = res.tryGet() + let storedMessagesCount = client.store.getMessagesCount().tryGet() + check: + resumedMessagesCount == 10 + storedMessagesCount == 10 + + ## Cleanup + await allFutures(clientSwitch.stop(), serverSwitch.stop()) + + asyncTest "resume history from a list of candidates - offline peer": + ## Setup + let + clientSwitch = newTestSwitch() + offlineSwitch = newTestSwitch() + + await clientSwitch.start() + + let client = await newTestWakuStore(clientSwitch) + + ## Given + let peers = @[offlineSwitch.peerInfo.toRemotePeerInfo()] + + ## When + let res = await client.resume(some(peers)) + + ## Then + check res.isErr() + + ## Cleanup + await clientSwitch.stop() + + asyncTest "resume history from a list of candidates - online and offline peers": + ## Setup + let + offlineSwitch = newTestSwitch() + serverASwitch = newTestSwitch() + serverBSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures( + serverASwitch.start(), serverBSwitch.start(), clientSwitch.start() + ) + + let + serverA = await newTestWakuStore(serverASwitch, store = storeA) + serverB = await newTestWakuStore(serverBSwitch, store = storeB) + client = await newTestWakuStore(clientSwitch) + + ## Given + let peers = + @[ + offlineSwitch.peerInfo.toRemotePeerInfo(), + 
serverASwitch.peerInfo.toRemotePeerInfo(), + serverBSwitch.peerInfo.toRemotePeerInfo(), + ] + + ## When + let res = await client.resume(some(peers)) + + ## Then + # `client` is expected to retrieve 14 messages: + # - The store mounted on `serverB` holds 10 messages (see `storeA` fixture) + # - The store mounted on `serverB` holds 7 messages (see `storeB` fixture) + # Both stores share 3 messages, resulting in 14 unique messages in total + check res.isOk() + + let restoredMessagesCount = res.tryGet() + let storedMessagesCount = client.store.getMessagesCount().tryGet() + check: + restoredMessagesCount == 14 + storedMessagesCount == 14 + + ## Cleanup + await allFutures(serverASwitch.stop(), serverBSwitch.stop(), clientSwitch.stop()) + + suite "WakuNode - waku store": + asyncTest "Resume proc fetches the history": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(client.start(), server.start()) + + let driver = newSqliteArchiveDriver() + server.mountArchive(some(driver), none(MessageValidator), none(RetentionPolicy)) + await server.mountStore() + + let clientStore = StoreQueueRef.new() + await client.mountStore(store = clientStore) + client.mountStoreClient(store = clientStore) + + ## Given + let message = fakeWakuMessage() + require server.wakuStore.store.put(DefaultPubsubTopic, message).isOk() + + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + await client.resume(some(@[serverPeer])) + + # Then + check: + client.wakuStore.store.getMessagesCount().tryGet() == 1 + + ## Cleanup + await allFutures(client.stop(), server.stop()) + + asyncTest "Resume proc discards duplicate messages": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + 
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + await allFutures(server.start(), client.start()) + await server.mountStore(store = StoreQueueRef.new()) + + let clientStore = StoreQueueRef.new() + await client.mountStore(store = clientStore) + client.mountStoreClient(store = clientStore) + + ## Given + let timeOrigin = now() + let + msg1 = fakeWakuMessage( + payload = "hello world1", ts = (timeOrigin + getNanoSecondTime(1)) + ) + msg2 = fakeWakuMessage( + payload = "hello world2", ts = (timeOrigin + getNanoSecondTime(2)) + ) + msg3 = fakeWakuMessage( + payload = "hello world3", ts = (timeOrigin + getNanoSecondTime(3)) + ) + + require server.wakuStore.store.put(DefaultPubsubTopic, msg1).isOk() + require server.wakuStore.store.put(DefaultPubsubTopic, msg2).isOk() + + # Insert the same message in both node's store + let + receivedTime3 = now() + getNanosecondTime(10) + digest3 = computeDigest(msg3) + require server.wakuStore.store + .put(DefaultPubsubTopic, msg3, digest3, receivedTime3) + .isOk() + require client.wakuStore.store + .put(DefaultPubsubTopic, msg3, digest3, receivedTime3) + .isOk() + + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + await client.resume(some(@[serverPeer])) + + ## Then + check: + # If the duplicates are discarded properly, then the total number of messages after resume should be 3 + client.wakuStore.store.getMessagesCount().tryGet() == 3 + + await allFutures(client.stop(), server.stop()) diff --git a/third-party/nwaku/tests/waku_store_legacy/test_rpc_codec.nim b/third-party/nwaku/tests/waku_store_legacy/test_rpc_codec.nim new file mode 100644 index 0000000..6897bab --- /dev/null +++ b/third-party/nwaku/tests/waku_store_legacy/test_rpc_codec.nim @@ -0,0 +1,185 @@ +{.used.} + +import std/options, testutils/unittests, chronos +import + waku/[ + common/protobuf, + common/paging, + waku_core, + waku_store_legacy/rpc, + waku_store_legacy/rpc_codec, + ], + ../testlib/wakucore + +procSuite "Waku Store - 
RPC codec": + test "PagingIndexRPC protobuf codec": + ## Given + let index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + + ## When + let encodedIndex = index.encode() + let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer) + + ## Then + check: + decodedIndexRes.isOk() + + let decodedIndex = decodedIndexRes.tryGet() + check: + # The fields of decodedIndex must be the same as the original index + decodedIndex == index + + test "PagingIndexRPC protobuf codec - empty index": + ## Given + let emptyIndex = PagingIndexRPC() + + let encodedIndex = emptyIndex.encode() + let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer) + + ## Then + check: + decodedIndexRes.isOk() + + let decodedIndex = decodedIndexRes.tryGet() + check: + # Check the correctness of init and encode for an empty PagingIndexRPC + decodedIndex == emptyIndex + + test "PagingInfoRPC protobuf codec": + ## Given + let + index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.FORWARD), + ) + + ## When + let pb = pagingInfo.encode() + let decodedPagingInfo = PagingInfoRPC.decode(pb.buffer) + + ## Then + check: + decodedPagingInfo.isOk() + + check: + # The fields of decodedPagingInfo must be the same as the original pagingInfo + decodedPagingInfo.value == pagingInfo + decodedPagingInfo.value.direction == pagingInfo.direction + + test "PagingInfoRPC protobuf codec - empty paging info": + ## Given + let emptyPagingInfo = PagingInfoRPC() + + ## When + let pb = emptyPagingInfo.encode() + let decodedEmptyPagingInfo = PagingInfoRPC.decode(pb.buffer) + + ## Then + check: + decodedEmptyPagingInfo.isOk() + + check: + # check the correctness of init and encode for an empty PagingInfoRPC + decodedEmptyPagingInfo.value == emptyPagingInfo + + test "HistoryQueryRPC protobuf codec": 
+ ## Given + let + index = PagingIndexRPC.compute( + fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.BACKWARD), + ) + query = HistoryQueryRPC( + contentFilters: + @[ + HistoryContentFilterRPC(contentTopic: DefaultContentTopic), + HistoryContentFilterRPC(contentTopic: DefaultContentTopic), + ], + pagingInfo: some(pagingInfo), + startTime: some(Timestamp(10)), + endTime: some(Timestamp(11)), + ) + + ## When + let pb = query.encode() + let decodedQuery = HistoryQueryRPC.decode(pb.buffer) + + ## Then + check: + decodedQuery.isOk() + + check: + # the fields of decoded query decodedQuery must be the same as the original query query + decodedQuery.value == query + + test "HistoryQueryRPC protobuf codec - empty history query": + ## Given + let emptyQuery = HistoryQueryRPC() + + ## When + let pb = emptyQuery.encode() + let decodedEmptyQuery = HistoryQueryRPC.decode(pb.buffer) + + ## Then + check: + decodedEmptyQuery.isOk() + + check: + # check the correctness of init and encode for an empty HistoryQueryRPC + decodedEmptyQuery.value == emptyQuery + + test "HistoryResponseRPC protobuf codec": + ## Given + let + message = fakeWakuMessage() + index = PagingIndexRPC.compute( + message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic + ) + pagingInfo = PagingInfoRPC( + pageSize: some(1'u64), + cursor: some(index), + direction: some(PagingDirection.BACKWARD), + ) + res = HistoryResponseRPC( + messages: @[message], + pagingInfo: some(pagingInfo), + error: HistoryResponseErrorRPC.INVALID_CURSOR, + ) + + ## When + let pb = res.encode() + let decodedRes = HistoryResponseRPC.decode(pb.buffer) + + ## Then + check: + decodedRes.isOk() + + check: + # the fields of decoded response decodedRes must be the same as the original response res + decodedRes.value == res + + test "HistoryResponseRPC protobuf codec - empty history response": + ## Given + 
let emptyRes = HistoryResponseRPC() + + ## When + let pb = emptyRes.encode() + let decodedEmptyRes = HistoryResponseRPC.decode(pb.buffer) + + ## Then + check: + decodedEmptyRes.isOk() + + check: + # check the correctness of init and encode for an empty HistoryResponseRPC + decodedEmptyRes.value == emptyRes diff --git a/third-party/nwaku/tests/waku_store_legacy/test_waku_store.nim b/third-party/nwaku/tests/waku_store_legacy/test_waku_store.nim new file mode 100644 index 0000000..b8dc835 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_legacy/test_waku_store.nim @@ -0,0 +1,113 @@ +{.used.} + +import testutils/unittests, chronos, libp2p/crypto/crypto + +import + waku/[ + common/paging, + node/peer_manager, + waku_core, + waku_store_legacy, + waku_store_legacy/client, + ], + ../testlib/wakucore, + ./store_utils + +suite "Waku Store - query handler legacy": + asyncTest "history query handler should be called": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + + let msg = fakeWakuMessage(contentTopic = DefaultContentTopic) + + var queryHandlerFut = newFuture[(HistoryQuery)]() + + let queryHandler = proc( + req: HistoryQuery + ): Future[HistoryResult] {.async, gcsafe.} = + queryHandlerFut.complete(req) + return ok(HistoryResponse(messages: @[msg])) + + let + server = await newTestWakuStore(serverSwitch, handler = queryhandler) + client = newTestWakuStoreClient(clientSwitch) + + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + direction: PagingDirection.FORWARD, + requestId: "reqId", + ) + + ## When + let queryRes = await client.query(req, peer = serverPeerInfo) + + ## Then + check: + not queryHandlerFut.failed() + queryRes.isOk() + + let request = queryHandlerFut.read() + check: + request == req + + let response = queryRes.tryGet() + check: + response.messages.len == 1 + 
response.messages == @[msg] + + ## Cleanup + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + asyncTest "history query handler should be called and return an error": + ## Setup + let + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + ## Given + let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + + var queryHandlerFut = newFuture[(HistoryQuery)]() + let queryHandler = proc( + req: HistoryQuery + ): Future[HistoryResult] {.async, gcsafe.} = + queryHandlerFut.complete(req) + return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST)) + + let + server = await newTestWakuStore(serverSwitch, handler = queryhandler) + client = newTestWakuStoreClient(clientSwitch) + + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + direction: PagingDirection.FORWARD, + requestId: "reqId", + ) + + ## When + let queryRes = await client.query(req, peer = serverPeerInfo) + + ## Then + check: + not queryHandlerFut.failed() + queryRes.isErr() + + let request = queryHandlerFut.read() + check: + request == req + + let error = queryRes.tryError() + check: + error.kind == HistoryErrorKind.BAD_REQUEST + + ## Cleanup + await allFutures(serverSwitch.stop(), clientSwitch.stop()) diff --git a/third-party/nwaku/tests/waku_store_legacy/test_wakunode_store.nim b/third-party/nwaku/tests/waku_store_legacy/test_wakunode_store.nim new file mode 100644 index 0000000..549033e --- /dev/null +++ b/third-party/nwaku/tests/waku_store_legacy/test_wakunode_store.nim @@ -0,0 +1,316 @@ +{.used.} + +import + std/net, + testutils/unittests, + chronos, + libp2p/crypto/crypto, + libp2p/peerid, + libp2p/multiaddress, + libp2p/switch, + libp2p/protocols/pubsub/pubsub, + libp2p/protocols/pubsub/gossipsub +import + waku/[ + common/paging, + waku_core, + waku_core/message/digest, + node/peer_manager, + waku_archive_legacy, + waku_filter_v2, + waku_filter_v2/client, + waku_store_legacy, + waku_node, + 
], + ../waku_store_legacy/store_utils, + ../waku_archive_legacy/archive_utils, + ../testlib/wakucore, + ../testlib/wakunode + +procSuite "WakuNode - Store Legacy": + ## Fixtures + let timeOrigin = now() + let msgListA = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + + let archiveA = block: + let driver = newSqliteArchiveDriver() + + for msg in msgListA: + let msg_digest = waku_archive_legacy.computeDigest(msg) + let msg_hash = computeMessageHash(DefaultPubsubTopic, msg) + require ( + waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp) + ).isOk() + + driver + + test "Store protocol returns expected messages": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountLegacyArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Given + let req = HistoryQuery(contentTopics: @[DefaultContentTopic]) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + let queryRes = waitFor client.query(req, peer = serverPeer) + + ## Then + check queryRes.isOk() + + let response = queryRes.get() + check: + response.messages == msgListA + + # Cleanup + waitFor 
allFutures(client.stop(), server.stop()) + + test "Store node history response - forward pagination": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountLegacyArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Given + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + pageSize: 7, + direction: PagingDirection.FORWARD, + ) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](2) + var cursors = newSeq[Option[HistoryCursor]](2) + + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[6])) + cursors[1] == none(HistoryCursor) + + check: + pages[0] == msgListA[0 .. 6] + pages[1] == msgListA[7 .. 
9] + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store node history response - backward pagination": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountLegacyArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Given + let req = HistoryQuery( + contentTopics: @[DefaultContentTopic], + pageSize: 7, + direction: PagingDirection.BACKWARD, + ) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + var nextReq = req # copy + + var pages = newSeq[seq[WakuMessage]](2) + var cursors = newSeq[Option[HistoryCursor]](2) + + for i in 0 ..< 2: + let res = waitFor client.query(nextReq, peer = serverPeer) + require res.isOk() + + # Keep query response content + let response = res.get() + pages[i] = response.messages + cursors[i] = response.cursor + + # Set/update the request cursor + nextReq.cursor = cursors[i] + + ## Then + check: + cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[3])) + cursors[1] == none(HistoryCursor) + + check: + pages[0] == msgListA[3 .. 9] + pages[1] == msgListA[0 .. 
2] + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) + + test "Store protocol returns expected message when relay is disabled and filter enabled": + ## See nwaku issue #937: 'Store: ability to decouple store from relay' + ## Setup + let + filterSourceKey = generateSecp256k1Key() + filterSource = + newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0)) + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start(), filterSource.start()) + + waitFor filterSource.mountFilter() + let driver = newSqliteArchiveDriver() + + let mountArchiveRes = server.mountLegacyArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + waitFor server.mountFilterClient() + client.mountLegacyStoreClient() + + ## Given + let message = fakeWakuMessage() + let + serverPeer = server.peerInfo.toRemotePeerInfo() + filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo() + + ## Then + let filterFut = newFuture[(PubsubTopic, WakuMessage)]() + proc filterHandler( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = + await server.wakuLegacyArchive.handleMessage(pubsubTopic, msg) + filterFut.complete((pubsubTopic, msg)) + + server.wakuFilterClient.registerPushHandler(filterHandler) + let resp = waitFor server.filterSubscribe( + some(DefaultPubsubTopic), DefaultContentTopic, peer = filterSourcePeer + ) + + waitFor sleepAsync(100.millis) + + waitFor filterSource.wakuFilter.handleMessage(DefaultPubsubTopic, message) + + # Wait for the server filter to receive the push message + require waitFor filterFut.withTimeout(5.seconds) + + let res = waitFor client.query( + HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer + ) + + ## Then + check res.isOk() + + let response = 
res.get() + check: + response.messages.len == 1 + response.messages[0] == message + + let (handledPubsubTopic, handledMsg) = filterFut.read() + check: + handledPubsubTopic == DefaultPubsubTopic + handledMsg == message + + ## Cleanup + waitFor allFutures(client.stop(), server.stop(), filterSource.stop()) + + test "history query should return INVALID_CURSOR if the cursor has empty data in the request": + ## Setup + let + serverKey = generateSecp256k1Key() + server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) + clientKey = generateSecp256k1Key() + client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) + + waitFor allFutures(client.start(), server.start()) + + let mountArchiveRes = server.mountLegacyArchive(archiveA) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + waitFor server.mountLegacyStore() + + client.mountLegacyStoreClient() + + ## Forcing a bad cursor with empty digest data + var data: array[32, byte] = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, + ] + let cursor = HistoryCursor( + pubsubTopic: "pubsubTopic", + senderTime: now(), + storeTime: now(), + digest: waku_archive_legacy.MessageDigest(data: data), + ) + + ## Given + let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor)) + let serverPeer = server.peerInfo.toRemotePeerInfo() + + ## When + let queryRes = waitFor client.query(req, peer = serverPeer) + + ## Then + check not queryRes.isOk() + + check queryRes.error == + "legacy store client query error: BAD_REQUEST: invalid cursor" + + # Cleanup + waitFor allFutures(client.stop(), server.stop()) diff --git a/third-party/nwaku/tests/waku_store_sync/sync_utils.nim b/third-party/nwaku/tests/waku_store_sync/sync_utils.nim new file mode 100644 index 0000000..fe62e02 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_sync/sync_utils.nim @@ -0,0 +1,72 @@ +import std/[options, random], chronos, chronicles + +import + waku/[ + 
node/peer_manager, + waku_core, + waku_store_sync/common, + waku_store_sync/reconciliation, + waku_store_sync/transfer, + ], + ../testlib/wakucore + +randomize() + +proc randomHash*(rng: var Rand): WakuMessageHash = + var hash = EmptyWakuMessageHash + + for i in 0 ..< hash.len: + hash[i] = rng.rand(uint8) + + return hash + +proc newTestWakuRecon*( + switch: Switch, + pubsubTopics: seq[PubsubTopic] = @[], + contentTopics: seq[ContentTopic] = @[], + syncRange: timer.Duration = DefaultSyncRange, + idsRx: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)], + wantsTx: AsyncQueue[PeerId], + needsTx: AsyncQueue[(PeerId, WakuMessageHash)], +): Future[SyncReconciliation] {.async.} = + let peerManager = PeerManager.new(switch) + + let res = await SyncReconciliation.new( + pubsubTopics = pubsubTopics, + contentTopics = contentTopics, + peerManager = peerManager, + wakuArchive = nil, + syncRange = syncRange, + relayJitter = 0.seconds, + idsRx = idsRx, + localWantsTx = wantsTx, + remoteNeedsTx = needsTx, + ) + + let proto = res.get() + + proto.start() + switch.mount(proto) + + return proto + +proc newTestWakuTransfer*( + switch: Switch, + idsTx: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)], + wantsRx: AsyncQueue[PeerId], + needsRx: AsyncQueue[(PeerId, WakuMessageHash)], +): SyncTransfer = + let peerManager = PeerManager.new(switch) + + let proto = SyncTransfer.new( + peerManager = peerManager, + wakuArchive = nil, + idsTx = idsTx, + localWantsRx = wantsRx, + remoteNeedsRx = needsRx, + ) + + proto.start() + switch.mount(proto) + + return proto diff --git a/third-party/nwaku/tests/waku_store_sync/test_all.nim b/third-party/nwaku/tests/waku_store_sync/test_all.nim new file mode 100644 index 0000000..82daa38 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_sync/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_protocol, ./test_storage, ./test_codec diff --git a/third-party/nwaku/tests/waku_store_sync/test_codec.nim 
b/third-party/nwaku/tests/waku_store_sync/test_codec.nim new file mode 100644 index 0000000..fdfd3f2 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_sync/test_codec.nim @@ -0,0 +1,214 @@ +{.used.} + +import std/[options, random], testutils/unittests, chronos + +import + ../../waku/waku_core, + ../../waku/waku_core/message/digest, + ../../waku/waku_core/time, + ../../waku/waku_store_sync/common, + ../../waku/waku_store_sync/codec, + ./sync_utils + +proc randomItemSet(count: int, startTime: Timestamp, rng: var Rand): ItemSet = + var + elements = newSeqOfCap[SyncID](count) + lastTime = startTime + + for i in 0 ..< count: + let diff = rng.rand(9.uint8) + 1 + + let timestamp = lastTime + diff * 1_000 + lastTime = timestamp + + let hash = randomHash(rng) + + let id = SyncID(time: Timestamp(timestamp), hash: hash) + + elements.add(id) + + return ItemSet(elements: elements, reconciled: true) + +proc randomSetRange( + count: int, startTime: Timestamp, rng: var Rand +): (Slice[SyncID], ItemSet) = + let itemSet = randomItemSet(count, startTime, rng) + + var + lb = itemSet.elements[0] + ub = itemSet.elements[^1] + + #for test check equality + lb.hash = EmptyWakuMessageHash + ub.hash = EmptyWakuMessageHash + + let bounds = lb .. 
ub + + return (bounds, itemSet) + +suite "Waku Store Sync Codec": + test "empty item set encoding roundtrip": + var origItemSet = ItemSet() + + origItemSet.reconciled = true + + var encodedSet = origItemSet.deltaEncode() + + var itemSet = ItemSet() + let _ = deltaDecode(itemSet, encodedSet, 0) + + check: + origItemSet == itemSet + + test "item set encoding roundtrip": + let + count = 10 + time = getNowInNanosecondTime() + + var rng = initRand() + + let origItemSet = randomItemSet(count, time, rng) + var encodedSet = origItemSet.deltaEncode() + + #faking a longer payload + let pad: seq[byte] = + @[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + encodedSet &= pad + + var itemSet = ItemSet() + let _ = deltaDecode(itemSet, encodedSet, count) + + check: + origItemSet == itemSet + + test "payload item set encoding roundtrip": + let count = 5 + + var + rng = initRand() + time = getNowInNanosecondTime() + + let (bounds1, itemSet1) = randomSetRange(count, time, rng) + let (bounds2, itemSet2) = randomSetRange(count, time + 11_000_000, rng) + let (bounds3, itemSet3) = randomSetRange(count, time + 21_000_000, rng) + let (bounds4, itemSet4) = randomSetRange(count, time + 31_000_000, rng) + + let range1 = (bounds1, RangeType.ItemSet) + let range2 = (bounds2, RangeType.ItemSet) + let range3 = (bounds3, RangeType.ItemSet) + let range4 = (bounds4, RangeType.ItemSet) + + let payload = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[], + ranges: @[range1, range2, range3, range4], + fingerprints: @[], + itemSets: @[itemSet1, itemSet2, itemSet3, itemSet4], + ) + + let encodedPayload = payload.deltaEncode() + + let res = RangesData.deltaDecode(encodedPayload) + assert res.isOk(), $res.error + + let decodedPayload = res.get() + + check: + payload.ranges[0][0].b == decodedPayload.ranges[0][0].b + payload.ranges[1][0].b == decodedPayload.ranges[1][0].b + payload.ranges[2][0].b == decodedPayload.ranges[2][0].b + payload.ranges[3][0].b 
== decodedPayload.ranges[3][0].b + payload.itemSets == decodedPayload.itemSets + + test "payload fingerprint encoding roundtrip": + let count = 4 + + var + rng = initRand() + lastTime = getNowInNanosecondTime() + ranges = newSeqOfCap[(Slice[SyncID], RangeType)](4) + + for i in 0 ..< count: + let lb = SyncID(time: Timestamp(lastTime), hash: EmptyWakuMessageHash) + + let nowTime = lastTime + 10_000_000_000 # 10s + + lastTime = nowTime + let ub = SyncID(time: Timestamp(nowTime), hash: EmptyWakuMessageHash) + let bounds = lb .. ub + let range = (bounds, RangeType.Fingerprint) + + ranges.add(range) + + let payload = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[], + ranges: ranges, + fingerprints: + @[randomHash(rng), randomHash(rng), randomHash(rng), randomHash(rng)], + itemSets: @[], + ) + + let encodedPayload = payload.deltaEncode() + + let res = RangesData.deltaDecode(encodedPayload) + assert res.isOk(), $res.error + + let decodedPayload = res.get() + + check: + payload.ranges[0][0].b == decodedPayload.ranges[0][0].b + payload.ranges[1][0].b == decodedPayload.ranges[1][0].b + payload.ranges[2][0].b == decodedPayload.ranges[2][0].b + payload.ranges[3][0].b == decodedPayload.ranges[3][0].b + payload.fingerprints == decodedPayload.fingerprints + + test "payload mixed encoding roundtrip": + let count = 2 + + var + rng = initRand() + lastTime = getNowInNanosecondTime() + ranges = newSeqOfCap[(Slice[SyncID], RangeType)](4) + itemSets = newSeqOfCap[ItemSet](4) + fingerprints = newSeqOfCap[Fingerprint](4) + + for i in 1 .. count: + let lb = SyncID(time: Timestamp(lastTime), hash: EmptyWakuMessageHash) + let nowTime = lastTime + 10_000_000_000 # 10s + lastTime = nowTime + let ub = SyncID(time: Timestamp(nowTime), hash: EmptyWakuMessageHash) + let bounds = lb .. 
ub + let range = (bounds, RangeType.Fingerprint) + + ranges.add(range) + fingerprints.add(randomHash(rng)) + + let (bound, itemSet) = randomSetRange(5, lastTime, rng) + lastTime += 50_000_000_000 # 50s + + ranges.add((bound, RangeType.ItemSet)) + itemSets.add(itemSet) + + let payload = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[], + ranges: ranges, + fingerprints: fingerprints, + itemSets: itemSets, + ) + + let encodedPayload = payload.deltaEncode() + + let res = RangesData.deltaDecode(encodedPayload) + assert res.isOk(), $res.error + + let decodedPayload = res.get() + + check: + payload.ranges[0][0].b == decodedPayload.ranges[0][0].b + payload.ranges[1][0].b == decodedPayload.ranges[1][0].b + payload.ranges[2][0].b == decodedPayload.ranges[2][0].b + payload.ranges[3][0].b == decodedPayload.ranges[3][0].b + payload.fingerprints == decodedPayload.fingerprints + payload.itemSets == decodedPayload.itemSets diff --git a/third-party/nwaku/tests/waku_store_sync/test_protocol.nim b/third-party/nwaku/tests/waku_store_sync/test_protocol.nim new file mode 100644 index 0000000..bd13716 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_sync/test_protocol.nim @@ -0,0 +1,935 @@ +{.used.} + +import + std/[options, sets, random, math, algorithm], + testutils/unittests, + chronos, + libp2p/crypto/crypto +import chronos, chronos/asyncsync +import nimcrypto +import + ../../waku/[ + node/peer_manager, + waku_core, + waku_core/message, + waku_core/message/digest, + waku_store_sync/common, + waku_store_sync/storage/range_processing, + waku_store_sync/reconciliation, + waku_store_sync/transfer, + waku_archive/archive, + waku_archive/driver, + waku_archive/common, + ], + ../testlib/[wakucore, testasync], + ../waku_archive/archive_utils, + ./sync_utils + +proc collectDiffs*( + chan: var Channel[SyncID], diffCount: int +): HashSet[WakuMessageHash] = + var received: HashSet[WakuMessageHash] + while received.len < diffCount: + let sid = chan.recv() # 
synchronous receive + received.incl sid.hash + result = received + +suite "Waku Sync: reconciliation": + var serverSwitch {.threadvar.}: Switch + var clientSwitch {.threadvar.}: Switch + + var + idsChannel {.threadvar.}: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)] + localWants {.threadvar.}: AsyncQueue[PeerId] + remoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] + + var server {.threadvar.}: SyncReconciliation + var client {.threadvar.}: SyncReconciliation + + var serverPeerInfo {.threadvar.}: RemotePeerInfo + var clientPeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + idsChannel = newAsyncQueue[(SyncID, PubsubTopic, ContentTopic)]() + localWants = newAsyncQueue[PeerId]() + remoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]() + + serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo() + + asyncTeardown: + server.stop() + client.stop() + + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + asyncTest "sync 2 nodes both empty": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + check: + idsChannel.len == 0 + remoteNeeds.len == 0 + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), res.error + + check: + idsChannel.len == 0 + remoteNeeds.len == 0 + + asyncTest "sync 2 nodes empty client full server": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let + msg1 = fakeWakuMessage(ts = now(), contentTopic = DefaultContentTopic) 
+ msg2 = fakeWakuMessage(ts = now() + 1, contentTopic = DefaultContentTopic) + msg3 = fakeWakuMessage(ts = now() + 2, contentTopic = DefaultContentTopic) + hash1 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg1) + hash2 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg2) + hash3 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg3) + + server.messageIngress(hash1, DefaultPubsubTopic, msg1) + server.messageIngress(hash2, DefaultPubsubTopic, msg2) + server.messageIngress(hash3, DefaultPubsubTopic, msg3) + + check: + remoteNeeds.len == 0 + localWants.len == 0 + remoteNeeds.contains((clientPeerInfo.peerId, hash1)) == false + remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == false + remoteNeeds.contains((clientPeerInfo.peerId, hash3)) == false + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), res.error + + check: + remoteNeeds.len == 3 + remoteNeeds.contains((clientPeerInfo.peerId, hash1)) == true + remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == true + remoteNeeds.contains((clientPeerInfo.peerId, hash3)) == true + + asyncTest "sync 2 nodes full client empty server": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let + msg1 = fakeWakuMessage(ts = now(), contentTopic = DefaultContentTopic) + msg2 = fakeWakuMessage(ts = now() + 1, contentTopic = DefaultContentTopic) + msg3 = fakeWakuMessage(ts = now() + 2, contentTopic = DefaultContentTopic) + hash1 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg1) + hash2 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg2) + hash3 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg3) + + client.messageIngress(hash1, DefaultPubsubTopic, msg1) + client.messageIngress(hash2, DefaultPubsubTopic, msg2) + client.messageIngress(hash3, 
DefaultPubsubTopic, msg3) + + check: + remoteNeeds.len == 0 + localWants.len == 0 + remoteNeeds.contains((serverPeerInfo.peerId, hash1)) == false + remoteNeeds.contains((serverPeerInfo.peerId, hash2)) == false + remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == false + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), res.error + + check: + remoteNeeds.len == 3 + remoteNeeds.contains((serverPeerInfo.peerId, hash1)) == true + remoteNeeds.contains((serverPeerInfo.peerId, hash2)) == true + remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == true + + asyncTest "sync 2 nodes different hashes": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let + msg1 = fakeWakuMessage(ts = now(), contentTopic = DefaultContentTopic) + msg2 = fakeWakuMessage(ts = now() + 1, contentTopic = DefaultContentTopic) + msg3 = fakeWakuMessage(ts = now() + 2, contentTopic = DefaultContentTopic) + hash1 = computeMessageHash(DefaultPubsubTopic, msg1) + hash2 = computeMessageHash(DefaultPubsubTopic, msg2) + hash3 = computeMessageHash(DefaultPubsubTopic, msg3) + + server.messageIngress(hash1, DefaultPubsubTopic, msg1) + server.messageIngress(hash2, DefaultPubsubTopic, msg2) + client.messageIngress(hash1, DefaultPubsubTopic, msg1) + client.messageIngress(hash3, DefaultPubsubTopic, msg3) + + check: + remoteNeeds.len == 0 + localWants.len == 0 + remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == false + remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == false + + var syncRes = await client.storeSynchronization(some(serverPeerInfo)) + assert syncRes.isOk(), $syncRes.error + + check: + remoteNeeds.len == 2 + remoteNeeds.contains((serverPeerInfo.peerId, hash3)) == true + remoteNeeds.contains((clientPeerInfo.peerId, hash2)) == true + + asyncTest "sync 2 nodes 
different shards": + server = await newTestWakuRecon( + serverSwitch, + @["/waku/2/rs/2/1", "/waku/2/rs/2/2", "/waku/2/rs/2/3", "/waku/2/rs/2/4"], + @[DefaultContentTopic], + DefaultSyncRange, + idsChannel, + localWants, + remoteNeeds, + ) + + client = await newTestWakuRecon( + clientSwitch, + @["/waku/2/rs/2/3", "/waku/2/rs/2/4", "/waku/2/rs/2/5", "/waku/2/rs/2/6"], + @[DefaultContentTopic], + DefaultSyncRange, + idsChannel, + localWants, + remoteNeeds, + ) + + let + msg1 = fakeWakuMessage(ts = now(), contentTopic = DefaultContentTopic) + msg2 = fakeWakuMessage(ts = now() + 1, contentTopic = DefaultContentTopic) + msg3 = fakeWakuMessage(ts = now() + 2, contentTopic = DefaultContentTopic) + msg4 = fakeWakuMessage(ts = now() + 3, contentTopic = DefaultContentTopic) + msg5 = fakeWakuMessage(ts = now() + 4, contentTopic = DefaultContentTopic) + msg6 = fakeWakuMessage(ts = now() + 5, contentTopic = DefaultContentTopic) + hash1 = computeMessageHash("/waku/2/rs/2/1", msg1) + hash2 = computeMessageHash("/waku/2/rs/2/2", msg2) + hash3 = computeMessageHash("/waku/2/rs/2/3", msg3) + hash4 = computeMessageHash("/waku/2/rs/2/4", msg4) + hash5 = computeMessageHash("/waku/2/rs/2/5", msg5) + hash6 = computeMessageHash("/waku/2/rs/2/6", msg6) + + server.messageIngress(hash1, "/waku/2/rs/2/1", msg1) + server.messageIngress(hash2, "/waku/2/rs/2/2", msg2) + server.messageIngress(hash3, "/waku/2/rs/2/3", msg3) + + client.messageIngress(hash4, "/waku/2/rs/2/4", msg4) + client.messageIngress(hash5, "/waku/2/rs/2/5", msg5) + client.messageIngress(hash6, "/waku/2/rs/2/6", msg6) + + check: + remoteNeeds.len == 0 + + var syncRes = await client.storeSynchronization(some(serverPeerInfo)) + assert syncRes.isOk(), $syncRes.error + + check: + remoteNeeds.len == 2 + remoteNeeds.contains((clientPeerInfo.peerId, hash3)) == true + remoteNeeds.contains((serverPeerInfo.peerId, hash4)) == true + + asyncTest "sync 2 nodes same hashes": + server = await newTestWakuRecon( + serverSwitch, @[], @[], 
DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let + msg1 = fakeWakuMessage(ts = now(), contentTopic = DefaultContentTopic) + msg2 = fakeWakuMessage(ts = now() + 1, contentTopic = DefaultContentTopic) + hash1 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg1) + hash2 = computeMessageHash(pubsubTopic = DefaultPubsubTopic, msg2) + + server.messageIngress(hash1, DefaultPubsubTopic, msg1) + client.messageIngress(hash1, DefaultPubsubTopic, msg1) + server.messageIngress(hash2, DefaultPubsubTopic, msg2) + client.messageIngress(hash2, DefaultPubsubTopic, msg2) + + check: + remoteNeeds.len == 0 + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check: + remoteNeeds.len == 0 + + asyncTest "sync 2 nodes 100K msgs 1 diff": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let msgCount = 100_000 + var diffIndex = rand(msgCount) + var diff: WakuMessageHash + + # the sync window is 1 hour, spread msg equally in that time + let timeSlice = calculateTimeRange() + let timeWindow = int64(timeSlice.b) - int64(timeSlice.a) + let (part, _) = divmod(timeWindow, 100_000) + + var timestamp = timeSlice.a + + for i in 0 ..< msgCount: + let msg = fakeWakuMessage(ts = timestamp, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(hash, DefaultPubsubTopic, msg) + + if i != diffIndex: + client.messageIngress(hash, DefaultPubsubTopic, msg) + else: + diff = hash + + timestamp += Timestamp(part) + + check: + remoteNeeds.len == 0 + remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == false + + let res = await 
client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check: + remoteNeeds.len == 1 + remoteNeeds.contains((clientPeerInfo.peerId, WakuMessageHash(diff))) == true + + asyncTest "sync 2 nodes 10K msgs 1K diffs": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + const + msgCount = 10_000 # total messages on the server + diffCount = 1000 # messages initially missing on the client + + ## ── choose which messages will be absent from the client ───────────── + var missingIdx: HashSet[int] + while missingIdx.len < diffCount: + missingIdx.incl rand(0 ..< msgCount) + + ## ── generate messages and pre-load the two reconcilers ─────────────── + let slice = calculateTimeRange() # 1-hour window + let step = (int64(slice.b) - int64(slice.a)) div msgCount + var ts = slice.a + + for i in 0 ..< msgCount: + let + msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + h = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(h, DefaultPubsubTopic, msg) # every msg is on the server + if i notin missingIdx: + client.messageIngress(h, DefaultPubsubTopic, msg) # all but 100 are on the client + ts += Timestamp(step) + + ## ── sanity before we start the round ───────────────────────────────── + check remoteNeeds.len == 0 + + ## ── launch reconciliation from the client towards the server ───────── + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + ## ── verify that ≈1000 diffs were queued (allow 10 % slack) ──────────── + check remoteNeeds.len >= 900 # ≈ 1000 × 0.9 + + asyncTest "sync 2 nodes 400K msgs 100k diffs": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], 
@[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + const + msgCount = 400_000 + diffCount = 100_000 + tol = 1000 + + var diffMsgHashes: HashSet[WakuMessageHash] + var missingIdx: HashSet[int] + while missingIdx.len < diffCount: + missingIdx.incl rand(0 ..< msgCount) + + let slice = calculateTimeRange() + let step = (int64(slice.b) - int64(slice.a)) div msgCount + var ts = slice.a + + for i in 0 ..< msgCount: + let + msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + h = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(h, DefaultPubsubTopic, msg) + if i notin missingIdx: + client.messageIngress(h, DefaultPubsubTopic, msg) + else: + diffMsgHashes.incl h + + ts += Timestamp(step) + + check remoteNeeds.len == 0 + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len >= diffCount - tol and remoteNeeds.len < diffCount + let (_, deliveredHash) = await remoteNeeds.get() + check deliveredHash in diffMsgHashes + + asyncTest "sync 2 nodes 100 msgs 20 diff – 1-second window": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + const + msgCount = 100 + diffCount = 20 + + var missingIdx: seq[int] = @[] + while missingIdx.len < diffCount: + let n = rand(0 ..< msgCount) + if n notin missingIdx: + missingIdx.add n + + var diffMsgHashes: HashSet[WakuMessageHash] + + let sliceEnd = now() + let sliceStart = Timestamp uint64(sliceEnd) - 1_000_000_000'u64 + let step = (int64(sliceEnd) - int64(sliceStart)) div msgCount + var ts = sliceStart + + for i in 0 ..< msgCount: + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, DefaultPubsubTopic, msg) + + if i in missingIdx: + 
diffMsgHashes.incl hash + else: + client.messageIngress(hash, DefaultPubsubTopic, msg) + + ts += Timestamp(step) + + check remoteNeeds.len == 0 + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == diffCount + + for _ in 0 ..< diffCount: + let (_, deliveredHash) = await remoteNeeds.get() + check deliveredHash in diffMsgHashes + + asyncTest "sync 2 nodes 500k msgs 300k diff – stress window": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + const + msgCount = 500_000 + diffCount = 300_000 + + randomize() + var allIdx = newSeq[int](msgCount) + for i in 0 ..< msgCount: + allIdx[i] = i + shuffle(allIdx) + + let missingIdx = allIdx[0 ..< diffCount] + var missingSet: HashSet[int] + for idx in missingIdx: + missingSet.incl idx + + var diffMsgHashes: HashSet[WakuMessageHash] + + let sliceEnd = now() + let sliceStart = Timestamp uint64(sliceEnd) - 1_000_000_000'u64 + let step = (int64(sliceEnd) - int64(sliceStart)) div msgCount + var ts = sliceStart + + for i in 0 ..< msgCount: + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, DefaultPubsubTopic, msg) + + if i in missingSet: + diffMsgHashes.incl hash + else: + client.messageIngress(hash, DefaultPubsubTopic, msg) + + ts += Timestamp(step) + + check remoteNeeds.len == 0 + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == diffCount + + for _ in 0 ..< 1000: + let (_, deliveredHash) = await remoteNeeds.get() + check deliveredHash in diffMsgHashes + + asyncTest "sync 2 nodes, 40 msgs: 18 in-window diff, 20 out-window ignored": + server = await newTestWakuRecon( + serverSwitch, 
@[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + const + diffInWin = 18 + diffOutWin = 20 + stepOutNs = 100_000_000'u64 + outOffsetNs = 2_300_000_000'u64 # for 20 mesg they sent 2 seconds earlier + + randomize() + + let nowNs = getNowInNanosecondTime() + let sliceStart = Timestamp(uint64(nowNs) - 700_000_000'u64) + let sliceEnd = nowNs + let stepIn = (sliceEnd.int64 - sliceStart.int64) div diffInWin + + let oldStart = Timestamp(uint64(sliceStart) - outOffsetNs) + let stepOut = Timestamp(stepOutNs) + + var inWinHashes, outWinHashes: HashSet[WakuMessageHash] + + var ts = sliceStart + (Timestamp(stepIn) * 2) + for _ in 0 ..< diffInWin: + let msg = fakeWakuMessage(ts = Timestamp ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, DefaultPubsubTopic, msg) + inWinHashes.incl hash + ts += Timestamp(stepIn) + + ts = oldStart + for _ in 0 ..< diffOutWin: + let msg = fakeWakuMessage(ts = Timestamp ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + server.messageIngress(hash, DefaultPubsubTopic, msg) + outWinHashes.incl hash + ts += Timestamp(stepOut) + + check remoteNeeds.len == 0 + + let oneSec = timer.seconds(1) + + server = await newTestWakuRecon( + serverSwitch, @[], @[], oneSec, idsChannel, localWants, remoteNeeds + ) + + client = await newTestWakuRecon( + clientSwitch, @[], @[], oneSec, idsChannel, localWants, remoteNeeds + ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == diffInWin + + for _ in 0 ..< diffInWin: + let (_, deliveredHashes) = await remoteNeeds.popFirst() + check deliveredHashes in inWinHashes + check deliveredHashes notin outWinHashes + + asyncTest "hash-fingerprint 
collision, same timestamp – stable sort": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let ts = Timestamp(getNowInNanosecondTime()) + + var msg1 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + var msg2 = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + msg2.payload[0] = msg2.payload[0] xor 0x01 + var h1 = computeMessageHash(DefaultPubsubTopic, msg1) + var h2 = computeMessageHash(DefaultPubsubTopic, msg2) + + for i in 0 ..< 8: + h2[i] = h1[i] + for i in 0 ..< 8: + check h1[i] == h2[i] + + check h1 != h2 + + server.messageIngress(h1, DefaultPubsubTopic, msg1) + client.messageIngress(h2, DefaultPubsubTopic, msg2) + + check remoteNeeds.len == 0 + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + + var vec = @[SyncID(time: ts, hash: h2), SyncID(time: ts, hash: h1)] + vec.shuffle() + vec.sort() + + let hFirst = vec[0].hash + let hSecond = vec[1].hash + check vec[0].time == ts and vec[1].time == ts + + asyncTest "malformed message-ID is ignored during reconciliation": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let nowTs = Timestamp(getNowInNanosecondTime()) + + let goodMsg = fakeWakuMessage(ts = nowTs, contentTopic = DefaultContentTopic) + var goodHash = computeMessageHash(DefaultPubsubTopic, 
goodMsg) + + var badHash: WakuMessageHash + for i in 0 ..< 32: + badHash[i] = 0'u8 + let badMsg = fakeWakuMessage(ts = Timestamp(0), contentTopic = DefaultContentTopic) + + server.messageIngress(goodHash, DefaultPubsubTopic, goodMsg) + server.messageIngress(badHash, DefaultPubsubTopic, badMsg) + + check remoteNeeds.len == 0 + + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + let (_, neededHash) = await remoteNeeds.get() + check neededHash == goodHash + check neededHash != badHash + + asyncTest "malformed ID: future-timestamp msg is ignored": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let nowNs = getNowInNanosecondTime() + let tsNow = Timestamp(nowNs) + + let goodMsg = fakeWakuMessage(ts = tsNow, contentTopic = DefaultContentTopic) + let goodHash = computeMessageHash(DefaultPubsubTopic, goodMsg) + + const tenYearsSec = 10 * 365 * 24 * 60 * 60 + let futureNs = nowNs + int64(tenYearsSec) * 1_000_000_000'i64 + let badTs = Timestamp(futureNs.uint64) + + let badMsg = fakeWakuMessage(ts = badTs, contentTopic = DefaultContentTopic) + let badHash = computeMessageHash(DefaultPubsubTopic, badMsg) + + server.messageIngress(goodHash, DefaultPubsubTopic, goodMsg) + server.messageIngress(badHash, DefaultPubsubTopic, badMsg) + + check remoteNeeds.len == 0 + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], 
@[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + let (_, neededHash) = await remoteNeeds.get() + check neededHash == goodHash + check neededHash != badHash + + asyncTest "duplicate ID is queued only once": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let ts = Timestamp(getNowInNanosecondTime()) + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let h = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(h, DefaultPubsubTopic, msg) + server.messageIngress(h, DefaultPubsubTopic, msg) + check remoteNeeds.len == 0 + + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check remoteNeeds.len == 1 + let (_, neededHash) = await remoteNeeds.get() + check neededHash == h + + asyncTest "sync terminates immediately when no diffs exist": + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsChannel, localWants, remoteNeeds + ) + + let ts = Timestamp(getNowInNanosecondTime()) + let msg = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + let hash = computeMessageHash(DefaultPubsubTopic, msg) + + server.messageIngress(hash, DefaultPubsubTopic, msg) + 
client.messageIngress(hash, DefaultPubsubTopic, msg) + + let idsQ = newAsyncQueue[(SyncID, PubsubTopic, ContentTopic)]() + let wantsQ = newAsyncQueue[PeerId]() + let needsQ = newAsyncQueue[(PeerId, Fingerprint)]() + + server = await newTestWakuRecon( + serverSwitch, @[], @[], DefaultSyncRange, idsQ, wantsQ, needsQ + ) + client = await newTestWakuRecon( + clientSwitch, @[], @[], DefaultSyncRange, idsQ, wantsQ, needsQ + ) + + defer: + server.stop() + client.stop() + + let res = await client.storeSynchronization(some(serverPeerInfo)) + assert res.isOk(), $res.error + + check needsQ.len == 0 + +suite "Waku Sync: transfer": + var + serverSwitch {.threadvar.}: Switch + clientSwitch {.threadvar.}: Switch + + var + serverDriver {.threadvar.}: ArchiveDriver + clientDriver {.threadvar.}: ArchiveDriver + serverArchive {.threadvar.}: WakuArchive + clientArchive {.threadvar.}: WakuArchive + + var + serverIds {.threadvar.}: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)] + serverLocalWants {.threadvar.}: AsyncQueue[PeerId] + serverRemoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] + clientIds {.threadvar.}: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)] + clientLocalWants {.threadvar.}: AsyncQueue[PeerId] + clientRemoteNeeds {.threadvar.}: AsyncQueue[(PeerId, WakuMessageHash)] + + var + server {.threadvar.}: SyncTransfer + client {.threadvar.}: SyncTransfer + + var + serverPeerInfo {.threadvar.}: RemotePeerInfo + clientPeerInfo {.threadvar.}: RemotePeerInfo + + asyncSetup: + serverSwitch = newTestSwitch() + clientSwitch = newTestSwitch() + + await allFutures(serverSwitch.start(), clientSwitch.start()) + + serverDriver = newSqliteArchiveDriver() + clientDriver = newSqliteArchiveDriver() + + serverArchive = newWakuArchive(serverDriver) + clientArchive = newWakuArchive(clientDriver) + + let + serverPeerManager = PeerManager.new(serverSwitch) + clientPeerManager = PeerManager.new(clientSwitch) + + serverIds = newAsyncQueue[(SyncID, PubsubTopic, ContentTopic)]() + 
serverLocalWants = newAsyncQueue[PeerId]() + serverRemoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]() + + server = SyncTransfer.new( + peerManager = serverPeerManager, + wakuArchive = serverArchive, + idsTx = serverIds, + localWantsRx = serverLocalWants, + remoteNeedsRx = serverRemoteNeeds, + ) + + clientIds = newAsyncQueue[(SyncID, PubsubTopic, ContentTopic)]() + clientLocalWants = newAsyncQueue[PeerId]() + clientRemoteNeeds = newAsyncQueue[(PeerId, WakuMessageHash)]() + + client = SyncTransfer.new( + peerManager = clientPeerManager, + wakuArchive = clientArchive, + idsTx = clientIds, + localWantsRx = clientLocalWants, + remoteNeedsRx = clientRemoteNeeds, + ) + + server.start() + client.start() + + serverSwitch.mount(server) + clientSwitch.mount(client) + + serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo() + clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo() + + serverPeerManager.addPeer(clientPeerInfo) + clientPeermanager.addPeer(serverPeerInfo) + + asyncTeardown: + server.stop() + client.stop() + + await allFutures(serverSwitch.stop(), clientSwitch.stop()) + + asyncTest "transfer 1 message": + let msg = fakeWakuMessage() + let hash = computeMessageHash(DefaultPubsubTopic, msg) + let msgs = @[msg] + + serverDriver = serverDriver.put(DefaultPubsubTopic, msgs) + + # add server info to client want channel + let want = serverPeerInfo.peerId + await clientLocalWants.put(want) + + # add client info and msg hash to server need channel + let need = (clientPeerInfo.peerId, hash) + await serverRemoteNeeds.put(need) + + # give time for transfer to happen + await sleepAsync(500.milliseconds) + + var query = ArchiveQuery() + query.includeData = true + query.hashes = @[hash] + + let res = await clientArchive.findMessages(query) + assert res.isOk(), $res.error + + let response = res.get() + + check: + response.messages.len > 0 + + ## Disabled until we impl. 
DOS protection again + #[ asyncTest "Check the exact missing messages are received": + let timeSlice = calculateTimeRange() + let timeWindow = int64(timeSlice.b) - int64(timeSlice.a) + let (part, _) = divmod(timeWindow, 3) + + var ts = timeSlice.a + + let msgA = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + ts += Timestamp(part) + let msgB = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + ts += Timestamp(part) + let msgC = fakeWakuMessage(ts = ts, contentTopic = DefaultContentTopic) + + let hA = computeMessageHash(DefaultPubsubTopic, msgA) + let hB = computeMessageHash(DefaultPubsubTopic, msgB) + let hC = computeMessageHash(DefaultPubsubTopic, msgC) + + discard serverDriver.put(DefaultPubsubTopic, @[msgA, msgB, msgC]) + discard clientDriver.put(DefaultPubsubTopic, @[msgA]) + + await serverRemoteNeeds.put((clientPeerInfo.peerId, hB)) + await serverRemoteNeeds.put((clientPeerInfo.peerId, hC)) + await clientLocalWants.put(serverPeerInfo.peerId) + + await sleepAsync(1.seconds) + check serverRemoteNeeds.len == 0 + + let sid1 = await clientIds.get() + let sid2 = await clientIds.get() + + let received = [sid1.hash, sid2.hash].toHashSet() + let expected = [hB, hC].toHashSet + + check received == expected + + check clientIds.len == 0 ]# diff --git a/third-party/nwaku/tests/waku_store_sync/test_range_split.nim b/third-party/nwaku/tests/waku_store_sync/test_range_split.nim new file mode 100644 index 0000000..546f2cf --- /dev/null +++ b/third-party/nwaku/tests/waku_store_sync/test_range_split.nim @@ -0,0 +1,297 @@ +import unittest, nimcrypto, std/sequtils, results +import ../../waku/waku_store_sync/[reconciliation, common] +import ../../waku/waku_store_sync/storage/seq_storage +import ../../waku/waku_core/message/digest +import ../../waku/waku_core/topics/pubsub_topic +import ../../waku/waku_core/topics/content_topic + +proc toDigest(s: string): WakuMessageHash = + let d = nimcrypto.keccak256.digest((s & "").toOpenArrayByte(0, (s.len - 1))) + 
var res: WakuMessageHash + for i in 0 .. 31: + res[i] = d.data[i] + return res + +proc `..`(a, b: SyncID): Slice[SyncID] = + Slice[SyncID](a: a, b: b) + +suite "Waku Sync – reconciliation": + test "fan-out: eight fingerprint sub-ranges for large slice": + const N = 2_048 + const mismatchI = 70 + + let local = SeqStorage.new(@[], @[], @[]) + let remote = SeqStorage.new(@[], @[], @[]) + + var baseHashMismatch: WakuMessageHash + var remoteHashMismatch: WakuMessageHash + + for i in 0 ..< N: + let ts = 1000 + i + let hashLocal = toDigest("msg" & $i) + + local.insert( + SyncID(time: ts, hash: hashLocal), DefaultPubsubTopic, DefaultContentTopic + ).isOkOr: + assert false, "failed to insert hash: " & $error + + var hashRemote = hashLocal + if i == mismatchI: + baseHashMismatch = hashLocal + remoteHashMismatch = toDigest("msg" & $i & "_x") + hashRemote = remoteHashMismatch + + remote.insert( + SyncID(time: ts, hash: hashRemote), DefaultPubsubTopic, DefaultContentTopic + ).isOkOr: + assert false, "failed to insert hash: " & $error + + var z: WakuMessageHash + let whole = SyncID(time: 1000, hash: z) .. 
SyncID(time: 1000 + N - 1, hash: z) + + check local.computeFingerprint(whole, @[DefaultPubsubTopic], @[DefaultContentTopic]) != + remote.computeFingerprint(whole, @[DefaultPubsubTopic], @[DefaultContentTopic]) + + let remoteFp = + remote.computeFingerprint(whole, @[DefaultPubsubTopic], @[DefaultContentTopic]) + let payload = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(whole, RangeType.Fingerprint)], + fingerprints: @[remoteFp], + itemSets: @[], + ) + + var toSend, toRecv: seq[WakuMessageHash] + let reply = local.processPayload(payload, toSend, toRecv) + + check reply.ranges.len == 8 + check reply.ranges.allIt(it[1] == RangeType.Fingerprint) + check reply.itemSets.len == 0 + check reply.fingerprints.len == 8 + + let mismTime = 1000 + mismatchI + var covered = false + for (slc, _) in reply.ranges: + if mismTime >= slc.a.time and mismTime <= slc.b.time: + covered = true + break + check covered + + check toSend.len == 0 + check toRecv.len == 0 + + test "splits mismatched fingerprint into two sub-ranges then item-set": + const threshold = 4 + const partitions = 2 + + let local = + SeqStorage.new(@[], @[], @[], threshold = threshold, partitions = partitions) + let remote = + SeqStorage.new(@[], @[], @[], threshold = threshold, partitions = partitions) + + var mismatchHash: WakuMessageHash + for i in 0 ..< 8: + let t = 1000 + i + let baseHash = toDigest("msg" & $i) + + var localHash = baseHash + var remoteHash = baseHash + + if i == 3: + mismatchHash = toDigest("msg" & $i & "_x") + localHash = mismatchHash + + discard local.insert( + SyncID(time: t, hash: localHash), DefaultPubsubTopic, DefaultContentTopic + ) + discard remote.insert( + SyncID(time: t, hash: remoteHash), DefaultPubsubTopic, DefaultContentTopic + ) + + var zeroHash: WakuMessageHash + let wholeRange = + SyncID(time: 1000, hash: zeroHash) .. 
SyncID(time: 1007, hash: zeroHash) + + var toSend, toRecv: seq[WakuMessageHash] + + let payload = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(wholeRange, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + wholeRange, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + ], + itemSets: @[], + ) + + let reply = local.processPayload(payload, toSend, toRecv) + + check reply.ranges.len == partitions + check reply.itemSets.len == partitions + + check reply.itemSets.anyIt( + it.elements.anyIt(it.hash == mismatchHash and it.time == 1003) + ) + + test "second round when N =2048 & local ": + const N = 2_048 + const mismatchI = 70 + + let local = SeqStorage.new(@[], @[], @[]) + let remote = SeqStorage.new(@[], @[], @[]) + + var baseHashMismatch, remoteHashMismatch: WakuMessageHash + + for i in 0 ..< N: + let ts = 1000 + i + let hashLocal = toDigest("msg" & $i) + + local.insert( + SyncID(time: ts, hash: hashLocal), DefaultPubsubTopic, DefaultContentTopic + ).isOkOr: + assert false, "failed to insert hash: " & $error + + var hashRemote = hashLocal + if i == mismatchI: + baseHashMismatch = hashLocal + remoteHashMismatch = toDigest("msg" & $i & "_x") + hashRemote = remoteHashMismatch + + remote.insert( + SyncID(time: ts, hash: hashRemote), DefaultPubsubTopic, DefaultContentTopic + ).isOkOr: + assert false, "failed to insert hash: " & $error + + var zero: WakuMessageHash + let sliceWhole = + SyncID(time: 1000, hash: zero) .. 
SyncID(time: 1000 + N - 1, hash: zero) + check local.computeFingerprint( + sliceWhole, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) != + remote.computeFingerprint( + sliceWhole, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + + let payload1 = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(sliceWhole, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + sliceWhole, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + ], + itemSets: @[], + ) + + var toSend, toRecv: seq[WakuMessageHash] + let reply1 = local.processPayload(payload1, toSend, toRecv) + + check reply1.ranges.len == 8 + check reply1.ranges.allIt(it[1] == RangeType.Fingerprint) + + let mismTime = 1000 + mismatchI + var subSlice: Slice[SyncID] + for (sl, _) in reply1.ranges: + if mismTime >= sl.a.time and mismTime <= sl.b.time: + subSlice = sl + break + check subSlice.a.time != 0 + + let payload2 = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(subSlice, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + subSlice, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + ], + itemSets: @[], + ) + + var toSend2, toRecv2: seq[WakuMessageHash] + let reply2 = local.processPayload(payload2, toSend2, toRecv2) + + check reply2.ranges.len == 8 + check reply2.ranges.allIt(it[1] == RangeType.ItemSet) + check reply2.itemSets.len == 8 + + var matchCount = 0 + for iset in reply2.itemSets: + if iset.elements.anyIt(it.time == mismTime and it.hash == baseHashMismatch): + inc matchCount + check not iset.elements.anyIt(it.hash == remoteHashMismatch) + check matchCount == 1 + + check toSend2.len == 0 + check toRecv2.len == 0 + + test "second-round payload remote": + let local = SeqStorage.new(@[], @[], @[]) + let remote = SeqStorage.new(@[], @[], @[]) + + var baseHash: WakuMessageHash + var alteredHash: WakuMessageHash + + for i in 0 ..< 8: + let ts = 1000 + i + 
let hashLocal = toDigest("msg" & $i) + + local.insert( + SyncID(time: ts, hash: hashLocal), DefaultPubsubTopic, DefaultContentTopic + ).isOkOr: + assert false, "failed to insert hash: " & $error + + var hashRemote = hashLocal + if i == 3: + baseHash = hashLocal + alteredHash = toDigest("msg" & $i & "_x") + hashRemote = alteredHash + + remote.insert( + SyncID(time: ts, hash: hashRemote), DefaultPubsubTopic, DefaultContentTopic + ).isOkOr: + assert false, "failed to insert hash: " & $error + + var zero: WakuMessageHash + let slice = SyncID(time: 1000, hash: zero) .. SyncID(time: 1007, hash: zero) + + check local.computeFingerprint(slice, @[DefaultPubsubTopic], @[DefaultContentTopic]) != + remote.computeFingerprint(slice, @[DefaultPubsubTopic], @[DefaultContentTopic]) + + var toSend1, toRecv1: seq[WakuMessageHash] + + let rangeData = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(slice, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + slice, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + ], + itemSets: @[], + ) + + let rep1 = local.processPayload(rangeData, toSend1, toRecv1) + + check rep1.ranges.len == 1 + check rep1.ranges[0][1] == RangeType.ItemSet + check toSend1.len == 0 + check toRecv1.len == 0 + + var toSend2, toRecv2: seq[WakuMessageHash] + discard remote.processPayload(rep1, toSend2, toRecv2) + + check toSend2.len == 1 + check toSend2[0] == alteredHash + check toRecv2.len == 1 + check toRecv2[0] == baseHash diff --git a/third-party/nwaku/tests/waku_store_sync/test_state_transition.nim b/third-party/nwaku/tests/waku_store_sync/test_state_transition.nim new file mode 100644 index 0000000..d94d6be --- /dev/null +++ b/third-party/nwaku/tests/waku_store_sync/test_state_transition.nim @@ -0,0 +1,291 @@ +import unittest, nimcrypto, std/sequtils +import ../../waku/waku_store_sync/[reconciliation, common] +import ../../waku/waku_store_sync/storage/seq_storage +import 
../../waku/waku_core/message/digest +import ../../waku/waku_core/topics/pubsub_topic +import ../../waku/waku_core/topics/content_topic + +proc toDigest*(s: string): WakuMessageHash = + let d = nimcrypto.keccak256.digest((s & "").toOpenArrayByte(0, s.high)) + for i in 0 .. 31: + result[i] = d.data[i] + +proc `..`(a, b: SyncID): Slice[SyncID] = + Slice[SyncID](a: a, b: b) + +suite "Waku Sync – reconciliation": + test "Fingerprint → ItemSet → zero (default thresholds)": + const N = 2_000 + const idx = 137 + + let local = SeqStorage.new(@[], @[], @[]) + let remote = SeqStorage.new(@[], @[], @[]) + + var baseH, altH: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard + local.insert(SyncID(time: ts, hash: h), DefaultPubsubTopic, DefaultContentTopic) + var hr = h + if i == idx: + baseH = h + altH = toDigest("msg" & $i & "x") + hr = altH + discard remote.insert( + SyncID(time: ts, hash: hr), DefaultPubsubTopic, DefaultContentTopic + ) + + var z: WakuMessageHash + let whole = SyncID(time: 1000, hash: z) .. 
SyncID(time: 1000 + N - 1, hash: z) + + var s1, r1: seq[WakuMessageHash] + let p1 = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(whole, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + whole, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + ], + itemSets: @[], + ) + let rep1 = local.processPayload(p1, s1, r1) + check rep1.ranges.len == 8 + check rep1.ranges.allIt(it[1] == RangeType.Fingerprint) + + let mismT = 1000 + idx + let sub = + rep1.ranges.filterIt(mismT >= it[0].a.time and mismT <= it[0].b.time)[0][0] + + var s2, r2: seq[WakuMessageHash] + let p2 = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(sub, RangeType.Fingerprint)], + fingerprints: + @[remote.computeFingerprint(sub, @[DefaultPubsubTopic], @[DefaultContentTopic])], + itemSets: @[], + ) + let rep2 = local.processPayload(p2, s2, r2) + check rep2.ranges.len == 8 + check rep2.ranges.allIt(it[1] == RangeType.ItemSet) + + var s3, r3: seq[WakuMessageHash] + discard remote.processPayload(rep2, s3, r3) + check s3.len == 1 and s3[0] == altH + check r3.len == 1 and r3[0] == baseH + + discard local.insert( + SyncID(time: mismT, hash: altH), DefaultPubsubTopic, DefaultContentTopic + ) + discard remote.insert( + SyncID(time: mismT, hash: baseH), DefaultPubsubTopic, DefaultContentTopic + ) + + var s4, r4: seq[WakuMessageHash] + let p3 = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(sub, RangeType.Fingerprint)], + fingerprints: + @[remote.computeFingerprint(sub, @[DefaultPubsubTopic], @[DefaultContentTopic])], + itemSets: @[], + ) + let rep3 = local.processPayload(p3, s4, r4) + check rep3.ranges.len == 0 + check s4.len == 0 and r4.len == 0 + + test "test 2 ranges includes 1 skip": + const N = 120 + const pivot = 60 + + let local = SeqStorage.new(@[], @[], @[]) + let remote = SeqStorage.new(@[], @[], @[]) + + var 
diffHash: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard + local.insert(SyncID(time: ts, hash: h), DefaultPubsubTopic, DefaultContentTopic) + var hr: WakuMessageHash + if i >= pivot: + diffHash = toDigest("msg" & $i & "_x") + hr = diffHash + else: + hr = h + + discard remote.insert( + SyncID(time: ts, hash: hr), DefaultPubsubTopic, DefaultContentTopic + ) + + var z: WakuMessageHash + let sliceA = SyncID(time: 1000, hash: z) .. SyncID(time: 1059, hash: z) + let sliceB = SyncID(time: 1060, hash: z) .. SyncID(time: 1119, hash: z) + + var s, r: seq[WakuMessageHash] + let payload = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(sliceA, RangeType.Fingerprint), (sliceB, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + sliceA, @[DefaultPubsubTopic], @[DefaultContentTopic] + ), + remote.computeFingerprint( + sliceB, @[DefaultPubsubTopic], @[DefaultContentTopic] + ), + ], + itemSets: @[], + ) + let reply = local.processPayload(payload, s, r) + + check reply.ranges.len == 2 + check reply.ranges[0][1] == RangeType.Skip + check reply.ranges[1][1] == RangeType.ItemSet + check reply.itemSets.len == 1 + check not reply.itemSets[0].elements.anyIt(it.hash == diffHash) + + test "custom threshold (50) → eight ItemSets first round": + const N = 300 + const idx = 123 + + let local = SeqStorage.new(capacity = N, threshold = 50, partitions = 8) + let remote = SeqStorage.new(capacity = N, threshold = 50, partitions = 8) + + var baseH, altH: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard + local.insert(SyncID(time: ts, hash: h), DefaultPubsubTopic, DefaultContentTopic) + var hr = h + if i == idx: + baseH = h + altH = toDigest("msg" & $i & "_x") + hr = altH + discard remote.insert( + SyncID(time: ts, hash: hr), DefaultPubsubTopic, DefaultContentTopic + ) + + var z: WakuMessageHash + let slice = SyncID(time: 
1000, hash: z) .. SyncID(time: 1000 + N - 1, hash: z) + + var toS, toR: seq[WakuMessageHash] + let p = RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(slice, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + slice, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + ], + itemSets: @[], + ) + let reply = local.processPayload(p, toS, toR) + + check reply.ranges.len == 8 + check reply.ranges.allIt(it[1] == RangeType.ItemSet) + check reply.itemSets.len == 8 + + let mismT = 1000 + idx + var hit = 0 + for ist in reply.itemSets: + if ist.elements.anyIt(it.time == mismT and it.hash == baseH): + inc hit + check hit == 1 + + test "test N=80K,3FP,2IS,SKIP": + const N = 80_000 + const bad = N - 10 + + let local = SeqStorage.new(@[], @[], @[]) + let remote = SeqStorage.new(@[], @[], @[]) + + var baseH, altH: WakuMessageHash + for i in 0 ..< N: + let ts = 1000 + i + let h = toDigest("msg" & $i) + discard + local.insert(SyncID(time: ts, hash: h), DefaultPubsubTopic, DefaultContentTopic) + + let hr = + if i == bad: + baseH = h + altH = toDigest("msg" & $i & "_x") + altH + else: + h + discard remote.insert( + SyncID(time: ts, hash: hr), DefaultPubsubTopic, DefaultContentTopic + ) + + var slice = + SyncID(time: 1000, hash: EmptyFingerprint) .. + SyncID(time: 1000 + N - 1, hash: FullFingerprint) + + proc fpReply(s: Slice[SyncID], sendQ, recvQ: var seq[WakuMessageHash]): RangesData = + local.processPayload( + RangesData( + pubsubTopics: @[DefaultPubsubTopic], + contentTopics: @[DefaultContentTopic], + ranges: @[(s, RangeType.Fingerprint)], + fingerprints: + @[ + remote.computeFingerprint( + s, @[DefaultPubsubTopic], @[DefaultContentTopic] + ) + ], + itemSets: @[], + ), + sendQ, + recvQ, + ) + + var tmpS, tmpR: seq[WakuMessageHash] + + for r in 1 .. 
3: + let rep = fpReply(slice, tmpS, tmpR) + check rep.ranges.len == 8 + check rep.ranges.allIt(it[1] == RangeType.Fingerprint) + for (sl, _) in rep.ranges: + if local.computeFingerprint(sl, @[DefaultPubsubTopic], @[DefaultContentTopic]) != + remote.computeFingerprint(sl, @[DefaultPubsubTopic], @[DefaultContentTopic]): + slice = sl + break + + let rep4 = fpReply(slice, tmpS, tmpR) + check rep4.ranges.len == 8 + check rep4.ranges.allIt(it[1] == RangeType.ItemSet) + for (sl, _) in rep4.ranges: + if sl.a.time <= 1000 + bad and sl.b.time >= 1000 + bad: + slice = sl + break + + var send5, recv5: seq[WakuMessageHash] + let rep5 = fpReply(slice, send5, recv5) + check rep5.ranges.len == 1 + check rep5.ranges[0][1] == RangeType.ItemSet + + var qSend, qRecv: seq[WakuMessageHash] + discard remote.processPayload(rep5, qSend, qRecv) + check qSend.len == 1 and qSend[0] == altH + check qRecv.len == 1 and qRecv[0] == baseH + + discard local.insert( + SyncID(time: slice.a.time, hash: altH), DefaultPubsubTopic, DefaultContentTopic + ) + discard remote.insert( + SyncID(time: slice.a.time, hash: baseH), DefaultPubsubTopic, DefaultContentTopic + ) + + var send6, recv6: seq[WakuMessageHash] + let rep6 = fpReply(slice, send6, recv6) + check rep6.ranges.len == 0 + check send6.len == 0 and recv6.len == 0 diff --git a/third-party/nwaku/tests/waku_store_sync/test_storage.nim b/third-party/nwaku/tests/waku_store_sync/test_storage.nim new file mode 100644 index 0000000..930d3f7 --- /dev/null +++ b/third-party/nwaku/tests/waku_store_sync/test_storage.nim @@ -0,0 +1,278 @@ +{.used.} + +import std/[options, random, sequtils, packedsets], testutils/unittests, chronos + +import + ../../waku/waku_core, + ../../waku/waku_store_sync/common, + ../../waku/waku_store_sync/storage/seq_storage, + ./sync_utils + +suite "Waku Sync Storage": + test "process hash range": + var rng = initRand() + let count = 10_000 + var elements = newSeqOfCap[SyncID](count) + var pubsub = newSeqOfCap[PubsubTopic](count) + var 
content = newSeqOfCap[ContentTopic](count) + + var emptySet = @[0].toPackedSet() + emptySet.excl(0) + + for i in 0 ..< count: + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements.add(id) + pubsub.add(DefaultPubsubTopic) + content.add(DefaultContentTopic) + + var storage1 = SeqStorage.new(elements, pubsub, content) + var storage2 = SeqStorage.new(elements, pubsub, content) + + let lb = elements[0] + let ub = elements[count - 1] + let bounds = lb .. ub + let fingerprint1 = storage1.computeFingerprint(bounds, @[], @[]) + + var outputPayload: RangesData + + storage2.processFingerprintRange( + bounds, emptySet, emptySet, fingerprint1, outputPayload + ) + + let expected = + RangesData(ranges: @[(bounds, RangeType.Skip)], fingerprints: @[], itemSets: @[]) + + check: + outputPayload == expected + + test "process item set range": + var rng = initRand() + let count = 1000 + var elements1 = newSeqOfCap[SyncID](count) + var elements2 = newSeqOfCap[SyncID](count) + var pubsub = newSeqOfCap[PubsubTopic](count) + var content = newSeqOfCap[ContentTopic](count) + + var emptySet = @[0].toPackedSet() + emptySet.excl(0) + + var diffs: seq[Fingerprint] + + for i in 0 ..< count: + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements1.add(id) + if rng.rand(0 .. 9) == 0: + elements2.add(id) + else: + diffs.add(id.hash) + + pubsub.add(DefaultPubsubTopic) + content.add(DefaultContentTopic) + + var storage1 = SeqStorage.new(elements1, pubsub, content) + + let lb = elements1[0] + let ub = elements1[count - 1] + let bounds = lb .. 
ub + + let itemSet2 = ItemSet(elements: elements2, reconciled: true) + + var + toSend: seq[Fingerprint] + toRecv: seq[Fingerprint] + outputPayload: RangesData + + storage1.processItemSetRange( + bounds, emptySet, emptySet, itemSet2, toSend, toRecv, outputPayload + ) + + check: + toSend == diffs + + test "insert new element": + var rng = initRand() + + let storage = SeqStorage.new(10) + + let element1 = SyncID(time: Timestamp(1000), hash: randomHash(rng)) + let element2 = SyncID(time: Timestamp(2000), hash: randomHash(rng)) + + let res1 = storage.insert(element1, DefaultPubsubTopic, DefaultContentTopic) + assert res1.isOk(), $res1.error + let count1 = storage.length() + + let res2 = storage.insert(element2, DefaultPubsubTopic, DefaultContentTopic) + assert res2.isOk(), $res2.error + let count2 = storage.length() + + check: + count1 == 1 + count2 == 2 + + test "insert duplicate": + var rng = initRand() + + let element = SyncID(time: Timestamp(1000), hash: randomHash(rng)) + + let storage = + SeqStorage.new(@[element], @[DefaultPubsubTopic], @[DefaultContentTopic]) + + let res = storage.insert(element, DefaultPubsubTopic, DefaultContentTopic) + + check: + res.isErr() == false + storage.length() == 1 + + test "prune elements": + var rng = initRand() + let count = 1000 + var elements = newSeqOfCap[SyncID](count) + var pubsub = newSeqOfCap[PubsubTopic](count) + var content = newSeqOfCap[ContentTopic](count) + + var emptySet = @[0].toPackedSet() + emptySet.excl(0) + + for i in 0 ..< count: + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements.add(id) + pubsub.add(DefaultPubsubTopic) + content.add(DefaultContentTopic) + + let storage = SeqStorage.new(elements, pubsub, content) + + let beforeCount = storage.length() + + let pruned = storage.prune(Timestamp(500)) + + let afterCount = storage.length() + + check: + beforeCount == 1000 + pruned == 500 + afterCount == 500 + + test "topics recycling": + var rng = initRand() + let count = 1000 + var elements = 
newSeqOfCap[SyncID](count) + var pubsub = newSeqOfCap[PubsubTopic](count) + var content = newSeqOfCap[ContentTopic](count) + + var emptySet = @[0].toPackedSet() + emptySet.excl(0) + + for i in 0 ..< (count div 2): + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements.add(id) + pubsub.add(DefaultPubsubTopic) + content.add("my/custom/topic") + + for i in (count div 2) ..< count: + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements.add(id) + pubsub.add(DefaultPubsubTopic) + content.add(DefaultContentTopic) + + let storage = SeqStorage.new(elements, pubsub, content) + + let beforeCount = storage.unusedContentTopicsLen() + + let pruned = storage.prune(Timestamp(500)) + + let afterCount = storage.unusedContentTopicsLen() + + check: + beforeCount == 0 + pruned == 500 + afterCount == 1 + + let id = SyncID(time: Timestamp(1001), hash: randomHash(rng)) + let res = storage.insert(id, DefaultPubsubTopic, "my/other/topic") + assert res.isOk(), $res.error + + let reuseCount = storage.unusedContentTopicsLen() + + check: + reuseCount == 0 + + ## disabled tests are rough benchmark + #[ test "10M fingerprint": + var rng = initRand() + + let count = 10_000_000 + + var elements = newSeqOfCap[SyncID](count) + + for i in 0 .. count: + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements.add(id) + + let storage = SeqStorage.new(elements) + + let before = getMonoTime() + + discard storage.fingerprinting(some(0 .. count)) + + let after = getMonoTime() + + echo "Fingerprint Time: " & $(after - before) ]# + + #[ test "random inserts": + var rng = initRand() + + let count = 10_000_000 + + var elements = newSeqOfCap[SyncID](count) + + for i in 0 .. count: + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements.add(id) + + var storage = SeqStorage.new(elements) + + var avg: times.Duration + for i in 0 ..< 1000: + let newId = + SyncID(time: Timestamp(rng.rand(0 .. 
count)), hash: randomHash(rng)) + + let before = getMonoTime() + + discard storage.insert(newId) + + let after = getMonoTime() + + avg += after - before + + avg = avg div 1000 + + echo "Avg Time 1K Inserts: " & $avg ]# + + #[ test "trim": + var rng = initRand() + + let count = 10_000_000 + + var elements = newSeqOfCap[SyncID](count) + + for i in 0 .. count: + let id = SyncID(time: Timestamp(i), hash: randomHash(rng)) + + elements.add(id) + + var storage = SeqStorage.new(elements) + + let before = getMonoTime() + + discard storage.trim(Timestamp(count div 4)) + + let after = getMonoTime() + + echo "Trim Time: " & $(after - before) ]# diff --git a/third-party/nwaku/tests/wakunode2/test_all.nim b/third-party/nwaku/tests/wakunode2/test_all.nim new file mode 100644 index 0000000..7e658d4 --- /dev/null +++ b/third-party/nwaku/tests/wakunode2/test_all.nim @@ -0,0 +1,3 @@ +{.used.} + +import ./test_app, ./test_validators, ./test_cli_args diff --git a/third-party/nwaku/tests/wakunode2/test_app.nim b/third-party/nwaku/tests/wakunode2/test_app.nim new file mode 100644 index 0000000..b168807 --- /dev/null +++ b/third-party/nwaku/tests/wakunode2/test_app.nim @@ -0,0 +1,99 @@ +{.used.} + +import + testutils/unittests, + chronicles, + chronos, + libp2p/crypto/crypto, + libp2p/crypto/secp, + libp2p/multiaddress, + libp2p/switch +import ../testlib/wakucore, ../testlib/wakunode + +include waku/factory/waku, waku/common/enr/typed_record + +suite "Wakunode2 - Waku": + test "compilation version should be reported": + ## Given + let conf = defaultTestWakuConf() + + let waku = (waitFor Waku.new(conf)).valueOr: + raiseAssert error + + ## When + let version = waku.version + + ## Then + check: + version == git_version + +suite "Wakunode2 - Waku initialization": + test "peer persistence setup should be successfully mounted": + ## Given + var conf = defaultTestWakuConf() + conf.peerPersistence = true + + let waku = (waitFor Waku.new(conf)).valueOr: + raiseAssert error + + check: + not 
waku.node.peerManager.storage.isNil() + + test "node setup is successful with default configuration": + ## Given + var conf = defaultTestWakuConf() + + ## When + var waku = (waitFor Waku.new(conf)).valueOr: + raiseAssert error + + (waitFor startWaku(addr waku)).isOkOr: + raiseAssert error + + ## Then + let node = waku.node + check: + not node.isNil() + node.wakuArchive.isNil() + node.wakuStore.isNil() + not node.wakuStoreClient.isNil() + not node.wakuRendezvous.isNil() + + ## Cleanup + waitFor waku.stop() + + test "app properly handles dynamic port configuration": + ## Given + var conf = defaultTestWakuConf() + conf.endpointConf.p2pTcpPort = Port(0) + + ## When + var waku = (waitFor Waku.new(conf)).valueOr: + raiseAssert error + + (waitFor startWaku(addr waku)).isOkOr: + raiseAssert error + + ## Then + let + node = waku.node + typedNodeEnr = node.enr.toTyped() + + assert typedNodeEnr.isOk(), $typedNodeEnr.error + let tcpPort = typedNodeEnr.value.tcp() + assert tcpPort.isSome() + check tcpPort.get() != 0 + + check: + # Waku started properly + not node.isNil() + node.wakuArchive.isNil() + node.wakuStore.isNil() + not node.wakuStoreClient.isNil() + not node.wakuRendezvous.isNil() + + # DS structures are updated with dynamic ports + typedNodeEnr.get().tcp.get() != 0 + + ## Cleanup + waitFor waku.stop() diff --git a/third-party/nwaku/tests/wakunode2/test_cli_args.nim b/third-party/nwaku/tests/wakunode2/test_cli_args.nim new file mode 100644 index 0000000..dabc780 --- /dev/null +++ b/third-party/nwaku/tests/wakunode2/test_cli_args.nim @@ -0,0 +1,402 @@ +{.used.} + +import + std/options, + testutils/unittests, + chronos, + libp2p/crypto/[crypto, secp], + libp2p/multiaddress, + nimcrypto/utils, + secp256k1, + confutils, + stint + +import tools/confutils/cli_args + +import + ../../waku/factory/networks_config, + ../../waku/factory/waku_conf, + ../../waku/common/logging, + ../../waku/common/utils/parse_size_units, + ../../waku/waku_core/message/default_values + +suite "Waku 
external config - default values": + test "Default sharding value": + ## Setup + let defaultShardingMode = AutoSharding + let defaultNumShardsInCluster = 1.uint16 + let defaultSubscribeShards = @[0.uint16] + + ## Given + let preConfig = defaultWakuNodeConf().get() + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.shardingConf.kind == defaultShardingMode + check conf.shardingConf.numShardsInCluster == defaultNumShardsInCluster + check conf.subscribeShards == defaultSubscribeShards + + test "Default shards value in static sharding": + ## Setup + let defaultSubscribeShards: seq[uint16] = @[] + + ## Given + var preConfig = defaultWakuNodeConf().get() + preConfig.numShardsInNetwork = 0.uint16 + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.subscribeShards == defaultSubscribeShards + +suite "Waku external config - apply preset": + test "Preset is TWN": + ## Setup + let expectedConf = NetworkConf.TheWakuNetworkConf() + + ## Given + let preConfig = WakuNodeConf( + cmd: noCommand, + preset: "twn", + relay: true, + ethClientUrls: @["http://someaddress".EthRpcUrl], + ) + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(expectedConf.maxMessageSize)) + check conf.clusterId == expectedConf.clusterId + check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay + if conf.rlnRelayConf.isSome(): + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic + check rlnRelayConf.chainId == expectedConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit + check 
conf.shardingConf.kind == expectedConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + expectedConf.shardingConf.numShardsInCluster + check conf.discv5Conf.isSome() == expectedConf.discv5Discovery + if conf.discv5Conf.isSome(): + let discv5Conf = conf.discv5Conf.get() + check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes + + test "Subscribes to all valid shards in twn": + ## Setup + let expectedConf = NetworkConf.TheWakuNetworkConf() + + ## Given + let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7] + let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = res.get() + check conf.subscribeShards.len == expectedConf.shardingConf.numShardsInCluster.int + + test "Subscribes to some valid shards in twn": + ## Setup + let expectedConf = NetworkConf.TheWakuNetworkConf() + + ## Given + let shards: seq[uint16] = @[0, 4, 7] + let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) + + ## When + let resConf = preConfig.toWakuConf() + assert resConf.isOk(), $resConf.error + + ## Then + let conf = resConf.get() + assert conf.subscribeShards.len() == shards.len() + for index, shard in shards: + assert shard in conf.subscribeShards + + test "Subscribes to invalid shards in twn": + ## Setup + + ## Given + let shards: seq[uint16] = @[0, 4, 7, 10] + let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards) + + ## When + let res = preConfig.toWakuConf() + + ## Then + assert res.isErr(), "Invalid shard was accepted" + + test "Apply TWN preset when cluster id = 1": + ## Setup + let expectedConf = NetworkConf.TheWakuNetworkConf() + + ## Given + let preConfig = WakuNodeConf( + cmd: noCommand, + clusterId: 1.uint16, + relay: true, + ethClientUrls: @["http://someaddress".EthRpcUrl], + ) + + ## When + let res = preConfig.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let conf = 
res.get() + check conf.maxMessageSizeBytes == + uint64(parseCorrectMsgSize(expectedConf.maxMessageSize)) + check conf.clusterId == expectedConf.clusterId + check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay + if conf.rlnRelayConf.isSome(): + let rlnRelayConf = conf.rlnRelayConf.get() + check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress + check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic + check rlnRelayConf.chainId == expectedConf.rlnRelayChainId + check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec + check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit + check conf.shardingConf.kind == expectedConf.shardingConf.kind + check conf.shardingConf.numShardsInCluster == + expectedConf.shardingConf.numShardsInCluster + check conf.discv5Conf.isSome() == expectedConf.discv5Discovery + if conf.discv5Conf.isSome(): + let discv5Conf = conf.discv5Conf.get() + check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes + +suite "Waku external config - node key": + test "Passed node key is used": + ## Setup + let nodeKeyStr = + "0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff" + let nodekey = block: + let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet() + crypto.PrivateKey(scheme: Secp256k1, skkey: key) + + ## Given + let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr]) + + ## When + let res = config.toWakuConf() + assert res.isOk(), $res.error + + ## Then + let resKey = res.get().nodeKey + assert utils.toHex(resKey.getRawBytes().get()) == + utils.toHex(nodekey.getRawBytes().get()) + +suite "Waku external config - Shards": + test "Shards are valid": + ## Setup + + ## Given + let shards: seq[uint16] = @[0, 2, 4] + let numShardsInNetwork = 5.uint16 + let wakuNodeConf = WakuNodeConf( + cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork + ) + + ## When + let res = wakuNodeConf.toWakuConf() + assert res.isOk(), 
$res.error + + ## Then + let wakuConf = res.get() + let vRes = wakuConf.validate() + assert vRes.isOk(), $vRes.error + + test "Shards are not in range": + ## Setup + + ## Given + let shards: seq[uint16] = @[0, 2, 5] + let numShardsInNetwork = 5.uint16 + let wakuNodeConf = WakuNodeConf( + cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork + ) + + ## When + let res = wakuNodeConf.toWakuConf() + + ## Then + assert res.isErr(), "Invalid shard was accepted" + + test "Shard is passed without num shards": + ## Setup + + ## Given + let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=0"]) + + ## When + let res = wakuNodeConf.toWakuConf() + + ## Then + let wakuConf = res.get() + let vRes = wakuConf.validate() + assert vRes.isOk(), $vRes.error + + test "Imvalid shard is passed without num shards": + ## Setup + + ## Given + let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"]) + + ## When + let res = wakuNodeConf.toWakuConf() + + ## Then + assert res.isErr(), "Invalid shard was accepted" + +suite "Waku external config - http url parsing": + test "Basic HTTP URLs without authentication": + check string(parseCmdArg(EthRpcUrl, "https://example.com/path")) == + "https://example.com/path" + check string(parseCmdArg(EthRpcUrl, "https://example.com/")) == + "https://example.com/" + check string(parseCmdArg(EthRpcUrl, "http://localhost:8545")) == + "http://localhost:8545" + check string(parseCmdArg(EthRpcUrl, "https://mainnet.infura.io")) == + "https://mainnet.infura.io" + + test "Basic authentication with simple credentials": + check string(parseCmdArg(EthRpcUrl, "https://user:pass@example.com/path")) == + "https://user:pass@example.com/path" + check string( + parseCmdArg(EthRpcUrl, "https://john.doe:secret123@example.com/api/v1") + ) == "https://john.doe:secret123@example.com/api/v1" + check string(parseCmdArg(EthRpcUrl, "https://user_name:pass_word@example.com/")) == + "https://user_name:pass_word@example.com/" + check 
string(parseCmdArg(EthRpcUrl, "https://user-name:pass-word@example.com/")) == + "https://user-name:pass-word@example.com/" + check string(parseCmdArg(EthRpcUrl, "https://user123:pass456@example.com/")) == + "https://user123:pass456@example.com/" + + test "Special characters (percent-encoded) in credentials": + check string( + parseCmdArg(EthRpcUrl, "https://user%40email:pass%21%23%24@example.com/") + ) == "https://user%40email:pass%21%23%24@example.com/" + check string(parseCmdArg(EthRpcUrl, "https://user%2Bplus:pass%26and@example.com/")) == + "https://user%2Bplus:pass%26and@example.com/" + check string( + parseCmdArg(EthRpcUrl, "https://user%3Acolon:pass%3Bsemi@example.com/") + ) == "https://user%3Acolon:pass%3Bsemi@example.com/" + check string( + parseCmdArg(EthRpcUrl, "https://user%2Fslash:pass%3Fquest@example.com/") + ) == "https://user%2Fslash:pass%3Fquest@example.com/" + check string( + parseCmdArg(EthRpcUrl, "https://user%5Bbracket:pass%5Dbracket@example.com/") + ) == "https://user%5Bbracket:pass%5Dbracket@example.com/" + check string( + parseCmdArg(EthRpcUrl, "https://user%20space:pass%20space@example.com/") + ) == "https://user%20space:pass%20space@example.com/" + check string( + parseCmdArg(EthRpcUrl, "https://user%3Cless:pass%3Egreater@example.com/") + ) == "https://user%3Cless:pass%3Egreater@example.com/" + check string( + parseCmdArg(EthRpcUrl, "https://user%7Bbrace:pass%7Dbrace@example.com/") + ) == "https://user%7Bbrace:pass%7Dbrace@example.com/" + check string(parseCmdArg(EthRpcUrl, "https://user%5Cback:pass%7Cpipe@example.com/")) == + "https://user%5Cback:pass%7Cpipe@example.com/" + + test "Complex passwords with special characters": + check string( + parseCmdArg( + EthRpcUrl, "https://admin:P%40ssw0rd%21%23%24%25%5E%26*()@example.com/" + ) + ) == "https://admin:P%40ssw0rd%21%23%24%25%5E%26*()@example.com/" + check string( + parseCmdArg(EthRpcUrl, "https://user:abc123%21%40%23DEF456@example.com/") + ) == 
"https://user:abc123%21%40%23DEF456@example.com/" + check string( + parseCmdArg( + EthRpcUrl, + "https://user:P%40%24%24w0rd%21%23%24%25%5E%26%2A%28%29_%2B-%3D%5B%5D%7B%7D%7C%3B%27%3A%22%2C.%2F%3C%3E%3F%60~%5C@example.com", + ) + ) == + "https://user:P%40%24%24w0rd%21%23%24%25%5E%26%2A%28%29_%2B-%3D%5B%5D%7B%7D%7C%3B%27%3A%22%2C.%2F%3C%3E%3F%60~%5C@example.com" + + test "Different hostname types": + check string(parseCmdArg(EthRpcUrl, "https://user:pass@subdomain.example.com/path")) == + "https://user:pass@subdomain.example.com/path" + check string(parseCmdArg(EthRpcUrl, "https://user:pass@192.168.1.1/admin")) == + "https://user:pass@192.168.1.1/admin" + check string(parseCmdArg(EthRpcUrl, "https://user:pass@[2001:db8::1]/path")) == + "https://user:pass@[2001:db8::1]/path" + check string(parseCmdArg(EthRpcUrl, "https://user:pass@example.co.uk/path")) == + "https://user:pass@example.co.uk/path" + + test "URLs with port numbers": + check string(parseCmdArg(EthRpcUrl, "https://user:pass@example.com:8080/path")) == + "https://user:pass@example.com:8080/path" + check string(parseCmdArg(EthRpcUrl, "https://user:pass@example.com:443/")) == + "https://user:pass@example.com:443/" + check string(parseCmdArg(EthRpcUrl, "http://user:pass@example.com:80/path")) == + "http://user:pass@example.com:80/path" + + test "URLs with query parameters and fragments": + check string( + parseCmdArg(EthRpcUrl, "https://user:pass@example.com/path?query=1#section") + ) == "https://user:pass@example.com/path?query=1#section" + check string( + parseCmdArg(EthRpcUrl, "https://user:pass@example.com/?foo=bar&baz=qux") + ) == "https://user:pass@example.com/?foo=bar&baz=qux" + check string(parseCmdArg(EthRpcUrl, "https://api.example.com/rpc?key=value")) == + "https://api.example.com/rpc?key=value" + check string(parseCmdArg(EthRpcUrl, "https://api.example.com/rpc#section")) == + "https://api.example.com/rpc#section" + + test "Edge cases with credentials": + check string(parseCmdArg(EthRpcUrl, 
"https://a:b@example.com/")) == + "https://a:b@example.com/" + check string(parseCmdArg(EthRpcUrl, "https://user:@example.com/")) == + "https://user:@example.com/" + check string(parseCmdArg(EthRpcUrl, "https://:pass@example.com/")) == + "https://:pass@example.com/" + check string(parseCmdArg(EthRpcUrl, "http://user:pass@example.com/")) == + "http://user:pass@example.com/" + + test "Websocket URLs are rejected": + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "ws://localhost:8545") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "wss://mainnet.infura.io") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "ws://user:pass@localhost:8545") + + test "Invalid URLs are rejected": + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "https://user@pass@example.com/") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "https://user:pass:extra@example.com/") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "ftp://user:pass@example.com/") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "https://user pass@example.com/") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "https://user:pass word@example.com/") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "user:pass@example.com/") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "https://user:pass@") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "https://user:pass@@example.com/") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "not-a-url") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "http://") + expect(ValueError): + discard parseCmdArg(EthRpcUrl, "https://") diff --git a/third-party/nwaku/tests/wakunode2/test_validators.nim b/third-party/nwaku/tests/wakunode2/test_validators.nim new file mode 100644 index 0000000..83928e4 --- /dev/null +++ b/third-party/nwaku/tests/wakunode2/test_validators.nim @@ -0,0 +1,413 @@ +{.used.} + +import + std/[sequtils, sysrand, math], + testutils/unittests, + chronos, + libp2p/crypto/crypto, + libp2p/crypto/secp, + 
libp2p/multiaddress, + libp2p/protocols/pubsub/pubsub, + libp2p/protocols/pubsub/gossipsub, + libp2p/multihash, + secp256k1 +import + waku/[waku_core, node/peer_manager, waku_node, factory/validator_signed], + tools/confutils/cli_args, + ../testlib/wakucore, + ../testlib/wakunode + +suite "WakuNode2 - Validators": + asyncTest "Spam protected topic accepts signed messages": + # Create 5 nodes + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Protected shard and key to sign + let spamProtectedShard = RelayShard(clusterId: 0, shardId: 7) + let secretKey = SkSecretKey + .fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6") + .expect("valid key") + let publicKey = secretKey.toPublicKey() + let shardsPrivateKeys = {spamProtectedShard: secretKey}.toTable + let shardsPublicKeys = {spamProtectedShard: publicKey}.toTable + + # Start all the nodes and mount relay with protected topic + await allFutures(nodes.mapIt(it.start())) + + # Mount relay for all nodes + await allFutures(nodes.mapIt(it.mountRelay())) + + # Add signed message validator to all nodes. 
They will only route signed messages + for node in nodes: + var signedShards: seq[ProtectedShard] + for shard, publicKey in shardsPublicKeys: + signedShards.add(ProtectedShard(shard: shard.shardId, key: publicKey)) + node.wakuRelay.addSignedShardsValidator( + signedShards, spamProtectedShard.clusterId + ) + + # Connect the nodes in a full mesh + for i in 0 ..< 5: + for j in 0 ..< 5: + if i == j: + continue + let connOk = await nodes[i].peerManager.connectPeer( + nodes[j].switch.peerInfo.toRemotePeerInfo() + ) + require connOk + + # Connection triggers different actions, wait for them + await sleepAsync(500.millis) + + var msgReceived = 0 + proc handler(pubsubTopic: PubsubTopic, data: WakuMessage) {.async, gcsafe.} = + msgReceived += 1 + + # Subscribe all nodes to the same topic/handler + for node in nodes: + node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + await sleepAsync(500.millis) + + # Each node publishes 10 signed messages + for i in 0 ..< 5: + for j in 0 ..< 10: + var msg = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: now(), + ephemeral: true, + ) + + # Include signature + msg.meta = + secretKey.sign(SkMessage(spamProtectedShard.msgHash(msg))).toRaw()[0 .. 
63] + + discard await nodes[i].publish(some($spamProtectedShard), msg) + + # Wait for gossip + await sleepAsync(2.seconds) + + # 50 messages were sent to 5 peers = 250 messages + check: + msgReceived == 250 + + # No invalid messages were received by any peer + for i in 0 ..< 5: + for k, v in nodes[i].wakuRelay.peerStats.mpairs: + check: + v.topicInfos[spamProtectedShard].invalidMessageDeliveries == 0.0 + + # Stop all nodes + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Spam protected topic rejects non-signed/wrongly-signed/no-timestamp messages": + # Create 5 nodes + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Protected shard and key to sign + let spamProtectedShard = RelayShard(clusterId: 0, shardId: 7) + let secretKey = SkSecretKey + .fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6") + .expect("valid key") + let publicKey = secretKey.toPublicKey() + let shardsPrivateKeys = {spamProtectedShard: secretKey}.toTable + let shardsPublicKeys = {spamProtectedShard: publicKey}.toTable + + # Non whitelisted secret key + let wrongSecretKey = SkSecretKey + .fromHex("32ad0cc8edeb9f8a3e8635c5fe5bd200b9247a33da5e7171bd012691805151f3") + .expect("valid key") + + # Start all the nodes and mount relay with protected topic + await allFutures(nodes.mapIt(it.start())) + + # Mount relay with spam protected topics + await allFutures(nodes.mapIt(it.mountRelay())) + + # Add signed message validator to all nodes. 
They will only route signed messages + for node in nodes: + var signedShards: seq[ProtectedShard] + for shard, publicKey in shardsPublicKeys: + signedShards.add(ProtectedShard(shard: shard.shardId, key: publicKey)) + node.wakuRelay.addSignedShardsValidator( + signedShards, spamProtectedShard.clusterId + ) + + # Connect the nodes in a full mesh + for i in 0 ..< 5: + for j in 0 ..< 5: + if i == j: + continue + let connOk = await nodes[i].peerManager.connectPeer( + nodes[j].switch.peerInfo.toRemotePeerInfo() + ) + require connOk + + var msgReceived = 0 + proc handler(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + msgReceived += 1 + + # Connection triggers different actions, wait for them + await sleepAsync(500.millis) + + # Subscribe all nodes to the same topic/handler + for node in nodes: + node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + await sleepAsync(500.millis) + + # Each node sends 5 messages, signed but with a non-whitelisted key (total = 25) + for i in 0 ..< 5: + for j in 0 ..< 5: + var msg = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: now(), + ephemeral: true, + ) + + # Sign the message with a wrong key + msg.meta = wrongSecretKey.sign(SkMessage(spamProtectedShard.msgHash(msg))).toRaw()[ + 0 .. 
63 + ] + + discard await nodes[i].publish(some($spamProtectedShard), msg) + + # Each node sends 5 messages that are not signed (total = 25) + for i in 0 ..< 5: + for j in 0 ..< 5: + let unsignedMessage = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: now(), + ephemeral: true, + ) + discard await nodes[i].publish(some($spamProtectedShard), unsignedMessage) + + # Each node sends 5 messages that dont contain timestamp (total = 25) + for i in 0 ..< 5: + for j in 0 ..< 5: + let unsignedMessage = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: 0, + ephemeral: true, + ) + discard await nodes[i].publish(some($spamProtectedShard), unsignedMessage) + + # Each node sends 5 messages way BEFORE than the current timestmap (total = 25) + for i in 0 ..< 5: + for j in 0 ..< 5: + let beforeTimestamp = now() - getNanosecondTime(6 * 60) + let unsignedMessage = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: beforeTimestamp, + ephemeral: true, + ) + discard await nodes[i].publish(some($spamProtectedShard), unsignedMessage) + + # Each node sends 5 messages way LATER than the current timestmap (total = 25) + for i in 0 ..< 5: + for j in 0 ..< 5: + let afterTimestamp = now() - getNanosecondTime(6 * 60) + let unsignedMessage = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: afterTimestamp, + ephemeral: true, + ) + discard await nodes[i].publish(some($spamProtectedShard), unsignedMessage) + + # Since we have a full mesh with 5 nodes and each one publishes 25+25+25+25+25 msgs + # there are 625 messages being sent. 
+ # 125 are received ok in the handler (first hop) + # 500 are wrong so rejected (rejected not relayed) + + var msgRejected = 0 + + # Active wait for the messages to be delivered across the mesh + for i in 0 ..< 100: + msgRejected = 0 + for i in 0 ..< 5: + for k, v in nodes[i].wakuRelay.peerStats.mpairs: + msgRejected += v.topicInfos[spamProtectedShard].invalidMessageDeliveries.int + + if msgReceived == 125 and msgRejected == 500: + break + else: + await sleepAsync(100.milliseconds) + + check: + msgReceived == 125 + msgRejected == 500 + + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Spam protected topic rejects a spammer node": + # Create 5 nodes + let nodes = toSeq(0 ..< 5).mapIt( + newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) + ) + + # Protected shard and key to sign + let spamProtectedShard = RelayShard(clusterId: 0, shardId: 7) + let secretKey = SkSecretKey + .fromHex("5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6") + .expect("valid key") + let publicKey = secretKey.toPublicKey() + let shardsPrivateKeys = {spamProtectedShard: secretKey}.toTable + let shardsPublicKeys = {spamProtectedShard: publicKey}.toTable + + # Non whitelisted secret key + let wrongSecretKey = SkSecretKey + .fromHex("32ad0cc8edeb9f8a3e8635c5fe5bd200b9247a33da5e7171bd012691805151f3") + .expect("valid key") + + # Start all the nodes and mount relay with protected topic + await allFutures(nodes.mapIt(it.start())) + + # Mount relay for all nodes + await allFutures(nodes.mapIt(it.mountRelay())) + + var msgReceived = 0 + proc handler(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + msgReceived += 1 + + # Subscribe all nodes to the same topic/handler + for node in nodes: + node.subscribe((kind: PubsubSub, topic: $spamProtectedShard), handler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + await sleepAsync(500.millis) + + # Add signed message validator to all nodes. 
They will only route signed messages + for node in nodes: + var signedShards: seq[ProtectedShard] + for shard, publicKey in shardsPublicKeys: + signedShards.add(ProtectedShard(shard: shard.shardId, key: publicKey)) + node.wakuRelay.addSignedShardsValidator( + signedShards, spamProtectedShard.clusterId + ) + + # nodes[0] is connected only to nodes[1] + let connOk1 = await nodes[0].peerManager.connectPeer( + nodes[1].switch.peerInfo.toRemotePeerInfo() + ) + require connOk1 + + # rest of nodes[1..4] are connected in a full mesh + for i in 1 ..< 5: + for j in 1 ..< 5: + if i == j: + continue + let connOk2 = await nodes[i].peerManager.connectPeer( + nodes[j].switch.peerInfo.toRemotePeerInfo() + ) + require connOk2 + + # Connection triggers different actions, wait for them + await sleepAsync(500.millis) + + # nodes[0] spams 50 non signed messages (nodes[0] just knows of nodes[1]) + for j in 0 ..< 50: + let unsignedMessage = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: now(), + ephemeral: true, + ) + discard await nodes[0].publish(some($spamProtectedShard), unsignedMessage) + + # nodes[0] spams 50 wrongly signed messages (nodes[0] just knows of nodes[1]) + for j in 0 ..< 50: + var msg = WakuMessage( + payload: urandom(1 * (10 ^ 3)), + contentTopic: spamProtectedShard, + version: 2, + timestamp: now(), + ephemeral: true, + ) + # Sign the message with a wrong key + msg.meta = + wrongSecretKey.sign(SkMessage(spamProtectedShard.msgHash(msg))).toRaw()[0 .. 63] + discard await nodes[0].publish(some($spamProtectedShard), msg) + + # Wait for gossip + await sleepAsync(2.seconds) + + # only 100 messages are received (50 + 50) which demonstrate + # nodes[1] doest gossip invalid messages. 
+ check: + msgReceived == 100 + + # peer1 got invalid messages from peer0 + let p0Id = nodes[0].peerInfo.peerId + check: + nodes[1].wakuRelay.peerStats[p0Id].topicInfos[spamProtectedShard].invalidMessageDeliveries == + 100.0 + + # peer1 did not gossip further, so no other node rx invalid messages + for i in 0 ..< 5: + for k, v in nodes[i].wakuRelay.peerStats.mpairs: + if k == p0Id and i == 1: + continue + check: + v.topicInfos[spamProtectedShard].invalidMessageDeliveries == 0.0 + + # Stop all nodes + await allFutures(nodes.mapIt(it.stop())) + + asyncTest "Tests vectors": + # keys + let privateKey = "5526a8990317c9b7b58d07843d270f9cd1d9aaee129294c1c478abf7261dd9e6" + let publicKey = + "049c5fac802da41e07e6cdf51c3b9a6351ad5e65921527f2df5b7d59fd9b56ab02bab736cdcfc37f25095e78127500da371947217a8cd5186ab890ea866211c3f6" + + # message + let contentTopic = "content-topic" + let pubsubTopic = "pubsub-topic" + let payload = + "1A12E077D0E89F9CAC11FBBB6A676C86120B5AD3E248B1F180E98F15EE43D2DFCF62F00C92737B2FF6F59B3ABA02773314B991C41DC19ADB0AD8C17C8E26757B" + let timestamp = 1683208172339052800 + let ephemeral = true + + # expected values + let expectedMsgAppHash = + "662F8C20A335F170BD60ABC1F02AD66F0C6A6EE285DA2A53C95259E7937C0AE9" + let expectedSignature = + "127FA211B2514F0E974A055392946DC1A14052182A6ABEFB8A6CD7C51DA1BF2E40595D28EF1A9488797C297EED3AAC45430005FB3A7F037BDD9FC4BD99F59E63" + + let secretKey = SkSecretKey.fromHex(privateKey).expect("valid key") + + check: + secretKey.toPublicKey().toHex() == publicKey + secretKey.toHex() == privateKey + + var msg = WakuMessage( + payload: payload.fromHex(), + contentTopic: contentTopic, + version: 2, + timestamp: timestamp, + ephemeral: ephemeral, + ) + + let msgAppHash = pubsubTopic.msgHash(msg) + let signature = secretKey.sign(SkMessage(msgAppHash)).toRaw() + + check: + msgAppHash.toHex() == expectedMsgAppHash + signature.toHex() == expectedSignature diff --git a/third-party/nwaku/tests/wakunode_rest/test_all.nim 
b/third-party/nwaku/tests/wakunode_rest/test_all.nim new file mode 100644 index 0000000..4071e63 --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_all.nim @@ -0,0 +1,15 @@ +{.used.} + +import + ./test_rest_admin, + ./test_rest_cors, + ./test_rest_debug, + ./test_rest_debug_serdes, + ./test_rest_filter, + ./test_rest_health, + ./test_rest_lightpush, + ./test_rest_lightpush_legacy, + ./test_rest_relay_serdes, + ./test_rest_relay, + ./test_rest_serdes, + ./test_rest_store diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_admin.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_admin.nim new file mode 100644 index 0000000..e47207a --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_admin.nim @@ -0,0 +1,323 @@ +{.used.} + +import + std/[sequtils, net], + testutils/unittests, + presto, + presto/client as presto_client, + presto /../ tests/helpers, + libp2p/crypto/crypto + +import + waku/[ + waku_core, + waku_node, + waku_filter_v2/client, + node/peer_manager, + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/admin/types, + waku_api/rest/admin/handlers as admin_api, + waku_api/rest/admin/client as admin_api_client, + waku_relay, + waku_peer_exchange, + ], + ../testlib/wakucore, + ../testlib/wakunode, + ../testlib/testasync + +suite "Waku v2 Rest API - Admin": + var node1 {.threadvar.}: WakuNode + var node2 {.threadvar.}: WakuNode + var node3 {.threadvar.}: WakuNode + var peerInfo1 {.threadvar.}: RemotePeerInfo + var peerInfo2 {.threadvar.}: RemotePeerInfo + var peerInfo3 {.threadvar.}: RemotePeerInfo + var restServer {.threadvar.}: WakuRestServerRef + var client {.threadvar.}: RestClientRef + + asyncSetup: + node1 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60600)) + node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60602)) + node3 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60604)) + + let clusterId = 1.uint16 + let 
shards: seq[uint16] = @[0] + node1.mountMetadata(clusterId, shards).isOkOr: + assert false, "Failed to mount metadata: " & $error + node2.mountMetadata(clusterId, shards).isOkOr: + assert false, "Failed to mount metadata: " & $error + node3.mountMetadata(clusterId, shards).isOkOr: + assert false, "Failed to mount metadata: " & $error + + await allFutures(node1.start(), node2.start(), node3.start()) + await allFutures( + node1.mountRelay(), + node2.mountRelay(), + node3.mountRelay(), + node3.mountPeerExchange(), + ) + + # The three nodes should be subscribed to the same shard + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + let shard = RelayShard(clusterId: clusterId, shardId: 0) + node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + node3.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + peerInfo1 = node1.switch.peerInfo + peerInfo2 = node2.switch.peerInfo + peerInfo3 = node3.switch.peerInfo + + var restPort = Port(0) + let restAddress = parseIpAddress("127.0.0.1") + restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installAdminApiHandlers(restServer.router, node1) + + restServer.start() + + client = newRestHttpClient(initTAddress(restAddress, restPort)) + + asyncTearDown: + await restServer.stop() + await restServer.closeWait() + await allFutures(node1.stop(), node2.stop(), node3.stop()) + + asyncTest "Set and get remote peers": + # Connect to nodes 2 and 3 using the Admin API + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), 
constructMultiaddrStr(peerInfo3)] + ) + + check: + postRes.status == 200 + + # Verify that newly connected peers are being managed + let getRes = await client.getPeers() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.len() == 2 + # Check peer 2 + getRes.data.anyIt( + it.protocols.find(WakuRelayCodec) >= 0 and + it.multiaddr == constructMultiaddrStr(peerInfo2) + ) + # Check peer 3 + getRes.data.anyIt( + it.protocols.find(WakuRelayCodec) >= 0 and + it.multiaddr == constructMultiaddrStr(peerInfo3) + ) + + # Check peer 3 + getRes.data.anyIt( + it.protocols.find(WakuPeerExchangeCodec) >= 0 and + it.multiaddr == constructMultiaddrStr(peerInfo3) + ) + + asyncTest "Set wrong peer": + let nonExistentPeer = + "/ip4/0.0.0.0/tcp/10000/p2p/16Uiu2HAm6HZZr7aToTvEBPpiys4UxajCTU97zj5v7RNR2gbniy1D" + let postRes = await client.postPeers(@[nonExistentPeer]) + + check: + postRes.status == 400 + $postRes.contentType == $MIMETYPE_TEXT + postRes.data == "Failed to connect to peer at index: 0 - " & nonExistentPeer + + # Verify that newly connected peers are being managed + let getRes = await client.getPeers() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.len() == 1 + getRes.data[0].multiaddr == nonExistentPeer + getRes.data[0].connected == CannotConnect + + asyncTest "Get filter data": + await allFutures( + node1.mountFilter(), node2.mountFilterClient(), node3.mountFilterClient() + ) + + let + contentFiltersNode2 = @[DefaultContentTopic, ContentTopic("2"), ContentTopic("3")] + contentFiltersNode3 = @[ContentTopic("3"), ContentTopic("4")] + pubsubTopicNode2 = DefaultPubsubTopic + pubsubTopicNode3 = PubsubTopic("/waku/2/custom-waku/proto") + + let + subscribeResponse2 = await node2.wakuFilterClient.subscribe( + peerInfo1, pubsubTopicNode2, contentFiltersNode2 + ) + subscribeResponse3 = await node3.wakuFilterClient.subscribe( + peerInfo1, pubsubTopicNode3, contentFiltersNode3 + ) + + assert 
subscribeResponse2.isOk(), $subscribeResponse2.error + assert subscribeResponse3.isOk(), $subscribeResponse3.error + + let getRes = await client.getFilterSubscriptions() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.len() == 2 + + let + peers = @[getRes.data[0].peerId, getRes.data[1].peerId] + numCriteria = + @[getRes.data[0].filterCriteria.len, getRes.data[1].filterCriteria.len] + + check: + $peerInfo2 in peers + $peerInfo3 in peers + 2 in numCriteria + 3 in numCriteria + + asyncTest "Get filter data - no filter subscribers": + await node1.mountFilter() + + let getRes = await client.getFilterSubscriptions() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.len() == 0 + + asyncTest "Get filter data - filter not mounted": + let getRes = await client.getFilterSubscriptionsFilterNotMounted() + + check: + getRes.status == 400 + getRes.data == "Error: Filter Protocol is not mounted to the node" + + asyncTest "Get peer origin": + # Adding peers to the Peer Store + node1.peerManager.addPeer(peerInfo2, Discv5) + node1.peerManager.addPeer(peerInfo3, PeerExchange) + + # Connecting to both peers + let conn2 = await node1.peerManager.connectPeer(peerInfo2) + let conn3 = await node1.peerManager.connectPeer(peerInfo3) + + # Check successful connections + check: + conn2 == true + conn3 == true + + # Query peers REST endpoint + let getRes = await client.getPeers() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.len() == 2 + # Check peer 2 + getRes.data.anyIt(it.origin == Discv5) + # Check peer 3 + getRes.data.anyIt(it.origin == PeerExchange) + + asyncTest "get peers by id": + # Connect to nodes 2 and 3 using the Admin API + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)] + ) + + check: + postRes.status == 200 + + let getRes = await client.getPeerById($peerInfo2.peerId) + + check: + getRes.status == 200 
+ $getRes.contentType == $MIMETYPE_JSON + getRes.data.protocols.find(WakuRelayCodec) >= 0 + getRes.data.multiaddr == constructMultiaddrStr(peerInfo2) + + ## nim-presto library's RestClient does not support text error case decode if + ## the RestResponse expects a JSON with complex type + # let getRes2 = await client.getPeerById("bad peer id") + let getRes2 = await httpClient( + restServer.httpServer.address, MethodGet, "/admin/v1/peer/bad+peer+id", "" + ) + check: + getRes2.status == 400 + getRes2.data == "Invalid argument:peerid: incorrect PeerId string" + + asyncTest "get connected peers": + # Connect to nodes 2 and 3 using the Admin API + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)] + ) + + check: + postRes.status == 200 + + let getRes = await client.getConnectedPeers() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + getRes.data.len() == 2 + # Check peer 2 + getRes.data.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo2)) + # Check peer 3 + getRes.data.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo3)) + + # Seems shard info is not available in the peer manager + # let getRes2 = await client.getConnectedPeersByShard(0) + # check: + # getRes2.status == 200 + # $getRes2.contentType == $MIMETYPE_JSON + # getRes2.data.len() == 2 + + let getRes3 = await client.getConnectedPeersByShard(99) + check: + getRes3.status == 200 + $getRes3.contentType == $MIMETYPE_JSON + getRes3.data.len() == 0 + + asyncTest "get relay peers": + # Connect to nodes 2 and 3 using the Admin API + let postRes = await client.postPeers( + @[constructMultiaddrStr(peerInfo2), constructMultiaddrStr(peerInfo3)] + ) + + check: + postRes.status == 200 + + let getRes = await client.getRelayPeers() + + check: + getRes.status == 200 + $getRes.contentType == $MIMETYPE_JSON + require getRes.data.len() == 1 # Check peer 2 + check getRes.data[0].peers.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo2)) + # 
Check peer 2 + check getRes.data[0].peers.anyIt(it.multiaddr == constructMultiaddrStr(peerInfo3)) + # Check peer 3 + + # Todo: investigate why the test setup missing remote peer's shard info + # let getRes2 = await client.getRelayPeersByShard(0) + # check: + # getRes2.status == 200 + # $getRes2.contentType == $MIMETYPE_JSON + # getRes2.data.peers.len() == 2 + + let getRes3 = await client.getRelayPeersByShard(99) + check: + getRes3.status == 200 + $getRes3.contentType == $MIMETYPE_JSON + getRes3.data.peers.len() == 0 diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_cors.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_cors.nim new file mode 100644 index 0000000..58e70aa --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_cors.nim @@ -0,0 +1,290 @@ +{.used.} + +import + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/peerinfo, + libp2p/multiaddress, + libp2p/crypto/crypto +import + waku/[ + waku_node, + node/waku_node as waku_node2, + waku_api/rest/server, + waku_api/rest/debug/handlers as debug_api, + ], + ../testlib/common, + ../testlib/wakucore, + ../testlib/wakunode + +type TestResponseTuple = tuple[status: int, data: string, headers: HttpTable] + +proc testWakuNode(): WakuNode = + let + privkey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +proc fetchWithHeader( + request: HttpClientRequestRef +): Future[TestResponseTuple] {.async: (raises: [CancelledError, HttpError]).} = + var response: HttpClientResponseRef + try: + response = await request.send() + let buffer = await response.getBodyBytes() + let status = response.status + let headers = response.headers + await response.closeWait() + response = nil + return (status, buffer.bytesToString(), headers) + except HttpError as exc: + if not (isNil(response)): + await response.closeWait() + 
assert false + except CancelledError as exc: + if not (isNil(response)): + await response.closeWait() + assert false + +proc issueRequest( + address: HttpAddress, reqOrigin: Option[string] = none(string) +): Future[TestResponseTuple] {.async.} = + var + session = HttpSessionRef.new({HttpClientFlag.Http11Pipeline}) + data: TestResponseTuple + + var originHeader: seq[HttpHeaderTuple] + if reqOrigin.isSome(): + originHeader.insert(("Origin", reqOrigin.get())) + + var request = HttpClientRequestRef.new( + session, address, version = HttpVersion11, headers = originHeader + ) + try: + data = await request.fetchWithHeader() + finally: + await request.closeWait() + return data + +proc checkResponse( + response: TestResponseTuple, expectedStatus: int, expectedOrigin: Option[string] +): bool = + if response.status != expectedStatus: + echo( + " -> check failed: expected status" & $expectedStatus & " got " & $response.status + ) + return false + + if not ( + expectedOrigin.isNone() or ( + expectedOrigin.isSome() and + response.headers.contains("Access-Control-Allow-Origin") and + response.headers.getLastString("Access-Control-Allow-Origin") == + expectedOrigin.get() and response.headers.contains("Access-Control-Allow-Headers") and + response.headers.getLastString("Access-Control-Allow-Headers") == "Content-Type" + ) + ): + echo( + " -> check failed: expected origin " & $expectedOrigin & " got " & + response.headers.getLastString("Access-Control-Allow-Origin") + ) + return false + + return true + +suite "Waku v2 REST API CORS Handling": + asyncTest "AllowedOrigin matches": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef + .init( + restAddress, + restPort, + allowedOrigin = + some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*"), + ) + .tryGet() + restPort = 
restServer.httpServer.address.port # update with bound port for client use + + installDebugApiHandlers(restServer.router, node) + restServer.start() + + let srvAddr = restServer.localAddress() + let ha = getAddress(srvAddr, HttpClientScheme.NonSecure, "/debug/v1/info") + + # When + var response = await issueRequest(ha, some("http://test.net:1234")) + check checkResponse(response, 200, some("http://test.net:1234")) + + response = await issueRequest(ha, some("https://test.net:1234")) + check checkResponse(response, 200, some("https://test.net:1234")) + + response = await issueRequest(ha, some("https://localhost:8080")) + check checkResponse(response, 200, some("https://localhost:8080")) + + response = await issueRequest(ha, some("https://localhost:80")) + check checkResponse(response, 200, some("https://localhost:80")) + + response = await issueRequest(ha, some("http://127.0.0.1:78")) + check checkResponse(response, 200, some("http://127.0.0.1:78")) + + response = await issueRequest(ha, some("http://wakuTHE.net:8078")) + check checkResponse(response, 200, some("http://wakuTHE.net:8078")) + + response = await issueRequest(ha, some("http://nwaku.main.net:1980")) + check checkResponse(response, 200, some("http://nwaku.main.net:1980")) + + response = await issueRequest(ha, some("http://nwaku.main.net:80")) + check checkResponse(response, 200, some("http://nwaku.main.net:80")) + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "AllowedOrigin reject": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef + .init( + restAddress, + restPort, + allowedOrigin = + some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*"), + ) + .tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + 
installDebugApiHandlers(restServer.router, node) + restServer.start() + + let srvAddr = restServer.localAddress() + let ha = getAddress(srvAddr, HttpClientScheme.NonSecure, "/debug/v1/info") + + # When + var response = await issueRequest(ha, some("http://test.net:12334")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("http://test.net:12345")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("xhttp://test.net:1234")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("https://xtest.net:1234")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("http://localhost:8080")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("https://127.0.0.1:78")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("http://127.0.0.1:89")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("http://the.waku.net:8078")) + check checkResponse(response, 403, none(string)) + + response = await issueRequest(ha, some("http://nwaku.main.net:1900")) + check checkResponse(response, 403, none(string)) + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "AllowedOrigin allmatches": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = + WakuRestServerRef.init(restAddress, restPort, allowedOrigin = some("*")).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installDebugApiHandlers(restServer.router, node) + restServer.start() + + let srvAddr = restServer.localAddress() + let ha = getAddress(srvAddr, HttpClientScheme.NonSecure, 
"/debug/v1/info") + + # When + var response = await issueRequest(ha, some("http://test.net:1234")) + check checkResponse(response, 200, some("*")) + + response = await issueRequest(ha, some("https://test.net:1234")) + check checkResponse(response, 200, some("*")) + + response = await issueRequest(ha, some("https://localhost:8080")) + check checkResponse(response, 200, some("*")) + + response = await issueRequest(ha, some("https://localhost:80")) + check checkResponse(response, 200, some("*")) + + response = await issueRequest(ha, some("http://127.0.0.1:78")) + check checkResponse(response, 200, some("*")) + + response = await issueRequest(ha, some("http://wakuTHE.net:8078")) + check checkResponse(response, 200, some("*")) + + response = await issueRequest(ha, some("http://nwaku.main.net:1980")) + check checkResponse(response, 200, some("*")) + + response = await issueRequest(ha, some("http://nwaku.main.net:80")) + check checkResponse(response, 200, some("*")) + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "No origin goes through": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef + .init( + restAddress, + restPort, + allowedOrigin = + some("test.net:1234,https://localhost:*,http://127.0.0.1:?8,?waku*.net:*80*"), + ) + .tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installDebugApiHandlers(restServer.router, node) + restServer.start() + + let srvAddr = restServer.localAddress() + let ha = getAddress(srvAddr, HttpClientScheme.NonSecure, "/debug/v1/info") + + # When + var response = await issueRequest(ha, none(string)) + check checkResponse(response, 200, none(string)) + + await restServer.stop() + await restServer.closeWait() + await node.stop() diff --git 
a/third-party/nwaku/tests/wakunode_rest/test_rest_debug.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_debug.nim new file mode 100644 index 0000000..9add57c --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_debug.nim @@ -0,0 +1,92 @@ +{.used.} + +import + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/peerinfo, + libp2p/multiaddress, + libp2p/crypto/crypto +import + waku/[ + waku_node, + node/waku_node as waku_node2, + # TODO: Remove after moving `git_version` to the app code. + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/debug/handlers as debug_api, + waku_api/rest/debug/client as debug_api_client, + ], + ../testlib/common, + ../testlib/wakucore, + ../testlib/wakunode + +proc testWakuNode(): WakuNode = + let + privkey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +suite "Waku v2 REST API - Debug": + asyncTest "Get node info - GET /info": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installDebugApiHandlers(restServer.router, node) + restServer.start() + + # When + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + let response = await client.debugInfoV1() + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.listenAddresses == + @[$node.switch.peerInfo.addrs[^1] & "/p2p/" & $node.switch.peerInfo.peerId] + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Get node version - 
GET /version": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installDebugApiHandlers(restServer.router, node) + restServer.start() + + # When + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + let response = await client.debugVersionV1() + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == waku_node2.git_version + + await restServer.stop() + await restServer.closeWait() + await node.stop() diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_debug_serdes.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_debug_serdes.nim new file mode 100644 index 0000000..13b791d --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_debug_serdes.nim @@ -0,0 +1,34 @@ +{.used.} + +import results, stew/byteutils, testutils/unittests, json_serialization +import waku/waku_api/rest/serdes, waku/waku_api/rest/debug/types + +suite "Waku v2 REST API - Debug - serialization": + suite "DebugWakuInfo - decode": + test "optional field is not provided": + # Given + let jsonBytes = toBytes("""{ "listenAddresses":["123"] }""") + + # When + let res = decodeFromJsonBytes(DebugWakuInfo, jsonBytes, requireAllFields = true) + + # Then + require(res.isOk()) + let value = res.get() + check: + value.listenAddresses == @["123"] + value.enrUri.isNone() + + suite "DebugWakuInfo - encode": + test "optional field is none": + # Given + let data = DebugWakuInfo(listenAddresses: @["GO"], enrUri: none(string)) + + # When + let res = encodeIntoJsonBytes(data) + + # Then + require(res.isOk()) + let value = res.get() + check: + value == toBytes("""{"listenAddresses":["GO"]}""") diff --git 
a/third-party/nwaku/tests/wakunode_rest/test_rest_filter.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_filter.nim new file mode 100644 index 0000000..f8dbf42 --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_filter.nim @@ -0,0 +1,490 @@ +{.used.} + +import + chronos/timer, + stew/byteutils, + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/crypto/crypto +import + waku/[ + waku_api/message_cache, + waku_core, + waku_node, + node/peer_manager, + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/filter/types, + waku_api/rest/filter/handlers as filter_api, + waku_api/rest/filter/client as filter_api_client, + waku_relay, + waku_filter_v2/subscriptions, + waku_filter_v2/common, + waku_api/rest/relay/handlers as relay_api, + waku_api/rest/relay/client as relay_api_client, + ], + ../testlib/wakucore, + ../testlib/wakunode + +proc testWakuNode(): WakuNode = + let + privkey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +type RestFilterTest = object + serviceNode: WakuNode + subscriberNode: WakuNode + restServer: WakuRestServerRef + restServerForService: WakuRestServerRef + messageCache: MessageCache + client: RestClientRef + clientTwdServiceNode: RestClientRef + +proc init(T: type RestFilterTest): Future[T] {.async.} = + var testSetup = RestFilterTest() + testSetup.serviceNode = testWakuNode() + testSetup.subscriberNode = testWakuNode() + + await allFutures(testSetup.serviceNode.start(), testSetup.subscriberNode.start()) + + (await testSetup.serviceNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay: " & $error + + await testSetup.serviceNode.mountFilter(messageCacheTTL = 1.seconds) + await testSetup.subscriberNode.mountFilterClient() + + testSetup.subscriberNode.peerManager.addServicePeer( + 
testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuFilterSubscribeCodec + ) + + var restPort = Port(0) + let restAddress = parseIpAddress("127.0.0.1") + testSetup.restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = testSetup.restServer.httpServer.address.port + # update with bound port for client use + + var restPort2 = Port(0) + testSetup.restServerForService = + WakuRestServerRef.init(restAddress, restPort2).tryGet() + restPort2 = testSetup.restServerForService.httpServer.address.port + # update with bound port for client use + + # through this one we will see if messages are pushed according to our content topic sub + testSetup.messageCache = MessageCache.init() + installFilterRestApiHandlers( + testSetup.restServer.router, testSetup.subscriberNode, testSetup.messageCache + ) + + let topicCache = MessageCache.init() + installRelayApiHandlers( + testSetup.restServerForService.router, testSetup.serviceNode, topicCache + ) + + testSetup.restServer.start() + testSetup.restServerForService.start() + + testSetup.client = newRestHttpClient(initTAddress(restAddress, restPort)) + testSetup.clientTwdServiceNode = + newRestHttpClient(initTAddress(restAddress, restPort2)) + + return testSetup + +proc shutdown(self: RestFilterTest) {.async.} = + await self.restServer.stop() + await self.restServer.closeWait() + await self.restServerForService.stop() + await self.restServerForService.closeWait() + await allFutures(self.serviceNode.stop(), self.subscriberNode.stop()) + +suite "Waku v2 Rest API - Filter V2": + asyncTest "Subscribe a node to an array of topics - POST /filter/v2/subscriptions": + # Given + let restFilterTest = await RestFilterTest.init() + let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId + + # When + let contentFilters = + @[DefaultContentTopic, ContentTopic("2"), ContentTopic("3"), ContentTopic("4")] + + let requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: 
contentFilters, + pubsubTopic: some(DefaultPubsubTopic), + ) + let response = await restFilterTest.client.filterPostSubscriptions(requestBody) + + echo "response", $response + + let subscribedPeer1 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, DefaultContentTopic + ) + let subscribedPeer2 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "2" + ) + let subscribedPeer3 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "3" + ) + let subscribedPeer4 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "4" + ) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.requestId == "1234" + subscribedPeer1.len() == 1 + subPeerId in subscribedPeer1 + subPeerId in subscribedPeer2 + subPeerId in subscribedPeer3 + subPeerId in subscribedPeer4 + + # When - error case + let badRequestBody = FilterSubscribeRequest( + requestId: "4567", contentFilters: @[], pubsubTopic: none(string) + ) + let badRequestResp = + await restFilterTest.client.filterPostSubscriptions(badRequestBody) + + check: + badRequestResp.status == 400 + $badRequestResp.contentType == $MIMETYPE_JSON + badRequestResp.data.requestId == "unknown" + # badRequestResp.data.statusDesc == "*********" + badRequestResp.data.statusDesc.startsWith("BAD_REQUEST: Failed to decode request") + + await restFilterTest.shutdown() + + asyncTest "Unsubscribe a node from an array of topics - DELETE /filter/v2/subscriptions": + # Given + let + restFilterTest = await RestFilterTest.init() + subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId + + # When + var requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: + @[ContentTopic("1"), ContentTopic("2"), ContentTopic("3"), ContentTopic("4")], + pubsubTopic: some(DefaultPubsubTopic), + ) + discard await 
restFilterTest.client.filterPostSubscriptions(requestBody) + + let contentFilters = + @[ + ContentTopic("1"), + ContentTopic("2"), + ContentTopic("3"), # ,ContentTopic("4") # Keep this subscription for check + ] + + let requestBodyUnsub = FilterUnsubscribeRequest( + requestId: "4321", + contentFilters: contentFilters, + pubsubTopic: some(DefaultPubsubTopic), + ) + let response = + await restFilterTest.client.filterDeleteSubscriptions(requestBodyUnsub) + + let subscribedPeer1 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, DefaultContentTopic + ) + let subscribedPeer2 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "2" + ) + let subscribedPeer3 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "3" + ) + let subscribedPeer4 = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "4" + ) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.requestId == "4321" + subscribedPeer1.len() == 0 + subPeerId notin subscribedPeer1 + subPeerId notin subscribedPeer2 + subPeerId notin subscribedPeer3 + subscribedPeer4.len() == 1 + subPeerId in subscribedPeer4 + + # When - error case + let requestBodyUnsubAll = FilterUnsubscribeAllRequest(requestId: "2143") + let responseUnsubAll = + await restFilterTest.client.filterDeleteAllSubscriptions(requestBodyUnsubAll) + + let subscribedPeer = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, "4" + ) + + check: + responseUnsubAll.status == 200 + $responseUnsubAll.contentType == $MIMETYPE_JSON + responseUnsubAll.data.requestId == "2143" + subscribedPeer.len() == 0 + + await restFilterTest.shutdown() + + asyncTest "ping subscribed node - GET /filter/v2/subscriptions/{requestId}": + # Given + let + restFilterTest = await RestFilterTest.init() + subPeerId = 
restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId + + # When + var requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: @[ContentTopic("1")], + pubsubTopic: some(DefaultPubsubTopic), + ) + discard await restFilterTest.client.filterPostSubscriptions(requestBody) + + let pingResponse = await restFilterTest.client.filterSubscriberPing("9999") + + # Then + check: + pingResponse.status == 200 + $pingResponse.contentType == $MIMETYPE_JSON + pingResponse.data.requestId == "9999" + pingResponse.data.statusDesc == "OK" + + # When - error case + let requestBodyUnsubAll = FilterUnsubscribeAllRequest(requestId: "9988") + discard + await restFilterTest.client.filterDeleteAllSubscriptions(requestBodyUnsubAll) + + let pingResponseFail = await restFilterTest.client.filterSubscriberPing("9977") + + # Then + check: + pingResponseFail.status == 404 # NOT_FOUND + $pingResponseFail.contentType == $MIMETYPE_JSON + pingResponseFail.data.requestId == "9977" + pingResponseFail.data.statusDesc == "NOT_FOUND: peer has no subscriptions" + + await restFilterTest.shutdown() + + asyncTest "push filtered message": + # Given + let + restFilterTest = await RestFilterTest.init() + subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId + + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restFilterTest.messageCache.pubsubSubscribe(DefaultPubsubTopic) + + restFilterTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + # When + var requestBody = FilterSubscribeRequest( + requestId: "1234", + contentFilters: @[ContentTopic("1")], + pubsubTopic: some(DefaultPubsubTopic), + ) + discard await restFilterTest.client.filterPostSubscriptions(requestBody) + + let pingResponse = await restFilterTest.client.filterSubscriberPing("9999") + + # Then + 
check: + pingResponse.status == 200 + $pingResponse.contentType == $MIMETYPE_JSON + pingResponse.data.requestId == "9999" + pingResponse.data.statusDesc == "OK" + + # When - message push + let testMessage = WakuMessage( + payload: "TEST-PAYLOAD-MUST-RECEIVE".toBytes(), + contentTopic: "1", + timestamp: int64(2022), + meta: "test-meta".toBytes(), + ) + + let postMsgResponse = await restFilterTest.clientTwdServiceNode.relayPostMessagesV1( + DefaultPubsubTopic, toRelayWakuMessage(testMessage) + ) + # Then + let messages = restFilterTest.messageCache.getAutoMessages("1").tryGet() + + check: + postMsgResponse.status == 200 + $postMsgResponse.contentType == $MIMETYPE_TEXT + postMsgResponse.data == "OK" + messages == @[testMessage] + + await restFilterTest.shutdown() + + asyncTest "duplicate message push to filter subscriber": + # setup filter service and client node + let restFilterTest = await RestFilterTest.init() + let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restFilterTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + let requestBody = FilterSubscribeRequest( + requestId: "1001", + contentFilters: @[DefaultContentTopic], + pubsubTopic: some(DefaultPubsubTopic), + ) + let response = await restFilterTest.client.filterPostSubscriptions(requestBody) + + # subscribe fiter service + let subscribedPeer = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, DefaultContentTopic + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.requestId == "1001" + subscribedPeer.len() == 1 + + # ping subscriber node + restFilterTest.messageCache.pubsubSubscribe(DefaultPubsubTopic) + + let pingResponse = await 
restFilterTest.client.filterSubscriberPing("1002") + + check: + pingResponse.status == 200 + pingResponse.data.requestId == "1002" + pingResponse.data.statusDesc == "OK" + + # first - message push from service node to subscriber client + let testMessage = WakuMessage( + payload: "TEST-PAYLOAD-MUST-RECEIVE".toBytes(), + contentTopic: DefaultContentTopic, + timestamp: int64(2022), + meta: "test-meta".toBytes(), + ) + + let postMsgResponse1 = await restFilterTest.clientTwdServiceNode.relayPostMessagesV1( + DefaultPubsubTopic, toRelayWakuMessage(testMessage) + ) + + # check messages received client side or not + let messages1 = await restFilterTest.client.filterGetMessagesV1(DefaultContentTopic) + + check: + postMsgResponse1.status == 200 + $postMsgResponse1.contentType == $MIMETYPE_TEXT + postMsgResponse1.data == "OK" + len(messages1.data) == 1 + + # second - message push from service node to subscriber client + let postMsgResponse2 = await restFilterTest.clientTwdServiceNode.relayPostMessagesV1( + DefaultPubsubTopic, toRelayWakuMessage(testMessage) + ) + + # check message received client side or not + let messages2 = await restFilterTest.client.filterGetMessagesV1(DefaultContentTopic) + + check: + postMsgResponse2.status == 200 + $postMsgResponse2.contentType == $MIMETYPE_TEXT + postMsgResponse2.data == "OK" + len(messages2.data) == 0 + + await restFilterTest.shutdown() + + asyncTest "duplicate message push to filter subscriber ( sleep in between )": + # setup filter service and client node + let restFilterTest = await RestFilterTest.init() + let subPeerId = restFilterTest.subscriberNode.peerInfo.toRemotePeerInfo().peerId + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restFilterTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic: " & $error + + let requestBody = 
FilterSubscribeRequest( + requestId: "1001", + contentFilters: @[DefaultContentTopic], + pubsubTopic: some(DefaultPubsubTopic), + ) + let response = await restFilterTest.client.filterPostSubscriptions(requestBody) + + # subscribe fiter service + let subscribedPeer = restFilterTest.serviceNode.wakuFilter.subscriptions.findSubscribedPeers( + DefaultPubsubTopic, DefaultContentTopic + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.requestId == "1001" + subscribedPeer.len() == 1 + + # ping subscriber node + restFilterTest.messageCache.pubsubSubscribe(DefaultPubsubTopic) + + let pingResponse = await restFilterTest.client.filterSubscriberPing("1002") + + check: + pingResponse.status == 200 + pingResponse.data.requestId == "1002" + pingResponse.data.statusDesc == "OK" + + # first - message push from service node to subscriber client + let testMessage = WakuMessage( + payload: "TEST-PAYLOAD-MUST-RECEIVE".toBytes(), + contentTopic: DefaultContentTopic, + timestamp: int64(2022), + meta: "test-meta".toBytes(), + ) + + let postMsgResponse1 = await restFilterTest.clientTwdServiceNode.relayPostMessagesV1( + DefaultPubsubTopic, toRelayWakuMessage(testMessage) + ) + + # check messages received client side or not + let messages1 = await restFilterTest.client.filterGetMessagesV1(DefaultContentTopic) + + check: + postMsgResponse1.status == 200 + $postMsgResponse1.contentType == $MIMETYPE_TEXT + postMsgResponse1.data == "OK" + len(messages1.data) == 1 + + # Pause execution for 1 seconds to test TimeCache functionality of service node + await sleepAsync(1.seconds) + + # second - message push from service node to subscriber client + let postMsgResponse2 = await restFilterTest.clientTwdServiceNode.relayPostMessagesV1( + DefaultPubsubTopic, toRelayWakuMessage(testMessage) + ) + + # check message received client side or not + let messages2 = await restFilterTest.client.filterGetMessagesV1(DefaultContentTopic) + + check: + 
postMsgResponse2.status == 200 + $postMsgResponse2.contentType == $MIMETYPE_TEXT + postMsgResponse2.data == "OK" + len(messages2.data) == 1 + await restFilterTest.shutdown() diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_health.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_health.nim new file mode 100644 index 0000000..3adc4d6 --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_health.nim @@ -0,0 +1,134 @@ +{.used.} + +import + std/[tempfiles, osproc], + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/peerinfo, + libp2p/multiaddress, + libp2p/crypto/crypto +import + waku/[ + waku_node, + node/waku_node as waku_node2, + # TODO: Remove after moving `git_version` to the app code. + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/health/handlers as health_api, + waku_api/rest/health/client as health_api_client, + waku_rln_relay, + node/health_monitor, + ], + ../testlib/common, + ../testlib/wakucore, + ../testlib/wakunode, + ../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain] + +proc testWakuNode(): WakuNode = + let + privkey = crypto.PrivateKey.random(Secp256k1, rng[]).tryGet() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +suite "Waku v2 REST API - health": + # TODO: better test for health + var anvilProc {.threadVar.}: Process + var manager {.threadVar.}: OnchainGroupManager + + setup: + anvilProc = runAnvil() + manager = waitFor setupOnchainGroupManager() + + teardown: + stopAnvil(anvilProc) + + asyncTest "Get node health info - GET /health": + # Given + let node = testWakuNode() + let healthMonitor = NodeHealthMonitor() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + healthMonitor.setOverallHealth(HealthStatus.INITIALIZING) + + var restPort = Port(0) + let restAddress = 
parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installHealthApiHandler(restServer.router, healthMonitor) + restServer.start() + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + # When + var response = await client.healthCheck() + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data == + HealthReport(nodeHealth: HealthStatus.INITIALIZING, protocolsHealth: @[]) + + # now kick in rln (currently the only check for health) + await node.mountRlnRelay( + getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + ) + + node.mountLightPushClient() + await node.mountFilterClient() + + healthMonitor.setNodeToHealthMonitor(node) + healthMonitor.setOverallHealth(HealthStatus.READY) + # When + response = await client.healthCheck() + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.nodeHealth == HealthStatus.READY + response.data.protocolsHealth.len() == 14 + response.data.protocolsHealth[0].protocol == "Relay" + response.data.protocolsHealth[0].health == HealthStatus.NOT_READY + response.data.protocolsHealth[0].desc == some("No connected peers") + response.data.protocolsHealth[1].protocol == "Rln Relay" + response.data.protocolsHealth[1].health == HealthStatus.READY + response.data.protocolsHealth[2].protocol == "Lightpush" + response.data.protocolsHealth[2].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[3].protocol == "Legacy Lightpush" + response.data.protocolsHealth[3].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[4].protocol == "Filter" + response.data.protocolsHealth[4].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[5].protocol == "Store" + response.data.protocolsHealth[5].health == HealthStatus.NOT_MOUNTED + 
response.data.protocolsHealth[6].protocol == "Legacy Store" + response.data.protocolsHealth[6].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[7].protocol == "Peer Exchange" + response.data.protocolsHealth[7].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[8].protocol == "Rendezvous" + response.data.protocolsHealth[8].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[9].protocol == "Lightpush Client" + response.data.protocolsHealth[9].health == HealthStatus.NOT_READY + response.data.protocolsHealth[9].desc == + some("No Lightpush service peer available yet") + response.data.protocolsHealth[10].protocol == "Legacy Lightpush Client" + response.data.protocolsHealth[10].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[11].protocol == "Store Client" + response.data.protocolsHealth[11].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[12].protocol == "Legacy Store Client" + response.data.protocolsHealth[12].health == HealthStatus.NOT_MOUNTED + response.data.protocolsHealth[13].protocol == "Filter Client" + response.data.protocolsHealth[13].health == HealthStatus.NOT_READY + response.data.protocolsHealth[13].desc == + some("No Filter service peer available yet") + + await restServer.stop() + await restServer.closeWait() + await node.stop() diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_lightpush.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_lightpush.nim new file mode 100644 index 0000000..b09c72e --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_lightpush.nim @@ -0,0 +1,303 @@ +{.used.} + +import + std/sequtils, + stew/byteutils, + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/crypto/crypto + +import + waku/[ + waku_api/message_cache, + waku_core, + waku_node, + node/peer_manager, + waku_lightpush/common, + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/lightpush/types, + 
waku_api/rest/lightpush/handlers as lightpush_api, + waku_api/rest/lightpush/client as lightpush_api_client, + waku_relay, + common/rate_limit/setting, + ], + ../testlib/wakucore, + ../testlib/wakunode + +proc testWakuNode(): WakuNode = + let + privkey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +type RestLightPushTest = object + serviceNode: WakuNode + pushNode: WakuNode + consumerNode: WakuNode + restServer: WakuRestServerRef + restClient: RestClientRef + +proc init( + T: type RestLightPushTest, rateLimit: RateLimitSetting = (0, 0.millis) +): Future[T] {.async.} = + var testSetup = RestLightPushTest() + testSetup.serviceNode = testWakuNode() + testSetup.pushNode = testWakuNode() + testSetup.consumerNode = testWakuNode() + + await allFutures( + testSetup.serviceNode.start(), + testSetup.pushNode.start(), + testSetup.consumerNode.start(), + ) + + (await testSetup.consumerNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay: " & $error + (await testSetup.serviceNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay: " & $error + await testSetup.serviceNode.mountLightPush(rateLimit) + testSetup.pushNode.mountLightPushClient() + + testSetup.serviceNode.peerManager.addServicePeer( + testSetup.consumerNode.peerInfo.toRemotePeerInfo(), WakuRelayCodec + ) + + await testSetup.serviceNode.connectToNodes( + @[testSetup.consumerNode.peerInfo.toRemotePeerInfo()] + ) + + testSetup.pushNode.peerManager.addServicePeer( + testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuLightPushCodec + ) + + var restPort = Port(0) + let restAddress = parseIpAddress("127.0.0.1") + testSetup.restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = testSetup.restServer.httpServer.address.port + # update with bound port for restClient use + + 
installLightPushRequestHandler(testSetup.restServer.router, testSetup.pushNode) + + testSetup.restServer.start() + + testSetup.restClient = newRestHttpClient(initTAddress(restAddress, restPort)) + + return testSetup + +proc shutdown(self: RestLightPushTest) {.async.} = + await self.restServer.stop() + await self.restServer.closeWait() + await allFutures( + self.serviceNode.stop(), self.pushNode.stop(), self.consumerNode.stop() + ) + +suite "Waku v2 Rest API - lightpush": + asyncTest "Push message with proof": + let restLightPushTest = await RestLightPushTest.init() + + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, + payload = toBytes("TEST-1"), + proof = toBytes("proof-test"), + ) + .toRelayWakuMessage() + + check message.proof.isSome() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + + let response = + await restLightPushTest.restClient.sendPushRequest(body = requestBody) + + ## Validate that the push request failed because the node is not + ## connected to other node but, doesn't fail because of not properly + ## handling the proof message attribute within the REST request. 
+ check: + response.status == 505 + response.data.statusDesc == some("No peers for topic, skipping publish") + response.data.relayPeerCount == none[uint32]() + + asyncTest "Push message request": + # Given + let restLightPushTest = await RestLightPushTest.init() + + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restLightPushTest.consumerNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error + + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.restClient.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 200 + response.data.relayPeerCount == some(1.uint32) + + await restLightPushTest.shutdown() + + asyncTest "Push message bad-request": + # Given + let restLightPushTest = await RestLightPushTest.init() + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let badMessage1: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("") + ) + 
.toRelayWakuMessage() + let badRequestBody1 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage1) + + let badMessage2: RelayWakuMessage = + fakeWakuMessage(contentTopic = "", payload = toBytes("Sthg")).toRelayWakuMessage() + let badRequestBody2 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage2) + + let badRequestBody3 = + PushRequest(pubsubTopic: none(PubsubTopic), message: badMessage2) + + # var response: RestResponse[PushResponse] + + var response = await restLightPushTest.restClient.sendPushRequest(badRequestBody1) + + # Then + check: + response.status == 400 + response.data.statusDesc.isSome() + response.data.statusDesc.get().startsWith("Invalid push request") + + # when + response = await restLightPushTest.restClient.sendPushRequest(badRequestBody2) + + # Then + check: + response.status == 400 + response.data.statusDesc.isSome() + response.data.statusDesc.get().startsWith("Invalid push request") + + # when + response = await restLightPushTest.restClient.sendPushRequest(badRequestBody3) + + # Then + check: + response.data.statusDesc.isSome() + response.data.statusDesc.get().startsWith("Invalid push request") + + await restLightPushTest.shutdown() + + asyncTest "Request rate limit push message": + # Given + let budgetCap = 3 + let tokenPeriod = 500.millis + let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod)) + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restLightPushTest.consumerNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error + + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to relay: " & $error + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # 
When + let pushProc = proc() {.async.} = + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.restClient.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 200 + response.data.relayPeerCount == some(1.uint32) + + let pushRejectedProc = proc() {.async.} = + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.restClient.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 429 + response.data.statusDesc.isSome() # Ensure error status description is present + response.data.statusDesc.get().startsWith( + "Request rejected due to too many requests" + ) # Check specific error message + + await pushProc() + await pushProc() + await pushProc() + await pushRejectedProc() + + await sleepAsync(tokenPeriod) + + for runCnt in 0 ..< 3: + let startTime = Moment.now() + for sendCnt in 0 ..< budgetCap: + await pushProc() + + let endTime = Moment.now() + let elapsed: Duration = (endTime - startTime) + await sleepAsync(tokenPeriod - elapsed + 10.millis) + + await restLightPushTest.shutdown() diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_lightpush_legacy.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_lightpush_legacy.nim new file mode 100644 index 0000000..fea5155 --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_lightpush_legacy.nim @@ -0,0 +1,296 @@ +{.used.} + +import + std/sequtils, + stew/byteutils, + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/crypto/crypto + +import + 
waku/[ + waku_api/message_cache, + waku_core, + waku_node, + node/peer_manager, + waku_lightpush_legacy/common, + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/legacy_lightpush/types, + waku_api/rest/legacy_lightpush/handlers as lightpush_api, + waku_api/rest/legacy_lightpush/client as lightpush_api_client, + waku_relay, + common/rate_limit/setting, + ], + ../testlib/wakucore, + ../testlib/wakunode + +proc testWakuNode(): WakuNode = + let + privkey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +type RestLightPushTest = object + serviceNode: WakuNode + pushNode: WakuNode + consumerNode: WakuNode + restServer: WakuRestServerRef + client: RestClientRef + +proc init( + T: type RestLightPushTest, rateLimit: RateLimitSetting = (0, 0.millis) +): Future[T] {.async.} = + var testSetup = RestLightPushTest() + testSetup.serviceNode = testWakuNode() + testSetup.pushNode = testWakuNode() + testSetup.consumerNode = testWakuNode() + + await allFutures( + testSetup.serviceNode.start(), + testSetup.pushNode.start(), + testSetup.consumerNode.start(), + ) + + (await testSetup.consumerNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + (await testSetup.serviceNode.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + await testSetup.serviceNode.mountLegacyLightPush(rateLimit) + testSetup.pushNode.mountLegacyLightPushClient() + + testSetup.serviceNode.peerManager.addServicePeer( + testSetup.consumerNode.peerInfo.toRemotePeerInfo(), WakuRelayCodec + ) + + await testSetup.serviceNode.connectToNodes( + @[testSetup.consumerNode.peerInfo.toRemotePeerInfo()] + ) + + testSetup.pushNode.peerManager.addServicePeer( + testSetup.serviceNode.peerInfo.toRemotePeerInfo(), WakuLegacyLightPushCodec + ) + + var restPort = Port(0) + let restAddress = parseIpAddress("127.0.0.1") + 
testSetup.restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = testSetup.restServer.httpServer.address.port + # update with bound port for client use + + installLightPushRequestHandler(testSetup.restServer.router, testSetup.pushNode) + + testSetup.restServer.start() + + testSetup.client = newRestHttpClient(initTAddress(restAddress, restPort)) + + return testSetup + +proc shutdown(self: RestLightPushTest) {.async.} = + await self.restServer.stop() + await self.restServer.closeWait() + await allFutures(self.serviceNode.stop(), self.pushNode.stop()) + +suite "Waku v2 Rest API - lightpush": + asyncTest "Push message with proof": + let restLightPushTest = await RestLightPushTest.init() + + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, + payload = toBytes("TEST-1"), + proof = toBytes("proof-test"), + ) + .toRelayWakuMessage() + + check message.proof.isSome() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + + let response = await restLightPushTest.client.sendPushRequest(body = requestBody) + + ## Validate that the push request failed because the node is not + ## connected to other node but, doesn't fail because of not properly + ## handling the proof message attribute within the REST request. 
+ check: + response.data == "Failed to request a message push: not_published_to_any_peer" + + asyncTest "Push message request": + # Given + let restLightPushTest = await RestLightPushTest.init() + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restLightPushTest.consumerNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic" + + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic" + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.client.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + + await restLightPushTest.shutdown() + + asyncTest "Push message bad-request": + # Given + let restLightPushTest = await RestLightPushTest.init() + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic" + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let badMessage1: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("") + ) + .toRelayWakuMessage() + let badRequestBody1 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage1) + + let 
badMessage2: RelayWakuMessage = + fakeWakuMessage(contentTopic = "", payload = toBytes("Sthg")).toRelayWakuMessage() + let badRequestBody2 = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: badMessage2) + + let badRequestBody3 = + PushRequest(pubsubTopic: none(PubsubTopic), message: badMessage2) + + var response: RestResponse[string] + + response = await restLightPushTest.client.sendPushRequest(badRequestBody1) + + echo "response", $response + + # Then + check: + response.status == 400 + $response.contentType == $MIMETYPE_TEXT + response.data.startsWith("Invalid content body") + + # when + response = await restLightPushTest.client.sendPushRequest(badRequestBody2) + + # Then + check: + response.status == 400 + $response.contentType == $MIMETYPE_TEXT + response.data.startsWith("Invalid content body") + + # when + response = await restLightPushTest.client.sendPushRequest(badRequestBody3) + + # Then + check: + response.status == 400 + $response.contentType == $MIMETYPE_TEXT + response.data.startsWith("Invalid content body") + + await restLightPushTest.shutdown() + + asyncTest "Request rate limit push message": + # Given + let budgetCap = 3 + let tokenPeriod = 500.millis + let restLightPushTest = await RestLightPushTest.init((budgetCap, tokenPeriod)) + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + restLightPushTest.consumerNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic" + + restLightPushTest.serviceNode.subscribe( + (kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler + ).isOkOr: + assert false, "Failed to subscribe to topic" + require: + toSeq(restLightPushTest.serviceNode.wakuRelay.subscribedTopics).len == 1 + + # When + let pushProc = proc() {.async.} = + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = 
toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.client.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + + let pushRejectedProc = proc() {.async.} = + let message: RelayWakuMessage = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + .toRelayWakuMessage() + + let requestBody = + PushRequest(pubsubTopic: some(DefaultPubsubTopic), message: message) + let response = await restLightPushTest.client.sendPushRequest(requestBody) + + echo "response", $response + + # Then + check: + response.status == 429 + + await pushProc() + await pushProc() + await pushProc() + await pushRejectedProc() + + await sleepAsync(tokenPeriod) + + for runCnt in 0 ..< 3: + let startTime = Moment.now() + for sendCnt in 0 ..< budgetCap: + await pushProc() + + let endTime = Moment.now() + let elapsed: Duration = (endTime - startTime) + await sleepAsync(tokenPeriod - elapsed + 10.millis) + + await restLightPushTest.shutdown() diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_relay.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_relay.nim new file mode 100644 index 0000000..2a1954d --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_relay.nim @@ -0,0 +1,787 @@ +{.used.} + +import + std/[sequtils, strformat, tempfiles, osproc], + stew/byteutils, + testutils/unittests, + presto, + presto/client as presto_client, + libp2p/crypto/crypto +import + waku/[ + common/base64, + waku_core, + waku_node, + waku_api/message_cache, + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/relay/types, + waku_api/rest/relay/handlers as relay_api, + waku_api/rest/relay/client as relay_api_client, + waku_relay, + waku_rln_relay, + ], + ../testlib/wakucore, + ../testlib/wakunode, + 
../resources/payloads, + ../waku_rln_relay/[rln/waku_rln_relay_utils, utils_onchain] + +proc testWakuNode(): WakuNode = + let + privkey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +suite "Waku v2 Rest API - Relay": + var anvilProc {.threadVar.}: Process + var manager {.threadVar.}: OnchainGroupManager + + setup: + anvilProc = runAnvil() + manager = waitFor setupOnchainGroupManager() + + teardown: + stopAnvil(anvilProc) + + asyncTest "Subscribe a node to an array of pubsub topics - POST /relay/v1/subscriptions": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let + shard0 = RelayShard(clusterId: DefaultClusterId, shardId: 0) + shard1 = RelayShard(clusterId: DefaultClusterId, shardId: 1) + shard2 = RelayShard(clusterId: DefaultClusterId, shardId: 2) + + let shards = @[$shard0, $shard1, $shard2] + + let invalidTopic = "/test/2/this/is/a/content/topic/1" + + var containsIncorrect = shards + containsIncorrect.add(invalidTopic) + + # When contains incorrect pubsub topics, subscribe shall fail + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + let errorResponse = await client.relayPostSubscriptionsV1(containsIncorrect) + + # Then + check: + errorResponse.status == 400 + $errorResponse.contentType == $MIMETYPE_TEXT + errorResponse.data == + "Invalid pubsub topic(s): @[\"/test/2/this/is/a/content/topic/1\"]" + + # when all pubsub topics are correct, subscribe shall succeed + 
let response = await client.relayPostSubscriptionsV1(shards) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + check: + cache.isPubsubSubscribed($shard0) + cache.isPubsubSubscribed($shard1) + cache.isPubsubSubscribed($shard2) + + check: + toSeq(node.wakuRelay.subscribedTopics).len == shards.len + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Unsubscribe a node from an array of pubsub topics - DELETE /relay/v1/subscriptions": + # Given + let node = testWakuNode() + await node.start() + + let + shard0 = RelayShard(clusterId: DefaultClusterId, shardId: 0) + shard1 = RelayShard(clusterId: DefaultClusterId, shardId: 1) + shard2 = RelayShard(clusterId: DefaultClusterId, shardId: 2) + shard3 = RelayShard(clusterId: DefaultClusterId, shardId: 3) + shard4 = RelayShard(clusterId: DefaultClusterId, shardId: 4) + + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + proc simpleHandler( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + for shard in @[$shard0, $shard1, $shard2, $shard3, $shard4]: + node.subscribe((kind: PubsubSub, topic: shard), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + cache.pubsubSubscribe($shard0) + cache.pubsubSubscribe($shard1) + cache.pubsubSubscribe($shard2) + cache.pubsubSubscribe($shard3) + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let shards = @[$shard0, $shard1, $shard2, $shard4] + + # When + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + let response = await 
client.relayDeleteSubscriptionsV1(shards) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + check: + not cache.isPubsubSubscribed($shard0) + not node.wakuRelay.isSubscribed($shard0) + not cache.isPubsubSubscribed($shard1) + not node.wakuRelay.isSubscribed($shard1) + not cache.isPubsubSubscribed($shard2) + not node.wakuRelay.isSubscribed($shard2) + cache.isPubsubSubscribed($shard3) + node.wakuRelay.isSubscribed($shard3) + not cache.isPubsubSubscribed($shard4) + not node.wakuRelay.isSubscribed($shard4) + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Get the latest messages for a pubsub topic - GET /relay/v1/messages/{topic}": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let pubSubTopic = "/waku/2/rs/0/0" + + var messages = + @[ + fakeWakuMessage( + contentTopic = "content-topic-x", + payload = toBytes("TEST-1"), + meta = toBytes("test-meta"), + ephemeral = true, + ) + ] + + # Prevent duplicate messages + for i in 0 ..< 2: + var msg = fakeWakuMessage( + contentTopic = "content-topic-x", + payload = toBytes("TEST-1"), + meta = toBytes("test-meta"), + ephemeral = true, + ) + + while msg == messages[i]: + msg = fakeWakuMessage( + contentTopic = "content-topic-x", + payload = toBytes("TEST-1"), + meta = toBytes("test-meta"), + ephemeral = true, + ) + + messages.add(msg) + + let cache = MessageCache.init() + + cache.pubsubSubscribe(pubSubTopic) + for msg in messages: + cache.addMessage(pubSubTopic, msg) + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + # When + let client = 
newRestHttpClient(initTAddress(restAddress, restPort)) + let response = await client.relayGetMessagesV1(pubSubTopic) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.len == 3 + response.data.all do(msg: RelayWakuMessage) -> bool: + msg.payload == base64.encode("TEST-1") and + msg.contentTopic.get() == "content-topic-x" and msg.version.get() == 2 and + msg.timestamp.get() != Timestamp(0) and + msg.meta.get() == base64.encode("test-meta") and msg.ephemeral.get() == true + + check: + cache.isPubsubSubscribed(pubSubTopic) + cache.getMessages(pubSubTopic).tryGet().len == 0 + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Post a message to a pubsub topic - POST /relay/v1/messages/{topic}": + ## "Relay API: publish and subscribe/unsubscribe": + # Given + let node = testWakuNode() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node.mountRlnRelay(wakuRlnConfig) + await node.start() + # Registration is mandatory before sending messages with rln-relay + let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated = waitFor manager.updateRoots() + debug "Updated root for node", rootUpdated + + let proofRes = waitFor manager.fetchMerkleProofElements() + if proofRes.isErr(): + assert false, "failed to fetch merkle proof: " & proofRes.error + manager.merkleProofCache = proofRes.get() + + # RPC server setup + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = 
restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic" + + require: + toSeq(node.wakuRelay.subscribedTopics).len == 1 + + # When + let response = await client.relayPostMessagesV1( + DefaultPubsubTopic, + RelayWakuMessage( + payload: base64.encode("TEST-PAYLOAD"), + contentTopic: some(DefaultContentTopic), + timestamp: some(now()), + ), + ) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + # Autosharding API + + asyncTest "Subscribe a node to an array of content topics - POST /relay/v1/auto/subscriptions": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let contentTopics = + @[ + ContentTopic("/app-1/2/default-content/proto"), + ContentTopic("/app-2/2/default-content/proto"), + ContentTopic("/app-3/2/default-content/proto"), + ] + + # When + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + let response = await 
client.relayPostAutoSubscriptionsV1(contentTopics) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + check: + cache.isContentSubscribed(contentTopics[0]) + cache.isContentSubscribed(contentTopics[1]) + cache.isContentSubscribed(contentTopics[2]) + + check: + # Node should be subscribed to all shards + node.wakuRelay.subscribedTopics == + @["/waku/2/rs/1/5", "/waku/2/rs/1/7", "/waku/2/rs/1/2"] + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Unsubscribe a node from an array of content topics - DELETE /relay/v1/auto/subscriptions": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restServer.start() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let contentTopics = + @[ + ContentTopic("/waku/2/default-content1/proto"), + ContentTopic("/waku/2/default-content2/proto"), + ContentTopic("/waku/2/default-content3/proto"), + ContentTopic("/waku/2/default-contentX/proto"), + ] + + let cache = MessageCache.init() + cache.contentSubscribe(contentTopics[0]) + cache.contentSubscribe(contentTopics[1]) + cache.contentSubscribe(contentTopics[2]) + cache.contentSubscribe("/waku/2/default-contentY/proto") + + installRelayApiHandlers(restServer.router, node, cache) + + # When + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + var response = await client.relayPostAutoSubscriptionsV1(contentTopics) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + response = await client.relayDeleteAutoSubscriptionsV1(contentTopics) + + # Then + check: + response.status == 200 + 
$response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + check: + not cache.isContentSubscribed(contentTopics[1]) + not cache.isContentSubscribed(contentTopics[2]) + not cache.isContentSubscribed(contentTopics[3]) + cache.isContentSubscribed("/waku/2/default-contentY/proto") + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Get the latest messages for a content topic - GET /relay/v1/auto/messages/{topic}": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let contentTopic = DefaultContentTopic + + var messages = + @[ + fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")) + ] + + # Prevent duplicate messages + for i in 0 ..< 2: + var msg = + fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1")) + + while msg == messages[i]: + msg = fakeWakuMessage( + contentTopic = DefaultContentTopic, payload = toBytes("TEST-1") + ) + + messages.add(msg) + + let cache = MessageCache.init() + + cache.contentSubscribe(contentTopic) + for msg in messages: + cache.addMessage(DefaultPubsubTopic, msg) + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + # When + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + let response = await client.relayGetAutoMessagesV1(contentTopic) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.len == 3 + response.data.all do(msg: RelayWakuMessage) -> bool: + msg.payload == base64.encode("TEST-1") and + msg.contentTopic.get() == DefaultContentTopic and msg.version.get() == 2 and + 
msg.timestamp.get() != Timestamp(0) + + check: + cache.isContentSubscribed(contentTopic) + cache.getAutoMessages(contentTopic).tryGet().len == 0 + # The cache is cleared when getMessage is called + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Post a message to a content topic - POST /relay/v1/auto/messages/{topic}": + ## "Relay API: publish and subscribe/unsubscribe": + # Given + let node = testWakuNode() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + + await node.mountRlnRelay(wakuRlnConfig) + await node.start() + # Registration is mandatory before sending messages with rln-relay + let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated = waitFor manager.updateRoots() + debug "Updated root for node", rootUpdated + + let proofRes = waitFor manager.fetchMerkleProofElements() + if proofRes.isErr(): + assert false, "failed to fetch merkle proof: " & proofRes.error + manager.merkleProofCache = proofRes.get() + + # RPC server setup + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await 
sleepAsync(0.milliseconds) + + node.subscribe((kind: ContentSub, topic: DefaultContentTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to content topic: " & $error + require: + toSeq(node.wakuRelay.subscribedTopics).len == 1 + + # When + let response = await client.relayPostAutoMessagesV1( + RelayWakuMessage( + payload: base64.encode("TEST-PAYLOAD"), + contentTopic: some(DefaultContentTopic), + timestamp: some(now()), + ) + ) + + # Then + check: + response.status == 200 + $response.contentType == $MIMETYPE_TEXT + response.data == "OK" + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Post a message to an invalid content topic - POST /relay/v1/auto/messages/{topic}": + ## "Relay API: publish and subscribe/unsubscribe": + # Given + let node = testWakuNode() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + await node.mountRlnRelay(wakuRlnConfig) + await node.start() + + # Registration is mandatory before sending messages with rln-relay + let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated = waitFor manager.updateRoots() + debug "Updated root for node", rootUpdated + + let proofRes = waitFor manager.fetchMerkleProofElements() + if proofRes.isErr(): + assert false, "failed to fetch merkle proof: " & proofRes.error + manager.merkleProofCache = proofRes.get() + + # RPC server setup + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = 
restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let invalidContentTopic = "invalidContentTopic" + # When + let response = await client.relayPostAutoMessagesV1( + RelayWakuMessage( + payload: base64.encode("TEST-PAYLOAD"), + contentTopic: some(invalidContentTopic), + timestamp: some(int64(2022)), + ) + ) + + # Then + check: + response.status == 400 + $response.contentType == $MIMETYPE_TEXT + response.data == + "Failed to publish. Autosharding error: invalid format: content-topic '" & + invalidContentTopic & "' must start with slash" + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Post a message larger than maximum size - POST /relay/v1/messages/{topic}": + # Given + let node = testWakuNode() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + await node.mountRlnRelay(wakuRlnConfig) + await node.start() + + # Registration is mandatory before sending messages with rln-relay + let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated = waitFor manager.updateRoots() + debug "Updated root for node", rootUpdated + + let proofRes = waitFor manager.fetchMerkleProofElements() + if proofRes.isErr(): + assert false, "failed to fetch merkle proof: " & proofRes.error + manager.merkleProofCache = proofRes.get() + + # RPC server setup + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = 
WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + require: + toSeq(node.wakuRelay.subscribedTopics).len == 1 + + # When + let response = await client.relayPostMessagesV1( + DefaultPubsubTopic, + RelayWakuMessage( + payload: base64.encode(getByteSequence(DefaultMaxWakuMessageSize)), + # Message will be bigger than the max size + contentTopic: some(DefaultContentTopic), + timestamp: some(int64(2022)), + ), + ) + + # Then + check: + response.status == 400 + $response.contentType == $MIMETYPE_TEXT + response.data == + fmt"Failed to publish: Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes" + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Post a message larger than maximum size - POST /relay/v1/auto/messages/{topic}": + # Given + let node = testWakuNode() + (await node.mountRelay()).isOkOr: + assert false, "Failed to mount relay" + require node.mountAutoSharding(1, 8).isOk + + let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) + await node.mountRlnRelay(wakuRlnConfig) + await node.start() + + # Registration is mandatory before sending messages with rln-relay + let manager = cast[OnchainGroupManager](node.wakuRlnRelay.groupManager) + let idCredentials = generateCredentials(manager.rlnInstance) + + try: + waitFor manager.register(idCredentials, UserMessageLimit(20)) + except Exception, CatchableError: + 
assert false, + "exception raised when calling register: " & getCurrentExceptionMsg() + + let rootUpdated = waitFor manager.updateRoots() + debug "Updated root for node", rootUpdated + + let proofRes = waitFor manager.fetchMerkleProofElements() + if proofRes.isErr(): + assert false, "failed to fetch merkle proof: " & proofRes.error + manager.merkleProofCache = proofRes.get() + + # RPC server setup + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + + restPort = restServer.httpServer.address.port # update with bound port for client use + + let cache = MessageCache.init() + + installRelayApiHandlers(restServer.router, node, cache) + restServer.start() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let simpleHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await sleepAsync(0.milliseconds) + + node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), simpleHandler).isOkOr: + assert false, "Failed to subscribe to pubsub topic: " & $error + require: + toSeq(node.wakuRelay.subscribedTopics).len == 1 + + # When + let response = await client.relayPostAutoMessagesV1( + RelayWakuMessage( + payload: base64.encode(getByteSequence(DefaultMaxWakuMessageSize)), + # Message will be bigger than the max size + contentTopic: some(DefaultContentTopic), + timestamp: some(int64(2022)), + ) + ) + + # Then + check: + response.status == 400 + $response.contentType == $MIMETYPE_TEXT + response.data == + fmt"Failed to publish: Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes" + + await restServer.stop() + await restServer.closeWait() + await node.stop() diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_relay_serdes.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_relay_serdes.nim new file mode 100644 index 0000000..086aba2 --- /dev/null +++ 
b/third-party/nwaku/tests/wakunode_rest/test_rest_relay_serdes.nim @@ -0,0 +1,47 @@ +{.used.} + +import results, stew/byteutils, unittest2, json_serialization +import waku/[common/base64, waku_api/rest/serdes, waku_api/rest/relay/types, waku_core] + +suite "Waku v2 Rest API - Relay - serialization": + suite "RelayWakuMessage - decode": + test "optional fields are not provided": + # Given + let payload = base64.encode("MESSAGE") + let jsonBytes = + toBytes("{\"payload\":\"" & $payload & "\",\"contentTopic\":\"some/topic\"}") + + # When + let res = + decodeFromJsonBytes(RelayWakuMessage, jsonBytes, requireAllFields = true) + + # Then + require(res.isOk()) + let value = res.get() + check: + value.payload == payload + value.contentTopic.isSome() + value.contentTopic.get() == "some/topic" + value.version.isNone() + value.timestamp.isNone() + + suite "RelayWakuMessage - encode": + test "optional fields are none": + # Given + let payload = base64.encode("MESSAGE") + let data = RelayWakuMessage( + payload: payload, + contentTopic: none(ContentTopic), + version: none(Natural), + timestamp: none(int64), + ephemeral: none(bool), + ) + + # When + let res = encodeIntoJsonBytes(data) + + # Then + require(res.isOk()) + let value = res.get() + check: + value == toBytes("{\"payload\":\"" & $payload & "\"}") diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_serdes.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_serdes.nim new file mode 100644 index 0000000..719742b --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_serdes.nim @@ -0,0 +1,63 @@ +{.used.} + +import results, stew/byteutils, chronicles, unittest2, json_serialization +import waku/waku_api/rest/serdes, waku/waku_api/rest/debug/types + +# TODO: Decouple this test suite from the `debug_api` module by defining +# private custom types for this test suite module +suite "Waku v2 Rest API - Serdes": + suite "decode": + test "decodeFromJsonString - use the corresponding readValue template": + # 
Given + let jsonString = JsonString("""{ "listenAddresses":["123"] }""") + + # When + let res = decodeFromJsonString(DebugWakuInfo, jsonString, requireAllFields = true) + + # Then + require(res.isOk) + let value = res.get() + check: + value.listenAddresses == @["123"] + value.enrUri.isNone + + test "decodeFromJsonBytes - use the corresponding readValue template": + # Given + let jsonBytes = toBytes("""{ "listenAddresses":["123"] }""") + + # When + let res = decodeFromJsonBytes(DebugWakuInfo, jsonBytes, requireAllFields = true) + + # Then + require(res.isOk) + let value = res.get() + check: + value.listenAddresses == @["123"] + value.enrUri.isNone + + suite "encode": + test "encodeIntoJsonString - use the corresponding writeValue template": + # Given + let data = DebugWakuInfo(listenAddresses: @["GO"]) + + # When + let res = encodeIntoJsonString(data) + + # Then + require(res.isOk) + let value = res.get() + check: + value == """{"listenAddresses":["GO"]}""" + + test "encodeIntoJsonBytes - use the corresponding writeValue template": + # Given + let data = DebugWakuInfo(listenAddresses: @["ABC"]) + + # When + let res = encodeIntoJsonBytes(data) + + # Then + require(res.isOk) + let value = res.get() + check: + value == toBytes("""{"listenAddresses":["ABC"]}""") diff --git a/third-party/nwaku/tests/wakunode_rest/test_rest_store.nim b/third-party/nwaku/tests/wakunode_rest/test_rest_store.nim new file mode 100644 index 0000000..f08ed0a --- /dev/null +++ b/third-party/nwaku/tests/wakunode_rest/test_rest_store.nim @@ -0,0 +1,874 @@ +{.used.} + +import + std/[options, sugar], + chronicles, + chronos/timer, + testutils/unittests, + eth/keys, + presto, + presto/client as presto_client, + libp2p/crypto/crypto +import + waku/[ + waku_core/message, + waku_core/message/digest, + waku_core/topics, + waku_core/time, + waku_node, + node/peer_manager, + waku_api/rest/server, + waku_api/rest/client, + waku_api/rest/responses, + waku_api/rest/store/handlers as store_api, + 
waku_api/rest/store/client as store_api_client, + waku_api/rest/store/types, + waku_archive, + waku_archive/driver/queue_driver, + waku_archive/driver/sqlite_driver, + common/databases/db_sqlite, + waku_archive/driver/postgres_driver, + waku_store as waku_store, + ], + ../testlib/wakucore, + ../testlib/wakunode + +logScope: + topics = "waku node rest store_api test" + +proc put( + store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage +): Future[Result[void, string]] = + let msgHash = computeMessageHash(pubsubTopic, message) + + store.put(msgHash, pubsubTopic, message) + +# Creates a new WakuNode +proc testWakuNode(): WakuNode = + let + privkey = generateSecp256k1Key() + bindIp = parseIpAddress("0.0.0.0") + extIp = parseIpAddress("127.0.0.1") + port = Port(0) + + return newTestWakuNode(privkey, bindIp, port, some(extIp), some(port)) + +################################################################################ +# Beginning of the tests +################################################################################ +procSuite "Waku Rest API - Store v3": + asyncTest "MessageHash <-> string conversions": + # Validate MessageHash conversion from a WakuMessage obj + let wakuMsg = WakuMessage( + contentTopic: "Test content topic", payload: @[byte('H'), byte('i'), byte('!')] + ) + + let messageHash = computeMessageHash(DefaultPubsubTopic, wakuMsg) + let restMsgHash = some(messageHash.toRestStringWakuMessageHash()) + + let parsedMsgHashRes = parseHash(restMsgHash) + assert parsedMsgHashRes.isOk(), $parsedMsgHashRes.error + + check: + messageHash == parsedMsgHashRes.get().get() + + # Random validation. 
Obtained the raw values manually + let expected = + some("0x9e0ea917677a3d2b8610b0126986d89824b6acf76008b5fb9aa8b99ac906c1a7") + + let msgHashRes = parseHash(expected) + assert msgHashRes.isOk(), $msgHashRes.error + + check: + expected.get() == msgHashRes.get().get().toRestStringWakuMessageHash() + + asyncTest "invalid cursor": + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let db: SqliteDatabase = + SqliteDatabase.new(string.none().get(":memory:")).expect("valid DB") + let driver: ArchiveDriver = SqliteDriver.new(db).expect("valid driver") + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + peerSwitch.mount(node.wakuStore) + + await sleepAsync(1.seconds()) + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 1, byte 2], ts = 2), + fakeWakuMessage(@[byte 1], ts = 3), + fakeWakuMessage(@[byte 1], ts = 4), + fakeWakuMessage(@[byte 1], ts = 5), + fakeWakuMessage(@[byte 1], ts = 6), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("c2"), ts = 9), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + let fullAddr = 
$remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId + + await sleepAsync(1.seconds()) + + let fakeCursor = computeMessageHash(DefaultPubsubTopic, fakeWakuMessage()) + let encodedCursor = fakeCursor.toRestStringWakuMessageHash() + + # Apply filter by start and end timestamps + var response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), + "true", # include data + "", # pubsub topic + "ct1,c2", # empty content topics. + "", # start time + "", # end time + "", # hashes + encodedCursor, # hex-encoded hash + "true", # ascending + "5", # empty implies default page size + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 0 + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Filter by start and end time": + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + peerSwitch.mount(node.wakuStore) + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 1, byte 2], ts = 2), + fakeWakuMessage(@[byte 1], ts = 3), + fakeWakuMessage(@[byte 1], ts = 4), + fakeWakuMessage(@[byte 1], ts = 5), + 
fakeWakuMessage(@[byte 1], ts = 6), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("c2"), ts = 9), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId + + # Apply filter by start and end timestamps + var response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), + "true", # include data + encodeUrl(DefaultPubsubTopic), + "", # empty content topics. Don't filter by this field + "3", # start time + "6", # end time + "", # hashes + "", # hex-encoded hash + "true", # ascending + "", # empty implies default page size + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 4 + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "Store node history response - forward pagination": + # Test adapted from the analogous present at waku_store/test_wakunode_store.nim + let node = testWakuNode() + await node.start() + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + peerSwitch.mount(node.wakuStore) + + # Now prime it with some history before tests + let timeOrigin = wakucore.now() + let msgList = + @[ + fakeWakuMessage(@[byte 00], ts = 
ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId + + var pages = newSeq[seq[WakuMessage]](2) + + var reqHash = none(string) + + for i in 0 ..< 2: + let response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), + "true", # include data + encodeUrl(DefaultPubsubTopic), + "", # content topics. Empty ignores the field. + "", # start time. Empty ignores the field. + "", # end time. Empty ignores the field. + "", # hashes + if reqHash.isSome(): + reqHash.get() + else: + "", # hex-encoded digest. Empty ignores the field. + "true", # ascending + "7", # page size. Empty implies default page size. + ) + + let wakuMessages = collect(newSeq): + for element in response.data.messages: + if element.message.isSome(): + element.message.get() + + pages[i] = wakuMessages + + # populate the cursor for next page + if response.data.paginationCursor.isSome(): + reqHash = some(response.data.paginationCursor.get()) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + + check: + pages[0] == msgList[0 .. 6] + pages[1] == msgList[7 .. 
9] + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "query a node and retrieve historical messages filtered by pubsub topic": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + peerSwitch.mount(node.wakuStore) + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2"), ts = 9), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId + + # Filtering by a known pubsub topic + var response = await client.getStoreMessagesV3( + encodeUrl($fullAddr), "true", encodeUrl(DefaultPubsubTopic) + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 3 + + # Get all the messages by specifying an empty pubsub topic + response = await client.getStoreMessagesV3(encodeUrl($fullAddr), "true") + check: + response.status == 200 + 
$response.contentType == $MIMETYPE_JSON + response.data.messages.len == 3 + + # Receiving no messages by filtering with a random pubsub topic + response = await client.getStoreMessagesV3( + encodeUrl($fullAddr), "true", encodeUrl("random pubsub topic") + ) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 0 + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "retrieve historical messages from a provided store node address": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + peerSwitch.mount(node.wakuStore) + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId + + # Filtering by a known pubsub topic. 
+ # We also pass the store-node address in the request. + var response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), "true", encodeUrl(DefaultPubsubTopic) + ) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 3 + + # Get all the messages by specifying an empty pubsub topic + # We also pass the store-node address in the request. + response = + await client.getStoreMessagesV3(encodeUrl(fullAddr), "true", encodeUrl("")) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 3 + + # Receiving no messages by filtering with a random pubsub topic + # We also pass the store-node address in the request. + response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), "true", encodeUrl("random pubsub topic") + ) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 0 + + # Receiving 400 response if setting wrong store-node address + response = await client.getStoreMessagesV3( + encodeUrl("incorrect multi address format"), + "true", + encodeUrl("random pubsub topic"), + ) + check: + response.status == 400 + $response.contentType == $MIMETYPE_TEXT + response.data.messages.len == 0 + response.data.statusDesc == + "Failed parsing remote peer info [MultiAddress.init [multiaddress: Invalid MultiAddress, must start with `/`]]" + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "filter historical messages by content topic": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + 
restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + peerSwitch.mount(node.wakuStore) + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId + + # Filtering by content topic + let response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), "true", encodeUrl(DefaultPubsubTopic), encodeUrl("ct1,ct2") + ) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 2 + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "precondition failed": + # Given + let node = testWakuNode() + await node.start() + (await node.mountRelay()).isOkOr: + error "failed to mount relay", error = error + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + let client = newRestHttpClient(initTAddress(restAddress, 
restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + + # Sending no peer-store node address + var response = await client.getStoreMessagesV3( + encodeUrl(""), "true", encodeUrl(DefaultPubsubTopic) + ) + check: + response.status == 412 + $response.contentType == $MIMETYPE_TEXT + response.data.messages.len == 0 + response.data.statusDesc == NoPeerNoDiscError.errobj.message + + # Now add the storenode from "config" + node.peerManager.addServicePeer(remotePeerInfo, WakuStoreCodec) + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + # Sending no peer-store node address + response = await client.getStoreMessagesV3( + encodeUrl(""), "true", encodeUrl(DefaultPubsubTopic) + ) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 3 + + await restServer.stop() + await restServer.closeWait() + await node.stop() + + asyncTest "retrieve historical messages from a self-store-node": + ## This test aims to validate the correct message retrieval for a store-node which exposes + ## a REST server. 
+ + # Given + let node = testWakuNode() + await node.start() + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + + # Now prime it with some history before tests + let msgList = + @[ + fakeWakuMessage( + @[byte 0], contentTopic = ContentTopic("ct1"), ts = 0, meta = (@[byte 8]) + ), + fakeWakuMessage(@[byte 1], ts = 1), + fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + # Filtering by a known pubsub topic. 
+ var response = await client.getStoreMessagesV3( + includeData = "true", pubsubTopic = encodeUrl(DefaultPubsubTopic) + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 3 + + # Get all the messages by specifying an empty pubsub topic + response = + await client.getStoreMessagesV3(includeData = "true", pubsubTopic = encodeUrl("")) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 3 + + # Receiving no messages by filtering with a random pubsub topic + response = await client.getStoreMessagesV3( + includeData = "true", pubsubTopic = encodeUrl("random pubsub topic") + ) + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 0 + + asyncTest "correct message fields are returned": + # Given + let node = testWakuNode() + await node.start() + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore() + + # Now prime it with some history before tests + let msg = fakeWakuMessage( + @[byte 0], contentTopic = ContentTopic("ct1"), ts = 0, meta = (@[byte 8]) + ) + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + # Filtering by a known pubsub topic. 
+ var response = await client.getStoreMessagesV3( + includeData = "true", pubsubTopic = encodeUrl(DefaultPubsubTopic) + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + response.data.messages.len == 1 + + let storeMessage = response.data.messages[0].message.get() + + check: + storeMessage.payload == msg.payload + storeMessage.contentTopic == msg.contentTopic + storeMessage.version == msg.version + storeMessage.timestamp == msg.timestamp + storeMessage.ephemeral == msg.ephemeral + storeMessage.meta == msg.meta + + asyncTest "Rate limit store node store query": + # Test adapted from the analogous present at waku_store/test_wakunode_store.nim + let node = testWakuNode() + await node.start() + + var restPort = Port(0) + let restAddress = parseIpAddress("0.0.0.0") + let restServer = WakuRestServerRef.init(restAddress, restPort).tryGet() + restPort = restServer.httpServer.address.port # update with bound port for client use + + installStoreApiHandlers(restServer.router, node) + restServer.start() + + # WakuStore setup + let driver: ArchiveDriver = QueueDriver.new() + let mountArchiveRes = node.mountArchive(driver) + assert mountArchiveRes.isOk(), mountArchiveRes.error + + await node.mountStore((2, 500.millis)) + node.mountStoreClient() + + let key = generateEcdsaKey() + var peerSwitch = newStandardSwitch(some(key)) + await peerSwitch.start() + + peerSwitch.mount(node.wakuStore) + + # Now prime it with some history before tests + let timeOrigin = wakucore.now() + let msgList = + @[ + fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)), + fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)), + fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)), + fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)), + fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)), + fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)), + fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)), + fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)), + 
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)), + fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)), + ] + for msg in msgList: + require (await driver.put(DefaultPubsubTopic, msg)).isOk() + + let client = newRestHttpClient(initTAddress(restAddress, restPort)) + + let remotePeerInfo = peerSwitch.peerInfo.toRemotePeerInfo() + let fullAddr = $remotePeerInfo.addrs[0] & "/p2p/" & $remotePeerInfo.peerId + + var pages = newSeq[seq[WakuMessage]](2) + + var reqPubsubTopic = DefaultPubsubTopic + var reqHash = none(string) + + for i in 0 ..< 2: + let response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), + "true", # include data + encodeUrl(reqPubsubTopic), + "", # content topics. Empty ignores the field. + "", # start time. Empty ignores the field. + "", # end time. Empty ignores the field. + "", # hashes + if reqHash.isSome(): + reqHash.get() + else: + "", # hex-encoded digest. Empty ignores the field. + "true", # ascending + "3", # page size. Empty implies default page size. + ) + + let wakuMessages = collect(newSeq): + for element in response.data.messages: + if element.message.isSome(): + element.message.get() + + pages[i] = wakuMessages + + # populate the cursor for next page + if response.data.paginationCursor.isSome(): + reqHash = response.data.paginationCursor + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + + check: + pages[0] == msgList[0 .. 2] + pages[1] == msgList[3 .. 5] + + # request last third will lead to rate limit rejection + var response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), + "true", # include data + encodeUrl(reqPubsubTopic), + "", # content topics. Empty ignores the field. + "", # start time. Empty ignores the field. + "", # end time. Empty ignores the field. + "", # hashes + if reqHash.isSome(): + reqHash.get() + else: + "", # hex-encoded digest. Empty ignores the field. 
+ ) + + check: + response.status == 429 + $response.contentType == $MIMETYPE_TEXT + response.data.statusDesc == "Request rate limit reached" + + await sleepAsync(500.millis) + + # retry after respective amount of time shall succeed + response = await client.getStoreMessagesV3( + encodeUrl(fullAddr), + "true", # include data + encodeUrl(reqPubsubTopic), + "", # content topics. Empty ignores the field. + "", # start time. Empty ignores the field. + "", # end time. Empty ignores the field. + "", # hashes + if reqHash.isSome(): + reqHash.get() + else: + "", # hex-encoded digest. Empty ignores the field. + "true", # ascending + "5", # page size. Empty implies default page size. + ) + + check: + response.status == 200 + $response.contentType == $MIMETYPE_JSON + + let wakuMessages = collect(newSeq): + for element in response.data.messages: + if element.message.isSome(): + element.message.get() + + check wakuMessages == msgList[6 .. 9] + + await restServer.stop() + await restServer.closeWait() + await node.stop() diff --git a/third-party/nwaku/tools/confutils/cli_args.nim b/third-party/nwaku/tools/confutils/cli_args.nim new file mode 100644 index 0000000..c4bd66a --- /dev/null +++ b/third-party/nwaku/tools/confutils/cli_args.nim @@ -0,0 +1,1025 @@ +import + std/[strutils, strformat, sequtils], + results, + chronicles, + chronos, + regex, + stew/endians2, + stint, + confutils, + confutils/defs, + confutils/std/net, + confutils/toml/defs as confTomlDefs, + confutils/toml/std/net as confTomlNet, + libp2p/crypto/crypto, + libp2p/crypto/secp, + libp2p/multiaddress, + nimcrypto/utils, + secp256k1, + json + +import + waku/factory/[waku_conf, conf_builder/conf_builder, networks_config], + waku/common/[logging], + waku/[ + waku_enr, + node/peer_manager, + waku_core/topics/pubsub_topic, + waku_core/message/default_values, + ], + ../../tools/rln_keystore_generator/rln_keystore_generator + +import ./envvar as confEnvvarDefs, ./envvar_net as confEnvvarNet + +export confTomlDefs, 
confTomlNet, confEnvvarDefs, confEnvvarNet, ProtectedShard + +logScope: + topics = "waku cli args" + +# Git version in git describe format (defined at compile time) +const git_version* {.strdefine.} = "n/a" + +type ConfResult*[T] = Result[T, string] + +type EthRpcUrl* = distinct string + +type StartUpCommand* = enum + noCommand # default, runs waku + generateRlnKeystore # generates a new RLN keystore + +type WakuNodeConf* = object + configFile* {. + desc: "Loads configuration from a TOML file (cmd-line parameters take precedence)", + name: "config-file" + .}: Option[InputFile] + + ## Log configuration + logLevel* {. + desc: + "Sets the log level for process. Supported levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL", + defaultValue: logging.LogLevel.INFO, + name: "log-level" + .}: logging.LogLevel + + logFormat* {. + desc: + "Specifies what kind of logs should be written to stdout. Supported formats: TEXT, JSON", + defaultValue: logging.LogFormat.TEXT, + name: "log-format" + .}: logging.LogFormat + + rlnRelayCredPath* {. + desc: "The path for persisting rln-relay credential", + defaultValue: "", + name: "rln-relay-cred-path" + .}: string + + ethClientUrls* {. + desc: + "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.", + defaultValue: @[EthRpcUrl("http://localhost:8540/")], + name: "rln-relay-eth-client-address" + .}: seq[EthRpcUrl] + + rlnRelayEthContractAddress* {. + desc: "Address of membership contract on an Ethereum testnet.", + defaultValue: "", + name: "rln-relay-eth-contract-address" + .}: string + + rlnRelayChainId* {. + desc: + "Chain ID of the provided contract (optional, will fetch from RPC provider if not used)", + defaultValue: 0, + name: "rln-relay-chain-id" + .}: uint + + rlnRelayCredPassword* {. + desc: "Password for encrypting RLN credentials", + defaultValue: "", + name: "rln-relay-cred-password" + .}: string + + rlnRelayEthPrivateKey* {. 
+ desc: "Private key for broadcasting transactions", + defaultValue: "", + name: "rln-relay-eth-private-key" + .}: string + + # TODO: Remove "Default is" when it's already visible on the CLI + rlnRelayUserMessageLimit* {. + desc: + "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.", + defaultValue: 1, + name: "rln-relay-user-message-limit" + .}: uint64 + + rlnEpochSizeSec* {. + desc: + "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.", + defaultValue: 1, + name: "rln-relay-epoch-sec" + .}: uint64 + + maxMessageSize* {. + desc: + "Maximum message size. Accepted units: KiB, KB, and B. e.g. 1024KiB; 1500 B; etc.", + defaultValue: DefaultMaxWakuMessageSizeStr, + name: "max-msg-size" + .}: string + + case cmd* {.command, defaultValue: noCommand.}: StartUpCommand + of generateRlnKeystore: + execute* {. + desc: "Runs the registration function on-chain. By default, a dry-run will occur", + defaultValue: false, + name: "execute" + .}: bool + of noCommand: + ## Application-level configuration + protectedShards* {. + desc: + "Shards and its public keys to be used for message validation, shard:pubkey. Argument may be repeated.", + defaultValue: newSeq[ProtectedShard](0), + name: "protected-shard" + .}: seq[ProtectedShard] + + ## General node config + preset* {. + desc: + "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). Overrides other values.", + defaultValue: "", + name: "preset" + .}: string + + clusterId* {. + desc: + "Cluster id that the node is running in. Node in a different cluster id is disconnected.", + defaultValue: 0, + name: "cluster-id" + .}: uint16 + + agentString* {. + defaultValue: "nwaku-" & cli_args.git_version, + desc: "Node agent string which is used as identifier in network", + name: "agent-string" + .}: string + + nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}: + Option[PrivateKey] + + listenAddress* {. 
+ defaultValue: defaultListenAddress(), + desc: "Listening address for LibP2P (and Discovery v5, if enabled) traffic.", + name: "listen-address" + .}: IpAddress + + tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}: + Port + + portsShift* {. + desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift" + .}: uint16 + + nat* {. + desc: + "Specify method to use for determining public address. " & + "Must be one of: any, none, upnp, pmp, extip:.", + defaultValue: "any" + .}: string + + extMultiAddrs* {. + desc: + "External multiaddresses to advertise to the network. Argument may be repeated.", + name: "ext-multiaddr" + .}: seq[string] + + extMultiAddrsOnly* {. + desc: "Only announce external multiaddresses setup with --ext-multiaddr", + defaultValue: false, + name: "ext-multiaddr-only" + .}: bool + + maxConnections* {. + desc: "Maximum allowed number of libp2p connections.", + defaultValue: 50, + name: "max-connections" + .}: int + + maxRelayPeers* {. + desc: + "Deprecated. Use relay-service-ratio instead. It represents the maximum allowed number of relay peers.", + name: "max-relay-peers" + .}: Option[int] + + relayServiceRatio* {. + desc: + "This percentage ratio represents the relay peers to service peers. For example, 60:40, tells that 60% of the max-connections will be used for relay protocol and the other 40% of max-connections will be reserved for other service protocols (e.g., filter, lightpush, store, metadata, etc.)", + name: "relay-service-ratio", + defaultValue: "60:40" # 60:40 ratio of relay to service peers + .}: string + + colocationLimit* {. + desc: + "Max num allowed peers from the same IP. Set it to 0 to remove the limitation.", + defaultValue: defaultColocationLimit(), + name: "ip-colocation-limit" + .}: int + + peerStoreCapacity* {. + desc: "Maximum stored peers in the peerstore.", name: "peer-store-capacity" + .}: Option[int] + + peerPersistence* {. 
+ desc: "Enable peer persistence.", defaultValue: false, name: "peer-persistence" + .}: bool + + ## DNS addrs config + dnsAddrsNameServers* {. + desc: + "DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated.", + defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], + name: "dns-addrs-name-server" + .}: seq[IpAddress] + + dns4DomainName* {. + desc: "The domain name resolving to the node's public IPv4 address", + defaultValue: "", + name: "dns4-domain-name" + .}: string + + ## Circuit-relay config + isRelayClient* {. + desc: + """Set the node as a relay-client. +Set it to true for nodes that run behind a NAT or firewall and +hence would have reachability issues.""", + defaultValue: false, + name: "relay-client" + .}: bool + + ## Relay config + relay* {. + desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay" + .}: bool + + relayPeerExchange* {. + desc: "Enable gossipsub peer exchange in relay protocol: true|false", + defaultValue: false, + name: "relay-peer-exchange" + .}: bool + + relayShardedPeerManagement* {. + desc: + "Enable experimental shard aware peer manager for relay protocol: true|false", + defaultValue: false, + name: "relay-shard-manager" + .}: bool + + rlnRelay* {. + desc: "Enable spam protection through rln-relay: true|false.", + defaultValue: false, + name: "rln-relay" + .}: bool + + rlnRelayCredIndex* {. + desc: "the index of the onchain commitment to use", + name: "rln-relay-membership-index" + .}: Option[uint] + + rlnRelayDynamic* {. + desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false.", + defaultValue: false, + name: "rln-relay-dynamic" + .}: bool + + staticnodes* {. + desc: "Peer multiaddr to directly connect with. Argument may be repeated.", + name: "staticnode" + .}: seq[string] + + keepAlive* {. + desc: + "Deprecated since >=v0.37. 
This param is ignored and keep alive is always active", + defaultValue: true, + name: "keep-alive" + .}: bool + + numShardsInNetwork* {. + desc: + "Enables autosharding and set number of shards in the cluster, set to `0` to use static sharding", + defaultValue: 1, + name: "num-shards-in-network" + .}: uint16 + + shards* {. + desc: + "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated. Subscribes to all shards by default in auto-sharding, no shard for static sharding", + name: "shard" + .}: seq[uint16] + + contentTopics* {. + desc: "Default content topic to subscribe to. Argument may be repeated.", + name: "content-topic" + .}: seq[string] + + ## Store and message store config + store* {. + desc: "Enable/disable waku store protocol", defaultValue: false, name: "store" + .}: bool + + legacyStore* {. + desc: "Enable/disable support of Waku Store v2 as a service", + defaultValue: false, + name: "legacy-store" + .}: bool + + storenode* {. + desc: "Peer multiaddress to query for storage", + defaultValue: "", + name: "storenode" + .}: string + + storeMessageRetentionPolicy* {. + desc: + "Message store retention policy. Time retention policy: 'time:'. Capacity retention policy: 'capacity:'. Size retention policy: 'size:'. Set to 'none' to disable.", + defaultValue: "time:" & $2.days.seconds, + name: "store-message-retention-policy" + .}: string + + storeMessageDbUrl* {. + desc: "The database connection URL for peristent storage.", + defaultValue: "sqlite://store.sqlite3", + name: "store-message-db-url" + .}: string + + storeMessageDbVacuum* {. + desc: + "Enable database vacuuming at start. Only supported by SQLite database engine.", + defaultValue: false, + name: "store-message-db-vacuum" + .}: bool + + storeMessageDbMigration* {. + desc: "Enable database migration at start.", + defaultValue: true, + name: "store-message-db-migration" + .}: bool + + storeMaxNumDbConnections* {. 
+ desc: "Maximum number of simultaneous Postgres connections.", + defaultValue: 50, + name: "store-max-num-db-connections" + .}: int + + storeResume* {. + desc: "Enable store resume functionality", + defaultValue: false, + name: "store-resume" + .}: bool + + ## Sync config + storeSync* {. + desc: "Enable store sync protocol: true|false", + defaultValue: false, + name: "store-sync" + .}: bool + + storeSyncInterval* {. + desc: "Interval between store sync attempts. In seconds.", + defaultValue: 300, # 5 minutes + name: "store-sync-interval" + .}: uint32 + + storeSyncRange* {. + desc: "Amount of time to sync. In seconds.", + defaultValue: 3600, # 1 hours + name: "store-sync-range" + .}: uint32 + + storeSyncRelayJitter* {. + hidden, + desc: "Time offset to account for message propagation jitter. In seconds.", + defaultValue: 20, + name: "store-sync-relay-jitter" + .}: uint32 + + ## Filter config + filter* {. + desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter" + .}: bool + + filternode* {. + desc: "Peer multiaddr to request content filtering of messages.", + defaultValue: "", + name: "filternode" + .}: string + + filterSubscriptionTimeout* {. + desc: + "Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.", + defaultValue: 300, # 5 minutes + name: "filter-subscription-timeout" + .}: uint16 + + filterMaxPeersToServe* {. + desc: "Maximum number of peers to serve at a time. Only for v2 filter protocol.", + defaultValue: 1000, + name: "filter-max-peers-to-serve" + .}: uint32 + + filterMaxCriteria* {. + desc: + "Maximum number of pubsub- and content topic combination per peers at a time. Only for v2 filter protocol.", + defaultValue: 1000, + name: "filter-max-criteria" + .}: uint32 + + ## Lightpush config + lightpush* {. + desc: "Enable lightpush protocol: true|false", + defaultValue: false, + name: "lightpush" + .}: bool + + lightpushnode* {. 
+ desc: "Peer multiaddr to request lightpush of published messages.", + defaultValue: "", + name: "lightpushnode" + .}: string + + ## Reliability config + reliabilityEnabled* {. + desc: + """Adds an extra effort in the delivery/reception of messages by leveraging store-v3 requests. +with the drawback of consuming some more bandwidth.""", + defaultValue: false, + name: "reliability" + .}: bool + + ## REST HTTP config + rest* {. + desc: "Enable Waku REST HTTP server: true|false", defaultValue: true, name: "rest" + .}: bool + + restAddress* {. + desc: "Listening address of the REST HTTP server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "rest-address" + .}: IpAddress + + restPort* {. + desc: "Listening port of the REST HTTP server.", + defaultValue: 8645, + name: "rest-port" + .}: uint16 + + restRelayCacheCapacity* {. + desc: "Capacity of the Relay REST API message cache.", + defaultValue: 50, + name: "rest-relay-cache-capacity" + .}: uint32 + + restAdmin* {. + desc: "Enable access to REST HTTP Admin API: true|false", + defaultValue: false, + name: "rest-admin" + .}: bool + + restAllowOrigin* {. + desc: + "Allow cross-origin requests from the specified origin." & + "Argument may be repeated." & "Wildcards: * or ? allowed." & + "Ex.: \"localhost:*\" or \"127.0.0.1:8080\"", + defaultValue: newSeq[string](), + name: "rest-allow-origin" + .}: seq[string] + + ## Metrics config + metricsServer* {. + desc: "Enable the metrics server: true|false", + defaultValue: false, + name: "metrics-server" + .}: bool + + metricsServerAddress* {. + desc: "Listening address of the metrics server.", + defaultValue: parseIpAddress("127.0.0.1"), + name: "metrics-server-address" + .}: IpAddress + + metricsServerPort* {. + desc: "Listening HTTP port of the metrics server.", + defaultValue: 8008, + name: "metrics-server-port" + .}: uint16 + + metricsLogging* {. 
+ desc: "Enable metrics logging: true|false", + defaultValue: true, + name: "metrics-logging" + .}: bool + + ## DNS discovery config + dnsDiscovery* {. + desc: + "Deprecated, please set dns-discovery-url instead. Enable discovering nodes via DNS", + defaultValue: false, + name: "dns-discovery" + .}: bool + + dnsDiscoveryUrl* {. + desc: + "URL for DNS node list in format 'enrtree://@', enables DNS Discovery", + defaultValue: "", + name: "dns-discovery-url" + .}: string + + ## Discovery v5 config + discv5Discovery* {. + desc: "Enable discovering nodes via Node Discovery v5.", + defaultValue: none(bool), + name: "discv5-discovery" + .}: Option[bool] + + discv5UdpPort* {. + desc: "Listening UDP port for Node Discovery v5.", + defaultValue: 9000, + name: "discv5-udp-port" + .}: Port + + discv5BootstrapNodes* {. + desc: + "Text-encoded ENR for bootstrap node. Used when connecting to the network. Argument may be repeated.", + name: "discv5-bootstrap-node" + .}: seq[string] + + discv5EnrAutoUpdate* {. + desc: + "Discovery can automatically update its ENR with the IP address " & + "and UDP port as seen by other nodes it communicates with. " & + "This option allows to enable/disable this functionality", + defaultValue: false, + name: "discv5-enr-auto-update" + .}: bool + + discv5TableIpLimit* {. + hidden, + desc: "Maximum amount of nodes with the same IP in discv5 routing tables", + defaultValue: 10, + name: "discv5-table-ip-limit" + .}: uint + + discv5BucketIpLimit* {. + hidden, + desc: "Maximum amount of nodes with the same IP in discv5 routing table buckets", + defaultValue: 2, + name: "discv5-bucket-ip-limit" + .}: uint + + discv5BitsPerHop* {. + hidden, + desc: "Kademlia's b variable, increase for less hops per lookup", + defaultValue: 1, + name: "discv5-bits-per-hop" + .}: int + + ## waku peer exchange config + peerExchange* {. 
+ desc: "Enable waku peer exchange protocol (responder side): true|false", + defaultValue: true, + name: "peer-exchange" + .}: bool + + peerExchangeNode* {. + desc: + "Peer multiaddr to send peer exchange requests to. (enables peer exchange protocol requester side)", + defaultValue: "", + name: "peer-exchange-node" + .}: string + + ## Rendez vous + rendezvous* {. + desc: "Enable waku rendezvous discovery server", + defaultValue: true, + name: "rendezvous" + .}: bool + + #Mix config + mix* {.desc: "Enable mix protocol: true|false", defaultValue: false, name: "mix".}: + bool + + mixkey* {. + desc: + "ED25519 private key as 64 char hex string , without 0x. If not provided, a random key will be generated.", + name: "mixkey" + .}: Option[string] + + ## websocket config + websocketSupport* {. + desc: "Enable websocket: true|false", + defaultValue: false, + name: "websocket-support" + .}: bool + + websocketPort* {. + desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port" + .}: Port + + websocketSecureSupport* {. + desc: "Enable secure websocket: true|false", + defaultValue: false, + name: "websocket-secure-support" + .}: bool + + websocketSecureKeyPath* {. + desc: "Secure websocket key path: '/path/to/key.txt' ", + defaultValue: "", + name: "websocket-secure-key-path" + .}: string + + websocketSecureCertPath* {. + desc: "Secure websocket Certificate path: '/path/to/cert.txt' ", + defaultValue: "", + name: "websocket-secure-cert-path" + .}: string + + ## Rate limitation config, if not set, rate limit checks will not be performed + rateLimits* {. + desc: + "Rate limit settings for different protocols." & + "Format: protocol:volume/period" & + " Where 'protocol' can be one of: if not defined it means a global setting" & + " 'volume' and period must be an integer value. " & + " 'unit' must be one of - hours, minutes, seconds, milliseconds respectively. 
" & + "Argument may be repeated.", + defaultValue: newSeq[string](0), + name: "rate-limit" + .}: seq[string] + +## Parsing + +# NOTE: Keys are different in nim-libp2p +proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T = + try: + let key = SkPrivateKey.init(utils.fromHex(p)).tryGet() + crypto.PrivateKey(scheme: Secp256k1, skkey: key) + except CatchableError: + raise newException(ValueError, "Invalid private key") + +proc parseCmdArg*[T](_: type seq[T], s: string): seq[T] {.raises: [ValueError].} = + var + inputSeq: JsonNode + res: seq[T] = @[] + + try: + inputSeq = s.parseJson() + except Exception: + raise newException(ValueError, fmt"Could not parse sequence: {s}") + + for entry in inputSeq: + let formattedString = ($entry).strip(chars = {'\"'}) + res.add(parseCmdArg(T, formattedString)) + + return res + +proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] = + return @[] + +# TODO: Remove when removing protected-topic configuration +proc isNumber(x: string): bool = + try: + discard parseInt(x) + result = true + except ValueError: + result = false + +proc parseCmdArg*(T: type ProtectedShard, p: string): T = + let elements = p.split(":") + if elements.len != 2: + raise newException( + ValueError, "Invalid format for protected shard expected shard:publickey" + ) + let publicKey = secp256k1.SkPublicKey.fromHex(elements[1]) + if publicKey.isErr: + raise newException(ValueError, "Invalid public key") + + if isNumber(elements[0]): + return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), key: publicKey.get()) + + # TODO: Remove when removing protected-topic configuration + let shard = RelayShard.parse(elements[0]).valueOr: + raise newException( + ValueError, + "Invalid pubsub topic. 
Pubsub topics must be in the format /waku/2/rs//", + ) + return ProtectedShard(shard: shard.shardId, key: publicKey.get()) + +proc completeCmdArg*(T: type ProtectedShard, val: string): seq[string] = + return @[] + +proc completeCmdArg*(T: type IpAddress, val: string): seq[string] = + return @[] + +proc defaultListenAddress*(): IpAddress = + # TODO: Should probably listen on both ipv4 and ipv6 by default. + (static parseIpAddress("0.0.0.0")) + +proc defaultColocationLimit*(): int = + return DefaultColocationLimit + +proc completeCmdArg*(T: type Port, val: string): seq[string] = + return @[] + +proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] = + return @[] + +proc parseCmdArg*(T: type EthRpcUrl, s: string): T = + ## allowed patterns: + ## http://url:port + ## https://url:port + ## http://url:port/path + ## https://url:port/path + ## http://url/with/path + ## http://url:port/path?query + ## https://url:port/path?query + ## https://username:password@url:port/path + ## https://username:password@url:port/path?query + ## supports IPv4, IPv6, URL-encoded credentials + ## disallowed patterns: + ## any valid/invalid ws or wss url + var httpPattern = + re2"^(https?):\/\/(([^\s:@]*(?:%[0-9A-Fa-f]{2})*):([^\s:@]*(?:%[0-9A-Fa-f]{2})*)@)?((?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)*[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?|(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)|\[[0-9a-fA-F:]+\])(?::([0-9]{1,5}))?(\/[^\s?#]*)?(\?[^\s#]*)?(#[^\s]*)?$" + var wsPattern = + re2"^(wss?):\/\/([\w-]+(\.[\w-]+)+)(:[0-9]{1,5})?(\/[\w.,@?^=%&:\/~+#-]*)?$" + if regex.match(s, wsPattern): + raise newException( + ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL" + ) + if not regex.match(s, httpPattern): + raise newException(ValueError, "Invalid HTTP RPC URL") + return EthRpcUrl(s) + +## Load + +proc readValue*( + r: var TomlReader, value: var crypto.PrivateKey +) {.raises: [SerializationError].} = + try: + 
value = parseCmdArg(crypto.PrivateKey, r.readValue(string)) + except CatchableError: + raise newException(SerializationError, getCurrentExceptionMsg()) + +proc readValue*( + r: var EnvvarReader, value: var crypto.PrivateKey +) {.raises: [SerializationError].} = + try: + value = parseCmdArg(crypto.PrivateKey, r.readValue(string)) + except CatchableError: + raise newException(SerializationError, getCurrentExceptionMsg()) + +proc readValue*( + r: var TomlReader, value: var ProtectedShard +) {.raises: [SerializationError].} = + try: + value = parseCmdArg(ProtectedShard, r.readValue(string)) + except CatchableError: + raise newException(SerializationError, getCurrentExceptionMsg()) + +proc readValue*( + r: var EnvvarReader, value: var ProtectedShard +) {.raises: [SerializationError].} = + try: + value = parseCmdArg(ProtectedShard, r.readValue(string)) + except CatchableError: + raise newException(SerializationError, getCurrentExceptionMsg()) + +proc readValue*( + r: var TomlReader, value: var EthRpcUrl +) {.raises: [SerializationError].} = + try: + value = parseCmdArg(EthRpcUrl, r.readValue(string)) + except CatchableError: + raise newException(SerializationError, getCurrentExceptionMsg()) + +proc readValue*( + r: var EnvvarReader, value: var EthRpcUrl +) {.raises: [SerializationError].} = + try: + value = parseCmdArg(EthRpcUrl, r.readValue(string)) + except CatchableError: + raise newException(SerializationError, getCurrentExceptionMsg()) + +proc load*(T: type WakuNodeConf, version = ""): ConfResult[T] = + try: + let conf = WakuNodeConf.load( + version = version, + secondarySources = proc( + conf: WakuNodeConf, sources: auto + ) {.gcsafe, raises: [ConfigurationError].} = + sources.addConfigFile(Envvar, InputFile("wakunode2")) + + if conf.configFile.isSome(): + sources.addConfigFile(Toml, conf.configFile.get()) + , + ) + + ok(conf) + except CatchableError: + err(getCurrentExceptionMsg()) + +proc defaultWakuNodeConf*(): ConfResult[WakuNodeConf] = + try: + let conf = 
WakuNodeConf.load(version = "", cmdLine = @[]) + return ok(conf) + except CatchableError: + return err("exception in defaultWakuNodeConf: " & getCurrentExceptionMsg()) + +proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf = + RlnKeystoreGeneratorConf( + execute: n.execute, + chainId: UInt256.fromBytesBE(n.rlnRelayChainId.toBytesBE()), + ethClientUrls: n.ethClientUrls.mapIt(string(it)), + ethContractAddress: n.rlnRelayEthContractAddress, + userMessageLimit: n.rlnRelayUserMessageLimit, + ethPrivateKey: n.rlnRelayEthPrivateKey, + credPath: n.rlnRelayCredPath, + credPassword: n.rlnRelayCredPassword, + ) + +proc toNetworkConf( + preset: string, clusterId: Option[uint16] +): ConfResult[Option[NetworkConf]] = + var lcPreset = toLowerAscii(preset) + if clusterId.isSome() and clusterId.get() == 1: + warn( + "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead." + ) + lcPreset = "twn" + + case lcPreset + of "": + ok(none(NetworkConf)) + of "twn": + ok(some(NetworkConf.TheWakuNetworkConf())) + else: + err("Invalid --preset value passed: " & lcPreset) + +proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = + var b = WakuConfBuilder.init() + + b.withLogLevel(n.logLevel) + b.withLogFormat(n.logFormat) + + b.rlnRelayConf.withEnabled(n.rlnRelay) + if n.rlnRelayCredPath != "": + b.rlnRelayConf.withCredPath(n.rlnRelayCredPath) + if n.rlnRelayCredPassword != "": + b.rlnRelayConf.withCredPassword(n.rlnRelayCredPassword) + if n.ethClientUrls.len > 0: + b.rlnRelayConf.withEthClientUrls(n.ethClientUrls.mapIt(string(it))) + if n.rlnRelayEthContractAddress != "": + b.rlnRelayConf.withEthContractAddress(n.rlnRelayEthContractAddress) + + if n.rlnRelayChainId != 0: + b.rlnRelayConf.withChainId(n.rlnRelayChainId) + b.rlnRelayConf.withUserMessageLimit(n.rlnRelayUserMessageLimit) + b.rlnRelayConf.withEpochSizeSec(n.rlnEpochSizeSec) + + if n.rlnRelayCredIndex.isSome(): + 
b.rlnRelayConf.withCredIndex(n.rlnRelayCredIndex.get()) + b.rlnRelayConf.withDynamic(n.rlnRelayDynamic) + + if n.maxMessageSize != "": + b.withMaxMessageSize(n.maxMessageSize) + + b.withProtectedShards(n.protectedShards) + b.withClusterId(n.clusterId) + + let networkConf = toNetworkConf(n.preset, some(n.clusterId)).valueOr: + return err("Error determining cluster from preset: " & $error) + + if networkConf.isSome(): + b.withNetworkConf(networkConf.get()) + + b.withAgentString(n.agentString) + + if n.nodeKey.isSome(): + b.withNodeKey(n.nodeKey.get()) + + b.withP2pListenAddress(n.listenAddress) + b.withP2pTcpPort(n.tcpPort) + b.withPortsShift(n.portsShift) + b.withNatStrategy(n.nat) + b.withExtMultiAddrs(n.extMultiAddrs) + b.withExtMultiAddrsOnly(n.extMultiAddrsOnly) + b.withMaxConnections(n.maxConnections) + + if n.maxRelayPeers.isSome(): + b.withMaxRelayPeers(n.maxRelayPeers.get()) + + if n.relayServiceRatio != "": + b.withRelayServiceRatio(n.relayServiceRatio) + b.withColocationLimit(n.colocationLimit) + + if n.peerStoreCapacity.isSome: + b.withPeerStoreCapacity(n.peerStoreCapacity.get()) + + b.withPeerPersistence(n.peerPersistence) + b.withDnsAddrsNameServers(n.dnsAddrsNameServers) + b.withDns4DomainName(n.dns4DomainName) + b.withCircuitRelayClient(n.isRelayClient) + b.withRelay(n.relay) + b.withRelayPeerExchange(n.relayPeerExchange) + b.withRelayShardedPeerManagement(n.relayShardedPeerManagement) + b.withStaticNodes(n.staticNodes) + + if n.numShardsInNetwork != 0: + b.withNumShardsInCluster(n.numShardsInNetwork) + b.withShardingConf(AutoSharding) + else: + b.withShardingConf(StaticSharding) + + # It is not possible to pass an empty sequence on the CLI + # If this is empty, it means the user did not specify any shards + if n.shards.len != 0: + b.withSubscribeShards(n.shards) + + b.withContentTopics(n.contentTopics) + + b.storeServiceConf.withEnabled(n.store) + b.storeServiceConf.withSupportV2(n.legacyStore) + 
b.storeServiceConf.withRetentionPolicy(n.storeMessageRetentionPolicy) + b.storeServiceConf.withDbUrl(n.storeMessageDbUrl) + b.storeServiceConf.withDbVacuum(n.storeMessageDbVacuum) + b.storeServiceConf.withDbMigration(n.storeMessageDbMigration) + b.storeServiceConf.withMaxNumDbConnections(n.storeMaxNumDbConnections) + b.storeServiceConf.withResume(n.storeResume) + + # TODO: can we just use `Option` on the CLI? + if n.storenode != "": + b.withRemoteStoreNode(n.storenode) + if n.filternode != "": + b.withRemoteFilterNode(n.filternode) + if n.lightpushnode != "": + b.withRemoteLightPushNode(n.lightpushnode) + if n.peerExchangeNode != "": + b.withRemotePeerExchangeNode(n.peerExchangeNode) + + b.storeServiceConf.storeSyncConf.withEnabled(n.storeSync) + b.storeServiceConf.storeSyncConf.withIntervalSec(n.storeSyncInterval) + b.storeServiceConf.storeSyncConf.withRangeSec(n.storeSyncRange) + b.storeServiceConf.storeSyncConf.withRelayJitterSec(n.storeSyncRelayJitter) + + b.mixConf.withEnabled(n.mix) + b.withMix(n.mix) + if n.mixkey.isSome(): + b.mixConf.withMixKey(n.mixkey.get()) + + b.filterServiceConf.withEnabled(n.filter) + b.filterServiceConf.withSubscriptionTimeout(n.filterSubscriptionTimeout) + b.filterServiceConf.withMaxPeersToServe(n.filterMaxPeersToServe) + b.filterServiceConf.withMaxCriteria(n.filterMaxCriteria) + + b.withLightPush(n.lightpush) + b.withP2pReliability(n.reliabilityEnabled) + + b.restServerConf.withEnabled(n.rest) + b.restServerConf.withListenAddress(n.restAddress) + b.restServerConf.withPort(n.restPort) + b.restServerConf.withRelayCacheCapacity(n.restRelayCacheCapacity) + b.restServerConf.withAdmin(n.restAdmin) + b.restServerConf.withAllowOrigin(n.restAllowOrigin) + + b.metricsServerConf.withEnabled(n.metricsServer) + b.metricsServerConf.withHttpAddress(n.metricsServerAddress) + b.metricsServerConf.withHttpPort(n.metricsServerPort) + b.metricsServerConf.withLogging(n.metricsLogging) + + if n.dnsDiscoveryUrl != "": + 
b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl) + b.dnsDiscoveryConf.withNameServers(n.dnsAddrsNameServers) + + if n.discv5Discovery.isSome(): + b.discv5Conf.withEnabled(n.discv5Discovery.get()) + + b.discv5Conf.withUdpPort(n.discv5UdpPort) + b.discv5Conf.withBootstrapNodes(n.discv5BootstrapNodes) + b.discv5Conf.withEnrAutoUpdate(n.discv5EnrAutoUpdate) + b.discv5Conf.withTableIpLimit(n.discv5TableIpLimit) + b.discv5Conf.withBucketIpLimit(n.discv5BucketIpLimit) + b.discv5Conf.withBitsPerHop(n.discv5BitsPerHop) + + b.withPeerExchange(n.peerExchange) + + b.withRendezvous(n.rendezvous) + + b.webSocketConf.withEnabled(n.websocketSupport) + b.webSocketConf.withWebSocketPort(n.websocketPort) + b.webSocketConf.withSecureEnabled(n.websocketSecureSupport) + b.webSocketConf.withKeyPath(n.websocketSecureKeyPath) + b.webSocketConf.withCertPath(n.websocketSecureCertPath) + + b.rateLimitConf.withRateLimits(n.rateLimits) + + return b.build() diff --git a/third-party/nwaku/tools/confutils/envvar.nim b/third-party/nwaku/tools/confutils/envvar.nim new file mode 100644 index 0000000..ee73594 --- /dev/null +++ b/third-party/nwaku/tools/confutils/envvar.nim @@ -0,0 +1,16 @@ +{.push raises: [].} + +import confutils/defs as confutilsDefs +import ./envvar_serialization + +export envvar_serialization, confutilsDefs + +template readConfutilsType(T: type) = + template readValue*(r: var EnvvarReader, value: var T) = + value = T r.readValue(string) + +readConfutilsType InputFile +readConfutilsType InputDir +readConfutilsType OutPath +readConfutilsType OutDir +readConfutilsType OutFile diff --git a/third-party/nwaku/tools/confutils/envvar_net.nim b/third-party/nwaku/tools/confutils/envvar_net.nim new file mode 100644 index 0000000..0c689f0 --- /dev/null +++ b/third-party/nwaku/tools/confutils/envvar_net.nim @@ -0,0 +1,22 @@ +{.push raises: [].} + +import std/[strutils, net] +import ./envvar_serialization + +export net, envvar_serialization + +proc readValue*( + r: var EnvvarReader, value: 
var IpAddress +) {.raises: [SerializationError].} = + try: + value = parseIpAddress(r.readValue(string)) + except ValueError, IOError: + raise newException( + SerializationError, "Invalid IP address: " & getCurrentExceptionMsg() + ) + +proc readValue*(r: var EnvvarReader, value: var Port) {.raises: [SerializationError].} = + try: + value = parseUInt(r.readValue(string)).Port + except ValueError, IOError: + raise newException(SerializationError, "Invalid Port: " & getCurrentExceptionMsg()) diff --git a/third-party/nwaku/tools/confutils/envvar_serialization.nim b/third-party/nwaku/tools/confutils/envvar_serialization.nim new file mode 100644 index 0000000..53314ef --- /dev/null +++ b/third-party/nwaku/tools/confutils/envvar_serialization.nim @@ -0,0 +1,49 @@ +{.push raises: [].} + +import stew/shims/macros, serialization +import ./envvar_serialization/reader, ./envvar_serialization/writer + +export serialization, reader, writer + +serializationFormat Envvar + +Envvar.setReader EnvvarReader +Envvar.setWriter EnvvarWriter, PreferredOutput = void + +template supports*(_: type Envvar, T: type): bool = + # The Envvar format should support every type + true + +template decode*( + _: type Envvar, prefix: string, RecordType: distinct type, params: varargs[untyped] +): auto = + mixin init, ReaderType + + {.noSideEffect.}: + var reader = unpackArgs(init, [EnvvarReader, prefix, params]) + reader.readValue(RecordType) + +template encode*( + _: type Envvar, prefix: string, value: auto, params: varargs[untyped] +) = + mixin init, WriterType, writeValue + + {.noSideEffect.}: + var writer = unpackArgs(init, [EnvvarWriter, prefix, params]) + writeValue writer, value + +template loadFile*( + _: type Envvar, prefix: string, RecordType: distinct type, params: varargs[untyped] +): auto = + mixin init, ReaderType, readValue + + var reader = unpackArgs(init, [EnvvarReader, prefix, params]) + reader.readValue(RecordType) + +template saveFile*( + _: type Envvar, prefix: string, value: auto, 
params: varargs[untyped] +) = + mixin init, WriterType, writeValue + + var writer = unpackArgs(init, [EnvvarWriter, prefix, params]) + writer.writeValue(value) diff --git a/third-party/nwaku/tools/confutils/envvar_serialization/reader.nim b/third-party/nwaku/tools/confutils/envvar_serialization/reader.nim new file mode 100644 index 0000000..ef0f47a --- /dev/null +++ b/third-party/nwaku/tools/confutils/envvar_serialization/reader.nim @@ -0,0 +1,108 @@ +{.push raises: [].} + +import + std/[tables, typetraits, options, os], + serialization/object_serialization, + serialization/errors +import ./utils + +type + EnvvarReader* = object + prefix: string + key: seq[string] + + EnvvarError* = object of SerializationError + + EnvvarReaderError* = object of EnvvarError + + GenericEnvvarReaderError* = object of EnvvarReaderError + deserializedField*: string + innerException*: ref CatchableError + +proc handleReadException*( + r: EnvvarReader, + Record: type, + fieldName: string, + field: auto, + err: ref CatchableError, +) {.raises: [GenericEnvvarReaderError].} = + var ex = new GenericEnvvarReaderError + ex.deserializedField = fieldName + ex.innerException = err + raise ex + +proc init*(T: type EnvvarReader, prefix: string): T = + result.prefix = prefix + +proc readValue*[T](r: var EnvvarReader, value: var T) {.raises: [SerializationError].} = + mixin readValue + + when T is string: + let key = constructKey(r.prefix, r.key) + value = os.getEnv(key) + elif T is (SomePrimitives or range): + let key = constructKey(r.prefix, r.key) + try: + getValue(key, value) + except ValueError: + raise newException( + SerializationError, + "Couldn't getValue SomePrimitives: " & getCurrentExceptionMsg(), + ) + elif T is Option: + template getUnderlyingType[T](_: Option[T]): untyped = + T + + let key = constructKey(r.prefix, r.key) + if os.existsEnv(key): + type uType = getUnderlyingType(value) + when uType is string: + value = some(os.getEnv(key)) + else: + try: + value = 
some(r.readValue(uType)) + except ValueError, IOError: + raise newException( + SerializationError, + "Couldn't read Option value: " & getCurrentExceptionMsg(), + ) + elif T is (seq or array): + when uTypeIsPrimitives(T): + let key = constructKey(r.prefix, r.key) + try: + getValue(key, value) + except ValueError: + raise newException( + SerializationError, "Couldn't get value: " & getCurrentExceptionMsg() + ) + else: + let key = r.key[^1] + for i in 0 ..< value.len: + r.key[^1] = key & $i + r.readValue(value[i]) + elif T is (object or tuple): + type T = type(value) + when T.totalSerializedFields > 0: + let fields = T.fieldReadersTable(EnvvarReader) + var expectedFieldPos = 0 + r.key.add "" + value.enumInstanceSerializedFields(fieldName, field): + when T is tuple: + r.key[^1] = $expectedFieldPos + var reader = fields[][expectedFieldPos].reader + expectedFieldPos += 1 + else: + r.key[^1] = fieldName + var reader = findFieldReader(fields[], fieldName, expectedFieldPos) + + if reader != nil: + try: + reader(value, r) + except ValueError, IOError: + raise newException( + SerializationError, "Couldn't read field: " & getCurrentExceptionMsg() + ) + discard r.key.pop() + else: + const typeName = typetraits.name(T) + {.fatal: "Failed to convert from Envvar an unsupported type: " & typeName.} diff --git a/third-party/nwaku/tools/confutils/envvar_serialization/utils.nim b/third-party/nwaku/tools/confutils/envvar_serialization/utils.nim new file mode 100644 index 0000000..cb2c628 --- /dev/null +++ b/third-party/nwaku/tools/confutils/envvar_serialization/utils.nim @@ -0,0 +1,89 @@ +{.push raises: [].} + +import std/[os, strutils], stew/byteutils, stew/ptrops + +type SomePrimitives* = SomeInteger | enum | bool | SomeFloat | char + +proc setValue*[T: SomePrimitives](key: string, val: openArray[T]) = + os.putEnv( + key, byteutils.toHex(makeOpenArray(val[0].unsafeAddr, byte, val.len * sizeof(T))) + ) + +proc setValue*(key: string, val: SomePrimitives) = + os.putEnv(key, 
byteutils.toHex(makeOpenArray(val.unsafeAddr, byte, sizeof(val)))) + +proc decodePaddedHex( + hex: string, res: ptr UncheckedArray[byte], outputLen: int +) {.raises: [ValueError].} = + # make it an even length + let + inputLen = hex.len and not 0x01 + numHex = inputLen div 2 + maxLen = min(outputLen, numHex) + + var + offI = hex.len - maxLen * 2 + offO = outputLen - maxLen + + for i in 0 ..< maxLen: + res[i + offO] = + hex[2 * i + offI].readHexChar shl 4 or hex[2 * i + 1 + offI].readHexChar + + # write single nibble from odd length hex + if (offO > 0) and (offI > 0): + res[offO - 1] = hex[offI - 1].readHexChar + +proc getValue*(key: string, outVal: var string) {.raises: [ValueError].} = + let hex = os.getEnv(key) + let size = (hex.len div 2) + (hex.len and 0x01) + outVal.setLen(size) + decodePaddedHex(hex, cast[ptr UncheckedArray[byte]](outVal[0].addr), size) + +proc getValue*[T: SomePrimitives]( + key: string, outVal: var seq[T] +) {.raises: [ValueError].} = + let hex = os.getEnv(key) + let byteSize = (hex.len div 2) + (hex.len and 0x01) + let size = (byteSize + sizeof(T) - 1) div sizeof(T) + outVal.setLen(size) + decodePaddedHex(hex, cast[ptr UncheckedArray[byte]](outVal[0].addr), size * sizeof(T)) + +proc getValue*[N, T: SomePrimitives](key: string, outVal: var array[N, T]) = + let hex = os.getEnv(key) + decodePaddedHex(hex, cast[ptr UncheckedArray[byte]](outVal[0].addr), sizeof(outVal)) + +proc getValue*(key: string, outVal: var SomePrimitives) {.raises: [ValueError].} = + let hex = os.getEnv(key) + decodePaddedHex(hex, cast[ptr UncheckedArray[byte]](outVal.addr), sizeof(outVal)) + +template uTypeIsPrimitives*[T](_: type seq[T]): bool = + when T is SomePrimitives: true else: false + +template uTypeIsPrimitives*[N, T](_: type array[N, T]): bool = + when T is SomePrimitives: true else: false + +template uTypeIsPrimitives*[T](_: type openArray[T]): bool = + when T is SomePrimitives: true else: false + +template uTypeIsRecord*(_: typed): bool = + false + +template 
uTypeIsRecord*[T](_: type seq[T]): bool = + when T is (object or tuple): true else: false + +template uTypeIsRecord*[N, T](_: type array[N, T]): bool = + when T is (object or tuple): true else: false + +func constructKey*(prefix: string, keys: openArray[string]): string = + var newKey: string + + let envvarPrefix = prefix.strip().toUpper().multiReplace(("-", "_"), (" ", "_")) + newKey.add(envvarPrefix) + + for k in keys: + newKey.add("_") + + let envvarKey = k.toUpper().multiReplace(("-", "_"), (" ", "_")) + newKey.add(envvarKey) + + newKey diff --git a/third-party/nwaku/tools/confutils/envvar_serialization/writer.nim b/third-party/nwaku/tools/confutils/envvar_serialization/writer.nim new file mode 100644 index 0000000..c497b54 --- /dev/null +++ b/third-party/nwaku/tools/confutils/envvar_serialization/writer.nim @@ -0,0 +1,44 @@ +import typetraits, options, tables, os, serialization, ./utils + +type EnvvarWriter* = object + prefix: string + key: seq[string] + +proc init*(T: type EnvvarWriter, prefix: string): T = + result.prefix = prefix + +proc writeValue*(w: var EnvvarWriter, value: auto) = + mixin enumInstanceSerializedFields, writeValue, writeFieldIMPL + # TODO: reduce allocation + + when value is string: + let key = constructKey(w.prefix, w.key) + os.putEnv(key, value) + elif value is (SomePrimitives or range): + let key = constructKey(w.prefix, w.key) + setValue(key, value) + elif value is Option: + if value.isSome: + w.writeValue value.get + elif value is (seq or array or openArray): + when uTypeIsPrimitives(type value): + let key = constructKey(w.prefix, w.key) + setValue(key, value) + elif uTypeIsRecord(type value): + let key = w.key[^1] + for i in 0 ..< value.len: + w.key[^1] = key & $i + w.writeValue(value[i]) + else: + const typeName = typetraits.name(value.type) + {.fatal: "Failed to convert to Envvar array an unsupported type: " & typeName.} + elif value is (object or tuple): + type RecordType = type value + w.key.add "" + 
value.enumInstanceSerializedFields(fieldName, field): + w.key[^1] = fieldName + w.writeFieldIMPL(FieldTag[RecordType, fieldName], field, value) + discard w.key.pop() + else: + const typeName = typetraits.name(value.type) + {.fatal: "Failed to convert to Envvar an unsupported type: " & typeName.} diff --git a/third-party/nwaku/tools/rln_keystore_generator/README.md b/third-party/nwaku/tools/rln_keystore_generator/README.md new file mode 100644 index 0000000..7e909c6 --- /dev/null +++ b/third-party/nwaku/tools/rln_keystore_generator/README.md @@ -0,0 +1,3 @@ +# rln_keystore_generator + +Documentation on running the `rln-keystore-generator` can be found [here](../../docs/tutorial/rln-keystore-generator.md) \ No newline at end of file diff --git a/third-party/nwaku/tools/rln_keystore_generator/rln_keystore_generator.nim b/third-party/nwaku/tools/rln_keystore_generator/rln_keystore_generator.nim new file mode 100644 index 0000000..a6181a1 --- /dev/null +++ b/third-party/nwaku/tools/rln_keystore_generator/rln_keystore_generator.nim @@ -0,0 +1,124 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import chronicles, results, std/[tempfiles, sequtils] + +import + waku/[ + waku_keystore, + waku_rln_relay/rln, + waku_rln_relay/conversion_utils, + waku_rln_relay/group_manager/on_chain, + ] + +logScope: + topics = "rln_keystore_generator" + +type RlnKeystoreGeneratorConf* = object + execute*: bool + ethContractAddress*: string + ethClientUrls*: seq[string] + chainId*: UInt256 + credPath*: string + credPassword*: string + userMessageLimit*: uint64 + ethPrivateKey*: string + +proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) = + # 1. load configuration + trace "configuration", conf = $conf + + # 2. 
initialize rlnInstance + let rlnInstanceRes = createRLNInstance(d = 20) + if rlnInstanceRes.isErr(): + error "failure while creating RLN instance", error = rlnInstanceRes.error + quit(1) + + let rlnInstance = rlnInstanceRes.get() + + # 3. generate credentials + let credentialRes = rlnInstance.membershipKeyGen() + if credentialRes.isErr(): + error "failure while generating credentials", error = credentialRes.error + quit(1) + + let credential = credentialRes.get() + debug "credentials", + idTrapdoor = credential.idTrapdoor.inHex(), + idNullifier = credential.idNullifier.inHex(), + idSecretHash = credential.idSecretHash.inHex(), + idCommitment = credential.idCommitment.inHex() + + if not conf.execute: + info "not executing, exiting" + quit(0) + + var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} = + ## Action to be taken when an internal error occurs during the node run. + ## e.g. the connection with the database is lost and not recovered. + error "Unrecoverable error occurred", error = msg + quit(QuitFailure) + + # 4. initialize OnchainGroupManager + let groupManager = OnchainGroupManager( + ethClientUrls: conf.ethClientUrls, + chainId: conf.chainId, + ethContractAddress: conf.ethContractAddress, + rlnInstance: rlnInstance, + keystorePath: none(string), + keystorePassword: none(string), + ethPrivateKey: some(conf.ethPrivateKey), + onFatalErrorAction: onFatalErrorAction, + ) + try: + (waitFor groupManager.init()).isOkOr: + error "failure while initializing OnchainGroupManager", error = $error + quit(1) + # handling the exception is required since waitFor raises an exception + except Exception, CatchableError: + error "failure while initializing OnchainGroupManager", + error = getCurrentExceptionMsg() + quit(1) + + # 5. 
register on-chain + try: + waitFor groupManager.register(credential, conf.userMessageLimit) + except Exception, CatchableError: + error "failure while registering credentials on-chain", + error = getCurrentExceptionMsg() + quit(1) + + debug "Transaction hash", txHash = groupManager.registrationTxHash.get() + + info "Your membership has been registered on-chain.", + chainId = $groupManager.chainId, + contractAddress = conf.ethContractAddress, + membershipIndex = groupManager.membershipIndex.get() + info "Your user message limit is", userMessageLimit = conf.userMessageLimit + + # 6. write to keystore + let keystoreCred = KeystoreMembership( + membershipContract: MembershipContract( + chainId: $groupManager.chainId, address: conf.ethContractAddress + ), + treeIndex: groupManager.membershipIndex.get(), + identityCredential: credential, + userMessageLimit: conf.userMessageLimit, + ) + + let persistRes = + addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo) + if persistRes.isErr(): + error "failed to persist credentials", error = persistRes.error + quit(1) + + info "credentials persisted", path = conf.credPath + + try: + waitFor groupManager.stop() + except CatchableError: + error "failure while stopping OnchainGroupManager", error = getCurrentExceptionMsg() + quit(0) # 0 because we already registered on-chain + quit(0) diff --git a/third-party/nwaku/vendor/db_connector b/third-party/nwaku/vendor/db_connector new file mode 160000 index 0000000..74aef39 --- /dev/null +++ b/third-party/nwaku/vendor/db_connector @@ -0,0 +1 @@ +Subproject commit 74aef399e5c232f95c9fc5c987cebac846f09d62 diff --git a/third-party/nwaku/vendor/dnsclient.nim b/third-party/nwaku/vendor/dnsclient.nim new file mode 160000 index 0000000..2321423 --- /dev/null +++ b/third-party/nwaku/vendor/dnsclient.nim @@ -0,0 +1 @@ +Subproject commit 23214235d4784d24aceed99bbfe153379ea557c8 diff --git a/third-party/nwaku/vendor/mix b/third-party/nwaku/vendor/mix new file mode 160000 
index 0000000..5e95337 --- /dev/null +++ b/third-party/nwaku/vendor/mix @@ -0,0 +1 @@ +Subproject commit 5e95337693ad0787baec1ee25293f454c2d105ca diff --git a/third-party/nwaku/vendor/nim-bearssl b/third-party/nwaku/vendor/nim-bearssl new file mode 160000 index 0000000..11e798b --- /dev/null +++ b/third-party/nwaku/vendor/nim-bearssl @@ -0,0 +1 @@ +Subproject commit 11e798b62b8e6beabe958e048e9e24c7e0f9ee63 diff --git a/third-party/nwaku/vendor/nim-chronicles b/third-party/nwaku/vendor/nim-chronicles new file mode 160000 index 0000000..54f5b72 --- /dev/null +++ b/third-party/nwaku/vendor/nim-chronicles @@ -0,0 +1 @@ +Subproject commit 54f5b726025e8c7385e3a6529d3aa27454c6e6ff diff --git a/third-party/nwaku/vendor/nim-chronos b/third-party/nwaku/vendor/nim-chronos new file mode 160000 index 0000000..0646c44 --- /dev/null +++ b/third-party/nwaku/vendor/nim-chronos @@ -0,0 +1 @@ +Subproject commit 0646c444fce7c7ed08ef6f2c9a7abfd172ffe655 diff --git a/third-party/nwaku/vendor/nim-confutils b/third-party/nwaku/vendor/nim-confutils new file mode 160000 index 0000000..e214b39 --- /dev/null +++ b/third-party/nwaku/vendor/nim-confutils @@ -0,0 +1 @@ +Subproject commit e214b3992a31acece6a9aada7d0a1ad37c928f3b diff --git a/third-party/nwaku/vendor/nim-dnsdisc b/third-party/nwaku/vendor/nim-dnsdisc new file mode 160000 index 0000000..b71d029 --- /dev/null +++ b/third-party/nwaku/vendor/nim-dnsdisc @@ -0,0 +1 @@ +Subproject commit b71d029f4da4ec56974d54c04518bada00e1b623 diff --git a/third-party/nwaku/vendor/nim-eth b/third-party/nwaku/vendor/nim-eth new file mode 160000 index 0000000..d9135e6 --- /dev/null +++ b/third-party/nwaku/vendor/nim-eth @@ -0,0 +1 @@ +Subproject commit d9135e6c3c5d6d819afdfb566aa8d958756b73a8 diff --git a/third-party/nwaku/vendor/nim-faststreams b/third-party/nwaku/vendor/nim-faststreams new file mode 160000 index 0000000..c3ac3f6 --- /dev/null +++ b/third-party/nwaku/vendor/nim-faststreams @@ -0,0 +1 @@ +Subproject commit 
c3ac3f639ed1d62f59d3077d376a29c63ac9750c diff --git a/third-party/nwaku/vendor/nim-http-utils b/third-party/nwaku/vendor/nim-http-utils new file mode 160000 index 0000000..79cbab1 --- /dev/null +++ b/third-party/nwaku/vendor/nim-http-utils @@ -0,0 +1 @@ +Subproject commit 79cbab1460f4c0cdde2084589d017c43a3d7b4f1 diff --git a/third-party/nwaku/vendor/nim-json-rpc b/third-party/nwaku/vendor/nim-json-rpc new file mode 160000 index 0000000..9665c26 --- /dev/null +++ b/third-party/nwaku/vendor/nim-json-rpc @@ -0,0 +1 @@ +Subproject commit 9665c265035f49f5ff94bbffdeadde68e19d6221 diff --git a/third-party/nwaku/vendor/nim-json-serialization b/third-party/nwaku/vendor/nim-json-serialization new file mode 160000 index 0000000..0640259 --- /dev/null +++ b/third-party/nwaku/vendor/nim-json-serialization @@ -0,0 +1 @@ +Subproject commit 0640259af2fad330ea28e77359c0d0cefac5a361 diff --git a/third-party/nwaku/vendor/nim-libbacktrace b/third-party/nwaku/vendor/nim-libbacktrace new file mode 160000 index 0000000..d8bd4ce --- /dev/null +++ b/third-party/nwaku/vendor/nim-libbacktrace @@ -0,0 +1 @@ +Subproject commit d8bd4ce5c46bb6d2f984f6b3f3d7380897d95ecb diff --git a/third-party/nwaku/vendor/nim-libp2p b/third-party/nwaku/vendor/nim-libp2p new file mode 160000 index 0000000..59e7069 --- /dev/null +++ b/third-party/nwaku/vendor/nim-libp2p @@ -0,0 +1 @@ +Subproject commit 59e7069c15e914618b7f7b2206c47d16c5d10a34 diff --git a/third-party/nwaku/vendor/nim-metrics b/third-party/nwaku/vendor/nim-metrics new file mode 160000 index 0000000..ecf64c6 --- /dev/null +++ b/third-party/nwaku/vendor/nim-metrics @@ -0,0 +1 @@ +Subproject commit ecf64c6078d1276d3b7d9b3d931fbdb70004db11 diff --git a/third-party/nwaku/vendor/nim-minilru b/third-party/nwaku/vendor/nim-minilru new file mode 160000 index 0000000..0c4b2bc --- /dev/null +++ b/third-party/nwaku/vendor/nim-minilru @@ -0,0 +1 @@ +Subproject commit 0c4b2bce959591f0a862e9b541ba43c6d0cf3476 diff --git 
a/third-party/nwaku/vendor/nim-nat-traversal b/third-party/nwaku/vendor/nim-nat-traversal new file mode 160000 index 0000000..860e18c --- /dev/null +++ b/third-party/nwaku/vendor/nim-nat-traversal @@ -0,0 +1 @@ +Subproject commit 860e18c37667b5dd005b94c63264560c35d88004 diff --git a/third-party/nwaku/vendor/nim-presto b/third-party/nwaku/vendor/nim-presto new file mode 160000 index 0000000..92b1c7f --- /dev/null +++ b/third-party/nwaku/vendor/nim-presto @@ -0,0 +1 @@ +Subproject commit 92b1c7ff141e6920e1f8a98a14c35c1fa098e3be diff --git a/third-party/nwaku/vendor/nim-regex b/third-party/nwaku/vendor/nim-regex new file mode 160000 index 0000000..4593305 --- /dev/null +++ b/third-party/nwaku/vendor/nim-regex @@ -0,0 +1 @@ +Subproject commit 4593305ed1e49731fc75af1dc572dd2559aad19c diff --git a/third-party/nwaku/vendor/nim-results b/third-party/nwaku/vendor/nim-results new file mode 160000 index 0000000..df8113d --- /dev/null +++ b/third-party/nwaku/vendor/nim-results @@ -0,0 +1 @@ +Subproject commit df8113dda4c2d74d460a8fa98252b0b771bf1f27 diff --git a/third-party/nwaku/vendor/nim-secp256k1 b/third-party/nwaku/vendor/nim-secp256k1 new file mode 160000 index 0000000..9dd3df6 --- /dev/null +++ b/third-party/nwaku/vendor/nim-secp256k1 @@ -0,0 +1 @@ +Subproject commit 9dd3df62124aae79d564da636bb22627c53c7676 diff --git a/third-party/nwaku/vendor/nim-serialization b/third-party/nwaku/vendor/nim-serialization new file mode 160000 index 0000000..6f525d5 --- /dev/null +++ b/third-party/nwaku/vendor/nim-serialization @@ -0,0 +1 @@ +Subproject commit 6f525d5447d97256750ca7856faead03e562ed20 diff --git a/third-party/nwaku/vendor/nim-sqlite3-abi b/third-party/nwaku/vendor/nim-sqlite3-abi new file mode 160000 index 0000000..bdf01cf --- /dev/null +++ b/third-party/nwaku/vendor/nim-sqlite3-abi @@ -0,0 +1 @@ +Subproject commit bdf01cf4236fb40788f0733466cdf6708783cbac diff --git a/third-party/nwaku/vendor/nim-stew b/third-party/nwaku/vendor/nim-stew new file mode 160000 index 
0000000..e574001 --- /dev/null +++ b/third-party/nwaku/vendor/nim-stew @@ -0,0 +1 @@ +Subproject commit e5740014961438610d336cd81706582dbf2c96f0 diff --git a/third-party/nwaku/vendor/nim-stint b/third-party/nwaku/vendor/nim-stint new file mode 160000 index 0000000..470b789 --- /dev/null +++ b/third-party/nwaku/vendor/nim-stint @@ -0,0 +1 @@ +Subproject commit 470b7892561b5179ab20bd389a69217d6213fe58 diff --git a/third-party/nwaku/vendor/nim-taskpools b/third-party/nwaku/vendor/nim-taskpools new file mode 160000 index 0000000..9e8ccc7 --- /dev/null +++ b/third-party/nwaku/vendor/nim-taskpools @@ -0,0 +1 @@ +Subproject commit 9e8ccc754631ac55ac2fd495e167e74e86293edb diff --git a/third-party/nwaku/vendor/nim-testutils b/third-party/nwaku/vendor/nim-testutils new file mode 160000 index 0000000..94d68e7 --- /dev/null +++ b/third-party/nwaku/vendor/nim-testutils @@ -0,0 +1 @@ +Subproject commit 94d68e796c045d5b37cabc6be32d7bfa168f8857 diff --git a/third-party/nwaku/vendor/nim-toml-serialization b/third-party/nwaku/vendor/nim-toml-serialization new file mode 160000 index 0000000..fea85b2 --- /dev/null +++ b/third-party/nwaku/vendor/nim-toml-serialization @@ -0,0 +1 @@ +Subproject commit fea85b27f0badcf617033ca1bc05444b5fd8aa7a diff --git a/third-party/nwaku/vendor/nim-unicodedb b/third-party/nwaku/vendor/nim-unicodedb new file mode 160000 index 0000000..66f2458 --- /dev/null +++ b/third-party/nwaku/vendor/nim-unicodedb @@ -0,0 +1 @@ +Subproject commit 66f2458710dc641dd4640368f9483c8a0ec70561 diff --git a/third-party/nwaku/vendor/nim-unittest2 b/third-party/nwaku/vendor/nim-unittest2 new file mode 160000 index 0000000..8b51e99 --- /dev/null +++ b/third-party/nwaku/vendor/nim-unittest2 @@ -0,0 +1 @@ +Subproject commit 8b51e99b4a57fcfb31689230e75595f024543024 diff --git a/third-party/nwaku/vendor/nim-web3 b/third-party/nwaku/vendor/nim-web3 new file mode 160000 index 0000000..81ee8ce --- /dev/null +++ b/third-party/nwaku/vendor/nim-web3 @@ -0,0 +1 @@ +Subproject commit 
81ee8ce479d86acb73be7c4f365328e238d9b4a3 diff --git a/third-party/nwaku/vendor/nim-websock b/third-party/nwaku/vendor/nim-websock new file mode 160000 index 0000000..ebe308a --- /dev/null +++ b/third-party/nwaku/vendor/nim-websock @@ -0,0 +1 @@ +Subproject commit ebe308a79a7b440a11dfbe74f352be86a3883508 diff --git a/third-party/nwaku/vendor/nim-zlib b/third-party/nwaku/vendor/nim-zlib new file mode 160000 index 0000000..daa8723 --- /dev/null +++ b/third-party/nwaku/vendor/nim-zlib @@ -0,0 +1 @@ +Subproject commit daa8723fd32299d4ca621c837430c29a5a11e19a diff --git a/third-party/nwaku/vendor/nimbus-build-system b/third-party/nwaku/vendor/nimbus-build-system new file mode 160000 index 0000000..e6c2c9d --- /dev/null +++ b/third-party/nwaku/vendor/nimbus-build-system @@ -0,0 +1 @@ +Subproject commit e6c2c9da39c2d368d9cf420ac22692e99715d22c diff --git a/third-party/nwaku/vendor/nimcrypto b/third-party/nwaku/vendor/nimcrypto new file mode 160000 index 0000000..721fb99 --- /dev/null +++ b/third-party/nwaku/vendor/nimcrypto @@ -0,0 +1 @@ +Subproject commit 721fb99ee099b632eb86dfad1f0d96ee87583774 diff --git a/third-party/nwaku/vendor/nph b/third-party/nwaku/vendor/nph new file mode 160000 index 0000000..c6e0316 --- /dev/null +++ b/third-party/nwaku/vendor/nph @@ -0,0 +1 @@ +Subproject commit c6e03162dc2820d3088660f644818d7040e95791 diff --git a/third-party/nwaku/vendor/waku-rlnv2-contract b/third-party/nwaku/vendor/waku-rlnv2-contract new file mode 160000 index 0000000..900d4f9 --- /dev/null +++ b/third-party/nwaku/vendor/waku-rlnv2-contract @@ -0,0 +1 @@ +Subproject commit 900d4f95e0e618bdeb4c241f7a4b6347df6bb950 diff --git a/third-party/nwaku/vendor/zerokit b/third-party/nwaku/vendor/zerokit new file mode 160000 index 0000000..ba467d3 --- /dev/null +++ b/third-party/nwaku/vendor/zerokit @@ -0,0 +1 @@ +Subproject commit ba467d370c56b7432522227de22fbd664d44ef3e diff --git a/third-party/nwaku/waku.nim b/third-party/nwaku/waku.nim new file mode 100644 index 0000000..18d5274 
--- /dev/null +++ b/third-party/nwaku/waku.nim @@ -0,0 +1,10 @@ +## Main module for using nwaku as a Nimble library +## +## This module re-exports the public API for creating and managing Waku nodes +## when using nwaku as a library dependency. + +import waku/api/[api, api_conf] +export api, api_conf + +import waku/factory/waku +export waku diff --git a/third-party/nwaku/waku.nimble b/third-party/nwaku/waku.nimble new file mode 100644 index 0000000..6de73ad --- /dev/null +++ b/third-party/nwaku/waku.nimble @@ -0,0 +1,221 @@ +#!fmt: off + +import os +mode = ScriptMode.Verbose + +### Package +version = "0.36.0" +author = "Status Research & Development GmbH" +description = "Waku, Private P2P Messaging for Resource-Restricted Devices" +license = "MIT or Apache License 2.0" +#bin = @["build/waku"] + +### Dependencies +requires "nim >= 2.2.4", + "chronicles", + "confutils", + "chronos", + "dnsdisc", + "eth", + "json_rpc", + "libbacktrace", + "nimcrypto", + "stew", + "stint", + "metrics", + "libp2p >= 1.13.0", + "web3", + "presto", + "regex", + "results", + "db_connector", + "minilru", + "quic", + "https://github.com/vacp2p/mix#0.1.0" + +### Helper functions +proc buildModule(filePath, params = "", lang = "c"): bool = + if not dirExists "build": + mkDir "build" + # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" + var extra_params = params + for i in 2 ..< paramCount() - 1: + extra_params &= " " & paramStr(i) + + if not fileExists(filePath): + echo "File to build not found: " & filePath + return false + + exec "nim " & lang & " --out:build/" & filepath & ".bin --mm:refc " & extra_params & + " " & filePath + + # exec will raise exception if anything goes wrong + return true + +proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") = + if not dirExists "build": + mkDir "build" + # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" + var extra_params = params + for i in 2 ..< paramCount(): + extra_params &= " " & 
paramStr(i) + exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " & + srcDir & name & ".nim" + +proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") = + if not dirExists "build": + mkDir "build" + # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" + var extra_params = params + for i in 2 ..< paramCount(): + extra_params &= " " & paramStr(i) + if `type` == "static": + exec "nim c" & " --out:build/" & name & + ".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " & + extra_params & " " & srcDir & name & ".nim" + else: + let lib_name = (when defined(windows): toDll(name) else: name & ".so") + when defined(windows): + exec "nim c" & " --out:build/" & lib_name & + " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " & + extra_params & " " & srcDir & name & ".nim" + else: + exec "nim c" & " --out:build/" & lib_name & + " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " & + extra_params & " " & srcDir & name & ".nim" + +proc buildMobileAndroid(srcDir = ".", params = "") = + let cpu = getEnv("CPU") + let abiDir = getEnv("ABIDIR") + + let outDir = "build/android/" & abiDir + if not dirExists outDir: + mkDir outDir + + var extra_params = params + for i in 2 ..< paramCount(): + extra_params &= " " & paramStr(i) + + exec "nim c" & " --out:" & outDir & + "/libwaku.so --threads:on --app:lib --opt:size --noMain --mm:refc -d:chronicles_sinks=textlines[dynamic] --header --passL:-L" & + outdir & " --passL:-lrln --passL:-llog --cpu:" & cpu & " --os:android -d:androidNDK " & + extra_params & " " & srcDir & "/libwaku.nim" + +proc test(name: string, params = "-d:chronicles_log_level=DEBUG", lang = "c") = + # XXX: When 
running `> NIM_PARAMS="-d:chronicles_log_level=INFO" make test2` + # I expect compiler flag to be overridden, however it stays with whatever is + # specified here. + buildBinary name, "tests/", params + exec "build/" & name + +### Waku common tasks +task testcommon, "Build & run common tests": + test "all_tests_common", "-d:chronicles_log_level=WARN -d:chronosStrictException" + +### Waku tasks +task wakunode2, "Build Waku v2 cli node": + let name = "wakunode2" + buildBinary name, "apps/wakunode2/", " -d:chronicles_log_level='TRACE' " + +task benchmarks, "Some benchmarks": + let name = "benchmarks" + buildBinary name, "apps/benchmarks/", "-p:../.." + +task wakucanary, "Build waku-canary tool": + let name = "wakucanary" + buildBinary name, "apps/wakucanary/" + +task networkmonitor, "Build network monitor tool": + let name = "networkmonitor" + buildBinary name, "apps/networkmonitor/" + +task rln_db_inspector, "Build the rln db inspector": + let name = "rln_db_inspector" + buildBinary name, "tools/rln_db_inspector/" + +task test, "Build & run Waku tests": + test "all_tests_waku" + +task testwakunode2, "Build & run wakunode2 app tests": + test "all_tests_wakunode2" + +task example2, "Build Waku examples": + buildBinary "waku_example", "examples/" + buildBinary "publisher", "examples/" + buildBinary "subscriber", "examples/" + buildBinary "filter_subscriber", "examples/" + buildBinary "lightpush_publisher", "examples/" + +task chat2, "Build example Waku chat usage": + # NOTE For debugging, set debug level. For chat usage we want minimal log + # output to STDOUT. Can be fixed by redirecting logs to file (e.g.) + #buildBinary name, "examples/", "-d:chronicles_log_level=WARN" + + let name = "chat2" + buildBinary name, + "apps/chat2/", + "-d:chronicles_sinks=textlines[file] -d:ssl -d:chronicles_log_level='TRACE' " + +task chat2mix, "Build example Waku chat mix usage": + # NOTE For debugging, set debug level. For chat usage we want minimal log + # output to STDOUT. 
Can be fixed by redirecting logs to file (e.g.) + #buildBinary name, "examples/", "-d:chronicles_log_level=WARN" + + let name = "chat2mix" + buildBinary name, + "apps/chat2mix/", + "-d:chronicles_sinks=textlines[file] -d:ssl -d:chronicles_log_level='TRACE' " + +task chat2bridge, "Build chat2bridge": + let name = "chat2bridge" + buildBinary name, "apps/chat2bridge/" + +task liteprotocoltester, "Build liteprotocoltester": + let name = "liteprotocoltester" + buildBinary name, "apps/liteprotocoltester/" + +task lightpushwithmix, "Build lightpushwithmix": + let name = "lightpush_publisher_mix" + buildBinary name, "examples/lightpush_mix/" + +task buildone, "Build custom target": + let filepath = paramStr(paramCount()) + discard buildModule filepath + +task buildTest, "Test custom target": + let filepath = paramStr(paramCount()) + discard buildModule(filepath) + +import std/strutils + +task execTest, "Run test": + # Expects to be parameterized with test case name in quotes + # preceded with the nim source file name and path + # If no test case name is given still it requires empty quotes `""` + let filepath = paramStr(paramCount() - 1) + var testSuite = paramStr(paramCount()).strip(chars = {'\"'}) + if testSuite != "": + testSuite = " \"" & testSuite & "\"" + exec "build/" & filepath & ".bin " & testSuite + +### C Bindings +let chroniclesParams = + "-d:chronicles_line_numbers " & "-d:chronicles_runtime_filtering=on " & + """-d:chronicles_sinks="textlines,json" """ & + "-d:chronicles_default_output_device=Dynamic " & + """-d:chronicles_disabled_topics="eth,dnsdisc.client" """ & "--warning:Deprecated:off " & + "--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE" + +task libwakuStatic, "Build the cbindings waku node library": + let name = "libwaku" + buildLibrary name, "library/", chroniclesParams, "static" + +task libwakuDynamic, "Build the cbindings waku node library": + let name = "libwaku" + buildLibrary name, "library/", chroniclesParams, "dynamic" + +### 
Mobile Android +task libWakuAndroid, "Build the mobile bindings for Android": + let srcDir = "./library" + let extraParams = "-d:chronicles_log_level=ERROR" + buildMobileAndroid srcDir, extraParams diff --git a/third-party/nwaku/waku/README.md b/third-party/nwaku/waku/README.md new file mode 100644 index 0000000..ed3887a --- /dev/null +++ b/third-party/nwaku/waku/README.md @@ -0,0 +1,237 @@ +# Waku + +This folder contains code related to Waku, both as a node and as a protocol. + +## Introduction + +This is an implementation in Nim of the Waku suite of protocols. + +See [specifications](https://rfc.vac.dev/waku/standards/core/10/waku2). + +## How to Build & Run + +### Prerequisites + +* GNU Make, Bash and the usual POSIX utilities. Git 2.9.4 or newer. + +### Wakunode binary + +```bash +# The first `make` invocation will update all Git submodules. +# You'll run `make update` after each `git pull`, in the future, to keep those submodules up to date. +make wakunode2 + +# See available command line options +./build/wakunode2 --help + +# Connect the client directly with the Status test fleet +# TODO NYI +#./build/wakunode2 --log-level:debug --discovery:off --fleet:test --log-metrics +``` + +Note: building `wakunode2` requires 2GB of RAM. The build will fail on systems not fulfilling this requirement. + +Setting up a `wakunode2` on the smallest [digital ocean](https://docs.digitalocean.com/products/droplets/how-to/) droplet, you can either + +* compile on a stronger droplet featuring the same CPU architecture and downgrade after compiling, or +* activate swap on the smallest droplet, or +* use Docker. + + +### Waku Protocol Test Suite + +```bash +# Run all the Waku tests +make test +``` + +To run a specific test. +```bash +# Get a shell with the right environment variables set +./env.sh bash +# Run a specific test +nim c -r ./tests/test_waku_filter_legacy.nim +``` + +You can also alter compile options. 
For example, if you want a less verbose output you can do the following. For more, refer to the [compiler flags](https://nim-lang.org/docs/nimc.html#compiler-usage) and [chronicles documentation](https://github.com/status-im/nim-chronicles#compile-time-configuration). + +```bash +nim c -r -d:chronicles_log_level=WARN --verbosity=0 --hints=off ./tests/waku_filter_v2/test_waku_filter.nim +``` + +You may also want to change the `outdir` to a folder ignored by git. +```bash +nim c -r -d:chronicles_log_level=WARN --verbosity=0 --hints=off --outdir=build ./tests/waku_filter_v2/test_waku_filter.nim +``` + +### Waku Protocol Example + +There are basic examples of both publishing and subscribing, +more limited in features and configuration than the `wakunode2` binary, +located in `examples/`. + +There is also a more full featured example in `apps/chat2/`. + +## Using Metrics + +Metrics are available for Waku nodes. + +```bash +make wakunode2 +./build/wakunode2 --metrics-server +``` + +Ensure your Prometheus config `prometheus.yml` contains the targets you care about, e.g.: + +``` +scrape_configs: + - job_name: "waku" + static_configs: + - targets: ['localhost:8008', 'localhost:8009', 'localhost:8010'] +``` + +For visualisation, similar steps can be used as is written down for Nimbus +[here](https://github.com/status-im/nimbus#metric-visualisation). + +There is a similar example dashboard that includes visualisation of the +envelopes available at `metrics/waku-grafana-dashboard.json`. + +## Spec support + +All Waku RFCs reside at rfc.vac.dev. +Note that Waku specs are titled `WAKU2-XXX` +to differentiate them from a previous legacy version of Waku with RFC titles in the format `WAKU-XXX`. +The legacy Waku protocols are stable, but not under active development. + +## Generating and configuring a private key + +By default a node will generate a new, random key pair each time it boots, +resulting in a different public libp2p `multiaddrs` after each restart. 
+ +To maintain consistent addressing across restarts, +it is possible to configure the node with a previously generated private key using the `--nodekey` option. + +```shell +wakunode2 --nodekey=<64_char_hex> +``` + +This option takes a [Secp256k1](https://en.bitcoin.it/wiki/Secp256k1) private key in 64 char hexstring format. + +To generate such a key on Linux systems, +use the openssl `rand` command to generate a pseudo-random 32 byte hexstring. + +```sh +openssl rand -hex 32 +``` + +Example output: + +```sh +$ openssl rand -hex 32 +6a29e767c96a2a380bb66b9a6ffcd6eb54049e14d796a1d866307b8beb7aee58 +``` + +where the key `6a29e767c96a2a380bb66b9a6ffcd6eb54049e14d796a1d866307b8beb7aee58` can be used as `nodekey`. + +To create a reusable keyfile on Linux using `openssl`, +use the `ecparam` command coupled with some standard utilities +whenever you want to extract the 32 byte private key in hex format. + +```sh +# Generate keyfile +openssl ecparam -genkey -name secp256k1 -out my_private_key.pem +# Extract 32 byte private key +openssl ec -in my_private_key.pem -outform DER | tail -c +8 | head -c 32| xxd -p -c 32 +``` + +Example output: + +```sh +read EC key +writing EC key +0c687bb8a7984c770b566eae08520c67f53d302f24b8d4e5e47cc479a1e1ce23 +``` + +where the key `0c687bb8a7984c770b566eae08520c67f53d302f24b8d4e5e47cc479a1e1ce23` can be used as `nodekey`. + +```sh +wakunode2 --nodekey=0c687bb8a7984c770b566eae08520c67f53d302f24b8d4e5e47cc479a1e1ce23 +``` + +## Configuring a domain name + +It is possible to configure an IPv4 DNS domain name that resolves to the node's public IPv4 address. + +```shell +wakunode2 --dns4-domain-name=mynode.example.com +``` + +This allows for the node's publicly announced `multiaddrs` to use the `/dns4` scheme. +In addition, nodes with domain name and [secure websocket configured](#enabling-websocket), +will generate a discoverable ENR containing the `/wss` multiaddr with `/dns4` domain name. 
+This is necessary to verify domain certificates when connecting to this node over secure websocket. + +## Using DNS discovery to connect to existing nodes + +A node can discover other nodes to connect to using [DNS-based discovery](../docs/tutorial/dns-disc.md). +The following command line options are available: + +``` +--dns-discovery Enable DNS Discovery +--dns-discovery-url URL for DNS node list in format 'enrtree://@' +--dns-addrs-name-server DNS name server IPs to query. Argument may be repeated. +``` + +- `--dns-discovery` is used to enable DNS discovery on the node. +Waku DNS discovery is disabled by default. +- `--dns-discovery-url` is mandatory if DNS discovery is enabled. +It contains the URL for the node list. +The URL must be in the format `enrtree://@` where `` is the fully qualified domain name and `` is the base32 encoding of the compressed 32-byte public key that signed the list at that location. + +A node will attempt connection to all discovered nodes. + +This can be used, for example, to connect to one of the existing fleets. +Current URLs for the published fleet lists: +- production fleet: `enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im` +- test fleet: `enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im` + +See the [separate tutorial](../docs/tutorial/dns-disc.md) for a complete guide to DNS discovery. + +## Enabling Websocket + +Websocket is currently the only Waku transport supported by browser nodes that uses [js-waku](https://github.com/status-im/js-waku). +Setting up websocket enables your node to directly serve browser peers. + +A valid certificate is necessary to serve browser nodes, +you can use [`letsencrypt`](https://letsencrypt.org/): + +```shell +sudo letsencrypt -d +``` + +You will need the `privkey.pem` and `fullchain.pem` files. + +To enable secure websocket, pass the generated files to `wakunode2`: +Note, the default port for websocket is 8000. 
+ +```shell +wakunode2 --websocket-secure-support=true --websocket-secure-key-path="/privkey.pem" --websocket-secure-cert-path="/fullchain.pem" +``` + +### Self-signed certificates + +Self-signed certificates are not recommended for production setups because: + +- Browsers do not accept self-signed certificates +- Browsers do not display an error when rejecting a certificate for websocket. + +However, they can be used for local testing purposes: + +```shell +mkdir -p ./ssl_dir/ +openssl req -x509 -newkey rsa:4096 -keyout ./ssl_dir/key.pem -out ./ssl_dir/cert.pem -sha256 -nodes +wakunode2 --websocket-secure-support=true --websocket-secure-key-path="./ssl_dir/key.pem" --websocket-secure-cert-path="./ssl_dir/cert.pem" +``` + + + diff --git a/third-party/nwaku/waku/api/api.nim b/third-party/nwaku/waku/api/api.nim new file mode 100644 index 0000000..5bab061 --- /dev/null +++ b/third-party/nwaku/waku/api/api.nim @@ -0,0 +1,17 @@ +import chronicles, chronos, results + +import waku/factory/waku + +import ./api_conf + +# TODO: Specs says it should return a `WakuNode`. As `send` and other APIs are defined, we can align. 
+proc createNode*(config: NodeConfig): Future[Result[Waku, string]] {.async.} = + let wakuConf = toWakuConf(config).valueOr: + return err("Failed to handle the configuration: " & error) + + ## We are not defining app callbacks at node creation + let wakuRes = (await Waku.new(wakuConf)).valueOr: + error "waku initialization failed", error = error + return err("Failed setting up Waku: " & $error) + + return ok(wakuRes) diff --git a/third-party/nwaku/waku/api/api_conf.nim b/third-party/nwaku/waku/api/api_conf.nim new file mode 100644 index 0000000..360c397 --- /dev/null +++ b/third-party/nwaku/waku/api/api_conf.nim @@ -0,0 +1,203 @@ +import std/[net, options] + +import results + +import + waku/common/utils/parse_size_units, + waku/factory/waku_conf, + waku/factory/conf_builder/conf_builder, + waku/factory/networks_config, + ./entry_nodes + +type AutoShardingConfig* {.requiresInit.} = object + numShardsInCluster*: uint16 + +type RlnConfig* {.requiresInit.} = object + contractAddress*: string + chainId*: uint + epochSizeSec*: uint64 + +type NetworkingConfig* {.requiresInit.} = object + listenIpv4*: string + p2pTcpPort*: uint16 + discv5UdpPort*: uint16 + +type MessageValidation* {.requiresInit.} = object + maxMessageSize*: string # Accepts formats like "150 KiB", "1500 B" + rlnConfig*: Option[RlnConfig] + +type WakuConfig* {.requiresInit.} = object + entryNodes: seq[string] + staticStoreNodes: seq[string] + clusterId: uint16 + autoShardingConfig: AutoShardingConfig + messageValidation: MessageValidation + +const DefaultNetworkingConfig* = + NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 60000, discv5UdpPort: 9000) + +const DefaultAutoShardingConfig* = AutoShardingConfig(numShardsInCluster: 1) + +const DefaultMessageValidation* = + MessageValidation(maxMessageSize: "150 KiB", rlnConfig: none(RlnConfig)) + +proc init*( + T: typedesc[WakuConfig], + entryNodes: seq[string], + staticStoreNodes: seq[string] = @[], + clusterId: uint16, + autoShardingConfig: 
AutoShardingConfig = DefaultAutoShardingConfig, + messageValidation: MessageValidation = DefaultMessageValidation, +): T = + return T( + entryNodes: entryNodes, + staticStoreNodes: staticStoreNodes, + clusterId: clusterId, + autoShardingConfig: autoShardingConfig, + messageValidation: messageValidation, + ) + +const TheWakuNetworkPreset* = WakuConfig( + entryNodes: + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" + ], + staticStoreNodes: @[], + clusterId: 1, + autoShardingConfig: AutoShardingConfig(numShardsInCluster: 8), + messageValidation: MessageValidation( + maxMessageSize: "150 KiB", + rlnConfig: some( + RlnConfig( + contractAddress: "0xB9cd878C90E49F797B4431fBF4fb333108CB90e6", + chainId: 59141, + epochSizeSec: 600, # 10 minutes + ) + ), + ), +) + +type WakuMode* {.pure.} = enum + Edge + Core + +type NodeConfig* {.requiresInit.} = object + mode: WakuMode + wakuConfig: WakuConfig + networkingConfig: NetworkingConfig + ethRpcEndpoints: seq[string] + +proc init*( + T: typedesc[NodeConfig], + mode: WakuMode = WakuMode.Core, + wakuConfig: WakuConfig = TheWakuNetworkPreset, + networkingConfig: NetworkingConfig = DefaultNetworkingConfig, + ethRpcEndpoints: seq[string] = @[], +): T = + return T( + mode: mode, + wakuConfig: wakuConfig, + networkingConfig: networkingConfig, + ethRpcEndpoints: ethRpcEndpoints, + ) + +proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] = + var b = WakuConfBuilder.init() + + # Apply networking configuration + let networkingConfig = nodeConfig.networkingConfig + let ip = parseIpAddress(networkingConfig.listenIpv4) + + b.withP2pListenAddress(ip) + b.withP2pTcpPort(networkingConfig.p2pTcpPort) + b.discv5Conf.withUdpPort(networkingConfig.discv5UdpPort) + + case nodeConfig.mode + of Core: + b.withRelay(true) + + # Metadata is always mounted + + b.filterServiceConf.withEnabled(true) + b.filterServiceConf.withMaxPeersToServe(20) + + b.withLightPush(true) + + 
b.discv5Conf.withEnabled(true) + b.withPeerExchange(true) + b.withRendezvous(true) + + # TODO: fix store as client usage + + b.rateLimitConf.withRateLimits(@["filter:100/1s", "lightpush:5/1s", "px:5/1s"]) + of Edge: + return err("Edge mode is not implemented") + + ## Network Conf + let wakuConfig = nodeConfig.wakuConfig + + # Set cluster ID + b.withClusterId(wakuConfig.clusterId) + + # Set sharding configuration + b.withShardingConf(ShardingConfKind.AutoSharding) + let autoShardingConfig = wakuConfig.autoShardingConfig + b.withNumShardsInCluster(autoShardingConfig.numShardsInCluster) + + # Process entry nodes - supports enrtree:, enr:, and multiaddress formats + if wakuConfig.entryNodes.len > 0: + let (enrTreeUrls, bootstrapEnrs, staticNodesFromEntry) = processEntryNodes( + wakuConfig.entryNodes + ).valueOr: + return err("Failed to process entry nodes: " & error) + + # Set ENRTree URLs for DNS discovery + if enrTreeUrls.len > 0: + for url in enrTreeUrls: + b.dnsDiscoveryConf.withEnrTreeUrl(url) + b.dnsDiscoveryconf.withNameServers( + @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")] + ) + + # Set ENR records as bootstrap nodes for discv5 + if bootstrapEnrs.len > 0: + b.discv5Conf.withBootstrapNodes(bootstrapEnrs) + + # Add static nodes (multiaddrs and those extracted from ENR entries) + if staticNodesFromEntry.len > 0: + b.withStaticNodes(staticNodesFromEntry) + + # TODO: verify behaviour + # Set static store nodes + if wakuConfig.staticStoreNodes.len > 0: + b.withStaticNodes(wakuConfig.staticStoreNodes) + + # Set message validation + let msgValidation = wakuConfig.messageValidation + let maxSizeBytes = parseMsgSize(msgValidation.maxMessageSize).valueOr: + return err("Failed to parse max message size: " & error) + b.withMaxMessageSize(maxSizeBytes) + + # Set RLN config if provided + if msgValidation.rlnConfig.isSome(): + let rlnConfig = msgValidation.rlnConfig.get() + b.rlnRelayConf.withEnabled(true) + 
b.rlnRelayConf.withEthContractAddress(rlnConfig.contractAddress) + b.rlnRelayConf.withChainId(rlnConfig.chainId) + b.rlnRelayConf.withEpochSizeSec(rlnConfig.epochSizeSec) + b.rlnRelayConf.withDynamic(true) + b.rlnRelayConf.withEthClientUrls(nodeConfig.ethRpcEndpoints) + + # TODO: we should get rid of those two + b.rlnRelayconf.withUserMessageLimit(100) + + ## Various configurations + b.withNatStrategy("any") + + let wakuConf = b.build().valueOr: + return err("Failed to build configuration: " & error) + + wakuConf.validate().isOkOr: + return err("Failed to validate configuration: " & error) + + return ok(wakuConf) diff --git a/third-party/nwaku/waku/api/entry_nodes.nim b/third-party/nwaku/waku/api/entry_nodes.nim new file mode 100644 index 0000000..2dad853 --- /dev/null +++ b/third-party/nwaku/waku/api/entry_nodes.nim @@ -0,0 +1,77 @@ +import std/strutils + +import results, eth/p2p/discoveryv5/enr + +import waku/waku_core/peers + +type EntryNodeType {.pure.} = enum + EnrTree + Enr + Multiaddr + +proc classifyEntryNode(address: string): Result[EntryNodeType, string] = + ## Classifies an entry node address by its type + ## Returns the type as EntryNodeType enum + if address.len == 0: + return err("Empty entry node address") + + let lowerAddress = address.toLowerAscii() + if lowerAddress.startsWith("enrtree:"): + return ok(EnrTree) + elif lowerAddress.startsWith("enr:"): + return ok(Enr) + elif address[0] == '/': + return ok(Multiaddr) + else: + return + err("Unrecognized entry node format. 
Must start with 'enrtree:', 'enr:', or '/'") + +proc parseEnrToMultiaddrs(enrStr: string): Result[seq[string], string] = + ## Parses an ENR string and extracts multiaddresses from it + let enrRec = enr.Record.fromURI(enrStr).valueOr: + return err("Invalid ENR record") + + let remotePeerInfo = toRemotePeerInfo(enrRec).valueOr: + return err("Failed to convert ENR to peer info: " & $error) + + # Convert RemotePeerInfo addresses to multiaddr strings + var multiaddrs: seq[string] + for addr in remotePeerInfo.addrs: + multiaddrs.add($addr & "/p2p/" & $remotePeerInfo.peerId) + + if multiaddrs.len == 0: + return err("No valid addresses found in ENR") + + return ok(multiaddrs) + +proc processEntryNodes*( + entryNodes: seq[string] +): Result[(seq[string], seq[string], seq[string]), string] = + ## Processes entry nodes and returns (enrTreeUrls, bootstrapEnrs, staticNodes) + ## ENRTree URLs for DNS discovery, ENR records for bootstrap, multiaddrs for static nodes + var enrTreeUrls: seq[string] + var bootstrapEnrs: seq[string] + var staticNodes: seq[string] + + for node in entryNodes: + let nodeType = classifyEntryNode(node).valueOr: + return err("Entry node error: " & error) + + case nodeType + of EnrTree: + # ENRTree URLs go to DNS discovery configuration + enrTreeUrls.add(node) + of Enr: + # ENR records go to bootstrap nodes for discv5 + bootstrapEnrs.add(node) + # Additionally, extract multiaddrs for static connections + let multiaddrsRes = parseEnrToMultiaddrs(node) + if multiaddrsRes.isOk(): + for maddr in multiaddrsRes.get(): + staticNodes.add(maddr) + # If we can't extract multiaddrs, just use it as bootstrap (already added above) + of Multiaddr: + # Multiaddresses go to static nodes + staticNodes.add(node) + + return ok((enrTreeUrls, bootstrapEnrs, staticNodes)) diff --git a/third-party/nwaku/waku/common/base64.nim b/third-party/nwaku/waku/common/base64.nim new file mode 100644 index 0000000..3160fa4 --- /dev/null +++ b/third-party/nwaku/waku/common/base64.nim @@ -0,0 
+1,36 @@ +{.push raises: [].} + +import stew/[byteutils, base64], results + +type Base64String* = distinct string + +proc encode*[T: byte | char](value: openArray[T]): Base64String = + Base64String(encode(Base64Pad, value)) + +proc encode*(value: string): Base64String = + encode(toBytes(value)) + +proc decode[T: byte | char]( + btype: typedesc[Base64Types], instr: openArray[T] +): Result[seq[byte], string] = + ## Decode BASE64 string ``instr`` and return sequence of bytes as result. + if len(instr) == 0: + return ok(newSeq[byte]()) + + var bufferLen = decodedLength(btype, len(instr)) + var buffer = newSeq[byte](bufferLen) + + if decode(btype, instr, buffer, bufferLen) != Base64Status.Success: + return err("Incorrect base64 string") + + buffer.setLen(bufferLen) + ok(buffer) + +proc decode*(t: Base64String): Result[seq[byte], string] = + decode(Base64Pad, string(t)) + +proc `$`*(t: Base64String): string {.inline.} = + string(t) + +proc `==`*(lhs: Base64String | string, rhs: Base64String | string): bool {.inline.} = + string(lhs) == string(rhs) diff --git a/third-party/nwaku/waku/common/callbacks.nim b/third-party/nwaku/waku/common/callbacks.nim new file mode 100644 index 0000000..9b85901 --- /dev/null +++ b/third-party/nwaku/waku/common/callbacks.nim @@ -0,0 +1,5 @@ +import ../waku_enr/capabilities + +type GetShards* = proc(): seq[uint16] {.closure, gcsafe, raises: [].} + +type GetCapabilities* = proc(): seq[Capabilities] {.closure, gcsafe, raises: [].} diff --git a/third-party/nwaku/waku/common/databases/common.nim b/third-party/nwaku/waku/common/databases/common.nim new file mode 100644 index 0000000..c718180 --- /dev/null +++ b/third-party/nwaku/waku/common/databases/common.nim @@ -0,0 +1,3 @@ +import results + +type DatabaseResult*[T] = Result[T, string] diff --git a/third-party/nwaku/waku/common/databases/db_postgres.nim b/third-party/nwaku/waku/common/databases/db_postgres.nim new file mode 100644 index 0000000..5ae0e50 --- /dev/null +++ 
b/third-party/nwaku/waku/common/databases/db_postgres.nim @@ -0,0 +1,3 @@ +import ./common, ./db_postgres/pgasyncpool + +export common, pgasyncpool diff --git a/third-party/nwaku/waku/common/databases/db_postgres/dbconn.nim b/third-party/nwaku/waku/common/databases/db_postgres/dbconn.nim new file mode 100644 index 0000000..66cc88e --- /dev/null +++ b/third-party/nwaku/waku/common/databases/db_postgres/dbconn.nim @@ -0,0 +1,331 @@ +import + std/[times, strutils, os, sets, strformat, tables], + results, + chronos, + chronos/threadsync, + metrics, + chronicles +import ./query_metrics + +include db_connector/db_postgres + +type DataProc* = proc(result: ptr PGresult) {.closure, gcsafe, raises: [].} + +type DbConnWrapper* = ref object + dbConn: DbConn + open: bool + preparedStmts: HashSet[string] ## [stmtName's] + futBecomeFree*: Future[void] + ## to notify the pgasyncpool that this conn is free, i.e. not busy + +## Connection management + +proc containsPreparedStmt*(dbConnWrapper: DbConnWrapper, preparedStmt: string): bool = + return dbConnWrapper.preparedStmts.contains(preparedStmt) + +proc inclPreparedStmt*(dbConnWrapper: DbConnWrapper, preparedStmt: string) = + dbConnWrapper.preparedStmts.incl(preparedStmt) + +proc getDbConn*(dbConnWrapper: DbConnWrapper): DbConn = + return dbConnWrapper.dbConn + +proc isPgDbConnBusy*(dbConnWrapper: DbConnWrapper): bool = + if isNil(dbConnWrapper.futBecomeFree): + return false + return not dbConnWrapper.futBecomeFree.finished() + +proc isPgDbConnOpen*(dbConnWrapper: DbConnWrapper): bool = + return dbConnWrapper.open + +proc setPgDbConnOpen*(dbConnWrapper: DbConnWrapper, newOpenState: bool) = + dbConnWrapper.open = newOpenState + +proc check(db: DbConn): Result[void, string] = + var message: string + try: + message = $db.pqErrorMessage() + except ValueError, DbError: + return err("exception in check: " & getCurrentExceptionMsg()) + + if message.len > 0: + let truncatedErr = message[0 .. 
80] + ## libpq sometimes gives extremely long error messages + return err(truncatedErr) + + return ok() + +proc openDbConn(connString: string): Result[DbConn, string] = + ## Opens a new connection. + var conn: DbConn = nil + try: + conn = open("", "", "", connString) ## included from db_postgres module + except DbError: + return err("exception opening new connection: " & getCurrentExceptionMsg()) + + if conn.status != CONNECTION_OK: + let checkRes = conn.check() + if checkRes.isErr(): + return err("failed to connect to database: " & checkRes.error) + + return err("unknown reason") + + ## registering the socket fd in chronos for better wait for data + let asyncFd = cast[asyncengine.AsyncFD](pqsocket(conn)) + asyncengine.register(asyncFd) + + return ok(conn) + +proc new*(T: type DbConnWrapper, connString: string): Result[T, string] = + let dbConn = openDbConn(connString).valueOr: + return err("failed to establish a new connection: " & $error) + + return ok(DbConnWrapper(dbConn: dbConn, open: true)) + +proc closeDbConn*( + dbConnWrapper: DbConnWrapper +): Result[void, string] {.raises: [OSError].} = + let fd = dbConnWrapper.dbConn.pqsocket() + if fd == -1: + return err("error file descriptor -1 in closeDbConn") + + asyncengine.unregister(cast[asyncengine.AsyncFD](fd)) + + dbConnWrapper.dbConn.close() + + return ok() + +proc `$`(self: SqlQuery): string = + return cast[string](self) + +proc sendQuery( + dbConnWrapper: DbConnWrapper, query: SqlQuery, args: seq[string] +): Future[Result[void, string]] {.async.} = + ## This proc can be used directly for queries that don't retrieve values back. 
+ + if dbConnWrapper.dbConn.status != CONNECTION_OK: + dbConnWrapper.dbConn.check().isOkOr: + return err("failed to connect to database: " & $error) + + return err("unknown reason") + + var wellFormedQuery = "" + try: + wellFormedQuery = dbFormat(query, args) + except DbError: + return err("exception formatting the query: " & getCurrentExceptionMsg()) + + let success = dbConnWrapper.dbConn.pqsendQuery(cstring(wellFormedQuery)) + if success != 1: + dbConnWrapper.dbConn.check().isOkOr: + return err("failed pqsendQuery: " & $error) + return err("failed pqsendQuery: unknown reason") + + return ok() + +proc sendQueryPrepared( + dbConnWrapper: DbConnWrapper, + stmtName: string, + paramValues: openArray[string], + paramLengths: openArray[int32], + paramFormats: openArray[int32], +): Result[void, string] {.raises: [].} = + ## This proc can be used directly for queries that don't retrieve values back. + + if paramValues.len != paramLengths.len or paramValues.len != paramFormats.len or + paramLengths.len != paramFormats.len: + let lengthsErrMsg = + $paramValues.len & " " & $paramLengths.len & " " & $paramFormats.len + return err("lengths discrepancies in sendQueryPrepared: " & $lengthsErrMsg) + + if dbConnWrapper.dbConn.status != CONNECTION_OK: + dbConnWrapper.dbConn.check().isOkOr: + return err("failed to connect to database: " & $error) + + return err("unknown reason") + + var cstrArrayParams = allocCStringArray(paramValues) + defer: + deallocCStringArray(cstrArrayParams) + + let nParams = cast[int32](paramValues.len) + + const ResultFormat = 0 ## 0 for text format, 1 for binary format. 
+ + let success = dbConnWrapper.dbConn.pqsendQueryPrepared( + stmtName, + nParams, + cstrArrayParams, + unsafeAddr paramLengths[0], + unsafeAddr paramFormats[0], + ResultFormat, + ) + if success != 1: + dbConnWrapper.dbConn.check().isOkOr: + return err("failed pqsendQueryPrepared: " & $error) + + return err("failed pqsendQueryPrepared: unknown reason") + + return ok() + +proc waitQueryToFinish( + dbConnWrapper: DbConnWrapper, rowCallback: DataProc = nil +): Future[Result[void, string]] {.async.} = + ## The 'rowCallback' param is != nil when the underlying query wants to retrieve results (SELECT.) + ## For other queries, like "INSERT", 'rowCallback' should be nil. + + let futDataAvailable = newFuture[void]("futDataAvailable") + + proc onDataAvailable(udata: pointer) {.gcsafe, raises: [].} = + if not futDataAvailable.completed(): + futDataAvailable.complete() + + let asyncFd = cast[asyncengine.AsyncFD](pqsocket(dbConnWrapper.dbConn)) + + when not defined(windows): + asyncengine.addReader2(asyncFd, onDataAvailable).isOkOr: + dbConnWrapper.futBecomeFree.fail(newException(ValueError, $error)) + return err("failed to add event reader in waitQueryToFinish: " & $error) + defer: + asyncengine.removeReader2(asyncFd).isOkOr: + return err("failed to remove event reader in waitQueryToFinish: " & $error) + else: + return err("Postgres not supported on Windows") + + await futDataAvailable + + ## Now retrieve the result from the database + while true: + let pqResult = dbConnWrapper.dbConn.pqgetResult() + + if pqResult == nil: + dbConnWrapper.dbConn.check().isOkOr: + if not dbConnWrapper.futBecomeFree.failed(): + dbConnWrapper.futBecomeFree.fail(newException(ValueError, $error)) + return err("error in query: " & $error) + + dbConnWrapper.futBecomeFree.complete() + return ok() # reached the end of the results. 
The query is completed + + if not rowCallback.isNil(): + rowCallback(pqResult) + + pqclear(pqResult) + +proc containsRiskyPatterns(input: string): bool = + let riskyPatterns = + @[ + " OR ", " AND ", " UNION ", " SELECT ", "INSERT ", "DELETE ", "UPDATE ", "DROP ", + "EXEC ", "--", "/*", "*/", + ] + + for pattern in riskyPatterns: + if pattern.toLowerAscii() in input.toLowerAscii(): + return true + + return false + +proc isSecureString(input: string): bool = + ## Returns `false` if the string contains risky characters or patterns, `true` otherwise. + let riskyChars = {'\'', '\"', ';', '#', '\\', '%', '_', '/', '*', '\0'} + + for ch in input: + if ch in riskyChars: + return false + + if containsRiskyPatterns(input): + return false + + return true + +proc convertQueryToMetricLabel*(query: string): string = + ## Simple query categorization. The output label is the one that should be used in query metrics + for snippetQuery, metric in QueriesToMetricMap.pairs(): + if $snippetQuery in query: + return $metric + return "unknown_query_metric" + +proc dbConnQuery*( + dbConnWrapper: DbConnWrapper, + query: SqlQuery, + args: seq[string], + rowCallback: DataProc, + requestId: string, +): Future[Result[void, string]] {.async, gcsafe.} = + if not requestId.isSecureString(): + return err("the passed request id is not secure: " & requestId) + + dbConnWrapper.futBecomeFree = newFuture[void]("dbConnQuery") + + let metricLabel = convertQueryToMetricLabel($query) + + var queryStartTime = getTime().toUnixFloat() + + let reqIdAndQuery = "/* requestId=" & requestId & " */ " & $query + (await dbConnWrapper.sendQuery(SqlQuery(reqIdAndQuery), args)).isOkOr: + error "error in dbConnQuery", error = $error + dbConnWrapper.futBecomeFree.fail(newException(ValueError, $error)) + return err("error in dbConnQuery calling sendQuery: " & $error) + + let sendDuration = getTime().toUnixFloat() - queryStartTime + query_time_secs.set(sendDuration, [metricLabel, "sendToDBQuery"]) + + queryStartTime = 
getTime().toUnixFloat() + + (await dbConnWrapper.waitQueryToFinish(rowCallback)).isOkOr: + return err("error in dbConnQuery calling waitQueryToFinish: " & $error) + + let waitDuration = getTime().toUnixFloat() - queryStartTime + query_time_secs.set(waitDuration, [metricLabel, "waitFinish"]) + + query_count.inc(labelValues = [metricLabel]) + + if "insert" notin ($query).toLower(): + debug "dbConnQuery", + requestId, + query = $query, + args, + metricLabel, + waitDbQueryDurationSecs = waitDuration, + sendToDBDurationSecs = sendDuration + + return ok() + +proc dbConnQueryPrepared*( + dbConnWrapper: DbConnWrapper, + stmtName: string, + paramValues: seq[string], + paramLengths: seq[int32], + paramFormats: seq[int32], + rowCallback: DataProc, + requestId: string, +): Future[Result[void, string]] {.async, gcsafe.} = + dbConnWrapper.futBecomeFree = newFuture[void]("dbConnQueryPrepared") + var queryStartTime = getTime().toUnixFloat() + + dbConnWrapper.sendQueryPrepared(stmtName, paramValues, paramLengths, paramFormats).isOkOr: + dbConnWrapper.futBecomeFree.fail(newException(ValueError, $error)) + error "error in dbConnQueryPrepared", error = $error + return err("error in dbConnQueryPrepared calling sendQuery: " & $error) + + let sendDuration = getTime().toUnixFloat() - queryStartTime + query_time_secs.set(sendDuration, [stmtName, "sendToDBQuery"]) + + queryStartTime = getTime().toUnixFloat() + + (await dbConnWrapper.waitQueryToFinish(rowCallback)).isOkOr: + return err("error in dbConnQueryPrepared calling waitQueryToFinish: " & $error) + + let waitDuration = getTime().toUnixFloat() - queryStartTime + query_time_secs.set(waitDuration, [stmtName, "waitFinish"]) + + query_count.inc(labelValues = [stmtName]) + + if "insert" notin stmtName.toLower(): + debug "dbConnQueryPrepared", + requestId, + stmtName, + paramValues, + waitDbQueryDurationSecs = waitDuration, + sendToDBDurationSecs = sendDuration + + return ok() diff --git 
a/third-party/nwaku/waku/common/databases/db_postgres/pgasyncpool.nim b/third-party/nwaku/waku/common/databases/db_postgres/pgasyncpool.nim new file mode 100644 index 0000000..d1e2908 --- /dev/null +++ b/third-party/nwaku/waku/common/databases/db_postgres/pgasyncpool.nim @@ -0,0 +1,189 @@ +# Simple async pool driver for postgress. +# Inspired by: https://github.com/treeform/pg/ +{.push raises: [].} + +import + std/[sequtils, strformat], + regex, + results, + chronos, + chronos/threadsync, + chronicles, + strutils +import ./dbconn, ../common, ../../../waku_core/time + +type + # Database connection pool + PgAsyncPool* = ref object + connString: string + maxConnections: int + conns: seq[DbConnWrapper] + busySignal: ThreadSignalPtr ## signal to wait while the pool is busy + +proc new*(T: type PgAsyncPool, dbUrl: string, maxConnections: int): DatabaseResult[T] = + var connString: string + + try: + let regex = re2("""^postgres:\/\/([^:]+):([^@]+)@([^:]+):(\d+)\/(.+)$""") + var m: RegexMatch2 + if dbUrl.match(regex, m) == false: + return err("could not properly parse dbUrl: " & dbUrl) + + let user = dbUrl[m.captures[0]] + ## m.captures[i] contains an slice with the desired value + let password = dbUrl[m.captures[1]] + let host = dbUrl[m.captures[2]] + let port = dbUrl[m.captures[3]] + let dbName = dbUrl[m.captures[4]] + + connString = + fmt"user={user} host={host} port={port} dbname={dbName} password={password}" + except KeyError, ValueError: + return err("could not parse postgres string: " & getCurrentExceptionMsg()) + + let pool = PgAsyncPool( + connString: connString, + maxConnections: maxConnections, + conns: newSeq[DbConnWrapper](0), + ) + + return ok(pool) + +func isBusy(pool: PgAsyncPool): bool = + return pool.conns.mapIt(it.isPgDbConnBusy()).allIt(it) + +proc close*(pool: PgAsyncPool): Future[Result[void, string]] {.async.} = + ## Gracefully wait and close all openned connections + # wait for the connections to be released and close them, without + # blocking the 
async runtime + + debug "close PgAsyncPool" + await allFutures(pool.conns.mapIt(it.futBecomeFree)) + debug "closing all connection PgAsyncPool" + + for i in 0 ..< pool.conns.len: + if pool.conns[i].isPgDbConnOpen(): + pool.conns[i].closeDbConn().isOkOr: + return err("error in close PgAsyncPool: " & $error) + pool.conns[i].setPgDbConnOpen(false) + + pool.conns.setLen(0) + + return ok() + +proc getFirstFreeConnIndex(pool: PgAsyncPool): DatabaseResult[int] = + for index in 0 ..< pool.conns.len: + if pool.conns[index].isPgDbConnBusy(): + continue + + ## Pick up the first free connection and set it busy + return ok(index) + +proc getConnIndex(pool: PgAsyncPool): Future[DatabaseResult[int]] {.async.} = + ## Waits for a free connection or create if max connections limits have not been reached. + ## Returns the index of the free connection + + if not pool.isBusy(): + return pool.getFirstFreeConnIndex() + + ## Pool is busy then + if pool.conns.len == pool.maxConnections: + ## Can't create more connections. Wait for a free connection without blocking the async runtime. + let busyFuts = pool.conns.mapIt(it.futBecomeFree) + discard await one(busyFuts) + + return pool.getFirstFreeConnIndex() + elif pool.conns.len < pool.maxConnections: + ## stablish a new connection + let dbConn = DbConnWrapper.new(pool.connString).valueOr: + return err("error creating DbConnWrapper: " & $error) + + pool.conns.add(dbConn) + return ok(pool.conns.len - 1) + +proc resetConnPool*(pool: PgAsyncPool): Future[DatabaseResult[void]] {.async.} = + ## Forces closing the connection pool. + ## This proc is intended to be called when the connection with the database + ## got interrupted from the database side or a connectivity problem happened. 
+ + (await pool.close()).isOkOr: + return err("error in resetConnPool: " & error) + + return ok() + +const SlowQueryThreshold = 1.seconds + +proc pgQuery*( + pool: PgAsyncPool, + query: string, + args: seq[string] = newSeq[string](0), + rowCallback: DataProc = nil, + requestId: string = "", +): Future[DatabaseResult[void]] {.async.} = + let connIndex = (await pool.getConnIndex()).valueOr: + return err("connRes.isErr in query: " & $error) + + let queryStartTime = getNowInNanosecondTime() + let dbConnWrapper = pool.conns[connIndex] + defer: + let queryDuration = getNowInNanosecondTime() - queryStartTime + if queryDuration > SlowQueryThreshold.nanos: + debug "pgQuery slow query", + query_duration_secs = (queryDuration / 1_000_000_000), query, requestId + + (await dbConnWrapper.dbConnQuery(sql(query), args, rowCallback, requestId)).isOkOr: + return err("error in asyncpool query: " & $error) + + return ok() + +proc runStmt*( + pool: PgAsyncPool, + stmtName: string, + stmtDefinition: string, + paramValues: seq[string], + paramLengths: seq[int32], + paramFormats: seq[int32], + rowCallback: DataProc = nil, + requestId: string = "", +): Future[DatabaseResult[void]] {.async.} = + ## Runs a stored statement, for performance purposes. + ## The stored statements are connection specific and is a technique of caching a very common + ## queries within the same connection. + ## + ## rowCallback != nil when it is expected to retrieve info from the database. + ## rowCallback == nil for queries that change the database state. 
+ + let connIndex = (await pool.getConnIndex()).valueOr: + return err("Error in runStmt: " & $error) + + let dbConnWrapper = pool.conns[connIndex] + let queryStartTime = getNowInNanosecondTime() + + defer: + let queryDuration = getNowInNanosecondTime() - queryStartTime + if queryDuration > SlowQueryThreshold.nanos: + debug "runStmt slow query", + query_duration = queryDuration / 1_000_000_000, + query = stmtDefinition, + requestId + + if not pool.conns[connIndex].containsPreparedStmt(stmtName): + # The connection doesn't have that statement yet. Let's create it. + # Each session/connection has its own prepared statements. + let res = catch: + let len = paramValues.len + discard dbConnWrapper.getDbConn().prepare(stmtName, sql(stmtDefinition), len) + + if res.isErr(): + return err("failed prepare in runStmt: " & res.error.msg) + + pool.conns[connIndex].inclPreparedStmt(stmtName) + + ( + await dbConnWrapper.dbConnQueryPrepared( + stmtName, paramValues, paramLengths, paramFormats, rowCallback, requestId + ) + ).isOkOr: + return err("error in runStmt: " & $error) + + return ok() diff --git a/third-party/nwaku/waku/common/databases/db_postgres/query_metrics.nim b/third-party/nwaku/waku/common/databases/db_postgres/query_metrics.nim new file mode 100644 index 0000000..13789a6 --- /dev/null +++ b/third-party/nwaku/waku/common/databases/db_postgres/query_metrics.nim @@ -0,0 +1,33 @@ +import metrics, tables + +declarePublicGauge query_time_secs, + "query time measured in nanoseconds", labels = ["query", "phase"] + +declarePublicCounter query_count, + "number of times a query is being performed", labels = ["query"] + +## Maps parts of the possible known queries with a fixed and shorter query label. +const QueriesToMetricMap* = toTable( + { + "contentTopic IN": "content_topic", + "SELECT version()": "select_version", + "WITH min_timestamp": "messages_lookup", + "SELECT messageHash FROM messages WHERE pubsubTopic = ? AND timestamp >= ? AND timestamp <= ? 
ORDER BY timestamp DESC, messageHash DESC LIMIT ?": + "msg_hash_no_ctopic", + "AS partition_name": "get_partitions_list", + "SELECT COUNT(1) FROM messages": "count_msgs", + "SELECT messageHash FROM messages WHERE (timestamp, messageHash) < (?,?) AND pubsubTopic = ? AND timestamp >= ? AND timestamp <= ? ORDER BY timestamp DESC, messageHash DESC LIMIT ?": + "msg_hash_with_cursor", + "SELECT pg_database_size(current_database())": "get_database_size", + "DELETE FROM messages_lookup WHERE timestamp": "delete_from_msgs_lookup", + "DROP TABLE messages_": "drop_partition_table", + "ALTER TABLE messages DETACH PARTITION": "detach_partition", + "SELECT pg_size_pretty(pg_total_relation_size(C.oid))": "get_partition_size", + "pg_try_advisory_lock": "try_advisory_lock", + "SELECT messageHash FROM messages ORDER BY timestamp DESC, messageHash DESC LIMIT ?": + "get_all_msg_hash", + "SELECT pg_advisory_unlock": "advisory_unlock", + "ANALYZE messages": "analyze_messages", + "SELECT EXISTS": "check_version_table_exists", + } +) diff --git a/third-party/nwaku/waku/common/databases/db_sqlite.nim b/third-party/nwaku/waku/common/databases/db_sqlite.nim new file mode 100644 index 0000000..2eaf5f4 --- /dev/null +++ b/third-party/nwaku/waku/common/databases/db_sqlite.nim @@ -0,0 +1,505 @@ +{.push raises: [].} +# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth. +# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim +# +# Most of it is a direct copy, the only unique functions being `get` and `put`. 
+ +import std/[os, strutils, sequtils, algorithm], results, chronicles, sqlite3_abi +import ./common + +logScope: + topics = "sqlite" + +type + Sqlite = ptr sqlite3 + + NoParams* = tuple[] + RawStmtPtr* = ptr sqlite3_stmt + SqliteStmt*[Params; Result] = distinct RawStmtPtr + + AutoDisposed[T: ptr | ref] = object + val: T + +template dispose(db: Sqlite) = + discard sqlite3_close(db) + +template dispose(rawStmt: RawStmtPtr) = + discard sqlite3_finalize(rawStmt) + +template dispose*(sqliteStmt: SqliteStmt) = + discard sqlite3_finalize(RawStmtPtr sqliteStmt) + +proc release[T](x: var AutoDisposed[T]): T = + result = x.val + x.val = nil + +proc disposeIfUnreleased[T](x: var AutoDisposed[T]) = + mixin dispose + if x.val != nil: + dispose(x.release) + +template checkErr*(op, cleanup: untyped) = + if (let v = (op); v != SQLITE_OK): + cleanup + return err($sqlite3_errstr(v)) + +template checkErr*(op) = + checkErr(op): + discard + +type SqliteDatabase* = ref object of RootObj + env*: Sqlite + +type DataProc* = proc(s: RawStmtPtr) {.closure, gcsafe.} + # the nim-eth definition is different; one more indirection + +const NoopRowHandler* = proc(s: RawStmtPtr) {.closure, gcsafe.} = + discard + +proc new*(T: type SqliteDatabase, path: string, readOnly = false): DatabaseResult[T] = + var env: AutoDisposed[ptr sqlite3] + defer: + disposeIfUnreleased(env) + + let flags = + if readOnly: + SQLITE_OPEN_READONLY + else: + SQLITE_OPEN_READWRITE or SQLITE_OPEN_CREATE + + if path != ":memory:": + try: + createDir(parentDir(path)) + except OSError, IOError: + return err("sqlite: cannot create database directory") + + checkErr sqlite3_open_v2(path, addr env.val, flags.cint, nil) + + template prepare(q: string, cleanup: untyped): ptr sqlite3_stmt = + var s: ptr sqlite3_stmt + checkErr sqlite3_prepare_v2(env.val, q, q.len.cint, addr s, nil): + cleanup + s + + template checkExec(s: ptr sqlite3_stmt) = + if (let x = sqlite3_step(s); x != SQLITE_DONE): + discard sqlite3_finalize(s) + return 
err($sqlite3_errstr(x)) + + if (let x = sqlite3_finalize(s); x != SQLITE_OK): + return err($sqlite3_errstr(x)) + + template checkExec(q: string) = + let s = prepare(q): + discard + checkExec(s) + + template checkWalPragmaResult(journalModePragma: ptr sqlite3_stmt) = + if (let x = sqlite3_step(journalModePragma); x != SQLITE_ROW): + discard sqlite3_finalize(journalModePragma) + return err($sqlite3_errstr(x)) + + if (let x = sqlite3_column_type(journalModePragma, 0); x != SQLITE3_TEXT): + discard sqlite3_finalize(journalModePragma) + return err($sqlite3_errstr(x)) + + if (let x = sqlite3_column_text(journalModePragma, 0); x != "memory" and x != "wal"): + discard sqlite3_finalize(journalModePragma) + return err("Invalid pragma result: " & $x) + + let journalModePragma = prepare("PRAGMA journal_mode = WAL;"): + discard + checkWalPragmaResult(journalModePragma) + checkExec(journalModePragma) + + ok(SqliteDatabase(env: env.release)) + +template prepare*(env: Sqlite, q: string, cleanup: untyped): ptr sqlite3_stmt = + var s: ptr sqlite3_stmt + checkErr sqlite3_prepare_v2(env, q, q.len.cint, addr s, nil): + cleanup + s + +proc bindParam*(s: RawStmtPtr, n: int, val: auto): cint = + when val is openarray[byte] | seq[byte]: + # The constant, SQLITE_TRANSIENT, may be passed to indicate that the object is to be copied + # prior to the return from sqlite3_bind_*(). The object and pointer to it must remain valid + # until then. SQLite will then manage the lifetime of its private copy. 
+ # + # From: https://www.sqlite.org/c3ref/bind_blob.html + if val.len > 0: + sqlite3_bind_blob(s, n.cint, unsafeAddr val[0], val.len.cint, SQLITE_TRANSIENT) + else: + sqlite3_bind_blob(s, n.cint, nil, 0.cint, SQLITE_TRANSIENT) + elif val is int32: + sqlite3_bind_int(s, n.cint, val) + elif val is uint32: + sqlite3_bind_int64(s, n.cint, val) + elif val is int64: + sqlite3_bind_int64(s, n.cint, val) + elif val is float64: + sqlite3_bind_double(s, n.cint, val) + # Note: bind_text not yet supported in sqlite3_abi wrapper + # elif val is string: + # sqlite3_bind_text(s, n.cint, val.cstring, -1, nil) # `-1` implies string length is the number of bytes up to the first null-terminator + else: + {.fatal: "Please add support for the '" & $typeof(val) & "' type".} + +template bindParams(s: RawStmtPtr, params: auto) = + when params is tuple: + var i = 1 + for param in fields(params): + checkErr bindParam(s, i, param) + inc i + else: + checkErr bindParam(s, 1, params) + +proc exec*[P](s: SqliteStmt[P, void], params: P): DatabaseResult[void] = + let s = RawStmtPtr s + bindParams(s, params) + + let res = + if (let v = sqlite3_step(s); v != SQLITE_DONE): + err($sqlite3_errstr(v) & " " & $sqlite3_errmsg(sqlite3_db_handle(s))) + else: + ok() + + # release implict transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + + res + +template readResult(s: RawStmtPtr, column: cint, T: type): auto = + when T is Option: + if sqlite3_column_type(s, column) == SQLITE_NULL: + none(typeof(default(T).get())) + else: + some(readSimpleResult(s, column, typeof(default(T).get()))) + else: + readSimpleResult(s, column, T) + +template readResult(s: RawStmtPtr, T: type): auto = + when T is tuple: + var res: T + var i = cint 0 + for field in fields(res): + field = readResult(s, i, typeof(field)) + inc i + res + else: + readResult(s, 0.cint, T) + +proc exec*[Params, Res]( + s: SqliteStmt[Params, Res], params: Params, onData: 
DataProc +): DatabaseResult[bool] = + let s = RawStmtPtr s + bindParams(s, params) + + try: + var gotResults = false + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onData(s) + gotResults = true + of SQLITE_DONE: + break + else: + return err($sqlite3_errstr(v)) + return ok gotResults + finally: + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + +proc query*( + db: SqliteDatabase, query: string, onData: DataProc +): DatabaseResult[bool] {.gcsafe.} = + var s = prepare(db.env, query): + discard + + try: + var gotResults = false + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onData(s) + gotResults = true + of SQLITE_DONE: + break + else: + return err($sqlite3_errstr(v)) + return ok gotResults + except Exception, CatchableError: + error "exception in query", query = query, error = getCurrentExceptionMsg() + + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + discard sqlite3_finalize(s) + # NB: dispose of the prepared query statement and free associated memory + +proc prepareStmt*( + db: SqliteDatabase, stmt: string, Params: type, Res: type +): DatabaseResult[SqliteStmt[Params, Res]] = + var s: RawStmtPtr + checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil) + ok SqliteStmt[Params, Res](s) + +proc close*(db: SqliteDatabase) = + discard sqlite3_close(db.env) + + db[] = SqliteDatabase()[] + +## Maintenance procedures + +# TODO: Cache this value in the SqliteDatabase object. +# Page size should not change during the node execution time +proc getPageSize*(db: SqliteDatabase): DatabaseResult[int64] = + ## Query or set the page size of the database. The page size must be a power of + ## two between 512 and 65536 inclusive. 
+ var size: int64 + proc handler(s: RawStmtPtr) = + size = sqlite3_column_int64(s, 0) + + let res = db.query("PRAGMA page_size;", handler) + if res.isErr(): + return err("failed to get page_size") + + return ok(size) + +proc getFreelistCount*(db: SqliteDatabase): DatabaseResult[int64] = + ## Return the number of unused pages in the database file. + var count: int64 + proc handler(s: RawStmtPtr) = + count = sqlite3_column_int64(s, 0) + + let res = db.query("PRAGMA freelist_count;", handler) + if res.isErr(): + return err("failed to get freelist_count") + + return ok(count) + +proc getPageCount*(db: SqliteDatabase): DatabaseResult[int64] = + ## Return the total number of pages in the database file. + var count: int64 + proc handler(s: RawStmtPtr) = + count = sqlite3_column_int64(s, 0) + + let res = db.query("PRAGMA page_count;", handler) + if res.isErr(): + return err("failed to get page_count") + + return ok(count) + +proc getDatabaseSize*(db: SqliteDatabase): DatabaseResult[int64] = + # get the database page size in bytes + var pageSize: int64 = ?db.getPageSize() + + if pageSize == 0: + return err("failed to get page size ") + + # get the database page count + let pageCount = ?db.getPageCount() + + let databaseSize = (pageSize * pageCount) + + return ok(databaseSize) + +proc gatherSqlitePageStats*(db: SqliteDatabase): DatabaseResult[(int64, int64, int64)] = + let + pageSize = ?db.getPageSize() + pageCount = ?db.getPageCount() + freelistCount = ?db.getFreelistCount() + + return ok((pageSize, pageCount, freelistCount)) + +proc vacuum*(db: SqliteDatabase): DatabaseResult[void] = + ## The VACUUM command rebuilds the database file, repacking it into a minimal amount of disk space. + let res = db.query("VACUUM;", NoopRowHandler) + if res.isErr(): + return err("vacuum failed") + + return ok() + +## Database scheme versioning + +proc getUserVersion*(database: SqliteDatabase): DatabaseResult[int64] = + ## Get the value of the user-version integer. 
+ ## + ## The user-version is an integer that is available to applications to use however they want. + ## SQLite makes no use of the user-version itself. This integer is stored at offset 60 in + ## the database header. + ## + ## For more info check: https://www.sqlite.org/pragma.html#pragma_user_version + var version: int64 + proc handler(s: ptr sqlite3_stmt) = + version = sqlite3_column_int64(s, 0) + + let res = database.query("PRAGMA user_version;", handler) + if res.isErr(): + return err("failed to get user_version") + + ok(version) + +proc setUserVersion*(database: SqliteDatabase, version: int64): DatabaseResult[void] = + ## Set the value of the user-version integer. + ## + ## The user-version is an integer that is available to applications to use however they want. + ## SQLite makes no use of the user-version itself. This integer is stored at offset 60 in + ## the database header. + ## + ## For more info check: https://www.sqlite.org/pragma.html#pragma_user_version + let query = "PRAGMA user_version=" & $version & ";" + let res = database.query(query, NoopRowHandler) + if res.isErr(): + return err("failed to set user_version") + + ok() + +## Migration scripts + +proc getMigrationScriptVersion(path: string): DatabaseResult[int64] = + let name = extractFilename(path) + let parts = name.split("_", 1) + + try: + let version = parseInt(parts[0]) + return ok(version) + except ValueError: + return err("failed to parse file version: " & name) + +proc isSqlScript(path: string): bool = + path.toLower().endsWith(".sql") + +proc listSqlScripts(path: string): DatabaseResult[seq[string]] = + var scripts = newSeq[string]() + + try: + for scriptPath in walkDirRec(path): + if isSqlScript(scriptPath): + scripts.add(scriptPath) + else: + debug "invalid migration script", file = scriptPath + except OSError: + return err("failed to list migration scripts: " & getCurrentExceptionMsg()) + + ok(scripts) + +proc filterMigrationScripts( + paths: seq[string], lowVersion, highVersion: 
int64, direction: string = "up" +): seq[string] = + ## Returns migration scripts whose versions fall within (lowVersion, highVersion] (lower bound exclusive, upper bound inclusive) + let filterPredicate = proc(script: string): bool = + if not isSqlScript(script): + return false + + if direction != "" and not script.toLower().endsWith("." & direction & ".sql"): + return false + + let scriptVersionRes = getMigrationScriptVersion(script) + if scriptVersionRes.isErr(): + return false + + let scriptVersion = scriptVersionRes.value + return lowVersion < scriptVersion and scriptVersion <= highVersion + + paths.filter(filterPredicate) + +proc sortMigrationScripts(paths: seq[string]): seq[string] = + ## Sort migration script paths alphabetically + paths.sorted(system.cmp[string]) + +proc loadMigrationScripts(paths: seq[string]): DatabaseResult[seq[string]] = + var loadedScripts = newSeq[string]() + + for script in paths: + try: + loadedScripts.add(readFile(script)) + except OSError, IOError: + return err("failed to load script '" & script & "': " & getCurrentExceptionMsg()) + + ok(loadedScripts) + +proc breakIntoStatements(script: string): seq[string] = + var statements = newSeq[string]() + + for chunk in script.split(';'): + if chunk.strip().isEmptyOrWhitespace(): + continue + + let statement = chunk.strip() & ";" + statements.add(statement) + + statements + +proc migrate*( + db: SqliteDatabase, targetVersion: int64, migrationsScriptsDir: string +): DatabaseResult[void] = + ## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then + ## it runs migration scripts if the `user_version` is outdated. The `migrationsScriptsDir` path + ## points to the directory holding the migration scripts. Once the db is updated, it sets the + ## `user_version` to the `targetVersion`. 
+ ## + ## NOTE: Down migration it is not currently supported + let userVersion = ?db.getUserVersion() + + if userVersion == targetVersion: + debug "database schema is up to date", + userVersion = userVersion, targetVersion = targetVersion + return ok() + + info "database schema is outdated", + userVersion = userVersion, targetVersion = targetVersion + + # Load migration scripts + var migrationScriptsPaths = ?listSqlScripts(migrationsScriptsDir) + migrationScriptsPaths = filterMigrationScripts( + migrationScriptsPaths, + lowVersion = userVersion, + highVersion = targetVersion, + direction = "up", + ) + migrationScriptsPaths = sortMigrationScripts(migrationScriptsPaths) + + if migrationScriptsPaths.len <= 0: + debug "no scripts to be run" + return ok() + + let scripts = ?loadMigrationScripts(migrationScriptsPaths) + + # Run the migration scripts + for script in scripts: + for statement in script.breakIntoStatements(): + debug "executing migration statement", statement = statement + + let execRes = db.query(statement, NoopRowHandler) + if execRes.isErr(): + error "failed to execute migration statement", + statement = statement, error = execRes.error + return err("failed to execute migration statement") + + debug "migration statement executed succesfully", statement = statement + + # Update user_version + ?db.setUserVersion(targetVersion) + + debug "database user_version updated", userVersion = targetVersion + ok() + +proc performSqliteVacuum*(db: SqliteDatabase): DatabaseResult[void] = + ## SQLite database vacuuming + # TODO: Run vacuuming conditionally based on database page stats + # if (pageCount > 0 and freelistCount > 0): + + debug "starting sqlite database vacuuming" + + let resVacuum = db.vacuum() + if resVacuum.isErr(): + return err("failed to execute vacuum: " & resVacuum.error) + + debug "finished sqlite database vacuuming" + ok() diff --git a/third-party/nwaku/waku/common/databases/dburl.nim b/third-party/nwaku/waku/common/databases/dburl.nim new file mode 
100644 index 0000000..b4cbf1d --- /dev/null +++ b/third-party/nwaku/waku/common/databases/dburl.nim @@ -0,0 +1,29 @@ +import std/strutils, regex, results + +proc validateDbUrl*(dbUrl: string): Result[string, string] = + ## dbUrl mimics SQLAlchemy Database URL schema + ## See: https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls + let regex = re2"^\w+:\/\/.+:.+@[\w*-.]+:[0-9]+\/[\w*-.]+$" + let dbUrl = dbUrl.strip() + if "sqlite" in dbUrl or dbUrl == "" or dbUrl == "none" or dbUrl.match(regex): + return ok(dbUrl) + else: + return err("invalid 'db url' option format") + +proc getDbEngine*(dbUrl: string): Result[string, string] = + let dbUrlParts = dbUrl.split("://", 1) + + if dbUrlParts.len != 2: + return err("Incorrect dbUrl") + + let engine = dbUrlParts[0] + return ok(engine) + +proc getDbPath*(dbUrl: string): Result[string, string] = + let dbUrlParts = dbUrl.split("://", 1) + + if dbUrlParts.len != 2: + return err("Incorrect dbUrl") + + let path = dbUrlParts[1] + return ok(path) diff --git a/third-party/nwaku/waku/common/enr.nim b/third-party/nwaku/waku/common/enr.nim new file mode 100644 index 0000000..9c5ff35 --- /dev/null +++ b/third-party/nwaku/waku/common/enr.nim @@ -0,0 +1,19 @@ +## An extension wrapper around nim-eth's ENR module + +import eth/p2p/discoveryv5/enr +import ./enr/builder, ./enr/typed_record + +export + enr.Record, + enr.EnrResult, + enr.get, + enr.tryGet, + enr.fromBase64, + enr.toBase64, + enr.fromURI, + enr.toURI, + enr.FieldPair, + enr.toFieldPair, + enr.init, # TODO: Delete after removing the deprecated procs + builder, + typed_record diff --git a/third-party/nwaku/waku/common/enr/builder.nim b/third-party/nwaku/waku/common/enr/builder.nim new file mode 100644 index 0000000..3c5455d --- /dev/null +++ b/third-party/nwaku/waku/common/enr/builder.nim @@ -0,0 +1,85 @@ +{.push raises: [].} + +import + std/[options, net], + results, + eth/keys as eth_keys, + eth/p2p/discoveryv5/enr, + libp2p/crypto/crypto as libp2p_crypto + +import 
./typed_record + +## Builder + +type EnrBuilder* = object + seqNumber: uint64 + privateKey: eth_keys.PrivateKey + ipAddress: Opt[IpAddress] + tcpPort: Opt[Port] + udpPort: Opt[Port] + fields: seq[FieldPair] + +proc init*(T: type EnrBuilder, key: eth_keys.PrivateKey, seqNum: uint64 = 1): T = + EnrBuilder( + seqNumber: seqNum, + privateKey: key, + ipAddress: Opt.none(IpAddress), + tcpPort: Opt.none(Port), + udpPort: Opt.none(Port), + fields: newSeq[FieldPair](), + ) + +proc init*(T: type EnrBuilder, key: libp2p_crypto.PrivateKey, seqNum: uint64 = 1): T = + # TODO: Inconvenient runtime assertion. Move this assertion to compile time + if key.scheme != PKScheme.Secp256k1: + raise newException(Defect, "invalid private key scheme") + + let + bytes = key.getRawBytes().expect("Private key is valid") + privateKey = + eth_keys.PrivateKey.fromRaw(bytes).expect("Raw private key is of valid length") + + EnrBuilder.init(key = privateKey, seqNum = seqNum) + +proc addFieldPair*(builder: var EnrBuilder, pair: FieldPair) = + builder.fields.add(pair) + +proc addFieldPair*[V](builder: var EnrBuilder, key: string, value: V) = + builder.addFieldPair(toFieldPair(key, value)) + +proc build*(builder: EnrBuilder): EnrResult[enr.Record] = + # Note that nim-eth's `Record.init` does not deduplicate the field pairs. 
+ # See: https://github.com/status-im/nim-eth/blob/4b22fcd/eth/p2p/discoveryv5/enr.nim#L143-L144 + enr.Record.init( + seqNum = builder.seqNumber, + pk = builder.privateKey, + ip = builder.ipAddress, + tcpPort = builder.tcpPort, + udpPort = builder.udpPort, + extraFields = builder.fields, + ) + +## Builder extension: IP address and TCP/UDP ports + +proc addAddressAndPorts( + builder: var EnrBuilder, ip: IpAddress, tcpPort, udpPort: Option[Port] +) = + builder.ipAddress = Opt.some(ip) + builder.tcpPort = tcpPort.toOpt() + builder.udpPort = udpPort.toOpt() + +proc addPorts(builder: var EnrBuilder, tcp, udp: Option[Port]) = + # Based on: https://github.com/status-im/nim-eth/blob/4b22fcd/eth/p2p/discoveryv5/enr.nim#L166 + builder.tcpPort = tcp.toOpt() + builder.udpPort = udp.toOpt() + +proc withIpAddressAndPorts*( + builder: var EnrBuilder, + ipAddr = none(IpAddress), + tcpPort = none(Port), + udpPort = none(Port), +) = + if ipAddr.isSome(): + addAddressAndPorts(builder, ipAddr.get(), tcpPort, udpPort) + else: + addPorts(builder, tcpPort, udpPort) diff --git a/third-party/nwaku/waku/common/enr/typed_record.nim b/third-party/nwaku/waku/common/enr/typed_record.nim new file mode 100644 index 0000000..d0b055a --- /dev/null +++ b/third-party/nwaku/waku/common/enr/typed_record.nim @@ -0,0 +1,99 @@ +{.push raises: [].} + +import std/options, results, eth/keys as eth_keys, libp2p/crypto/crypto as libp2p_crypto + +import eth/p2p/discoveryv5/enr except TypedRecord, toTypedRecord + +## Since enr changed to result.Opt[T] from Option[T] for intercompatibility introduce a conversion between +func toOpt*[T](o: Option[T]): Opt[T] = + if o.isSome(): + return Opt.some(o.get()) + else: + return Opt.none(T) + +func toOption*[T](o: Opt[T]): Option[T] = + if o.isSome(): + return some(o.get()) + else: + return none(T) + +## ENR typed record + +# Record identity scheme + +type RecordId* {.pure.} = enum + V4 + +func toRecordId(id: string): EnrResult[RecordId] = + case id + of "v4": + 
ok(RecordId.V4) + else: + err("unknown identity scheme") + +func `$`*(id: RecordId): string = + case id + of RecordId.V4: "v4" + +# Typed record + +type TypedRecord* = object + raw: Record + +proc init(T: type TypedRecord, record: Record): T = + TypedRecord(raw: record) + +proc tryGet*(record: TypedRecord, field: string, T: type): Option[T] = + return record.raw.tryGet(field, T).toOption() + +func toTyped*(record: Record): EnrResult[TypedRecord] = + let tr = TypedRecord.init(record) + + # Validate record's identity scheme + let idOpt = tr.tryGet("id", string) + if idOpt.isNone(): + return err("missing id scheme field") + + discard ?toRecordId(idOpt.get()) + + ok(tr) + +# Typed record field accessors + +func id*(record: TypedRecord): Option[RecordId] = + let fieldOpt = record.tryGet("id", string) + if fieldOpt.isNone(): + return none(RecordId) + + let fieldRes = toRecordId(fieldOpt.get()) + if fieldRes.isErr(): + return none(RecordId) + + some(fieldRes.value) + +func secp256k1*(record: TypedRecord): Option[array[33, byte]] = + record.tryGet("secp256k1", array[33, byte]) + +func ip*(record: TypedRecord): Option[array[4, byte]] = + record.tryGet("ip", array[4, byte]) + +func ip6*(record: TypedRecord): Option[array[16, byte]] = + record.tryGet("ip6", array[16, byte]) + +func tcp*(record: TypedRecord): Option[uint16] = + record.tryGet("tcp", uint16) + +func tcp6*(record: TypedRecord): Option[uint16] = + let port = record.tryGet("tcp6", uint16) + if port.isNone(): + return record.tcp() + return port + +func udp*(record: TypedRecord): Option[uint16] = + record.tryGet("udp", uint16) + +func udp6*(record: TypedRecord): Option[uint16] = + let port = record.tryGet("udp6", uint16) + if port.isNone(): + return record.udp() + return port diff --git a/third-party/nwaku/waku/common/error_handling.nim b/third-party/nwaku/waku/common/error_handling.nim new file mode 100644 index 0000000..4924889 --- /dev/null +++ b/third-party/nwaku/waku/common/error_handling.nim @@ -0,0 +1 @@ +type 
OnFatalErrorHandler* = proc(errMsg: string) {.gcsafe, closure, raises: [].} diff --git a/third-party/nwaku/waku/common/hexstrings.nim b/third-party/nwaku/waku/common/hexstrings.nim new file mode 100644 index 0000000..fd6b590 --- /dev/null +++ b/third-party/nwaku/waku/common/hexstrings.nim @@ -0,0 +1,61 @@ +{.push raises: [].} + +type + HexDataStr* = distinct string + Identifier* = distinct string # 32 bytes, no 0x prefix! + HexStrings* = HexDataStr | Identifier + +# Validation + +template hasHexHeader(value: string): bool = + if value.len >= 2 and value[0] == '0' and value[1] in {'x', 'X'}: true else: false + +template isHexChar(c: char): bool = + if c notin {'0' .. '9'} and c notin {'a' .. 'f'} and c notin {'A' .. 'F'}: + false + else: + true + +func isValidHexQuantity*(value: string): bool = + if not hasHexHeader(value): + return false + + # No leading zeros (but allow 0x0) + if value.len < 3 or (value.len > 3 and value[2] == '0'): + return false + + for i in 2 ..< value.len: + let c = value[i] + if not isHexChar(c): + return false + + return true + +func isValidHexData*(value: string, header = true): bool = + if header and not hasHexHeader(value): + return false + + # Must be even number of digits + if value.len mod 2 != 0: + return false + + # Leading zeros are allowed + for i in 2 ..< value.len: + let c = value[i] + if not isHexChar(c): + return false + + return true + +template isValidHexData*(value: string, hexLen: int, header = true): bool = + value.len == hexLen and value.isValidHexData(header) + +proc validateHexData*(value: string) {.inline, raises: [ValueError].} = + if unlikely(not isValidHexData(value)): + raise newException(ValueError, "Invalid hex data format: " & value) + +# Initialisation + +proc hexDataStr*(value: string): HexDataStr {.inline, raises: [ValueError].} = + validateHexData(value) + HexDataStr(value) diff --git a/third-party/nwaku/waku/common/logging.nim b/third-party/nwaku/waku/common/logging.nim new file mode 100644 index 
0000000..2b664f2 --- /dev/null +++ b/third-party/nwaku/waku/common/logging.nim @@ -0,0 +1,105 @@ +## This code has been copied and adapted from the `status-im/nimbus-eth2` project. +## Link: https://github.com/status-im/nimbus-eth2/blob/c585b0a5b1ae4d55af38ad7f4715ad455e791552/beacon_chain/nimbus_binary_common.nim +import + std/[typetraits, os, strutils, syncio], + chronicles, + chronicles/log_output, + chronicles/topics_registry + +export chronicles.LogLevel + +{.push raises: [].} + +type LogFormat* = enum + TEXT + JSON + +## Utils + +proc stripAnsi(v: string): string = + ## Copied from: https://github.com/status-im/nimbus-eth2/blob/stable/beacon_chain/nimbus_binary_common.nim#L41 + ## Silly chronicles, colors is a compile-time property + var + res = newStringOfCap(v.len) + i: int + + while i < v.len: + let c = v[i] + if c == '\x1b': + var + x = i + 1 + found = false + + while x < v.len: # look for [..m + let c2 = v[x] + if x == i + 1: + if c2 != '[': + break + else: + if c2 in {'0' .. '9'} + {';'}: + discard # keep looking + elif c2 == 'm': + i = x + 1 + found = true + break + else: + break + inc x + + if found: # skip adding c + continue + res.add c + inc i + + res + +proc writeAndFlush(f: syncio.File, s: LogOutputStr) = + try: + f.write(s) + f.flushFile() + except CatchableError: + logLoggingFailure(cstring(s), getCurrentException()) + +## Setup + +proc setupLogLevel(level: LogLevel) = + # TODO: Support per-topic log level configuration + topics_registry.setLogLevel(level) + +proc setupLogFormat(format: LogFormat, color = true) = + proc noOutputWriter(logLevel: LogLevel, msg: LogOutputStr) = + discard + + proc stdoutOutputWriter(logLevel: LogLevel, msg: LogOutputStr) = + writeAndFlush(syncio.stdout, msg) + + proc stdoutNoColorOutputWriter(logLevel: LogLevel, msg: LogOutputStr) = + writeAndFlush(syncio.stdout, stripAnsi(msg)) + + when defaultChroniclesStream.outputs.type.arity == 2: + case format + of LogFormat.Text: + defaultChroniclesStream.outputs[0].writer = + if 
color: stdoutOutputWriter else: stdoutNoColorOutputWriter + defaultChroniclesStream.outputs[1].writer = noOutputWriter + of LogFormat.Json: + defaultChroniclesStream.outputs[0].writer = noOutputWriter + defaultChroniclesStream.outputs[1].writer = stdoutOutputWriter + else: + {. + warning: + "the present module should be compiled with '-d:chronicles_default_output_device=dynamic' " & + "and '-d:chronicles_sinks=\"textlines,json\"' options" + .} + +proc setupLog*(level: LogLevel, format: LogFormat) = + ## Logging setup + # Adhere to NO_COLOR initiative: https://no-color.org/ + let color = + try: + not parseBool(os.getEnv("NO_COLOR", "false")) + except CatchableError: + true + + setupLogLevel(level) + setupLogFormat(format, color) diff --git a/third-party/nwaku/waku/common/nimchronos.nim b/third-party/nwaku/waku/common/nimchronos.nim new file mode 100644 index 0000000..dc425c4 --- /dev/null +++ b/third-party/nwaku/waku/common/nimchronos.nim @@ -0,0 +1,31 @@ +## An extension wrapper around nim-chronos +{.push raises: [].} + +import chronos, chronicles + +export chronos + +## Extension methods + +# Taken from: https://github.com/status-im/nim-libp2p/blob/master/libp2p/utils/heartbeat.nim +template heartbeat*(name: string, interval: Duration, body: untyped): untyped = + var nextHeartbeat = Moment.now() + while true: + body + + nextHeartbeat += interval + let now = Moment.now() + if nextHeartbeat < now: + let + delay = now - nextHeartbeat + itv = interval + + if delay > itv: + info "Missed multiple heartbeats", + heartbeat = name, delay = delay, hinterval = itv + else: + debug "Missed heartbeat", heartbeat = name, delay = delay, hinterval = itv + + nextHeartbeat = now + itv + + await sleepAsync(nextHeartbeat - now) diff --git a/third-party/nwaku/waku/common/paging.nim b/third-party/nwaku/waku/common/paging.nim new file mode 100644 index 0000000..c963a24 --- /dev/null +++ b/third-party/nwaku/waku/common/paging.nim @@ -0,0 +1,28 @@ +import std/options + +type 
PagingDirection* {.pure.} = enum + ## PagingDirection determines the direction of pagination + BACKWARD = uint32(0) + FORWARD = uint32(1) + +proc default*(): PagingDirection {.inline.} = + PagingDirection.FORWARD + +proc into*(b: bool): PagingDirection = + PagingDirection(b) + +proc into*(b: Option[bool]): PagingDirection = + if b.isNone(): + return default() + b.get().into() + +proc into*(d: PagingDirection): bool = + d == PagingDirection.FORWARD + +proc into*(d: Option[PagingDirection]): bool = + if d.isNone(): + return false + d.get().into() + +proc into*(s: string): PagingDirection = + (s == "true").into() diff --git a/third-party/nwaku/waku/common/protobuf.nim b/third-party/nwaku/waku/common/protobuf.nim new file mode 100644 index 0000000..767bdae --- /dev/null +++ b/third-party/nwaku/waku/common/protobuf.nim @@ -0,0 +1,64 @@ +# Extensions for libp2p's protobuf library implementation + +{.push raises: [].} + +import std/options, libp2p/protobuf/minprotobuf, libp2p/varint + +export minprotobuf, varint + +## Custom errors + +type + ProtobufErrorKind* {.pure.} = enum + DecodeFailure + MissingRequiredField + InvalidLengthField + + ProtobufError* = object + case kind*: ProtobufErrorKind + of DecodeFailure: + error*: minprotobuf.ProtoError + of MissingRequiredField, InvalidLengthField: + field*: string + + ProtobufResult*[T] = Result[T, ProtobufError] + +converter toProtobufError*(err: minprotobuf.ProtoError): ProtobufError = + case err + of minprotobuf.ProtoError.RequiredFieldMissing: + ProtobufError(kind: ProtobufErrorKind.MissingRequiredField, field: "unknown") + else: + ProtobufError(kind: ProtobufErrorKind.DecodeFailure, error: err) + +proc missingRequiredField*(T: type ProtobufError, field: string): T = + ProtobufError(kind: ProtobufErrorKind.MissingRequiredField, field: field) + +proc invalidLengthField*(T: type ProtobufError, field: string): T = + ProtobufError(kind: ProtobufErrorKind.InvalidLengthField, field: field) + +## Extension methods + +proc 
write3*(proto: var ProtoBuffer, field: int, value: auto) = + when value is Option: + if value.isSome(): + proto.write(field, value.get()) + else: + proto.write(field, value) + +proc finish3*(proto: var ProtoBuffer) = + if proto.buffer.len > 0: + proto.finish() + else: + proto.offset = 0 + +proc `==`*(a: zint64, b: zint64): bool = + int64(a) == int64(b) + +proc `$`*(err: ProtobufError): string = + case err.kind + of DecodeFailure: + return $err.error ## assume that ProtoError is pure + of MissingRequiredField: + return "MissingRequiredField " & err.field + of InvalidLengthField: + return "InvalidLengthField " & err.field diff --git a/third-party/nwaku/waku/common/rate_limit/per_peer_limiter.nim b/third-party/nwaku/waku/common/rate_limit/per_peer_limiter.nim new file mode 100644 index 0000000..5cb96a2 --- /dev/null +++ b/third-party/nwaku/waku/common/rate_limit/per_peer_limiter.nim @@ -0,0 +1,38 @@ +## PerPeerRateLimiter +## +## With this class one can easily track usage of a service per PeerId +## Rate limit is applied separately by each peer upon first use. Also time period is counted distinct per peer. +## It will use compensating replenish mode for peers to balance the load and allow fair usage of a service. 
+ +{.push raises: [].} + +import std/[options, tables], libp2p/stream/connection + +import ./[single_token_limiter, service_metrics], ../../utils/tableutils + +export token_bucket, setting, service_metrics + +type PerPeerRateLimiter* = ref object of RootObj + setting*: Option[RateLimitSetting] + peerBucket: Table[PeerId, Option[TokenBucket]] + +proc mgetOrPut( + perPeerRateLimiter: var PerPeerRateLimiter, peerId: PeerId +): var Option[TokenBucket] = + return perPeerRateLimiter.peerBucket.mgetOrPut( + peerId, newTokenBucket(perPeerRateLimiter.setting, ReplenishMode.Compensating) + ) + +template checkUsageLimit*( + t: var PerPeerRateLimiter, + proto: string, + conn: Connection, + bodyWithinLimit, bodyRejected: untyped, +) = + checkUsageLimit(t.mgetOrPut(conn.peerId), proto, conn, bodyWithinLimit, bodyRejected) + +proc unregister*(perPeerRateLimiter: var PerPeerRateLimiter, peerId: PeerId) = + perPeerRateLimiter.peerBucket.del(peerId) + +proc unregister*(perPeerRateLimiter: var PerPeerRateLimiter, peerIds: seq[PeerId]) = + perPeerRateLimiter.peerBucket.keepItIf(key notin peerIds) diff --git a/third-party/nwaku/waku/common/rate_limit/request_limiter.nim b/third-party/nwaku/waku/common/rate_limit/request_limiter.nim new file mode 100644 index 0000000..0ede20b --- /dev/null +++ b/third-party/nwaku/waku/common/rate_limit/request_limiter.nim @@ -0,0 +1,143 @@ +## RequestRateLimiter +## +## RequestRateLimiter is a general service protection mechanism. +## While applies an overall rate limit, it also ensure fair usage among peers. +## +## This is reached by reject peers that are constantly over using the service while allowing others to use it +## within the global limit set. +## Punished peers will also be recovered after a certain time period if not violating the limit. +## +## This is reached by calculating a ratio of the global limit and applying it to each peer. +## This ratio is applied to the allowed tokens within a ratio * the global time period. 
+## The allowed tokens for peers are limited to 75% of ratio * global token volume. +## +## This needs to be taken into account when setting the global limit for the specific service type and use cases. + +{.push raises: [].} + +import + std/[options, math], + chronicles, + chronos/timer, + libp2p/stream/connection, + libp2p/utility + +import std/times except TimeInterval, Duration, seconds, minutes + +import ./[single_token_limiter, service_metrics, timed_map] + +export token_bucket, setting, service_metrics + +logScope: + topics = "waku ratelimit" + +const PER_PEER_ALLOWED_PERCENT_OF_VOLUME = 0.75 +const UNLIMITED_RATIO = 0 +const UNLIMITED_TIMEOUT = 0.seconds +const MILISECONDS_RATIO = 10 +const SECONDS_RATIO = 3 +const MINUTES_RATIO = 2 + +type RequestRateLimiter* = ref object of RootObj + tokenBucket: Option[TokenBucket] + setting*: Option[RateLimitSetting] + peerBucketSetting*: RateLimitSetting + peerUsage: TimedMap[PeerId, TokenBucket] + +proc mgetOrPut( + requestRateLimiter: var RequestRateLimiter, peerId: PeerId +): var TokenBucket = + let bucketForNew = newTokenBucket(some(requestRateLimiter.peerBucketSetting)).valueOr: + raiseAssert "This branch is not allowed to be reached as it will not be called if the setting is None." + + return requestRateLimiter.peerUsage.mgetOrPut(peerId, bucketForNew) + +proc checkUsage*( + t: var RequestRateLimiter, proto: string, conn: Connection, now = Moment.now() +): bool {.raises: [].} = + if t.tokenBucket.isNone(): + return true + + let peerBucket = t.mgetOrPut(conn.peerId) + ## check requesting peer's usage is not over the calculated ratio and let that peer go which not requested much/or this time... 
# TODO: review these ratio assumptions! Debatable!
func calcPeriodRatio(settingOpt: Option[RateLimitSetting]): int =
  ## Picks the factor by which the per-peer period is stretched relative to
  ## the global period: the shorter the global period, the larger the ratio.
  ## Absent or unlimited settings map to UNLIMITED_RATIO.
  if settingOpt.isNone():
    # when setting is none
    return UNLIMITED_RATIO

  let setting = settingOpt.get()
  if setting.isUnlimited():
    UNLIMITED_RATIO
  elif setting.period <= 1.seconds:
    MILISECONDS_RATIO
  elif setting.period <= 1.minutes:
    SECONDS_RATIO
  else:
    MINUTES_RATIO
func calcPeerTokenSetting(
    setting: Option[RateLimitSetting], ratio: int
): RateLimitSetting =
  ## Derives the per-peer limit from the global one: each peer may use at most
  ## 75% of `ratio` periods' worth of tokens, over a `ratio`-times-longer
  ## period. An absent global setting maps to the unlimited (0, 0) setting.
  if setting.isNone():
    return (0, 0.minutes)

  let globalSetting = setting.get()
  let scaledVolume =
    (globalSetting.volume * ratio).float * PER_PEER_ALLOWED_PERCENT_OF_VOLUME

  (trunc(scaledVolume).int, globalSetting.period * ratio)
func isUnlimited*(t: RateLimitSetting): bool {.inline.} =
  ## A setting disables rate limiting entirely when either the token volume
  ## or the replenish period is non-positive.
  # Changed `proc` to `func`: the body is pure, so side-effect freedom can be
  # compiler-enforced; the expression body replaces the redundant `return`.
  t.volume <= 0 or t.period <= 0.seconds
proc fillSettingTable(
    t: var ProtocolRateLimitSettings, sProtocol: var string, setting: RateLimitSetting
) {.raises: [ValueError].} =
  ## Stores `setting` in `t` under the protocol named by `sProtocol`.
  ## Raises ValueError for unknown protocol names (via `translate`).
  if sProtocol == "store":
    # The generic "store" name only applies to store versions that are not
    # configured explicitly: hasKeyOrPut keeps any already-present entry.
    discard t.hasKeyOrPut(STOREV2, setting)
    discard t.hasKeyOrPut(STOREV3, setting)
  else:
    let protocol = translate(sProtocol)
    # always overrides, last one wins if same protocol duplicated
    t[protocol] = setting
+ ## group1: Protocol name, if empty we take it as "global" setting + ## group2: Volume of tokens - only integer + ## group3: Duration of period - only integer + ## group4: Unit of period - only h:hour, m:minute, s:second, ms:millisecond allowed + ## whitespaces are allowed lazily + const parseRegex = + """^\s*((store|storev2|storev3|lightpush|px|filter)\s*:)?\s*(\d+)\s*\/\s*(\d+)\s*(s|h|m|ms)\s*$""" + const regexParseSize = re2(parseRegex) + for settingStr in settings: + let aSetting = settingStr.toLower() + try: + var m: RegexMatch2 + if aSetting.match(regexParseSize, m) == false: + return err("Invalid rate-limit setting: " & settingStr) + + var sProtocol = aSetting[m.captures[1]] + let volume = aSetting[m.captures[2]].parseInt() + let duration = aSetting[m.captures[3]].parseInt() + let periodUnit = aSetting[m.captures[4]] + + var period = 0.seconds + case periodUnit + of "ms": + period = duration.milliseconds + of "s": + period = duration.seconds + of "m": + period = duration.minutes + of "h": + period = duration.hours + + fillSettingTable(settingsTable, sProtocol, (volume, period)) + except ValueError: + return err("Invalid rate-limit setting: " & settingStr) + + # If there were no global setting predefined, we set unlimited + # due it is taken for protocols not defined in the list - thus those will not apply accidentally wrong settings. 
proc newTokenBucket*(
    setting: Option[RateLimitSetting],
    replenishMode: ReplenishMode = ReplenishMode.Compensating,
): Option[TokenBucket] =
  ## Builds a TokenBucket for `setting`.
  ## Returns `none` — meaning "no limit applied" — when the setting is absent
  ## or explicitly unlimited.
  if setting.isNone():
    return none[TokenBucket]()

  if setting.get().isUnlimited():
    return none[TokenBucket]()

  # Fix: forward `replenishMode` to the bucket. Previously the parameter was
  # accepted but silently ignored, so callers requesting Strict mode always
  # got TokenBucket.new's Compensating default.
  return some(
    TokenBucket.new(setting.get().volume, setting.get().period, replenishMode)
  )
Option[TokenBucket] | var TokenBucket, + proto: string, + conn: Connection, + bodyWithinLimit, bodyRejected: untyped, +) = + if t.checkUsage(proto): + let requestStartTime = Moment.now() + waku_service_requests.inc(labelValues = [proto, "served"]) + + bodyWithinLimit + + let requestDuration = Moment.now() - requestStartTime + waku_service_request_handling_duration_seconds.observe( + requestDuration.milliseconds.float / 1000, labelValues = [proto] + ) + else: + waku_service_requests.inc(labelValues = [proto, "rejected"]) + bodyRejected diff --git a/third-party/nwaku/waku/common/rate_limit/timed_map.nim b/third-party/nwaku/waku/common/rate_limit/timed_map.nim new file mode 100644 index 0000000..b05dfb0 --- /dev/null +++ b/third-party/nwaku/waku/common/rate_limit/timed_map.nim @@ -0,0 +1,162 @@ +## TimedMap +## =========== +## Inspired by nim-libp2p's TimedCache class. This is using the same approach to prune +## untouched items from the map where the set timeout duration is reached. +## But unlike TimedCache this TimedMap is capable to hold and return any type of value for a key. +## +## - `mgetOrPut` proc is similar to std/tables, but will renew the timeout for the key. +## - For non-renewal check use `contains` proc. +## - `expire` proc will remove all items that have expired. +## +## Choose your initial timeout for your needs to control the size of the map. 
func `$`*[T](a: T): string =
  ## Renders `a`, mapping nil references to the literal "nil".
  # Fix: the nil branch previously produced the string "nil" as a discarded
  # expression (no `return`), so execution fell through to `$a` on a nil
  # value instead of short-circuiting.
  if isNil(a):
    return "nil"

  return $a
proc mgetOrPut*[K, V](t: var TimedMap[K, V], k: K, v: V, now = Moment.now()): var V =
  ## Returns the value stored under `k`, inserting `v` when the key is absent.
  ## In both cases the entry's expiry timer is refreshed to `now + timeout`,
  ## while the original `addedAt` timestamp is preserved.
  ## (The previous comment claimed a bool return; this returns `var V`.)
  t.expire(now)

  let
    previous = t.del(k) # Refresh existing item
    addedAt =
      if previous.isSome():
        previous[].addedAt
      else:
        now
    value =
      if previous.isSome():
        previous[].value
      else:
        v

  let node =
    TimedEntry[K, V](key: k, value: value, addedAt: addedAt, expiresAt: now + t.timeout)
  if t.head == nil:
    t.tail = node
    t.head = t.tail
  else:
    # search from tail because typically that's where we add when now grows
    var cur = t.tail
    while cur != nil and node.expiresAt < cur.expiresAt:
      cur = cur.prev

    if cur == nil:
      node.next = t.head
      t.head.prev = node
      t.head = node
    else:
      node.prev = cur
      node.next = cur.next
      # Fix: maintain the successor's back-link. Without it, the old
      # `cur.next` kept `prev == cur`, so a later del() of that entry would
      # rewire `cur.next` past this node and orphan it from the list while
      # it remained in `entries`.
      if node.next != nil:
        node.next.prev = node
      cur.next = node
      if cur == t.tail:
        t.tail = node

  t.entries.incl(node)

  return node.value
type
  ReplenishMode* = enum
    Strict
    Compensating

  TokenBucket* = ref object
    budget: int ## Current number of tokens in the bucket
    budgetCap: int ## Bucket capacity (tokens granted per fill period)
    lastTimeFull: Moment
      ## Start of the current period; anchors the periodization of refills
    fillDuration: Duration ## Refill period
    case replenishMode*: ReplenishMode
    of Strict:
      ## In strict mode, the bucket is refilled only till the budgetCap
      discard
    of Compensating:
      ## Default mode: unused capacity from previous periods may be credited
      ## on refill, capped by maxCompensation (25% of budgetCap, fixed at
      ## construction via BUDGET_COMPENSATION_LIMIT_PERCENT).
      maxCompensation: float
func calcCompensation(bucket: TokenBucket, averageUsage: float): int =
  ## Computes the extra tokens granted on replenish for under-used periods.
  # Changed `proc` to `func`: the body only reads bucket fields, so purity
  # can be compiler-enforced.
  # if we already fully used or even overused the tokens, there is no place
  # for compensation
  if averageUsage >= 1.0:
    return 0

  # Compensation is the unused bucket capacity over the last measured
  # period(s) on average, capped at the allowed compensation threshold.
  let compensationPercent =
    min((1.0 - averageUsage) * bucket.budgetCap.float, bucket.maxCompensation)
  trunc(compensationPercent).int
proc update(bucket: TokenBucket, currentTime: Moment) =
  ## Dispatches replenishment to the handler matching the bucket's mode.
  case bucket.replenishMode
  of ReplenishMode.Compensating:
    bucket.updateWithCompensation(currentTime)
  of ReplenishMode.Strict:
    bucket.updateStrict(currentTime)
proc replenish*(bucket: TokenBucket, tokens: int, now = Moment.now()) =
  ## Adds `tokens` to the budget and runs a replenish update.
  # NOTE(review): `update` only clamps the budget back to `budgetCap` when
  # `fillDuration` is zero or a full period has elapsed, so between periods
  # the budget can temporarily exceed the capacity — confirm this is intended
  # (the previous comment claimed an unconditional cap).
  bucket.budget += tokens
  bucket.update(now)
proc getMessages*(mb: MatterbridgeClient): MatterbridgeResult[seq[JsonNode]] =
  ## Fetches and decodes the queued messages from the bridge's /api/messages
  ## endpoint. Returns err on transport/JSON failures or a non-200 response.
  var
    response: Response
    msgs: seq[JsonNode]
  try:
    response = mb.hostClient.get($(mb.host / messages))
    msgs = parseJson(response.body()).getElems()
  except Exception as e:
    return err("failed to get messages: " & e.msg)

  # Fix: report an unexpected HTTP status through the Result instead of
  # `assert`, which is compiled out under -d:danger and otherwise raises a
  # Defect that callers of a Result-returning proc do not expect.
  if response.status != "200 OK":
    return err("failed to get messages, unexpected status: " & response.status)

  ok(msgs)
proc isHealthy*(mb: MatterbridgeClient): MatterbridgeResult[bool] =
  ## Probes the bridge's /api/health endpoint.
  ## ok(true) only when it answers "200 OK" with the exact body "OK";
  ## transport failures are reported as err.
  try:
    let response = mb.hostClient.get($(mb.host / health))
    let bodyIsOk = response.body == "OK"
    return ok(response.status == "200 OK" and bodyIsOk)
  except Exception as e:
    return err("failed to get health: " & e.msg)
proc setupNat*(
    natConf, clientId: string, tcpPort, udpPort: Port
): Result[
  tuple[ip: Option[IpAddress], tcpPort: Option[Port], udpPort: Option[Port]], string
] {.gcsafe.} =
  ## Resolves this node's externally visible endpoint according to `natConf`:
  ## "any"/"upnp"/"pmp" query the NAT device for the external IP and map the
  ## given ports; "extip:<ip>" trusts the supplied address (port mapping is
  ## assumed to be done by hand); any other value is an error.
  ## Tuple fields remain `none` when they could not be determined.
  let strategy =
    case natConf.toLowerAscii()
    of "any": NatAny
    of "none": NatNone
    of "upnp": NatUpnp
    of "pmp": NatPmp
    else: NatNone

  var endpoint:
    tuple[ip: Option[IpAddress], tcpPort: Option[Port], udpPort: Option[Port]]

  if strategy != NatNone:
    ## Only initialize the NAT module once
    ## redirectPorts cannot be called twice in a program lifetime.
    ## We can do it as same happens if getExternalIP fails and returns None
    if singletonNat:
      warn "NAT already initialized, skipping as cannot be done multiple times"
    else:
      singletonNat = true
      var extIp = Opt.none(IpAddress)
      try:
        extIp = getExternalIP(strategy)
      except Exception:
        # getExternalIP has no exception annotation; treat any failure as
        # "external IP unknown" and fall through with extIp == none.
        warn "exception in setupNat", error = getCurrentExceptionMsg()

      if extIP.isSome():
        endpoint.ip = some(extIp.get())
        # RedirectPorts in considered a gcsafety violation
        # because it obtains the address of a non-gcsafe proc?
        var extPorts: Opt[(Port, Port)]
        try:
          extPorts = (
            {.gcsafe.}:
              redirectPorts(
                tcpPort = tcpPort, udpPort = udpPort, description = clientId
              )
          )
        except CatchableError:
          # TODO: nat.nim Error: can raise an unlisted exception: Exception. Isolate here for now.
          error "unable to determine external ports"
          extPorts = Opt.none((Port, Port))

        if extPorts.isSome():
          let (extTcpPort, extUdpPort) = extPorts.get()
          endpoint.tcpPort = some(extTcpPort)
          endpoint.udpPort = some(extUdpPort)
  else: # NatNone
    if not natConf.startsWith("extip:"):
      return err("not a valid NAT mechanism: " & $natConf)

    try:
      # any required port redirection is assumed to be done by hand
      endpoint.ip = some(parseIpAddress(natConf[6 ..^ 1]))
    except ValueError:
      return err("not a valid IP address: " & $natConf[6 ..^ 1])

  return ok(endpoint)
proc flatten*[T](a: seq[seq[T]]): seq[T] =
  ## Concatenates the inner sequences of `a` into a single sequence,
  ## preserving element order.
  # Improvement: pre-compute the total length so the result is allocated
  # once instead of growing repeatedly while appending.
  var total = 0
  for subseq in a:
    total += subseq.len

  result = newSeqOfCap[T](total)
  for subseq in a:
    result.add subseq
proc getAutonatService*(rng: ref HmacDrbgContext): AutonatService =
  ## AutonatService requests other peers to dial us back, flagging us as
  ## Reachable or NotReachable.
  ## minConfidence is the threshold ratio of positive answers used to decide
  ## the state. If maxQueueSize > numPeersToAsk, past samples are also
  ## considered in the calculation.
  let autonatService = AutonatService.new(
    autonatClient = AutonatClient.new(),
    rng = rng,
    scheduleInterval = AutonatCheckInterval,
    askNewConnectedPeers = false,
    numPeersToAsk = 3,
    maxQueueSize = 3,
    minConfidence = 0.7,
  )

  # Log-only observer: reports each reachability verdict once a confidence
  # value is available; it does not alter node behaviour.
  proc statusAndConfidenceHandler(
      networkReachability: NetworkReachability, confidence: Opt[float]
  ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} =
    if confidence.isSome():
      info "Peer reachability status",
        networkReachability = networkReachability, confidence = confidence.get()

  autonatService.statusAndConfidenceHandler(statusAndConfidenceHandler)

  return autonatService
discv5" + +## Config + +# TODO: merge both conf +type Discv5Conf* {.requiresInit.} = object + # TODO: This should probably be an option on the builder + # But translated to everything else "false" on the config + bootstrapNodes*: seq[string] + udpPort*: Port + tableIpLimit*: uint + bucketIpLimit*: uint + bitsPerHop*: int + enrAutoUpdate*: bool + +type WakuDiscoveryV5Config* = object + discv5Config*: Option[DiscoveryConfig] + address*: IpAddress + port*: Port + privateKey*: eth_keys.PrivateKey + bootstrapRecords*: seq[waku_enr.Record] + autoupdateRecord*: bool + +## Protocol + +type WakuDiscv5Predicate* = + proc(record: waku_enr.Record): bool {.closure, gcsafe, raises: [].} + +type WakuDiscoveryV5* = ref object + conf: WakuDiscoveryV5Config + protocol*: protocol.Protocol + listening*: bool + predicate: Option[WakuDiscv5Predicate] + peerManager: Option[PeerManager] + topicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent] + +proc shardingPredicate*( + record: Record, bootnodes: seq[Record] = @[] +): Option[WakuDiscv5Predicate] = + ## Filter peers based on relay sharding information + let typedRecord = record.toTyped().valueOr: + debug "peer filtering failed", reason = error + return none(WakuDiscv5Predicate) + + let nodeShard = typedRecord.relaySharding().valueOr: + debug "no relay sharding information, peer filtering disabled" + return none(WakuDiscv5Predicate) + + debug "peer filtering updated" + + let predicate = proc(record: waku_enr.Record): bool = + bootnodes.contains(record) or # Temp. 
Bootnode exception + ( + record.getCapabilities().len > 0 and #RFC 31 requirement + nodeShard.shardIds.anyIt(record.containsShard(nodeShard.clusterId, it)) + ) #RFC 64 guideline + + return some(predicate) + +proc new*( + T: type WakuDiscoveryV5, + rng: ref HmacDrbgContext, + conf: WakuDiscoveryV5Config, + record: Option[waku_enr.Record], + peerManager: Option[PeerManager] = none(PeerManager), + queue: AsyncEventQueue[SubscriptionEvent] = + newAsyncEventQueue[SubscriptionEvent](30), +): T = + let protocol = newProtocol( + rng = rng, + config = conf.discv5Config.get(protocol.defaultDiscoveryConfig), + bindPort = conf.port, + bindIp = conf.address, + privKey = conf.privateKey, + bootstrapRecords = conf.bootstrapRecords, + enrAutoUpdate = conf.autoupdateRecord, + previousRecord = record.toOpt(), + enrIp = Opt.none(IpAddress), + enrTcpPort = Opt.none(Port), + enrUdpPort = Opt.none(Port), + ) + + let shardPredOp = + if record.isSome(): + shardingPredicate(record.get(), conf.bootstrapRecords) + else: + none(WakuDiscv5Predicate) + + WakuDiscoveryV5( + conf: conf, + protocol: protocol, + listening: false, + predicate: shardPredOp, + peerManager: peerManager, + topicSubscriptionQueue: queue, + ) + +proc updateAnnouncedMultiAddress*( + wd: WakuDiscoveryV5, addresses: seq[MultiAddress] +): Result[void, string] = + let encodedAddrs = multiaddr.encodeMultiaddrs(addresses) + + wd.protocol.updateRecord([(MultiaddrEnrField, encodedAddrs)]).isOkOr: + return err("failed to update multiaddress in ENR: " & $error) + + debug "ENR updated successfully with new multiaddress", + enrUri = wd.protocol.localNode.record.toUri(), enr = $(wd.protocol.localNode.record) + + return ok() + +proc updateENRShards( + wd: WakuDiscoveryV5, newTopics: seq[PubsubTopic], add: bool +): Result[void, string] = + ## Add or remove shards from the Discv5 ENR + let newShardOp = topicsToRelayShards(newTopics).valueOr: + return err("ENR update failed topicsToRelayShards: " & error) + + let newShard = 
newShardOp.valueOr: + return ok() + + let typedRecord = wd.protocol.localNode.record.toTyped().valueOr: + return err("ENR update failed toTyped: " & $error) + + let currentShardsOp = typedRecord.relaySharding() + + let resultShard = + if add and currentShardsOp.isSome(): + let currentShard = currentShardsOp.get() + + if currentShard.clusterId != newShard.clusterId: + return err("ENR update failed: clusterId id mismatch in add") + + RelayShards.init( + currentShard.clusterId, currentShard.shardIds & newShard.shardIds + ).valueOr: + return err("ENR update failed RelayShards.init in add: " & error) + elif not add and currentShardsOp.isSome(): + let currentShard = currentShardsOp.get() + + if currentShard.clusterId != newShard.clusterId: + return err("ENR update failed: clusterId id mismatch in not add") + + let currentSet = toHashSet(currentShard.shardIds) + let newSet = toHashSet(newShard.shardIds) + + let indices = toSeq(currentSet - newSet) + + if indices.len == 0: + return err("ENR update failed: cannot remove all shards") + + RelayShards.init(currentShard.clusterId, indices).valueOr: + return err("ENR update failed RelayShards.init in not add: " & error) + elif add and currentShardsOp.isNone(): + newShard + else: + return ok() + + let (field, value) = + if resultShard.shardIds.len >= ShardingIndicesListMaxLength: + (ShardingBitVectorEnrField, resultShard.toBitVector()) + else: + let list = resultShard.toIndicesList().valueOr: + return err("ENR update failed toIndicesList: " & $error) + + (ShardingIndicesListEnrField, list) + + wd.protocol.updateRecord([(field, value)]).isOkOr: + return err("ENR update failed updateRecord: " & $error) + + return ok() + +proc logDiscv5FoundPeers(discoveredRecords: seq[waku_enr.Record]) = + for record in discoveredRecords: + let recordUri = record.toURI() + let capabilities = record.getCapabilities() + + let typedRecord = record.toTyped().valueOr: + warn "Could not parse to typed record", error = error, enr = recordUri + continue + 
+ let peerInfo = record.toRemotePeerInfo().valueOr: + warn "Could not generate remote peer info", error = error, enr = recordUri + continue + + let addrs = peerInfo.constructMultiaddrStr() + + let rs = typedRecord.relaySharding() + let shardsStr = + if rs.isSome(): + $rs.get() + else: + "no shards found" + + notice "Received discv5 node", + addrs = addrs, enr = recordUri, capabilities = capabilities, shards = shardsStr + +proc findRandomPeers*( + wd: WakuDiscoveryV5, overridePred = none(WakuDiscv5Predicate) +): Future[seq[waku_enr.Record]] {.async.} = + ## Find random peers to connect to using Discovery v5 + let discoveredNodes = await wd.protocol.queryRandom() + + var discoveredRecords = discoveredNodes.mapIt(it.record) + + when defined(debugDiscv5): + logDiscv5FoundPeers(discoveredRecords) + + # Filter out nodes that do not match the predicate + if overridePred.isSome(): + discoveredRecords = discoveredRecords.filter(overridePred.get()) + elif wd.predicate.isSome(): + discoveredRecords = discoveredRecords.filter(wd.predicate.get()) + + # Increment metric for each discovered record's shards + for record in discoveredRecords: + let typedRecord = record.toTyped().valueOr: + # If we can't parse the record, skip it + waku_discv5_errors.inc(labelValues = ["ParseFailure"]) + continue + + let relayShards = typedRecord.relaySharding().valueOr: + # If no relay sharding info, skip it + waku_discv5_errors.inc(labelValues = ["NoShardInfo"]) + continue + + for shardId in relayShards.shardIds: + waku_discv5_discovered_per_shard.inc(labelValues = [$shardId]) + + return discoveredRecords + +proc searchLoop(wd: WakuDiscoveryV5) {.async.} = + ## Continuously add newly discovered nodes + + let peerManager = wd.peerManager.valueOr: + return + + info "Starting discovery v5 search" + + while wd.listening: + trace "running discv5 discovery loop" + let discoveredRecords = await wd.findRandomPeers() + + var discoveredPeers: seq[RemotePeerInfo] + var wrongRecordsReasons: seq[tuple[record: 
string, errorDescription: string]] + ## this is to store the reasons why certain records could not be converted to RemotePeerInfo + + for record in discoveredRecords: + let peerInfo = record.toRemotePeerInfo().valueOr: + ## in case of error, we keep track of it for debugging purposes + wrongRecordsReasons.add(($record, $error)) + waku_discv5_errors.inc(labelValues = [$error]) + continue + + discoveredPeers.add(peerInfo) + + trace "discv5 discovered peers", + num_discovered_peers = discoveredPeers.len, + peers = toSeq(discoveredPeers.mapIt(shortLog(it.peerId))) + + trace "discv5 discarded wrong records", + wrong_records = + wrongRecordsReasons.mapIt("(" & it.record & "," & it.errorDescription & ")") + + for peer in discoveredPeers: + # Peers added are filtered by the peer manager + peerManager.addPeer(peer, PeerOrigin.Discv5) + + # Discovery `queryRandom` can have a synchronous fast path for example + # when no peers are in the routing table. Don't run it in continuous loop. + # + # Also, give some time to dial the discovered nodes and update stats, etc. + await sleepAsync(5.seconds) + +proc subscriptionsListener(wd: WakuDiscoveryV5) {.async.} = + ## Listen for pubsub topics subscriptions changes + + let key = wd.topicSubscriptionQueue.register() + + while wd.listening: + let events = await wd.topicSubscriptionQueue.waitEvents(key) + + # Since we don't know the events we will receive we have to anticipate. 
+ + let subs = events.filterIt(it.kind == PubsubSub).mapIt(it.topic) + let unsubs = events.filterIt(it.kind == PubsubUnsub).mapIt(it.topic) + + if subs.len == 0 and unsubs.len == 0: + continue + + let unsubRes = wd.updateENRShards(unsubs, false) + let subRes = wd.updateENRShards(subs, true) + + if subRes.isErr(): + debug "ENR shard addition failed", reason = $subRes.error + + if unsubRes.isErr(): + debug "ENR shard removal failed", reason = $unsubRes.error + + if subRes.isErr() and unsubRes.isErr(): + continue + + debug "ENR updated successfully", + enrUri = wd.protocol.localNode.record.toUri(), + enr = $(wd.protocol.localNode.record) + + wd.predicate = + shardingPredicate(wd.protocol.localNode.record, wd.protocol.bootstrapRecords) + + wd.topicSubscriptionQueue.unregister(key) + +proc start*(wd: WakuDiscoveryV5): Future[Result[void, string]] {.async: (raises: []).} = + if wd.listening: + return err("already listening") + + info "Starting discovery v5 service" + + debug "start listening on udp port", address = $wd.conf.address, port = $wd.conf.port + try: + wd.protocol.open() + except CatchableError: + return err("failed to open udp port: " & getCurrentExceptionMsg()) + + wd.listening = true + + trace "start discv5 service" + wd.protocol.start() + + asyncSpawn wd.searchLoop() + asyncSpawn wd.subscriptionsListener() + + debug "Successfully started discovery v5 service" + info "Discv5: discoverable ENR ", + enrUri = wd.protocol.localNode.record.toUri(), enr = $(wd.protocol.localNode.record) + + ok() + +proc stop*(wd: WakuDiscoveryV5): Future[void] {.async.} = + if not wd.listening: + return + + info "Stopping discovery v5 service" + + wd.listening = false + trace "Stop listening on discv5 port" + await wd.protocol.closeWait() + + debug "Successfully stopped discovery v5 service" + +## Helper functions + +proc parseBootstrapAddress(address: string): Result[enr.Record, cstring] = + logScope: + address = address + + if address[0] == '/': + return err("MultiAddress 
bootstrap addresses are not supported") + + let lowerCaseAddress = toLowerAscii(address) + if lowerCaseAddress.startsWith("enr:"): + var enrRec: enr.Record + if not enrRec.fromURI(address): + return err("Invalid ENR bootstrap record") + + return ok(enrRec) + elif lowerCaseAddress.startsWith("enode:"): + return err("ENode bootstrap addresses are not supported") + else: + return err("Ignoring unrecognized bootstrap address type") + +proc addBootstrapNode*(bootstrapAddr: string, bootstrapEnrs: var seq[enr.Record]) = + # Ignore empty lines or lines starting with # + if bootstrapAddr.len == 0 or bootstrapAddr[0] == '#': + return + + let enrRes = parseBootstrapAddress(bootstrapAddr) + if enrRes.isErr(): + debug "ignoring invalid bootstrap address", reason = enrRes.error + return + + bootstrapEnrs.add(enrRes.value) + +proc setupDiscoveryV5*( + myENR: enr.Record, + nodePeerManager: PeerManager, + nodeTopicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent], + conf: Discv5Conf, + dynamicBootstrapNodes: seq[RemotePeerInfo], + rng: ref HmacDrbgContext, + key: crypto.PrivateKey, + p2pListenAddress: IpAddress, + portsShift: uint16, +): WakuDiscoveryV5 = + let dynamicBootstrapEnrs = + dynamicBootstrapNodes.filterIt(it.hasUdpPort()).mapIt(it.enr.get()) + + var discv5BootstrapEnrs: seq[enr.Record] + + # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq + for enrUri in conf.bootstrapNodes: + addBootstrapNode(enrUri, discv5BootstrapEnrs) + + for enr in discv5BootstrapEnrs: + let peerInfoRes = enr.toRemotePeerInfo() + if peerInfoRes.isOk(): + nodePeerManager.addPeer(peerInfoRes.get(), PeerOrigin.Discv5) + else: + debug "could not convert discv5 bootstrap node to peerInfo, not adding peer to Peer Store", + enr = enr.toUri(), error = peerInfoRes.error + + discv5BootstrapEnrs.add(dynamicBootstrapEnrs) + + let discv5Config = + DiscoveryConfig.init(conf.tableIpLimit, conf.bucketIpLimit, conf.bitsPerHop) + + let discv5UdpPort = 
Port(uint16(conf.udpPort) + portsShift)
+
+  let discv5Conf = WakuDiscoveryV5Config(
+    discv5Config: some(discv5Config),
+    address: p2pListenAddress,
+    port: discv5UdpPort,
+    privateKey: eth_keys.PrivateKey(key.skkey),
+    bootstrapRecords: discv5BootstrapEnrs,
+    autoupdateRecord: conf.enrAutoUpdate,
+  )
+
+  WakuDiscoveryV5.new(
+    rng, discv5Conf, some(myENR), some(nodePeerManager), nodeTopicSubscriptionQueue
+  )
+
+proc updateBootstrapRecords*(
+    self: var WakuDiscoveryV5, newRecordsString: string
+): Result[void, string] =
+  ## newRecordsString - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
+  var newRecords = newSeq[waku_enr.Record]()
+
+  var jsonNode: JsonNode
+  try:
+    jsonNode = parseJson(newRecordsString)
+  except Exception:
+    return err("exception parsing json enr records: " & getCurrentExceptionMsg())
+
+  if jsonNode.kind != JArray:
+    return err("updateBootstrapRecords should receive a json array containing ENRs")
+
+  for enr in jsonNode:
+    let enrWithoutQuotes = ($enr).replace("\"", "")
+    var bootstrapNodeEnr: waku_enr.Record
+    if not bootstrapNodeEnr.fromURI(enrWithoutQuotes):
+      return err("wrong enr given: " & enrWithoutQuotes)
+    # Keep the validated record: previously it was parsed but never appended,
+    # so the assignment below always installed an empty seq and silently
+    # wiped all bootstrap records.
+    newRecords.add(bootstrapNodeEnr)
+
+  self.protocol.bootstrapRecords = newRecords
+  self.protocol.seedTable()
+
+  return ok()
+
+proc updateBootstrapRecords*(
+    self: var WakuDiscoveryV5, updatedRecords: seq[enr.Record]
+): void =
+  self.protocol.bootstrapRecords = updatedRecords
+
+  # If we're updating the table with nodes that already existed, it will log an error when trying
+  # to add a bootstrap node that was already there. That's ok.
+  self.protocol.seedTable()
diff --git a/third-party/nwaku/waku/discovery/waku_dnsdisc.nim b/third-party/nwaku/waku/discovery/waku_dnsdisc.nim
new file mode 100644
index 0000000..995732a
--- /dev/null
+++ b/third-party/nwaku/waku/discovery/waku_dnsdisc.nim
@@ -0,0 +1,130 @@
+{.push raises: [].}
+
+## A set of utilities to integrate EIP-1459 DNS-based discovery
+## for Waku v2 nodes.
+## +## EIP-1459 is defined in https://eips.ethereum.org/EIPS/eip-1459 + +import + std/[options, net, sequtils], + chronicles, + chronos, + metrics, + eth/keys, + eth/p2p/discoveryv5/enr, + libp2p/crypto/crypto, + libp2p/crypto/secp, + libp2p/multiaddress, + libp2p/peerid, + dnsdisc/client +import libp2p/nameresolving/dnsresolver +import ../waku_core + +export client + +declarePublicGauge waku_dnsdisc_discovered, "number of nodes discovered" +declarePublicCounter waku_dnsdisc_errors, "number of waku dnsdisc errors", ["type"] + +logScope: + topics = "waku dnsdisc" + +type WakuDnsDiscovery* = object + client*: Client + resolver*: Resolver + +##################### +# DNS Discovery API # +##################### + +proc emptyResolver*(domain: string): Future[string] {.async, gcsafe.} = + debug "Empty resolver called", domain = domain + return "" + +proc findPeers*( + wdd: WakuDnsDiscovery +): Future[Result[seq[RemotePeerInfo], cstring]] {.async.} = + ## Find peers to connect to using DNS based discovery + + info "Finding peers using Waku DNS discovery" + + # Synchronise client tree using configured resolver + var tree: Tree + try: + tree = (await syncTree(wdd.resolver, wdd.client.loc)).tryGet() + except Exception: + error "Failed to synchronise client tree" + waku_dnsdisc_errors.inc(labelValues = ["tree_sync_failure"]) + return err("Node discovery failed") + + let discoveredEnr = tree.getNodes().mapIt(it.record) + + if discoveredEnr.len > 0: + info "Successfully discovered ENR", count = discoveredEnr.len + else: + trace "No ENR retrieved from client tree" + + var discoveredNodes: seq[RemotePeerInfo] + + for enr in discoveredEnr: + # Convert discovered ENR to RemotePeerInfo and add to discovered nodes + let res = enr.toRemotePeerInfo() + + if res.isOk(): + discoveredNodes.add(res.get()) + else: + error "Failed to convert ENR to peer info", enr = $enr, err = res.error() + waku_dnsdisc_errors.inc(labelValues = ["peer_info_failure"]) + + if discoveredNodes.len > 0: + info 
"Successfully discovered nodes", count = discoveredNodes.len + waku_dnsdisc_discovered.inc(discoveredNodes.len.int64) + + return ok(discoveredNodes) + +proc init*( + T: type WakuDnsDiscovery, locationUrl: string, resolver: Resolver +): Result[T, cstring] = + ## Initialise Waku peer discovery via DNS + + debug "init WakuDnsDiscovery", locationUrl = locationUrl + + let + client = ?Client.init(locationUrl) + wakuDnsDisc = WakuDnsDiscovery(client: client, resolver: resolver) + + debug "init success" + + return ok(wakuDnsDisc) + +proc retrieveDynamicBootstrapNodes*( + dnsDiscoveryUrl: string, dnsAddrsNameServers: seq[IpAddress] +): Future[Result[seq[RemotePeerInfo], string]] {.async.} = + ## Retrieve dynamic bootstrap nodes (DNS discovery) + + if dnsDiscoveryUrl != "": + # DNS discovery + debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl + + var nameServers: seq[TransportAddress] + for ip in dnsAddrsNameServers: + nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53 + + let dnsResolver = DnsResolver.new(nameServers) + + proc resolver(domain: string): Future[string] {.async, gcsafe.} = + trace "resolving", domain = domain + let resolved = await dnsResolver.resolveTxt(domain) + if resolved.len > 0: + return resolved[0] # Use only first answer + + var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver) + if wakuDnsDiscovery.isOk(): + return (await wakuDnsDiscovery.get().findPeers()).mapErr( + proc(e: cstring): string = + $e + ) + else: + warn "Failed to init Waku DNS discovery" + + debug "No method for retrieving dynamic bootstrap nodes specified." 
+ ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default diff --git a/third-party/nwaku/waku/factory/app_callbacks.nim b/third-party/nwaku/waku/factory/app_callbacks.nim new file mode 100644 index 0000000..d28b9f2 --- /dev/null +++ b/third-party/nwaku/waku/factory/app_callbacks.nim @@ -0,0 +1,6 @@ +import ../waku_relay, ../node/peer_manager + +type AppCallbacks* = ref object + relayHandler*: WakuRelayHandler + topicHealthChangeHandler*: TopicHealthChangeHandler + connectionChangeHandler*: ConnectionChangeHandler diff --git a/third-party/nwaku/waku/factory/builder.nim b/third-party/nwaku/waku/factory/builder.nim new file mode 100644 index 0000000..772cfbf --- /dev/null +++ b/third-party/nwaku/waku/factory/builder.nim @@ -0,0 +1,227 @@ +{.push raises: [].} + +import + std/[options, net, math], + results, + chronicles, + libp2p/crypto/crypto, + libp2p/builders, + libp2p/nameresolving/nameresolver, + libp2p/transports/wstransport, + libp2p/protocols/connectivity/relay/relay +import + ../waku_enr, + ../discovery/waku_discv5, + ../waku_node, + ../node/peer_manager, + ../common/rate_limit/setting, + ../common/utils/parse_size_units + +type + WakuNodeBuilder* = object # General + nodeRng: Option[ref crypto.HmacDrbgContext] + nodeKey: Option[crypto.PrivateKey] + netConfig: Option[NetConfig] + record: Option[enr.Record] + + # Peer storage and peer manager + peerStorage: Option[PeerStorage] + peerStorageCapacity: Option[int] + + # Peer manager config + maxRelayPeers: int + maxServicePeers: int + colocationLimit: int + shardAware: bool + + # Libp2p switch + switchMaxConnections: Option[int] + switchNameResolver: Option[NameResolver] + switchAgentString: Option[string] + switchSslSecureKey: Option[string] + switchSslSecureCert: Option[string] + switchSendSignedPeerRecord: Option[bool] + circuitRelay: Relay + + # Rate limit configs for non-relay req-resp protocols + rateLimitSettings: Option[ProtocolRateLimitSettings] + + WakuNodeBuilderResult* = Result[void, string] + 
+## Init + +proc init*(T: type WakuNodeBuilder): WakuNodeBuilder = + WakuNodeBuilder() + +## General + +proc withRng*(builder: var WakuNodeBuilder, rng: ref crypto.HmacDrbgContext) = + builder.nodeRng = some(rng) + +proc withNodeKey*(builder: var WakuNodeBuilder, nodeKey: crypto.PrivateKey) = + builder.nodeKey = some(nodeKey) + +proc withRecord*(builder: var WakuNodeBuilder, record: enr.Record) = + builder.record = some(record) + +proc withNetworkConfiguration*(builder: var WakuNodeBuilder, config: NetConfig) = + builder.netConfig = some(config) + +proc withNetworkConfigurationDetails*( + builder: var WakuNodeBuilder, + bindIp: IpAddress, + bindPort: Port, + extIp = none(IpAddress), + extPort = none(Port), + extMultiAddrs = newSeq[MultiAddress](), + wsBindPort: Port = Port(8000), + wsEnabled: bool = false, + wssEnabled: bool = false, + wakuFlags = none(CapabilitiesBitfield), + dns4DomainName = none(string), + dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], +): WakuNodeBuilderResult {. 
+ deprecated: "use 'builder.withNetworkConfiguration()' instead" +.} = + let netConfig = + ?NetConfig.init( + bindIp = bindIp, + bindPort = bindPort, + extIp = extIp, + extPort = extPort, + extMultiAddrs = extMultiAddrs, + wsBindPort = some(wsBindPort), + wsEnabled = wsEnabled, + wssEnabled = wssEnabled, + wakuFlags = wakuFlags, + dns4DomainName = dns4DomainName, + dnsNameServers = dnsNameServers, + ) + builder.withNetworkConfiguration(netConfig) + ok() + +## Peer storage and peer manager + +proc withPeerStorage*( + builder: var WakuNodeBuilder, peerStorage: PeerStorage, capacity = none(int) +) = + if not peerStorage.isNil(): + builder.peerStorage = some(peerStorage) + + builder.peerStorageCapacity = capacity + +proc withPeerManagerConfig*( + builder: var WakuNodeBuilder, + maxConnections: int, + relayServiceRatio: string, + shardAware = false, +) = + let (relayRatio, serviceRatio) = parseRelayServiceRatio(relayServiceRatio).get() + var relayPeers = int(ceil(float(maxConnections) * relayRatio)) + var servicePeers = int(floor(float(maxConnections) * serviceRatio)) + + builder.maxServicePeers = servicePeers + builder.maxRelayPeers = relayPeers + builder.shardAware = shardAware + +proc withColocationLimit*(builder: var WakuNodeBuilder, colocationLimit: int) = + builder.colocationLimit = colocationLimit + +proc withRateLimit*(builder: var WakuNodeBuilder, limits: ProtocolRateLimitSettings) = + builder.rateLimitSettings = some(limits) + +proc withCircuitRelay*(builder: var WakuNodeBuilder, circuitRelay: Relay) = + builder.circuitRelay = circuitRelay + +## Waku switch + +proc withSwitchConfiguration*( + builder: var WakuNodeBuilder, + maxConnections = none(int), + nameResolver: NameResolver = nil, + sendSignedPeerRecord = false, + secureKey = none(string), + secureCert = none(string), + agentString = none(string), +) = + builder.switchMaxConnections = maxConnections + builder.switchSendSignedPeerRecord = some(sendSignedPeerRecord) + builder.switchSslSecureKey = secureKey 
+ builder.switchSslSecureCert = secureCert + builder.switchAgentString = agentString + + if not nameResolver.isNil(): + builder.switchNameResolver = some(nameResolver) + +## Build + +proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] = + var rng: ref crypto.HmacDrbgContext + if builder.nodeRng.isNone(): + rng = crypto.newRng() + else: + rng = builder.nodeRng.get() + + if builder.nodeKey.isNone(): + return err("node key is required") + + if builder.netConfig.isNone(): + return err("network configuration is required") + + let netConfig = builder.netConfig.get() + if netConfig.dnsNameServers.len == 0: + return err("DNS name servers are required for WakuNode") + + if builder.record.isNone(): + return err("node record is required") + + let circuitRelay = + if builder.circuitRelay.isNil(): + Relay.new() + else: + builder.circuitRelay + + var switch: Switch + try: + switch = newWakuSwitch( + privKey = builder.nodekey, + address = builder.netConfig.get().hostAddress, + wsAddress = builder.netConfig.get().wsHostAddress, + transportFlags = {ServerFlags.ReuseAddr, ServerFlags.TcpNoDelay}, + rng = rng, + maxConnections = builder.switchMaxConnections.get(builders.MaxConnections), + wssEnabled = builder.netConfig.get().wssEnabled, + secureKeyPath = builder.switchSslSecureKey.get(""), + secureCertPath = builder.switchSslSecureCert.get(""), + nameResolver = builder.switchNameResolver.get(nil), + sendSignedPeerRecord = builder.switchSendSignedPeerRecord.get(false), + agentString = builder.switchAgentString, + peerStoreCapacity = builder.peerStorageCapacity, + circuitRelay = circuitRelay, + ) + except CatchableError: + return err("failed to create switch: " & getCurrentExceptionMsg()) + + let peerManager = PeerManager.new( + switch = switch, + storage = builder.peerStorage.get(nil), + maxRelayPeers = some(builder.maxRelayPeers), + maxServicePeers = some(builder.maxServicePeers), + colocationLimit = builder.colocationLimit, + shardedPeerManagement = builder.shardAware, + 
) + + var node: WakuNode + try: + node = WakuNode.new( + netConfig = netConfig, + enr = builder.record.get(), + switch = switch, + peerManager = peerManager, + rng = rng, + rateLimitSettings = builder.rateLimitSettings.get(DefaultProtocolRateLimit), + ) + except Exception: + return err("failed to build WakuNode instance: " & getCurrentExceptionMsg()) + + ok(node) diff --git a/third-party/nwaku/waku/factory/conf_builder/conf_builder.nim b/third-party/nwaku/waku/factory/conf_builder/conf_builder.nim new file mode 100644 index 0000000..37cea76 --- /dev/null +++ b/third-party/nwaku/waku/factory/conf_builder/conf_builder.nim @@ -0,0 +1,19 @@ +import + ./waku_conf_builder, + ./filter_service_conf_builder, + ./store_sync_conf_builder, + ./store_service_conf_builder, + ./rest_server_conf_builder, + ./dns_discovery_conf_builder, + ./discv5_conf_builder, + ./web_socket_conf_builder, + ./metrics_server_conf_builder, + ./rate_limit_conf_builder, + ./rln_relay_conf_builder, + ./mix_conf_builder + +export + waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder, + store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder, + discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder, + rate_limit_conf_builder, rln_relay_conf_builder, mix_conf_builder diff --git a/third-party/nwaku/waku/factory/conf_builder/discv5_conf_builder.nim b/third-party/nwaku/waku/factory/conf_builder/discv5_conf_builder.nim new file mode 100644 index 0000000..e272902 --- /dev/null +++ b/third-party/nwaku/waku/factory/conf_builder/discv5_conf_builder.nim @@ -0,0 +1,63 @@ +import chronicles, std/[net, options, sequtils], results +import ../waku_conf + +logScope: + topics = "waku conf builder discv5" + +########################### +## Discv5 Config Builder ## +########################### +type Discv5ConfBuilder* = object + enabled*: Option[bool] + + bootstrapNodes*: seq[string] + bitsPerHop*: Option[int] + bucketIpLimit*: Option[uint] + enrAutoUpdate*: 
Option[bool] + tableIpLimit*: Option[uint] + udpPort*: Option[Port] + +proc init*(T: type Discv5ConfBuilder): Discv5ConfBuilder = + Discv5ConfBuilder() + +proc withEnabled*(b: var Discv5ConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withBitsPerHop*(b: var Discv5ConfBuilder, bitsPerHop: int) = + b.bitsPerHop = some(bitsPerHop) + +proc withBucketIpLimit*(b: var Discv5ConfBuilder, bucketIpLimit: uint) = + b.bucketIpLimit = some(bucketIpLimit) + +proc withEnrAutoUpdate*(b: var Discv5ConfBuilder, enrAutoUpdate: bool) = + b.enrAutoUpdate = some(enrAutoUpdate) + +proc withTableIpLimit*(b: var Discv5ConfBuilder, tableIpLimit: uint) = + b.tableIpLimit = some(tableIpLimit) + +proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: Port) = + b.udpPort = some(udpPort) + +proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: uint) = + b.udpPort = some(Port(udpPort.uint16)) + +proc withBootstrapNodes*(b: var Discv5ConfBuilder, bootstrapNodes: seq[string]) = + # TODO: validate ENRs? + b.bootstrapNodes = concat(b.bootstrapNodes, bootstrapNodes) + +proc build*(b: Discv5ConfBuilder): Result[Option[Discv5Conf], string] = + if not b.enabled.get(false): + return ok(none(Discv5Conf)) + + return ok( + some( + Discv5Conf( + bootstrapNodes: b.bootstrapNodes, + bitsPerHop: b.bitsPerHop.get(1), + bucketIpLimit: b.bucketIpLimit.get(2), + enrAutoUpdate: b.enrAutoUpdate.get(true), + tableIpLimit: b.tableIpLimit.get(10), + udpPort: b.udpPort.get(9000.Port), + ) + ) + ) diff --git a/third-party/nwaku/waku/factory/conf_builder/dns_discovery_conf_builder.nim b/third-party/nwaku/waku/factory/conf_builder/dns_discovery_conf_builder.nim new file mode 100644 index 0000000..1c577bb --- /dev/null +++ b/third-party/nwaku/waku/factory/conf_builder/dns_discovery_conf_builder.nim @@ -0,0 +1,34 @@ +import chronicles, std/[net, options, strutils], results +import ../waku_conf + +logScope: + topics = "waku conf builder dns discovery" + +################################## +## DNS Discovery Config 
Builder ## +################################## +type DnsDiscoveryConfBuilder* = object + enrTreeUrl*: Option[string] + nameServers*: seq[IpAddress] + +proc init*(T: type DnsDiscoveryConfBuilder): DnsDiscoveryConfBuilder = + DnsDiscoveryConfBuilder() + +proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) = + b.enrTreeUrl = some(enrTreeUrl) + +proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress]) = + b.nameServers = nameServers + +proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] = + if b.enrTreeUrl.isNone(): + return ok(none(DnsDiscoveryConf)) + + if isEmptyOrWhiteSpace(b.enrTreeUrl.get()): + return err("dnsDiscovery.enrTreeUrl cannot be an empty string") + if b.nameServers.len == 0: + return err("dnsDiscovery.nameServers is not specified") + + return ok( + some(DnsDiscoveryConf(nameServers: b.nameServers, enrTreeUrl: b.enrTreeUrl.get())) + ) diff --git a/third-party/nwaku/waku/factory/conf_builder/filter_service_conf_builder.nim b/third-party/nwaku/waku/factory/conf_builder/filter_service_conf_builder.nim new file mode 100644 index 0000000..a3f056b --- /dev/null +++ b/third-party/nwaku/waku/factory/conf_builder/filter_service_conf_builder.nim @@ -0,0 +1,45 @@ +import chronicles, std/options, results +import ../waku_conf + +logScope: + topics = "waku conf builder filter service" + +################################### +## Filter Service Config Builder ## +################################### +type FilterServiceConfBuilder* = object + enabled*: Option[bool] + maxPeersToServe*: Option[uint32] + subscriptionTimeout*: Option[uint16] + maxCriteria*: Option[uint32] + +proc init*(T: type FilterServiceConfBuilder): FilterServiceConfBuilder = + FilterServiceConfBuilder() + +proc withEnabled*(b: var FilterServiceConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withMaxPeersToServe*(b: var FilterServiceConfBuilder, maxPeersToServe: uint32) = + b.maxPeersToServe = 
some(maxPeersToServe) + +proc withSubscriptionTimeout*( + b: var FilterServiceConfBuilder, subscriptionTimeout: uint16 +) = + b.subscriptionTimeout = some(subscriptionTimeout) + +proc withMaxCriteria*(b: var FilterServiceConfBuilder, maxCriteria: uint32) = + b.maxCriteria = some(maxCriteria) + +proc build*(b: FilterServiceConfBuilder): Result[Option[FilterServiceConf], string] = + if not b.enabled.get(false): + return ok(none(FilterServiceConf)) + + return ok( + some( + FilterServiceConf( + maxPeersToServe: b.maxPeersToServe.get(500), + subscriptionTimeout: b.subscriptionTimeout.get(300), + maxCriteria: b.maxCriteria.get(1000), + ) + ) + ) diff --git a/third-party/nwaku/waku/factory/conf_builder/metrics_server_conf_builder.nim b/third-party/nwaku/waku/factory/conf_builder/metrics_server_conf_builder.nim new file mode 100644 index 0000000..0f0d185 --- /dev/null +++ b/third-party/nwaku/waku/factory/conf_builder/metrics_server_conf_builder.nim @@ -0,0 +1,47 @@ +import chronicles, std/[net, options], results +import ../waku_conf + +logScope: + topics = "waku conf builder metrics server" + +################################### +## Metrics Server Config Builder ## +################################### +type MetricsServerConfBuilder* = object + enabled*: Option[bool] + + httpAddress*: Option[IpAddress] + httpPort*: Option[Port] + logging*: Option[bool] + +proc init*(T: type MetricsServerConfBuilder): MetricsServerConfBuilder = + MetricsServerConfBuilder() + +proc withEnabled*(b: var MetricsServerConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withHttpAddress*(b: var MetricsServerConfBuilder, httpAddress: IpAddress) = + b.httpAddress = some(httpAddress) + +proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: Port) = + b.httpPort = some(httpPort) + +proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: uint16) = + b.httpPort = some(Port(httpPort)) + +proc withLogging*(b: var MetricsServerConfBuilder, logging: bool) = + b.logging = 
##########################
## Mix Config Builder   ##
##########################
type MixConfBuilder* = object
  enabled: Option[bool]
  mixKey: Option[string] # hex-encoded Curve25519 private key

proc init*(T: type MixConfBuilder): MixConfBuilder =
  MixConfBuilder()

proc withEnabled*(b: var MixConfBuilder, enabled: bool) =
  b.enabled = some(enabled)

proc withMixKey*(b: var MixConfBuilder, mixKey: string) =
  b.mixKey = some(mixKey)

proc build*(b: MixConfBuilder): Result[Option[MixConf], string] =
  ## `none` unless mix is explicitly enabled. When enabled, either decode
  ## the user-supplied hex key and derive its public key, or generate a
  ## fresh key pair.
  if not b.enabled.get(false):
    return ok(none[MixConf]())

  if b.mixKey.isSome():
    # NOTE(review): assumes the hex string decodes to a valid Curve25519
    # key length — no explicit length/format validation is done here;
    # confirm upstream input checks.
    let privKey = intoCurve25519Key(ncrutils.fromHex(b.mixKey.get()))
    return ok(some(MixConf(mixKey: privKey, mixPubKey: public(privKey))))

  let (privKey, pubKey) = generateKeyPair().valueOr:
    return err("Generate key pair error: " & $error)
  ok(some(MixConf(mixKey: privKey, mixPubKey: pubKey)))
type RateLimitConfBuilder* = object
  ## Holds either a textual (`strValue`) or an already-parsed (`objValue`)
  ## rate-limit setting; at most one of the two may be set.
  strValue: Option[seq[string]]
  objValue: Option[ProtocolRateLimitSettings]

proc init*(T: type RateLimitConfBuilder): RateLimitConfBuilder =
  RateLimitConfBuilder()

proc withRateLimits*(b: var RateLimitConfBuilder, rateLimits: seq[string]) =
  b.strValue = some(rateLimits)

proc build*(b: RateLimitConfBuilder): Result[ProtocolRateLimitSettings, string] =
  ## Parses/returns the configured limits; falls back to the protocol
  ## default when nothing was set. Errors if both forms were supplied.
  if b.strValue.isSome() and b.objValue.isSome():
    return err("Rate limits conf must only be set once on the builder")

  if b.objValue.isSome():
    return ok(b.objValue.get())

  if b.strValue.isNone():
    return ok(DefaultProtocolRateLimit)

  let parsed = ProtocolRateLimitSettings.parse(b.strValue.get()).valueOr:
    return err("Invalid rate limits settings:" & $error)
  ok(parsed)
proc withAllowOrigin*(b: var RestServerConfBuilder, allowOrigin: seq[string]) =
  ## Accumulates: repeated calls append to previously added origins.
  b.allowOrigin = concat(b.allowOrigin, allowOrigin)

proc withListenAddress*(b: var RestServerConfBuilder, listenAddress: IpAddress) =
  b.listenAddress = some(listenAddress)

proc withPort*(b: var RestServerConfBuilder, port: Port) =
  b.port = some(port)

proc withPort*(b: var RestServerConfBuilder, port: uint16) =
  b.port = some(Port(port))

proc withAdmin*(b: var RestServerConfBuilder, admin: bool) =
  b.admin = some(admin)

proc withRelayCacheCapacity*(b: var RestServerConfBuilder, relayCacheCapacity: uint32) =
  b.relayCacheCapacity = some(relayCacheCapacity)

proc build*(b: RestServerConfBuilder): Result[Option[RestServerConf], string] =
  ## `none` when the REST server is disabled; otherwise the listen address,
  ## port and relay cache capacity are mandatory, while `admin` defaults
  ## to false.
  if not b.enabled.get(false):
    return ok(none(RestServerConf))

  if b.listenAddress.isNone():
    return err("restServer.listenAddress is not specified")
  if b.port.isNone():
    return err("restServer.port is not specified")
  if b.relayCacheCapacity.isNone():
    return err("restServer.relayCacheCapacity is not specified")

  let conf = RestServerConf(
    allowOrigin: b.allowOrigin,
    listenAddress: b.listenAddress.get(),
    port: b.port.get(),
    admin: b.admin.get(false),
    relayCacheCapacity: b.relayCacheCapacity.get(),
  )
  ok(some(conf))
proc init*(T: type RlnRelayConfBuilder): RlnRelayConfBuilder =
  RlnRelayConfBuilder()

proc withEnabled*(b: var RlnRelayConfBuilder, enabled: bool) =
  b.enabled = some(enabled)

proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint | UInt256) =
  ## Accepts either a native uint (widened via its big-endian bytes) or an
  ## already-built UInt256.
  when chainId is uint:
    b.chainId = some(UInt256.fromBytesBE(chainId.toBytesBE()))
  else:
    b.chainId = some(chainId)

proc withCredIndex*(b: var RlnRelayConfBuilder, credIndex: uint) =
  b.credIndex = some(credIndex)

proc withCredPassword*(b: var RlnRelayConfBuilder, credPassword: string) =
  b.credPassword = some(credPassword)

proc withCredPath*(b: var RlnRelayConfBuilder, credPath: string) =
  b.credPath = some(credPath)

proc withDynamic*(b: var RlnRelayConfBuilder, dynamic: bool) =
  b.dynamic = some(dynamic)

proc withEthClientUrls*(b: var RlnRelayConfBuilder, ethClientUrls: seq[string]) =
  b.ethClientUrls = some(ethClientUrls)

proc withEthContractAddress*(b: var RlnRelayConfBuilder, ethContractAddress: string) =
  b.ethContractAddress = some(ethContractAddress)

proc withEpochSizeSec*(b: var RlnRelayConfBuilder, epochSizeSec: uint64) =
  b.epochSizeSec = some(epochSizeSec)

proc withUserMessageLimit*(b: var RlnRelayConfBuilder, userMessageLimit: uint64) =
  b.userMessageLimit = some(userMessageLimit)

proc build*(b: RlnRelayConfBuilder): Result[Option[RlnRelayConf], string] =
  ## `none` when RLN relay is disabled. Credential path and password must be
  ## supplied together or not at all; chain id, dynamic flag, ETH client
  ## URLs, contract address, epoch size and user message limit are all
  ## mandatory.
  if not b.enabled.get(false):
    return ok(none(RlnRelayConf))

  if b.chainId.isNone():
    return err("RLN Relay Chain Id is not specified")

  let creds =
    if b.credPath.isSome() and b.credPassword.isSome():
      some(RlnRelayCreds(path: b.credPath.get(), password: b.credPassword.get()))
    elif b.credPath.isSome():
      return err("RLN Relay Credential Password is not specified but path is")
    elif b.credPassword.isSome():
      return err("RLN Relay Credential Path is not specified but password is")
    else:
      none(RlnRelayCreds)

  if b.dynamic.isNone():
    return err("rlnRelay.dynamic is not specified")
  if b.ethClientUrls.get(newSeq[string](0)).len == 0:
    return err("rlnRelay.ethClientUrls is not specified")
  if b.ethContractAddress.get("") == "":
    return err("rlnRelay.ethContractAddress is not specified")
  if b.epochSizeSec.isNone():
    return err("rlnRelay.epochSizeSec is not specified")
  if b.userMessageLimit.isNone():
    return err("rlnRelay.userMessageLimit is not specified")

  ok(
    some(
      RlnRelayConf(
        chainId: b.chainId.get(),
        credIndex: b.credIndex,
        creds: creds,
        dynamic: b.dynamic.get(),
        ethClientUrls: b.ethClientUrls.get(),
        ethContractAddress: b.ethContractAddress.get(),
        epochSizeSec: b.epochSizeSec.get(),
        userMessageLimit: b.userMessageLimit.get(),
      )
    )
  )
proc withEnabled*(b: var StoreServiceConfBuilder, enabled: bool) =
  b.enabled = some(enabled)

proc withDbMigration*(b: var StoreServiceConfBuilder, dbMigration: bool) =
  b.dbMigration = some(dbMigration)

proc withDbUrl*(b: var StoreServiceConfBuilder, dbUrl: string) =
  b.dbURl = some(dbUrl)

proc withDbVacuum*(b: var StoreServiceConfBuilder, dbVacuum: bool) =
  b.dbVacuum = some(dbVacuum)

proc withSupportV2*(b: var StoreServiceConfBuilder, supportV2: bool) =
  b.supportV2 = some(supportV2)

proc withMaxNumDbConnections*(
    b: var StoreServiceConfBuilder, maxNumDbConnections: int
) =
  b.maxNumDbConnections = some(maxNumDbConnections)

proc withRetentionPolicy*(b: var StoreServiceConfBuilder, retentionPolicy: string) =
  b.retentionPolicy = some(retentionPolicy)

proc withResume*(b: var StoreServiceConfBuilder, resume: bool) =
  b.resume = some(resume)

proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string] =
  ## `none` when the store service is disabled. `dbUrl` is the only
  ## mandatory field; everything else falls back to a service default
  ## (migrations on, no vacuum, no v2 support, 50 DB connections,
  ## 2-day time-based retention, no resume).
  if not b.enabled.get(false):
    return ok(none(StoreServiceConf))

  if b.dbUrl.get("") == "":
    return err "store.dbUrl is not specified"

  let storeSyncConf = b.storeSyncConf.build().valueOr:
    # Fix: the underlying reason was previously discarded ("Store Sync Conf
    # failed to build" with no detail), making store-sync misconfigurations
    # hard to diagnose; propagate it like every sibling builder does.
    return err("Store Sync Conf failed to build: " & $error)

  return ok(
    some(
      StoreServiceConf(
        dbMigration: b.dbMigration.get(true),
        dbURl: b.dbUrl.get(),
        dbVacuum: b.dbVacuum.get(false),
        supportV2: b.supportV2.get(false),
        maxNumDbConnections: b.maxNumDbConnections.get(50),
        retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds),
        resume: b.resume.get(false),
        storeSyncConf: storeSyncConf,
      )
    )
  )
#################################
## Store Sync Config Builder   ##
#################################
type StoreSyncConfBuilder* = object
  ## Accumulates store-sync timing parameters until `build` is called.
  enabled*: Option[bool]

  rangeSec*: Option[uint32]
  intervalSec*: Option[uint32]
  relayJitterSec*: Option[uint32]

proc init*(T: type StoreSyncConfBuilder): StoreSyncConfBuilder =
  StoreSyncConfBuilder()

proc withEnabled*(b: var StoreSyncConfBuilder, enabled: bool) =
  b.enabled = some(enabled)

proc withRangeSec*(b: var StoreSyncConfBuilder, rangeSec: uint32) =
  b.rangeSec = some(rangeSec)

proc withIntervalSec*(b: var StoreSyncConfBuilder, intervalSec: uint32) =
  b.intervalSec = some(intervalSec)

proc withRelayJitterSec*(b: var StoreSyncConfBuilder, relayJitterSec: uint32) =
  b.relayJitterSec = some(relayJitterSec)

proc build*(b: StoreSyncConfBuilder): Result[Option[StoreSyncConf], string] =
  ## `none` when sync is disabled; all three timing fields are mandatory
  ## otherwise.
  if not b.enabled.get(false):
    return ok(none(StoreSyncConf))

  if b.rangeSec.isNone():
    return err "store.rangeSec is not specified"
  if b.intervalSec.isNone():
    return err "store.intervalSec is not specified"
  if b.relayJitterSec.isNone():
    return err "store.relayJitterSec is not specified"

  let conf = StoreSyncConf(
    rangeSec: b.rangeSec.get(),
    intervalSec: b.intervalSec.get(),
    relayJitterSec: b.relayJitterSec.get(),
  )
  ok(some(conf))
./filter_service_conf_builder,
  ./store_sync_conf_builder,
  ./store_service_conf_builder,
  ./rest_server_conf_builder,
  ./dns_discovery_conf_builder,
  ./discv5_conf_builder,
  ./web_socket_conf_builder,
  ./metrics_server_conf_builder,
  ./rate_limit_conf_builder,
  ./rln_relay_conf_builder,
  ./mix_conf_builder

logScope:
  topics = "waku conf builder"

# Discriminator for `MaxMessageSize`: unset, human-readable string, or
# an exact byte count.
type MaxMessageSizeKind* = enum
  mmskNone
  mmskStr
  mmskInt

# Tagged union holding the max-message-size input before it is normalized
# to bytes during `build` (see `parseMsgSize`/`parseCorrectMsgSize`).
type MaxMessageSize* = object
  case kind*: MaxMessageSizeKind
  of mmskNone:
    discard
  of mmskStr:
    str*: string # human-readable size, e.g. "150KiB"
  of mmskInt:
    bytes*: uint64 # exact size in bytes

## `WakuConfBuilder` is a convenient tool to accumulate
## Config parameters to build a `WakuConfig`.
## It provides some type conversion, as well as applying
## defaults in an agnostic manner (for any usage of Waku node)
#
# TODO: Sub protocol builder (eg `StoreServiceConfBuilder`
# is be better defined in the protocol module (eg store)
# and apply good defaults from this protocol PoV and make the
# decision when the dev must specify a value vs when a default
# is fine to have.
#
# TODO: Add default to most values so that when a developer uses
# the builder, it works out-of-the-box
type WakuConfBuilder* = object
  # Mutable accumulator for node-level settings; `build` validates and
  # turns it into a `WakuConf`.
  nodeKey: Option[crypto.PrivateKey] # libp2p identity; generated when absent

  clusterId: Option[uint16]
  shardingConf: Option[ShardingConfKind]
  numShardsInCluster: Option[uint16] # only meaningful for auto-sharding
  subscribeShards: Option[seq[uint16]]
  protectedShards: Option[seq[ProtectedShard]]
  contentTopics: Option[seq[string]]

  # Conf builders
  dnsDiscoveryConf*: DnsDiscoveryConfBuilder
  discv5Conf*: Discv5ConfBuilder
  filterServiceConf*: FilterServiceConfBuilder
  metricsServerConf*: MetricsServerConfBuilder
  restServerConf*: RestServerConfBuilder
  rlnRelayConf*: RlnRelayConfBuilder
  storeServiceConf*: StoreServiceConfBuilder
  mixConf*: MixConfBuilder
  webSocketConf*: WebSocketConfBuilder
  rateLimitConf*: RateLimitConfBuilder
  # End conf builders

  # Protocol mount switches; unset means "not mounted" (with a warning
  # at build time).
  relay: Option[bool]
  lightPush: Option[bool]
  peerExchange: Option[bool]
  storeSync: Option[bool]
  relayPeerExchange: Option[bool]
  mix: Option[bool]

  # TODO: move within a relayConf
  rendezvous: Option[bool]

  # Preset that overrides most individual values (see applyNetworkConf).
  networkConf: Option[NetworkConf]

  staticNodes: seq[string]

  # Remote service nodes used when the corresponding protocol is not
  # mounted locally.
  remoteStoreNode: Option[string]
  remoteLightPushNode: Option[string]
  remoteFilterNode: Option[string]
  remotePeerExchangeNode: Option[string]

  maxMessageSize: MaxMessageSize

  logLevel: Option[logging.LogLevel]
  logFormat: Option[logging.LogFormat]

  natStrategy: Option[string]

  # Transport / addressing
  p2pTcpPort: Option[Port]
  p2pListenAddress: Option[IpAddress]
  portsShift: Option[uint16]
  dns4DomainName: Option[string]
  extMultiAddrs: seq[string]
  extMultiAddrsOnly: Option[bool]

  dnsAddrsNameServers: seq[IpAddress]

  # Peer management
  peerPersistence: Option[bool]
  peerStoreCapacity: Option[int]
  maxConnections: Option[int]
  colocationLimit: Option[int]

  agentString: Option[string]

  maxRelayPeers: Option[int]
  relayShardedPeerManagement: Option[bool]
  relayServiceRatio: Option[string]
  circuitRelayClient: Option[bool]
  p2pReliability: Option[bool]
proc init*(T: type WakuConfBuilder): WakuConfBuilder =
  ## Fresh top-level builder with every sub-builder initialized.
  WakuConfBuilder(
    dnsDiscoveryConf: DnsDiscoveryConfBuilder.init(),
    discv5Conf: Discv5ConfBuilder.init(),
    filterServiceConf: FilterServiceConfBuilder.init(),
    metricsServerConf: MetricsServerConfBuilder.init(),
    restServerConf: RestServerConfBuilder.init(),
    rlnRelayConf: RlnRelayConfBuilder.init(),
    storeServiceConf: StoreServiceConfBuilder.init(),
    webSocketConf: WebSocketConfBuilder.init(),
    rateLimitConf: RateLimitConfBuilder.init(),
  )

proc withNetworkConf*(b: var WakuConfBuilder, networkConf: NetworkConf) =
  ## Applies a preset at build time; overrides most individually-set values.
  b.networkConf = some(networkConf)

proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
  b.nodeKey = some(nodeKey)

proc withClusterId*(b: var WakuConfBuilder, clusterId: uint16) =
  b.clusterId = some(clusterId)

proc withShardingConf*(b: var WakuConfBuilder, shardingConf: ShardingConfKind) =
  b.shardingConf = some(shardingConf)

proc withNumShardsInCluster*(b: var WakuConfBuilder, numShardsInCluster: uint16) =
  b.numShardsInCluster = some(numShardsInCluster)

proc withSubscribeShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
  b.subscribeShards = some(shards)

proc withProtectedShards*(
    b: var WakuConfBuilder, protectedShards: seq[ProtectedShard]
) =
  b.protectedShards = some(protectedShards)

proc withContentTopics*(b: var WakuConfBuilder, contentTopics: seq[string]) =
  b.contentTopics = some(contentTopics)

proc withRelay*(b: var WakuConfBuilder, relay: bool) =
  b.relay = some(relay)

proc withLightPush*(b: var WakuConfBuilder, lightPush: bool) =
  b.lightPush = some(lightPush)

proc withStoreSync*(b: var WakuConfBuilder, storeSync: bool) =
  b.storeSync = some(storeSync)

proc withPeerExchange*(b: var WakuConfBuilder, peerExchange: bool) =
  b.peerExchange = some(peerExchange)

proc withRelayPeerExchange*(b: var WakuConfBuilder, relayPeerExchange: bool) =
  b.relayPeerExchange = some(relayPeerExchange)

proc withRendezvous*(b: var WakuConfBuilder, rendezvous: bool) =
  b.rendezvous = some(rendezvous)
proc withMix*(builder: var WakuConfBuilder, mix: bool) =
  builder.mix = some(mix)

proc withRemoteStoreNode*(b: var WakuConfBuilder, remoteStoreNode: string) =
  b.remoteStoreNode = some(remoteStoreNode)

proc withRemoteLightPushNode*(b: var WakuConfBuilder, remoteLightPushNode: string) =
  b.remoteLightPushNode = some(remoteLightPushNode)

proc withRemoteFilterNode*(b: var WakuConfBuilder, remoteFilterNode: string) =
  b.remoteFilterNode = some(remoteFilterNode)

proc withRemotePeerExchangeNode*(
    b: var WakuConfBuilder, remotePeerExchangeNode: string
) =
  b.remotePeerExchangeNode = some(remotePeerExchangeNode)

proc withPeerPersistence*(b: var WakuConfBuilder, peerPersistence: bool) =
  b.peerPersistence = some(peerPersistence)

proc withPeerStoreCapacity*(b: var WakuConfBuilder, peerStoreCapacity: int) =
  b.peerStoreCapacity = some(peerStoreCapacity)

proc withMaxConnections*(b: var WakuConfBuilder, maxConnections: int) =
  b.maxConnections = some(maxConnections)

proc withDnsAddrsNameServers*(
    b: var WakuConfBuilder, dnsAddrsNameServers: seq[IpAddress]
) =
  # NOTE(review): `insert` prepends the given servers (position 0), unlike
  # the concat-append style used by withExtMultiAddrs/withStaticNodes —
  # confirm prepend priority is intended.
  b.dnsAddrsNameServers.insert(dnsAddrsNameServers)

proc withLogLevel*(b: var WakuConfBuilder, logLevel: logging.LogLevel) =
  b.logLevel = some(logLevel)

proc withLogFormat*(b: var WakuConfBuilder, logFormat: logging.LogFormat) =
  b.logFormat = some(logFormat)

proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: Port) =
  b.p2pTcpPort = some(p2pTcpPort)

proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: uint16) =
  # Convenience overload converting a raw port number.
  b.p2pTcpPort = some(Port(p2pTcpPort))

proc withPortsShift*(b: var WakuConfBuilder, portsShift: uint16) =
  b.portsShift = some(portsShift)

proc withP2pListenAddress*(b: var WakuConfBuilder, p2pListenAddress: IpAddress) =
  b.p2pListenAddress = some(p2pListenAddress)

proc withExtMultiAddrsOnly*(b: var WakuConfBuilder, extMultiAddrsOnly: bool) =
  b.extMultiAddrsOnly = some(extMultiAddrsOnly)
proc withDns4DomainName*(b: var WakuConfBuilder, dns4DomainName: string) =
  b.dns4DomainName = some(dns4DomainName)

proc withNatStrategy*(b: var WakuConfBuilder, natStrategy: string) =
  b.natStrategy = some(natStrategy)

proc withAgentString*(b: var WakuConfBuilder, agentString: string) =
  b.agentString = some(agentString)

proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) =
  b.colocationLimit = some(colocationLimit)

proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) =
  b.maxRelayPeers = some(maxRelayPeers)

proc withRelayServiceRatio*(b: var WakuConfBuilder, relayServiceRatio: string) =
  b.relayServiceRatio = some(relayServiceRatio)

proc withCircuitRelayClient*(b: var WakuConfBuilder, circuitRelayClient: bool) =
  b.circuitRelayClient = some(circuitRelayClient)

proc withRelayShardedPeerManagement*(
    b: var WakuConfBuilder, relayShardedPeerManagement: bool
) =
  b.relayShardedPeerManagement = some(relayShardedPeerManagement)

proc withP2pReliability*(b: var WakuConfBuilder, p2pReliability: bool) =
  b.p2pReliability = some(p2pReliability)

proc withExtMultiAddrs*(builder: var WakuConfBuilder, extMultiAddrs: seq[string]) =
  ## Accumulates: repeated calls append rather than replace.
  builder.extMultiAddrs = concat(builder.extMultiAddrs, extMultiAddrs)

proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSizeBytes: uint64) =
  builder.maxMessageSize = MaxMessageSize(kind: mmskInt, bytes: maxMessageSizeBytes)

proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSize: string) =
  ## Human-readable form, e.g. "150KiB"; parsed during `build`.
  builder.maxMessageSize = MaxMessageSize(kind: mmskStr, str: maxMessageSize)

proc withStaticNodes*(builder: var WakuConfBuilder, staticNodes: seq[string]) =
  ## Accumulates: repeated calls append rather than replace.
  builder.staticNodes = concat(builder.staticNodes, staticNodes)

## Building

proc nodeKey(
    builder: WakuConfBuilder, rng: ref HmacDrbgContext
): Result[crypto.PrivateKey, string] =
  ## Returns the configured node key, or generates a fresh secp256k1 key
  ## (with a warning) when none was provided.
  if builder.nodeKey.isSome():
    return ok(builder.nodeKey.get())
  warn "missing node key, generating new set"
  let nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
    error "Failed to generate key", error = error
    return err("Failed to generate key: " & $error)
  ok(nodeKey)

proc buildShardingConf(
    bShardingConfKind: Option[ShardingConfKind],
    bNumShardsInCluster: Option[uint16],
    bSubscribeShards: Option[seq[uint16]],
): (ShardingConf, seq[uint16]) =
  ## Resolves the sharding mode (auto-sharding by default) and the shards to
  ## subscribe to (all shards of the cluster by default under auto-sharding).
  # Fix: this used to `echo` the shards to stdout — debug leftover in
  # library code; use structured logging instead.
  debug "building sharding conf", subscribeShards = bSubscribeShards
  case bShardingConfKind.get(AutoSharding)
  of StaticSharding:
    (ShardingConf(kind: StaticSharding), bSubscribeShards.get(@[]))
  of AutoSharding:
    let numShardsInCluster = bNumShardsInCluster.get(1)
    let shardingConf =
      ShardingConf(kind: AutoSharding, numShardsInCluster: numShardsInCluster)
    if numShardsInCluster == 0:
      # Fix: `numShardsInCluster - 1` on uint16 wraps to 65535 here, which
      # would default-subscribe 65536 shards. Zero shards in the cluster
      # means there is nothing to subscribe to by default.
      return (shardingConf, bSubscribeShards.get(@[]))
    let upperShard = uint16(numShardsInCluster - 1)
    (shardingConf, bSubscribeShards.get(toSeq(0.uint16 .. upperShard)))

proc applyNetworkConf(builder: var WakuConfBuilder) =
  ## Applies the preset `NetworkConf`, overriding most values passed
  ## individually (a warning is logged for each overridden field).
  ## If you want to tweak values, don't use networkConf.
  # TODO: networkconf should be one field of the conf builder so that this
  # function becomes unnecessary
  if builder.networkConf.isNone():
    return
  let networkConf = builder.networkConf.get()

  if builder.clusterId.isSome():
    warn "Cluster id was provided alongside a network conf",
      used = networkConf.clusterId, discarded = builder.clusterId.get()
  builder.clusterId = some(networkConf.clusterId)

  # Apply relay parameters
  if builder.relay.get(false) and networkConf.rlnRelay:
    if builder.rlnRelayConf.enabled.isSome():
      warn "RLN Relay was provided alongside a network conf",
        used = networkConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
    builder.rlnRelayConf.withEnabled(true)

    if builder.rlnRelayConf.ethContractAddress.get("") != "":
      warn "RLN Relay ETH Contract Address was provided alongside a network conf",
        used = networkConf.rlnRelayEthContractAddress.string,
        discarded = builder.rlnRelayConf.ethContractAddress.get().string
    builder.rlnRelayConf.withEthContractAddress(networkConf.rlnRelayEthContractAddress)

    if builder.rlnRelayConf.chainId.isSome():
      warn "RLN Relay Chain Id was provided alongside a network conf",
        used = networkConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
    builder.rlnRelayConf.withChainId(networkConf.rlnRelayChainId)

    if builder.rlnRelayConf.dynamic.isSome():
      warn "RLN Relay Dynamic was provided alongside a network conf",
        used = networkConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
    builder.rlnRelayConf.withDynamic(networkConf.rlnRelayDynamic)

    if builder.rlnRelayConf.epochSizeSec.isSome():
      warn "RLN Epoch Size in Seconds was provided alongside a network conf",
        used = networkConf.rlnEpochSizeSec,
        discarded = builder.rlnRelayConf.epochSizeSec
    builder.rlnRelayConf.withEpochSizeSec(networkConf.rlnEpochSizeSec)

    if builder.rlnRelayConf.userMessageLimit.isSome():
      # Fix: this warning previously said "RLN Relay Dynamic" (copy-paste
      # from the block above), mislabeling the overridden field.
      warn "RLN Relay User Message Limit was provided alongside a network conf",
        used = networkConf.rlnRelayUserMessageLimit,
        discarded = builder.rlnRelayConf.userMessageLimit
    builder.rlnRelayConf.withUserMessageLimit(networkConf.rlnRelayUserMessageLimit)
  # End Apply relay parameters

  case builder.maxMessageSize.kind
  of mmskNone:
    discard
  of mmskStr, mmskInt:
    warn "Max Message Size was provided alongside a network conf",
      used = networkConf.maxMessageSize, discarded = $builder.maxMessageSize
  builder.withMaxMessageSize(parseCorrectMsgSize(networkConf.maxMessageSize))

  if builder.shardingConf.isSome():
    warn "Sharding Conf was provided alongside a network conf",
      used = networkConf.shardingConf.kind, discarded = builder.shardingConf

  if builder.numShardsInCluster.isSome():
    # NOTE(review): `networkConf.shardingConf.numShardsInCluster` is only a
    # valid field access under AutoSharding; confirm presets with
    # StaticSharding never reach this warn, or guard on the kind.
    warn "Num Shards In Cluster was provided alongside a network conf",
      used = networkConf.shardingConf.numShardsInCluster,
      discarded = builder.numShardsInCluster

  case networkConf.shardingConf.kind
  of StaticSharding:
    builder.shardingConf = some(StaticSharding)
  of AutoSharding:
    builder.shardingConf = some(AutoSharding)
    builder.numShardsInCluster = some(networkConf.shardingConf.numShardsInCluster)

  if networkConf.discv5Discovery:
    if builder.discv5Conf.enabled.isNone:
      builder.discv5Conf.withEnabled(networkConf.discv5Discovery)

    # NOTE(review): this warns about "discarded" builder bootstrap nodes but
    # only fires when the builder list is empty (so nothing is discarded) —
    # confirm whether the condition or the message is the intended one.
    if builder.discv5Conf.bootstrapNodes.len == 0 and
        networkConf.discv5BootstrapNodes.len > 0:
      warn "Discv5 Bootstrap nodes were provided alongside a network conf",
        used = networkConf.discv5BootstrapNodes,
        discarded = builder.discv5Conf.bootstrapNodes
      builder.discv5Conf.withBootstrapNodes(networkConf.discv5BootstrapNodes)
+ + applyNetworkConf(builder) + + let relay = + if builder.relay.isSome(): + builder.relay.get() + else: + warn "whether to mount relay is not specified, defaulting to not mounting" + false + + let lightPush = + if builder.lightPush.isSome(): + builder.lightPush.get() + else: + warn "whether to mount lightPush is not specified, defaulting to not mounting" + false + + let peerExchange = + if builder.peerExchange.isSome(): + builder.peerExchange.get() + else: + warn "whether to mount peerExchange is not specified, defaulting to not mounting" + false + + let storeSync = + if builder.storeSync.isSome(): + builder.storeSync.get() + else: + warn "whether to mount storeSync is not specified, defaulting to not mounting" + false + + let rendezvous = + if builder.rendezvous.isSome(): + builder.rendezvous.get() + else: + warn "whether to mount rendezvous is not specified, defaulting to not mounting" + false + + let mix = + if builder.mix.isSome(): + builder.mix.get() + else: + warn "whether to mount mix is not specified, defaulting to not mounting" + false + + let relayPeerExchange = builder.relayPeerExchange.get(false) + + let nodeKey = ?nodeKey(builder, rng) + + let clusterId = + if builder.clusterId.isNone(): + # TODO: ClusterId should never be defaulted, instead, presets + # should be defined and used + warn("Cluster Id was not specified, defaulting to 0") + 0.uint16 + else: + builder.clusterId.get().uint16 + + let (shardingConf, subscribeShards) = buildShardingConf( + builder.shardingConf, builder.numShardsInCluster, builder.subscribeShards + ) + let protectedShards = builder.protectedShards.get(@[]) + + info "Sharding configuration: ", + shardingConf = $shardingConf, subscribeShards = $subscribeShards + + let maxMessageSizeBytes = + case builder.maxMessageSize.kind + of mmskInt: + builder.maxMessageSize.bytes + of mmskStr: + ?parseMsgSize(builder.maxMessageSize.str) + else: + warn "Max Message Size not specified, defaulting to 150KiB" + parseCorrectMsgSize("150KiB") + + 
let contentTopics = builder.contentTopics.get(@[]) + + # Build sub-configs + let discv5Conf = builder.discv5Conf.build().valueOr: + return err("Discv5 Conf building failed: " & $error) + + let dnsDiscoveryConf = builder.dnsDiscoveryConf.build().valueOr: + return err("DNS Discovery Conf building failed: " & $error) + + let filterServiceConf = builder.filterServiceConf.build().valueOr: + return err("Filter Service Conf building failed: " & $error) + + let metricsServerConf = builder.metricsServerConf.build().valueOr: + return err("Metrics Server Conf building failed: " & $error) + + let restServerConf = builder.restServerConf.build().valueOr: + return err("REST Server Conf building failed: " & $error) + + let rlnRelayConf = builder.rlnRelayConf.build().valueOr: + return err("RLN Relay Conf building failed: " & $error) + + let storeServiceConf = builder.storeServiceConf.build().valueOr: + return err("Store Conf building failed: " & $error) + + let mixConf = builder.mixConf.build().valueOr: + return err("Mix Conf building failed: " & $error) + + let webSocketConf = builder.webSocketConf.build().valueOr: + return err("WebSocket Conf building failed: " & $error) + + let rateLimit = builder.rateLimitConf.build().valueOr: + return err("Rate limits Conf building failed: " & $error) + + # End - Build sub-configs + + let logLevel = + if builder.logLevel.isSome(): + builder.logLevel.get() + else: + warn "Log Level not specified, defaulting to INFO" + logging.LogLevel.INFO + + let logFormat = + if builder.logFormat.isSome(): + builder.logFormat.get() + else: + warn "Log Format not specified, defaulting to TEXT" + logging.LogFormat.TEXT + + let natStrategy = + if builder.natStrategy.isSome(): + builder.natStrategy.get() + else: + warn "Nat Strategy is not specified, defaulting to none" + "none" + + let p2pTcpPort = + if builder.p2pTcpPort.isSome(): + builder.p2pTcpPort.get() + else: + warn "P2P Listening TCP Port is not specified, listening on 60000" + 60000.Port + + let 
p2pListenAddress = + if builder.p2pListenAddress.isSome(): + builder.p2pListenAddress.get() + else: + warn "P2P listening address not specified, listening on 0.0.0.0" + (static parseIpAddress("0.0.0.0")) + + let portsShift = + if builder.portsShift.isSome(): + builder.portsShift.get() + else: + warn "Ports Shift is not specified, defaulting to 0" + 0.uint16 + + let dns4DomainName = + if builder.dns4DomainName.isSome(): + let d = builder.dns4DomainName.get() + if d.string != "": + some(d) + else: + none(string) + else: + none(string) + + var extMultiAddrs: seq[MultiAddress] = @[] + for s in builder.extMultiAddrs: + let m = MultiAddress.init(s).valueOr: + return err("Invalid multiaddress provided: " & s) + extMultiAddrs.add(m) + + let extMultiAddrsOnly = + if builder.extMultiAddrsOnly.isSome(): + builder.extMultiAddrsOnly.get() + else: + warn "Whether to only announce external multiaddresses is not specified, defaulting to false" + false + + let dnsAddrsNameServers = + if builder.dnsAddrsNameServers.len != 0: + builder.dnsAddrsNameServers + else: + warn "DNS name servers IPs not provided, defaulting to Cloudflare's." + @[static parseIpAddress("1.1.1.1"), static parseIpAddress("1.0.0.1")] + + let peerPersistence = + if builder.peerPersistence.isSome(): + builder.peerPersistence.get() + else: + warn "Peer persistence not specified, defaulting to false" + false + + let maxConnections = + if builder.maxConnections.isSome(): + builder.maxConnections.get() + else: + warn "Max Connections was not specified, defaulting to 300" + 300 + + # TODO: Do the git version thing here + let agentString = builder.agentString.get("nwaku") + + # TODO: use `DefaultColocationLimit`. the user of this value should + # probably be defining a config object + let colocationLimit = builder.colocationLimit.get(5) + + # TODO: is there a strategy for experimental features? 
delete vs promote + let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false) + + let wakuFlags = CapabilitiesBitfield.init( + lightpush = lightPush, + filter = filterServiceConf.isSome, + store = storeServiceConf.isSome, + relay = relay, + sync = storeServiceConf.isSome() and storeServiceConf.get().storeSyncConf.isSome, + mix = mix, + ) + + let wakuConf = WakuConf( + # confs + storeServiceConf: storeServiceConf, + filterServiceConf: filterServiceConf, + discv5Conf: discv5Conf, + rlnRelayConf: rlnRelayConf, + metricsServerConf: metricsServerConf, + restServerConf: restServerConf, + dnsDiscoveryConf: dnsDiscoveryConf, + mixConf: mixConf, + # end confs + nodeKey: nodeKey, + clusterId: clusterId, + shardingConf: shardingConf, + contentTopics: contentTopics, + subscribeShards: subscribeShards, + protectedShards: protectedShards, + relay: relay, + lightPush: lightPush, + peerExchangeService: peerExchange, + rendezvous: rendezvous, + peerExchangeDiscovery: true, + # enabling peer exchange client by default for quicker bootstrapping + remoteStoreNode: builder.remoteStoreNode, + remoteLightPushNode: builder.remoteLightPushNode, + remoteFilterNode: builder.remoteFilterNode, + remotePeerExchangeNode: builder.remotePeerExchangeNode, + relayPeerExchange: relayPeerExchange, + maxMessageSizeBytes: maxMessageSizeBytes, + logLevel: logLevel, + logFormat: logFormat, + # TODO: Separate builders + endpointConf: EndpointConf( + natStrategy: natStrategy, + p2pTcpPort: p2pTcpPort, + dns4DomainName: dns4DomainName, + p2pListenAddress: p2pListenAddress, + extMultiAddrs: extMultiAddrs, + extMultiAddrsOnly: extMultiAddrsOnly, + ), + portsShift: portsShift, + webSocketConf: webSocketConf, + dnsAddrsNameServers: dnsAddrsNameServers, + peerPersistence: peerPersistence, + peerStoreCapacity: builder.peerStoreCapacity, + maxConnections: maxConnections, + agentString: agentString, + colocationLimit: colocationLimit, + maxRelayPeers: builder.maxRelayPeers, + relayServiceRatio: 
builder.relayServiceRatio.get("60:40"), + rateLimit: rateLimit, + circuitRelayClient: builder.circuitRelayClient.get(false), + staticNodes: builder.staticNodes, + relayShardedPeerManagement: relayShardedPeerManagement, + p2pReliability: builder.p2pReliability.get(false), + wakuFlags: wakuFlags, + ) + + ?wakuConf.validate() + + return ok(wakuConf) diff --git a/third-party/nwaku/waku/factory/conf_builder/web_socket_conf_builder.nim b/third-party/nwaku/waku/factory/conf_builder/web_socket_conf_builder.nim new file mode 100644 index 0000000..88edc09 --- /dev/null +++ b/third-party/nwaku/waku/factory/conf_builder/web_socket_conf_builder.nim @@ -0,0 +1,70 @@ +import chronicles, std/[net, options], results +import waku/factory/waku_conf + +logScope: + topics = "waku conf builder websocket" + +############################## +## WebSocket Config Builder ## +############################## +type WebSocketConfBuilder* = object + enabled*: Option[bool] + webSocketPort*: Option[Port] + secureEnabled*: Option[bool] + keyPath*: Option[string] + certPath*: Option[string] + +proc init*(T: type WebSocketConfBuilder): WebSocketConfBuilder = + WebSocketConfBuilder() + +proc withEnabled*(b: var WebSocketConfBuilder, enabled: bool) = + b.enabled = some(enabled) + +proc withSecureEnabled*(b: var WebSocketConfBuilder, secureEnabled: bool) = + b.secureEnabled = some(secureEnabled) + if b.secureEnabled.get(): + b.enabled = some(true) # ws must be enabled to use wss + +proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: Port) = + b.webSocketPort = some(webSocketPort) + +proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: uint16) = + b.webSocketPort = some(Port(webSocketPort)) + +proc withKeyPath*(b: var WebSocketConfBuilder, keyPath: string) = + b.keyPath = some(keyPath) + +proc withCertPath*(b: var WebSocketConfBuilder, certPath: string) = + b.certPath = some(certPath) + +proc build*(b: WebSocketConfBuilder): Result[Option[WebSocketConf], string] = + if not 
b.enabled.get(false): + return ok(none(WebSocketConf)) + + if b.webSocketPort.isNone(): + return err("websocket.port is not specified") + + if not b.secureEnabled.get(false): + return ok( + some( + WebSocketConf( + port: b.websocketPort.get(), secureConf: none(WebSocketSecureConf) + ) + ) + ) + + if b.keyPath.get("") == "": + return err("WebSocketSecure enabled but key path is not specified") + if b.certPath.get("") == "": + return err("WebSocketSecure enabled but cert path is not specified") + + return ok( + some( + WebSocketConf( + port: b.webSocketPort.get(), + secureConf: some( + WebSocketSecureConf(keyPath: b.keyPath.get(), certPath: b.certPath.get()) + ), + ) + ) + ) diff --git a/third-party/nwaku/waku/factory/internal_config.nim b/third-party/nwaku/waku/factory/internal_config.nim new file mode 100644 index 0000000..5a8e219 --- /dev/null +++ b/third-party/nwaku/waku/factory/internal_config.nim @@ -0,0 +1,144 @@ +import + chronicles, + chronos, + libp2p/crypto/crypto, + libp2p/crypto/curve25519, + libp2p/multiaddress, + libp2p/nameresolving/dnsresolver, + std/[options, sequtils, net], + results + +import ../common/utils/nat, ../node/net_config, ../waku_enr, ../waku_core, ./waku_conf + +proc enrConfiguration*( + conf: WakuConf, netConfig: NetConfig +): Result[enr.Record, string] = + var enrBuilder = EnrBuilder.init(conf.nodeKey) + + enrBuilder.withIpAddressAndPorts( + netConfig.enrIp, netConfig.enrPort, netConfig.discv5UdpPort + ) + + if netConfig.wakuFlags.isSome(): + enrBuilder.withWakuCapabilities(netConfig.wakuFlags.get()) + + enrBuilder.withMultiaddrs(netConfig.enrMultiaddrs) + + enrBuilder.withWakuRelaySharding( + RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards) + ).isOkOr: + return err("could not initialize ENR with shards") + + if conf.mixConf.isSome(): + enrBuilder.withMixKey(conf.mixConf.get().mixPubKey) + + let recordRes = enrBuilder.build() + let record = + if recordRes.isErr(): + error "failed to create record", error = 
recordRes.error + return err($recordRes.error) + else: + recordRes.get() + + return ok(record) + +proc dnsResolve*( + domain: string, dnsAddrsNameServers: seq[IpAddress] +): Future[Result[string, string]] {.async.} = + # Use conf's DNS servers + var nameServers: seq[TransportAddress] + for ip in dnsAddrsNameServers: + nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53 + + let dnsResolver = DnsResolver.new(nameServers) + + # Resolve domain IP + let resolved = await dnsResolver.resolveIp(domain, 0.Port, Domain.AF_UNSPEC) + + if resolved.len > 0: + return ok(resolved[0].host) # Use only first answer + else: + return err("Could not resolve IP from DNS: empty response") + +# TODO: Reduce number of parameters, can be done once the same is done on Netconfig.init +proc networkConfiguration*( + clusterId: uint16, + conf: EndpointConf, + discv5Conf: Option[Discv5Conf], + webSocketConf: Option[WebSocketConf], + wakuFlags: CapabilitiesBitfield, + dnsAddrsNameServers: seq[IpAddress], + portsShift: uint16, + clientId: string, +): Future[NetConfigResult] {.async.} = + ## `udpPort` is only supplied to satisfy underlying APIs but is not + ## actually a supported transport for libp2p traffic. + let natRes = setupNat( + conf.natStrategy.string, + clientId, + Port(uint16(conf.p2pTcpPort) + portsShift), + Port(uint16(conf.p2pTcpPort) + portsShift), + ) + if natRes.isErr(): + return err("failed to setup NAT: " & $natRes.error) + + var (extIp, extTcpPort, _) = natRes.get() + + let + discv5UdpPort = + if discv5Conf.isSome(): + some(Port(uint16(discv5Conf.get().udpPort) + portsShift)) + else: + none(Port) + + ## TODO: the NAT setup assumes a manual port mapping configuration if extIp + ## config is set. This probably implies adding manual config item for + ## extPort as well. The following heuristic assumes that, in absence of + ## manual config, the external port is the same as the bind port. 
+ extPort = + if (extIp.isSome() or conf.dns4DomainName.isSome()) and extTcpPort.isNone(): + some(Port(uint16(conf.p2pTcpPort) + portsShift)) + else: + extTcpPort + + # Resolve and use DNS domain IP + if conf.dns4DomainName.isSome() and extIp.isNone(): + try: + let dnsRes = await dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers) + + if dnsRes.isErr(): + return err($dnsRes.error) # Pass error down the stack + + extIp = some(parseIpAddress(dnsRes.get())) + except CatchableError: + return + err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg()) + + let (wsEnabled, wsBindPort, wssEnabled) = + if webSocketConf.isSome: + let wsConf = webSocketConf.get() + (true, some(Port(wsConf.port.uint16 + portsShift)), wsConf.secureConf.isSome) + else: + (false, none(Port), false) + + # Wrap in none because NetConfig does not have a default constructor + # TODO: We could change bindIp in NetConfig to be something less restrictive + # than IpAddress, which doesn't allow default construction + let netConfigRes = NetConfig.init( + clusterId = clusterId, + bindIp = conf.p2pListenAddress, + bindPort = Port(uint16(conf.p2pTcpPort) + portsShift), + extIp = extIp, + extPort = extPort, + extMultiAddrs = conf.extMultiAddrs, + extMultiAddrsOnly = conf.extMultiAddrsOnly, + wsBindPort = wsBindPort, + wsEnabled = wsEnabled, + wssEnabled = wssEnabled, + dns4DomainName = conf.dns4DomainName, + discv5UdpPort = discv5UdpPort, + wakuFlags = some(wakuFlags), + dnsNameServers = dnsAddrsNameServers, + ) + + return netConfigRes diff --git a/third-party/nwaku/waku/factory/networks_config.nim b/third-party/nwaku/waku/factory/networks_config.nim new file mode 100644 index 0000000..c7193aa --- /dev/null +++ b/third-party/nwaku/waku/factory/networks_config.nim @@ -0,0 +1,73 @@ +{.push raises: [].} + +import chronicles, results, stint + +logScope: + topics = "waku networks conf" + +type + ShardingConfKind* = enum + AutoSharding + StaticSharding + + ShardingConf* = object + case 
kind*: ShardingConfKind + of AutoSharding: + numShardsInCluster*: uint16 + of StaticSharding: + discard + +type NetworkConf* = object + maxMessageSize*: string # TODO: static convert to a uint64 + clusterId*: uint16 + rlnRelay*: bool + rlnRelayEthContractAddress*: string + rlnRelayChainId*: UInt256 + rlnRelayDynamic*: bool + rlnEpochSizeSec*: uint64 + rlnRelayUserMessageLimit*: uint64 + shardingConf*: ShardingConf + discv5Discovery*: bool + discv5BootstrapNodes*: seq[string] + +# cluster-id=1 (aka The Waku Network) +# Cluster configuration corresponding to The Waku Network. Note that it +# overrides existing cli configuration +proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf = + const RelayChainId = 59141'u256 + return NetworkConf( + maxMessageSize: "150KiB", + clusterId: 1, + rlnRelay: true, + rlnRelayEthContractAddress: "0xB9cd878C90E49F797B4431fBF4fb333108CB90e6", + rlnRelayDynamic: true, + rlnRelayChainId: RelayChainId, + rlnEpochSizeSec: 600, + rlnRelayUserMessageLimit: 100, + shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8), + discv5Discovery: true, + discv5BootstrapNodes: + @[ + "enr:-QESuED0qW1BCmF-oH_ARGPr97Nv767bl_43uoy70vrbah3EaCAdK3Q0iRQ6wkSTTpdrg_dU_NC2ydO8leSlRpBX4pxiAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOTd-h5owwj-cx7xrmbvQKU8CV3Fomfdvcv1MBc-67T5oN0Y3CCdl-DdWRwgiMohXdha3UyDw", + "enr:-QEkuED9X80QF_jcN9gA2ZRhhmwVEeJnsg_Hyg7IFCTYnZD0BDI7a8HArE61NhJZFwygpHCWkgwSt2vqiABXkBxzIqZBAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPFAS8zz2cg1QQhxMaK8CzkGQ5wdHvPJcrgLzJGOiHpwYN0Y3CCdl-DdWRwgiMohXdha3UyDw", + 
"enr:-QEkuEBfEzJm_kigJ2HoSS_RBFJYhKHocGdkhhBr6jSUAWjLdFPp6Pj1l4yiTQp7TGHyu1kC6FyaU573VN8klLsEm-XuAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOwsS69tgD7u1K50r5-qG5hweuTwa0W26aYPnvivpNlrYN0Y3CCdl-DdWRwgiMohXdha3UyDw", + ], + ) + +proc validateShards*( + shardingConf: ShardingConf, shards: seq[uint16] +): Result[void, string] = + case shardingConf.kind + of StaticSharding: + return ok() + of AutoSharding: + let numShardsInCluster = shardingConf.numShardsInCluster + for shard in shards: + if shard >= numShardsInCluster: + let msg = + "validateShards invalid shard: " & $shard & " when numShardsInCluster: " & + $numShardsInCluster + error "validateShards failed", error = msg + return err(msg) + + return ok() diff --git a/third-party/nwaku/waku/factory/node_factory.nim b/third-party/nwaku/waku/factory/node_factory.nim new file mode 100644 index 0000000..85f87b3 --- /dev/null +++ b/third-party/nwaku/waku/factory/node_factory.nim @@ -0,0 +1,537 @@ +import + std/[options, sequtils], + chronicles, + chronos, + libp2p/peerid, + libp2p/protocols/pubsub/gossipsub, + libp2p/protocols/connectivity/relay/relay, + libp2p/nameresolving/dnsresolver, + libp2p/crypto/crypto + +import + ./internal_config, + ./networks_config, + ./waku_conf, + ./builder, + ./validator_signed, + ../waku_enr/sharding, + ../waku_node, + ../waku_core, + ../waku_core/codecs, + ../waku_rln_relay, + ../discovery/waku_dnsdisc, + ../waku_archive/retention_policy as policy, + ../waku_archive/retention_policy/builder as policy_builder, + ../waku_archive/driver as driver, + ../waku_archive/driver/builder as driver_builder, + ../waku_archive_legacy/driver as legacy_driver, + ../waku_archive_legacy/driver/builder as legacy_driver_builder, + ../waku_store, + ../waku_store/common as store_common, + ../waku_store_legacy, + 
../waku_store_legacy/common as legacy_common, + ../waku_filter_v2, + ../waku_peer_exchange, + ../node/peer_manager, + ../node/peer_manager/peer_store/waku_peer_storage, + ../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations, + ../waku_lightpush_legacy/common, + ../common/rate_limit/setting, + ../common/databases/dburl + +## Peer persistence + +const PeerPersistenceDbUrl = "peers.db" +proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] = + let db = ?SqliteDatabase.new(PeerPersistenceDbUrl) + + ?peer_store_sqlite_migrations.migrate(db) + + let res = WakuPeerStorage.new(db) + if res.isErr(): + return err("failed to init peer store" & res.error) + + ok(some(res.value)) + +## Init waku node instance + +proc initNode( + conf: WakuConf, + netConfig: NetConfig, + rng: ref HmacDrbgContext, + record: enr.Record, + peerStore: Option[WakuPeerStorage], + relay: Relay, + dynamicBootstrapNodes: openArray[RemotePeerInfo] = @[], +): Result[WakuNode, string] = + ## Setup a basic Waku v2 node based on a supplied configuration + ## file. Optionally include persistent peer storage. + ## No protocols are mounted yet. 
+ + let pStorage = + if peerStore.isNone(): + nil + else: + peerStore.get() + + let (secureKey, secureCert) = + if conf.webSocketConf.isSome() and conf.webSocketConf.get().secureConf.isSome(): + let wssConf = conf.webSocketConf.get().secureConf.get() + (some(wssConf.keyPath), some(wssConf.certPath)) + else: + (none(string), none(string)) + + let nameResolver = + DnsResolver.new(conf.dnsAddrsNameServers.mapIt(initTAddress(it, Port(53)))) + + # Build waku node instance + var builder = WakuNodeBuilder.init() + builder.withRng(rng) + builder.withNodeKey(conf.nodeKey) + builder.withRecord(record) + builder.withNetworkConfiguration(netConfig) + builder.withPeerStorage(pStorage, capacity = conf.peerStoreCapacity) + builder.withSwitchConfiguration( + maxConnections = some(conf.maxConnections.int), + secureKey = secureKey, + secureCert = secureCert, + nameResolver = nameResolver, + sendSignedPeerRecord = conf.relayPeerExchange, + # We send our own signed peer record when peer exchange enabled + agentString = some(conf.agentString), + ) + builder.withColocationLimit(conf.colocationLimit) + + if conf.maxRelayPeers.isSome(): + let + maxRelayPeers = conf.maxRelayPeers.get() + maxConnections = conf.maxConnections + # Calculate the ratio as percentages + relayRatio = (maxRelayPeers.float / maxConnections.float) * 100 + serviceRatio = 100 - relayRatio + + builder.withPeerManagerConfig( + maxConnections = conf.maxConnections, + relayServiceRatio = $relayRatio & ":" & $serviceRatio, + shardAware = conf.relayShardedPeerManagement, + ) + error "maxRelayPeers is deprecated. It is recommended to use relayServiceRatio instead. If relayServiceRatio is not set, it will be automatically calculated based on maxConnections and maxRelayPeers." 
+ else: + builder.withPeerManagerConfig( + maxConnections = conf.maxConnections, + relayServiceRatio = conf.relayServiceRatio, + shardAware = conf.relayShardedPeerManagement, + ) + builder.withRateLimit(conf.rateLimit) + builder.withCircuitRelay(relay) + + let node = + ?builder.build().mapErr( + proc(err: string): string = + "failed to create waku node instance: " & err + ) + + ok(node) + +## Mount protocols + +proc getAutoshards*( + node: WakuNode, contentTopics: seq[string] +): Result[seq[RelayShard], string] = + if node.wakuAutoSharding.isNone(): + return err("Static sharding used, cannot get shards from content topics") + var autoShards: seq[RelayShard] + for contentTopic in contentTopics: + let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr: + return err("Could not parse content topic: " & error) + autoShards.add(shard) + return ok(autoshards) + +proc setupProtocols( + node: WakuNode, conf: WakuConf +): Future[Result[void, string]] {.async.} = + ## Setup configured protocols on an existing Waku v2 node. + ## Optionally include persistent message storage. + ## No protocols are started yet. + + var allShards = conf.subscribeShards + node.mountMetadata(conf.clusterId, allShards).isOkOr: + return err("failed to mount waku metadata protocol: " & error) + + var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} = + ## Action to be taken when an internal error occurs during the node run. + ## e.g. the connection with the database is lost and not recovered. 
+ error "Unrecoverable error occurred", error = msg + quit(QuitFailure) + + if conf.storeServiceConf.isSome(): + let storeServiceConf = conf.storeServiceConf.get() + if storeServiceConf.supportV2: + let archiveDriverRes = await legacy_driver.ArchiveDriver.new( + storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration, + storeServiceConf.maxNumDbConnections, onFatalErrorAction, + ) + if archiveDriverRes.isErr(): + return err("failed to setup legacy archive driver: " & archiveDriverRes.error) + + let mountArcRes = node.mountLegacyArchive(archiveDriverRes.get()) + if mountArcRes.isErr(): + return err("failed to mount waku legacy archive protocol: " & mountArcRes.error) + + ## For now we always mount the future archive driver but if the legacy one is mounted, + ## then the legacy will be in charge of performing the archiving. + ## Regarding storage, the only diff between the current/future archive driver and the legacy + ## one, is that the legacy stores an extra field: the id (message digest.) 
+ + ## TODO: remove this "migrate" variable once legacy store is removed + ## It is now necessary because sqlite's legacy store has an extra field: storedAt + ## This breaks compatibility between store's and legacy store's schemas in sqlite + ## So for now, we need to make sure that when legacy store is enabled and we use sqlite + ## that we migrate our db according to legacy store's schema to have the extra field + + let engineRes = dburl.getDbEngine(storeServiceConf.dbUrl) + if engineRes.isErr(): + return err("error getting db engine in setupProtocols: " & engineRes.error) + + let engine = engineRes.get() + + let migrate = + if engine == "sqlite" and storeServiceConf.supportV2: + false + else: + storeServiceConf.dbMigration + + let archiveDriverRes = await driver.ArchiveDriver.new( + storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate, + storeServiceConf.maxNumDbConnections, onFatalErrorAction, + ) + if archiveDriverRes.isErr(): + return err("failed to setup archive driver: " & archiveDriverRes.error) + + let retPolicyRes = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy) + if retPolicyRes.isErr(): + return err("failed to create retention policy: " & retPolicyRes.error) + + let mountArcRes = node.mountArchive(archiveDriverRes.get(), retPolicyRes.get()) + if mountArcRes.isErr(): + return err("failed to mount waku archive protocol: " & mountArcRes.error) + + if storeServiceConf.supportV2: + # Store legacy setup + try: + await mountLegacyStore(node, node.rateLimitSettings.getSetting(STOREV2)) + except CatchableError: + return + err("failed to mount waku legacy store protocol: " & getCurrentExceptionMsg()) + + # Store setup + try: + await mountStore(node, node.rateLimitSettings.getSetting(STOREV3)) + except CatchableError: + return err("failed to mount waku store protocol: " & getCurrentExceptionMsg()) + + if storeServiceConf.storeSyncConf.isSome(): + let confStoreSync = storeServiceConf.storeSyncConf.get() + + ( + await node.mountStoreSync( + 
conf.clusterId, conf.subscribeShards, conf.contentTopics, + confStoreSync.rangeSec, confStoreSync.intervalSec, + confStoreSync.relayJitterSec, + ) + ).isOkOr: + return err("failed to mount waku store sync protocol: " & $error) + + if conf.remoteStoreNode.isSome(): + let storeNode = parsePeerInfo(conf.remoteStoreNode.get()).valueOr: + return err("failed to set node waku store-sync peer: " & error) + + node.peerManager.addServicePeer(storeNode, WakuReconciliationCodec) + node.peerManager.addServicePeer(storeNode, WakuTransferCodec) + + mountStoreClient(node) + if conf.remoteStoreNode.isSome(): + let storeNode = parsePeerInfo(conf.remoteStoreNode.get()) + if storeNode.isOk(): + node.peerManager.addServicePeer(storeNode.value, store_common.WakuStoreCodec) + else: + return err("failed to set node waku store peer: " & storeNode.error) + + mountLegacyStoreClient(node) + if conf.remoteStoreNode.isSome(): + let storeNode = parsePeerInfo(conf.remoteStoreNode.get()) + if storeNode.isOk(): + node.peerManager.addServicePeer( + storeNode.value, legacy_common.WakuLegacyStoreCodec + ) + else: + return err("failed to set node waku legacy store peer: " & storeNode.error) + + if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume: + node.setupStoreResume() + + if conf.shardingConf.kind == AutoSharding: + node.mountAutoSharding(conf.clusterId, conf.shardingConf.numShardsInCluster).isOkOr: + return err("failed to mount waku auto sharding: " & error) + else: + warn("Auto sharding is disabled") + + # Mount relay on all nodes + var peerExchangeHandler = none(RoutingRecordsHandler) + if conf.relayPeerExchange: + proc handlePeerExchange( + peer: PeerId, topic: string, peers: seq[RoutingRecordsPair] + ) {.gcsafe.} = + ## Handle peers received via gossipsub peer exchange + # TODO: Only consider peers on pubsub topics we subscribe to + let exchangedPeers = peers.filterIt(it.record.isSome()) + # only peers with populated records + .mapIt(toRemotePeerInfo(it.record.get())) + + 
debug "adding exchanged peers", + src = peer, topic = topic, numPeers = exchangedPeers.len + + for peer in exchangedPeers: + # Peers added are filtered by the peer manager + node.peerManager.addPeer(peer, PeerOrigin.PeerExchange) + + peerExchangeHandler = some(handlePeerExchange) + + # TODO: when using autosharding, the user should not be expected to pass any shards, but only content topics + # Hence, this joint logic should be removed in favour of an either logic: + # use passed shards (static) or deduce shards from content topics (auto) + let autoShards = + if node.wakuAutoSharding.isSome(): + node.getAutoshards(conf.contentTopics).valueOr: + return err("Could not get autoshards: " & error) + else: + @[] + + debug "Shards created from content topics", + contentTopics = conf.contentTopics, shards = autoShards + + let confShards = conf.subscribeShards.mapIt( + RelayShard(clusterId: conf.clusterId, shardId: uint16(it)) + ) + let shards = confShards & autoShards + + if conf.relay: + debug "Setting max message size", num_bytes = conf.maxMessageSizeBytes + + ( + await mountRelay( + node, peerExchangeHandler = peerExchangeHandler, int(conf.maxMessageSizeBytes) + ) + ).isOkOr: + return err("failed to mount waku relay protocol: " & $error) + + # Add validation keys to protected topics + var subscribedProtectedShards: seq[ProtectedShard] + for shardKey in conf.protectedShards: + if shardKey.shard notin conf.subscribeShards: + warn "protected shard not in subscribed shards, skipping adding validator", + protectedShard = shardKey.shard, subscribedShards = shards + continue + subscribedProtectedShards.add(shardKey) + notice "routing only signed traffic", + protectedShard = shardKey.shard, publicKey = shardKey.key + node.wakuRelay.addSignedShardsValidator(subscribedProtectedShards, conf.clusterId) + + # Only relay nodes should be rendezvous points. 
+ if conf.rendezvous: + await node.mountRendezvous(conf.clusterId) + + # Keepalive mounted on all nodes + try: + await mountLibp2pPing(node) + except CatchableError: + return err("failed to mount libp2p ping protocol: " & getCurrentExceptionMsg()) + + if conf.rlnRelayConf.isSome(): + let rlnRelayConf = conf.rlnRelayConf.get() + let rlnConf = WakuRlnConfig( + dynamic: rlnRelayConf.dynamic, + credIndex: rlnRelayConf.credIndex, + ethContractAddress: rlnRelayConf.ethContractAddress, + chainId: rlnRelayConf.chainId, + ethClientUrls: rlnRelayConf.ethClientUrls, + creds: rlnRelayConf.creds, + userMessageLimit: rlnRelayConf.userMessageLimit, + epochSizeSec: rlnRelayConf.epochSizeSec, + onFatalErrorAction: onFatalErrorAction, + ) + + try: + await node.mountRlnRelay(rlnConf) + except CatchableError: + return err("failed to mount waku RLN relay protocol: " & getCurrentExceptionMsg()) + + # NOTE Must be mounted after relay + if conf.lightPush: + try: + await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) + await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) + except CatchableError: + return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg()) + + mountLightPushClient(node) + mountLegacyLightPushClient(node) + if conf.remoteLightPushNode.isSome(): + let lightPushNode = parsePeerInfo(conf.remoteLightPushNode.get()) + if lightPushNode.isOk(): + node.peerManager.addServicePeer(lightPushNode.value, WakuLightPushCodec) + node.peerManager.addServicePeer(lightPushNode.value, WakuLegacyLightPushCodec) + else: + return err("failed to set node waku lightpush peer: " & lightPushNode.error) + + # Filter setup. 
NOTE Must be mounted after relay + if conf.filterServiceConf.isSome(): + let confFilter = conf.filterServiceConf.get() + try: + await mountFilter( + node, + subscriptionTimeout = chronos.seconds(confFilter.subscriptionTimeout), + maxFilterPeers = confFilter.maxPeersToServe, + maxFilterCriteriaPerPeer = confFilter.maxCriteria, + rateLimitSetting = node.rateLimitSettings.getSetting(FILTER), + ) + except CatchableError: + return err("failed to mount waku filter protocol: " & getCurrentExceptionMsg()) + + await node.mountFilterClient() + if conf.remoteFilterNode.isSome(): + let filterNode = parsePeerInfo(conf.remoteFilterNode.get()) + if filterNode.isOk(): + try: + node.peerManager.addServicePeer(filterNode.value, WakuFilterSubscribeCodec) + except CatchableError: + return err( + "failed to mount waku filter client protocol: " & getCurrentExceptionMsg() + ) + else: + return err("failed to set node waku filter peer: " & filterNode.error) + + # waku peer exchange setup + if conf.peerExchangeService: + try: + await mountPeerExchange( + node, some(conf.clusterId), node.rateLimitSettings.getSetting(PEEREXCHG) + ) + except CatchableError: + return + err("failed to mount waku peer-exchange protocol: " & getCurrentExceptionMsg()) + + if conf.remotePeerExchangeNode.isSome(): + let peerExchangeNode = parsePeerInfo(conf.remotePeerExchangeNode.get()) + if peerExchangeNode.isOk(): + node.peerManager.addServicePeer(peerExchangeNode.value, WakuPeerExchangeCodec) + else: + return + err("failed to set node waku peer-exchange peer: " & peerExchangeNode.error) + + if conf.peerExchangeDiscovery: + await node.mountPeerExchangeClient() + + #mount mix + if conf.mixConf.isSome(): + (await node.mountMix(conf.clusterId, conf.mixConf.get().mixKey)).isOkOr: + return err("failed to mount waku mix protocol: " & $error) + return ok() + +## Start node + +proc startNode*( + node: WakuNode, conf: WakuConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[] +): Future[Result[void, string]] {.async: 
(raises: []).} = + ## Start a configured node and all mounted protocols. + ## Connect to static nodes and start + ## keep-alive, if configured. + + info "Running nwaku node", version = git_version + try: + await node.start() + except CatchableError: + return err("failed to start waku node: " & getCurrentExceptionMsg()) + + # Connect to configured static nodes + if conf.staticNodes.len > 0: + try: + await connectToNodes(node, conf.staticNodes, "static") + except CatchableError: + return err("failed to connect to static nodes: " & getCurrentExceptionMsg()) + + if dynamicBootstrapNodes.len > 0: + info "Connecting to dynamic bootstrap peers" + try: + await connectToNodes(node, dynamicBootstrapNodes, "dynamic bootstrap") + except CatchableError: + return + err("failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg()) + + # retrieve px peers and add the to the peer store + if conf.remotePeerExchangeNode.isSome(): + var desiredOutDegree = DefaultPXNumPeersReq + if not node.wakuRelay.isNil() and node.wakuRelay.parameters.d.uint64() > 0: + desiredOutDegree = node.wakuRelay.parameters.d.uint64() + (await node.fetchPeerExchangePeers(desiredOutDegree)).isOkOr: + error "error while fetching peers from peer exchange", error = error + + # TODO: behavior described by comment is undesired. PX as client should be used in tandem with discv5. 
+ # + # Use px to periodically get peers if discv5 is disabled, as discv5 nodes have their own + # periodic loop to find peers and px returned peers actually come from discv5 + if conf.peerExchangeDiscovery and not conf.discv5Conf.isSome(): + node.startPeerExchangeLoop() + + # Maintain relay connections + if conf.relay: + node.peerManager.start() + + return ok() + +proc setupNode*( + wakuConf: WakuConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay +): Future[Result[WakuNode, string]] {.async.} = + let netConfig = ( + await networkConfiguration( + wakuConf.clusterId, wakuConf.endpointConf, wakuConf.discv5Conf, + wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers, + wakuConf.portsShift, clientId, + ) + ).valueOr: + error "failed to create internal config", error = error + return err("failed to create internal config: " & error) + + let record = enrConfiguration(wakuConf, netConfig).valueOr: + error "failed to create record", error = error + return err("failed to create record: " & error) + + if isClusterMismatched(record, wakuConf.clusterId): + error "cluster id mismatch configured shards" + return err("cluster id mismatch configured shards") + + debug "Setting up storage" + + ## Peer persistence + var peerStore: Option[WakuPeerStorage] + if wakuConf.peerPersistence: + peerStore = setupPeerStorage().valueOr: + error "Setting up storage failed", error = "failed to setup peer store " & error + return err("Setting up storage failed: " & error) + + debug "Initializing node" + + let node = initNode(wakuConf, netConfig, rng, record, peerStore, relay).valueOr: + error "Initializing node failed", error = error + return err("Initializing node failed: " & error) + + debug "Mounting protocols" + + try: + (await node.setupProtocols(wakuConf)).isOkOr: + error "Mounting protocols failed", error = error + return err("Mounting protocols failed: " & error) + except CatchableError: + return err("Exception setting up protocols: " & 
getCurrentExceptionMsg()) + + return ok(node) diff --git a/third-party/nwaku/waku/factory/validator_signed.nim b/third-party/nwaku/waku/factory/validator_signed.nim new file mode 100644 index 0000000..0da380a --- /dev/null +++ b/third-party/nwaku/waku/factory/validator_signed.nim @@ -0,0 +1,83 @@ +{.push raises: [].} + +import + chronicles, + chronos, + metrics, + stew/byteutils, + stew/endians2, + libp2p/protocols/pubsub/gossipsub, + libp2p/protocols/pubsub/errors, + nimcrypto/sha2, + secp256k1 + +const MessageWindowInSec = 5 * 60 # +- 5 minutes + +import ./waku_conf, ../waku_relay/protocol, ../waku_core + +declarePublicCounter waku_msg_validator_signed_outcome, + "number of messages for each validation outcome", ["result"] + +# Application level message hash +proc msgHash*(pubSubTopic: string, msg: WakuMessage): array[32, byte] = + var ctx: sha256 + ctx.init() + defer: + ctx.clear() + + ctx.update(pubsubTopic.toBytes()) + ctx.update(msg.payload) + ctx.update(msg.contentTopic.toBytes()) + ctx.update(msg.timestamp.uint64.toBytes(Endianness.littleEndian)) + # ctx.update(msg.meta) meta is not included in the message hash, as the signature goes in the meta field + ctx.update( + if msg.ephemeral: + @[1.byte] + else: + @[0.byte] + ) + + return ctx.finish() + +proc withinTimeWindow*(msg: WakuMessage): bool = + # Returns true if the message timestamp is: + # abs(now - msg.timestamp) < MessageWindowInSec + let ts = msg.timestamp + let now = getNowInNanosecondTime() + let window = getNanosecondTime(MessageWindowInSec) + + if abs(now - ts) < window: + return true + return false + +proc addSignedShardsValidator*( + w: WakuRelay, protectedShards: seq[ProtectedShard], clusterId: uint16 +) = + debug "adding validator to signed shards", protectedShards, clusterId + + proc validator( + topic: string, msg: WakuMessage + ): Future[errors.ValidationResult] {.async.} = + var outcome = errors.ValidationResult.Reject + + for protectedShard in protectedShards: + let topicString = + 
$RelayShard(clusterId: clusterId, shardId: uint16(protectedShard.shard)) + if (topicString == topic): + if msg.timestamp != 0: + if msg.withinTimeWindow(): + let msgHash = SkMessage(topic.msgHash(msg)) + let recoveredSignature = SkSignature.fromRaw(msg.meta) + if recoveredSignature.isOk(): + if recoveredSignature.get.verify(msgHash, protectedShard.key): + outcome = errors.ValidationResult.Accept + + if outcome != errors.ValidationResult.Accept: + debug "signed topic validation failed", + topic = topic, publicShardKey = protectedShard.key + waku_msg_validator_signed_outcome.inc(labelValues = [$outcome]) + return outcome + + return errors.ValidationResult.Accept + + w.addValidator(validator, "signed shard validation failed") diff --git a/third-party/nwaku/waku/factory/waku.nim b/third-party/nwaku/waku/factory/waku.nim new file mode 100644 index 0000000..7c2c430 --- /dev/null +++ b/third-party/nwaku/waku/factory/waku.nim @@ -0,0 +1,469 @@ +{.push raises: [].} + +import + std/[options, sequtils, strformat], + results, + chronicles, + chronos, + libp2p/protocols/connectivity/relay/relay, + libp2p/protocols/connectivity/relay/client, + libp2p/wire, + libp2p/crypto/crypto, + libp2p/protocols/pubsub/gossipsub, + libp2p/services/autorelayservice, + libp2p/services/hpservice, + libp2p/peerid, + libp2p/discovery/discoverymngr, + libp2p/discovery/rendezvousinterface, + eth/keys, + eth/p2p/discoveryv5/enr, + presto, + metrics, + metrics/chronos_httpserver +import + ../common/logging, + ../waku_core, + ../waku_node, + ../node/peer_manager, + ../node/health_monitor, + ../node/waku_metrics, + ../node/delivery_monitor/delivery_monitor, + ../waku_api/message_cache, + ../waku_api/rest/server, + ../waku_api/rest/builder as rest_server_builder, + ../waku_archive, + ../waku_relay/protocol, + ../discovery/waku_dnsdisc, + ../discovery/waku_discv5, + ../discovery/autonat_service, + ../waku_enr/sharding, + ../waku_rln_relay, + ../waku_store, + ../waku_filter_v2, + ../factory/node_factory, + 
../factory/internal_config, + ../factory/app_callbacks, + ../waku_enr/multiaddr, + ./waku_conf + +logScope: + topics = "wakunode waku" + +# Git version in git describe format (defined at compile time) +const git_version* {.strdefine.} = "n/a" + +type Waku* = ref object + version: string + conf*: WakuConf + rng*: ref HmacDrbgContext + + key: crypto.PrivateKey + + wakuDiscv5*: WakuDiscoveryV5 + dynamicBootstrapNodes*: seq[RemotePeerInfo] + dnsRetryLoopHandle: Future[void] + networkConnLoopHandle: Future[void] + discoveryMngr: DiscoveryManager + + node*: WakuNode + + healthMonitor*: NodeHealthMonitor + + deliveryMonitor: DeliveryMonitor + + restServer*: WakuRestServerRef + metricsServer*: MetricsHttpServerRef + appCallbacks*: AppCallbacks + +func version*(waku: Waku): string = + waku.version + +proc setupSwitchServices( + waku: Waku, conf: WakuConf, circuitRelay: Relay, rng: ref HmacDrbgContext +) = + proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} = + debug "circuit relay handler new reserve event", + addrs_before = $(waku.node.announcedAddresses), addrs = $addresses + + waku.node.announcedAddresses.setLen(0) ## remove previous addresses + waku.node.announcedAddresses.add(addresses) + debug "waku node announced addresses updated", + announcedAddresses = waku.node.announcedAddresses + + if not isNil(waku.wakuDiscv5): + waku.wakuDiscv5.updateAnnouncedMultiAddress(addresses).isOkOr: + error "failed to update announced multiaddress", error = $error + + let autonatService = getAutonatService(rng) + if conf.circuitRelayClient: + ## The node is considered to be behind a NAT or firewall and then it + ## should struggle to be reachable and establish connections to other nodes + const MaxNumRelayServers = 2 + let autoRelayService = AutoRelayService.new( + MaxNumRelayServers, RelayClient(circuitRelay), onReservation, rng + ) + let holePunchService = HPService.new(autonatService, autoRelayService) + waku.node.switch.services = @[Service(holePunchService)] 
+ else: + waku.node.switch.services = @[Service(autonatService)] + +## Initialisation + +proc newCircuitRelay(isRelayClient: bool): Relay = + # TODO: Does it mean it's a circuit-relay server when it's false? + if isRelayClient: + return RelayClient.new() + return Relay.new() + +proc setupAppCallbacks( + node: WakuNode, conf: WakuConf, appCallbacks: AppCallbacks +): Result[void, string] = + if appCallbacks.isNil(): + info "No external callbacks to be set" + return ok() + + if not appCallbacks.relayHandler.isNil(): + if node.wakuRelay.isNil(): + return err("Cannot configure relayHandler callback without Relay mounted") + + let autoShards = + if node.wakuAutoSharding.isSome(): + node.getAutoshards(conf.contentTopics).valueOr: + return err("Could not get autoshards: " & error) + else: + @[] + + let confShards = conf.subscribeShards.mapIt( + RelayShard(clusterId: conf.clusterId, shardId: uint16(it)) + ) + let shards = confShards & autoShards + + let uniqueShards = deduplicate(shards) + + for shard in uniqueShards: + let topic = $shard + node.subscribe((kind: PubsubSub, topic: topic), appCallbacks.relayHandler).isOkOr: + return err(fmt"Could not subscribe {topic}: " & $error) + + if not appCallbacks.topicHealthChangeHandler.isNil(): + if node.wakuRelay.isNil(): + return + err("Cannot configure topicHealthChangeHandler callback without Relay mounted") + node.wakuRelay.onTopicHealthChange = appCallbacks.topicHealthChangeHandler + + if not appCallbacks.connectionChangeHandler.isNil(): + if node.peerManager.isNil(): + return + err("Cannot configure connectionChangeHandler callback with empty peer manager") + node.peerManager.onConnectionChange = appCallbacks.connectionChangeHandler + + return ok() + +proc new*( + T: type Waku, wakuConf: WakuConf, appCallbacks: AppCallbacks = nil +): Future[Result[Waku, string]] {.async.} = + let rng = crypto.newRng() + + logging.setupLog(wakuConf.logLevel, wakuConf.logFormat) + + ?wakuConf.validate() + wakuConf.logConf() + + let 
healthMonitor = NodeHealthMonitor.new(wakuConf.dnsAddrsNameServers) + + let restServer: WakuRestServerRef = + if wakuConf.restServerConf.isSome(): + let restServer = startRestServerEssentials( + healthMonitor, wakuConf.restServerConf.get(), wakuConf.portsShift + ).valueOr: + error "Starting essential REST server failed", error = $error + return err("Failed to start essential REST server in Waku.new: " & $error) + + restServer + else: + nil + + var relay = newCircuitRelay(wakuConf.circuitRelayClient) + + let node = (await setupNode(wakuConf, rng, relay)).valueOr: + error "Failed setting up node", error = $error + return err("Failed setting up node: " & $error) + + healthMonitor.setNodeToHealthMonitor(node) + healthMonitor.onlineMonitor.setPeerStoreToOnlineMonitor(node.switch.peerStore) + healthMonitor.onlineMonitor.addOnlineStateObserver( + node.peerManager.getOnlineStateObserver() + ) + + node.setupAppCallbacks(wakuConf, appCallbacks).isOkOr: + error "Failed setting up app callbacks", error = error + return err("Failed setting up app callbacks: " & $error) + + ## Delivery Monitor + var deliveryMonitor: DeliveryMonitor + if wakuConf.p2pReliability: + if wakuConf.remoteStoreNode.isNone(): + return err("A storenode should be set when reliability mode is on") + + let deliveryMonitorRes = DeliveryMonitor.new( + node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient, + node.wakuFilterClient, + ) + if deliveryMonitorRes.isErr(): + return err("could not create delivery monitor: " & $deliveryMonitorRes.error) + deliveryMonitor = deliveryMonitorRes.get() + + var waku = Waku( + version: git_version, + conf: wakuConf, + rng: rng, + key: wakuConf.nodeKey, + node: node, + healthMonitor: healthMonitor, + deliveryMonitor: deliveryMonitor, + appCallbacks: appCallbacks, + restServer: restServer, + ) + + waku.setupSwitchServices(wakuConf, relay, rng) + + ok(waku) + +proc getPorts( + listenAddrs: seq[MultiAddress] +): Result[tuple[tcpPort, websocketPort: Option[Port]], string] 
= + var tcpPort, websocketPort = none(Port) + + for a in listenAddrs: + if a.isWsAddress(): + if websocketPort.isNone(): + let wsAddress = initTAddress(a).valueOr: + return err("getPorts wsAddr error:" & $error) + websocketPort = some(wsAddress.port) + elif tcpPort.isNone(): + let tcpAddress = initTAddress(a).valueOr: + return err("getPorts tcpAddr error:" & $error) + tcpPort = some(tcpAddress.port) + + return ok((tcpPort: tcpPort, websocketPort: websocketPort)) + +proc getRunningNetConfig(waku: ptr Waku): Future[Result[NetConfig, string]] {.async.} = + var conf = waku[].conf + let (tcpPort, websocketPort) = getPorts(waku[].node.switch.peerInfo.listenAddrs).valueOr: + return err("Could not retrieve ports: " & error) + + if tcpPort.isSome(): + conf.endpointConf.p2pTcpPort = tcpPort.get() + + if websocketPort.isSome() and conf.webSocketConf.isSome(): + conf.webSocketConf.get().port = websocketPort.get() + + # Rebuild NetConfig with bound port values + let netConf = ( + await networkConfiguration( + conf.clusterId, conf.endpointConf, conf.discv5Conf, conf.webSocketConf, + conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId, + ) + ).valueOr: + return err("Could not update NetConfig: " & error) + + return ok(netConf) + +proc updateEnr(waku: ptr Waku): Future[Result[void, string]] {.async.} = + let netConf: NetConfig = (await getRunningNetConfig(waku)).valueOr: + return err("error calling updateNetConfig: " & $error) + let record = enrConfiguration(waku[].conf, netConf).valueOr: + return err("ENR setup failed: " & error) + + if isClusterMismatched(record, waku[].conf.clusterId): + return err("cluster-id mismatch configured shards") + + waku[].node.enr = record + + return ok() + +proc updateAddressInENR(waku: ptr Waku): Result[void, string] = + let addresses: seq[MultiAddress] = waku[].node.announcedAddresses + let encodedAddrs = multiaddr.encodeMultiaddrs(addresses) + + ## First update the enr info contained in WakuNode + let keyBytes = 
waku[].key.getRawBytes().valueOr: + return err("failed to retrieve raw bytes from waku key: " & $error) + + let parsedPk = keys.PrivateKey.fromHex(keyBytes.toHex()).valueOr: + return err("failed to parse the private key: " & $error) + + let enrFields = @[toFieldPair(MultiaddrEnrField, encodedAddrs)] + waku[].node.enr.update(parsedPk, extraFields = enrFields).isOkOr: + return err("failed to update multiaddress in ENR updateAddressInENR: " & $error) + + debug "Waku node ENR updated successfully with new multiaddress", + enr = waku[].node.enr.toUri(), record = $(waku[].node.enr) + + ## Now update the ENR infor in discv5 + if not waku[].wakuDiscv5.isNil(): + waku[].wakuDiscv5.protocol.localNode.record = waku[].node.enr + let enr = waku[].wakuDiscv5.protocol.localNode.record + + debug "Waku discv5 ENR updated successfully with new multiaddress", + enr = enr.toUri(), record = $(enr) + + return ok() + +proc updateWaku(waku: ptr Waku): Future[Result[void, string]] {.async.} = + let conf = waku[].conf + if conf.endpointConf.p2pTcpPort == Port(0) or + (conf.websocketConf.isSome() and conf.websocketConf.get.port == Port(0)): + (await updateEnr(waku)).isOkOr: + return err("error calling updateEnr: " & $error) + + ?updateAnnouncedAddrWithPrimaryIpAddr(waku[].node) + + ?updateAddressInENR(waku) + + return ok() + +proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} = + while true: + await sleepAsync(30.seconds) + if waku.conf.dnsDiscoveryConf.isSome(): + let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get() + let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( + dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers + ) + if dynamicBootstrapNodesRes.isErr(): + error "Retrieving dynamic bootstrap nodes failed", + error = dynamicBootstrapNodesRes.error + continue + + waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() + + if not waku[].wakuDiscv5.isNil(): + let dynamicBootstrapEnrs = waku[].dynamicBootstrapNodes + 
.filterIt(it.hasUdpPort()) + .mapIt(it.enr.get().toUri()) + var discv5BootstrapEnrs: seq[enr.Record] + # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq + for enrUri in dynamicBootstrapEnrs: + addBootstrapNode(enrUri, discv5BootstrapEnrs) + + waku[].wakuDiscv5.updateBootstrapRecords( + waku[].wakuDiscv5.protocol.bootstrapRecords & discv5BootstrapEnrs + ) + + info "Connecting to dynamic bootstrap peers" + try: + await connectToNodes( + waku[].node, waku[].dynamicBootstrapNodes, "dynamic bootstrap" + ) + except CatchableError: + error "failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg() + return + +proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} = + if waku[].node.started: + warn "startWaku: waku node already started" + return ok() + + debug "Retrieve dynamic bootstrap nodes" + let conf = waku[].conf + + if conf.dnsDiscoveryConf.isSome(): + let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get() + let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes( + dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers + ) + + if dynamicBootstrapNodesRes.isErr(): + error "Retrieving dynamic bootstrap nodes failed", + error = dynamicBootstrapNodesRes.error + # Start Dns Discovery retry loop + waku[].dnsRetryLoopHandle = waku.startDnsDiscoveryRetryLoop() + else: + waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get() + + (await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr: + return err("error while calling startNode: " & $error) + + ## Update waku data that is set dynamically on node start + (await updateWaku(waku)).isOkOr: + return err("Error in updateApp: " & $error) + + ## Discv5 + if conf.discv5Conf.isSome(): + waku[].wakuDiscV5 = waku_discv5.setupDiscoveryV5( + waku.node.enr, + waku.node.peerManager, + waku.node.topicSubscriptionQueue, + conf.discv5Conf.get(), + waku.dynamicBootstrapNodes, + waku.rng, + conf.nodeKey, + 
conf.endpointConf.p2pListenAddress, + conf.portsShift, + ) + + (await waku.wakuDiscV5.start()).isOkOr: + return err("failed to start waku discovery v5: " & $error) + + ## Reliability + if not waku[].deliveryMonitor.isNil(): + waku[].deliveryMonitor.startDeliveryMonitor() + + ## Health Monitor + waku[].healthMonitor.startHealthMonitor().isOkOr: + return err("failed to start health monitor: " & $error) + + if conf.restServerConf.isSome(): + rest_server_builder.startRestServerProtocolSupport( + waku[].restServer, + waku[].node, + waku[].wakuDiscv5, + conf.restServerConf.get(), + conf.relay, + conf.lightPush, + conf.clusterId, + conf.subscribeShards, + conf.contentTopics, + ).isOkOr: + return err ("Starting protocols support REST server failed: " & $error) + + if conf.metricsServerConf.isSome(): + waku[].metricsServer = ( + await ( + waku_metrics.startMetricsServerAndLogging( + conf.metricsServerConf.get(), conf.portsShift + ) + ) + ).valueOr: + return err("Starting monitoring and external interfaces failed: " & error) + + waku[].healthMonitor.setOverallHealth(HealthStatus.READY) + + return ok() + +proc stop*(waku: Waku): Future[void] {.async: (raises: [Exception]).} = + ## Waku shutdown + + if not waku.node.started: + warn "stop: attempting to stop node that isn't running" + + waku.healthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN) + + if not waku.metricsServer.isNil(): + await waku.metricsServer.stop() + + if not waku.wakuDiscv5.isNil(): + await waku.wakuDiscv5.stop() + + if not waku.node.isNil(): + await waku.node.stop() + + if not waku.dnsRetryLoopHandle.isNil(): + await waku.dnsRetryLoopHandle.cancelAndWait() + + if not waku.healthMonitor.isNil(): + await waku.healthMonitor.stopHealthMonitor() + + if not waku.restServer.isNil(): + await waku.restServer.stop() diff --git a/third-party/nwaku/waku/factory/waku_conf.nim b/third-party/nwaku/waku/factory/waku_conf.nim new file mode 100644 index 0000000..fa6da69 --- /dev/null +++ 
b/third-party/nwaku/waku/factory/waku_conf.nim @@ -0,0 +1,235 @@ +import + std/[net, options, strutils], + chronicles, + libp2p/crypto/crypto, + libp2p/multiaddress, + libp2p/crypto/curve25519, + secp256k1, + results + +import + ../waku_rln_relay/rln_relay, + ../waku_api/rest/builder, + ../discovery/waku_discv5, + ../node/waku_metrics, + ../common/logging, + ../common/rate_limit/setting, + ../waku_enr/capabilities, + ./networks_config + +export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerConf + +logScope: + topics = "waku conf" + +type WebSocketSecureConf* {.requiresInit.} = object + keyPath*: string + certPath*: string + +type WebSocketConf* = object + port*: Port + secureConf*: Option[WebSocketSecureConf] + +# TODO: should be defined in validator_signed.nim and imported here +type ProtectedShard* {.requiresInit.} = object + shard*: uint16 + key*: secp256k1.SkPublicKey + +type DnsDiscoveryConf* {.requiresInit.} = object + enrTreeUrl*: string + # TODO: should probably only have one set of name servers (see dnsaddrs) + nameServers*: seq[IpAddress] + +type StoreSyncConf* {.requiresInit.} = object + rangeSec*: uint32 + intervalSec*: uint32 + relayJitterSec*: uint32 + +type MixConf* = ref object + mixKey*: Curve25519Key + mixPubKey*: Curve25519Key + +type StoreServiceConf* {.requiresInit.} = object + dbMigration*: bool + dbURl*: string + dbVacuum*: bool + supportV2*: bool + maxNumDbConnections*: int + retentionPolicy*: string + resume*: bool + storeSyncConf*: Option[StoreSyncConf] + +type FilterServiceConf* {.requiresInit.} = object + maxPeersToServe*: uint32 + subscriptionTimeout*: uint16 + maxCriteria*: uint32 + +type EndpointConf* = object # TODO: make enum + natStrategy*: string + p2pTcpPort*: Port + dns4DomainName*: Option[string] + p2pListenAddress*: IpAddress + extMultiAddrs*: seq[MultiAddress] + extMultiAddrsOnly*: bool + +## `WakuConf` is a valid configuration for a Waku node +## All information needed by a waku node should be 
contained +## In this object. A convenient `validate` method enables doing +## sanity checks beyond type enforcement. +## If `Option` is `some` it means the related protocol is enabled. +type WakuConf* {.requiresInit.} = ref object + # ref because `getRunningNetConfig` modifies it + nodeKey*: crypto.PrivateKey + + clusterId*: uint16 + subscribeShards*: seq[uint16] + protectedShards*: seq[ProtectedShard] + + shardingConf*: ShardingConf + contentTopics*: seq[string] + + relay*: bool + lightPush*: bool + peerExchangeService*: bool + peerExchangeDiscovery*: bool + + # TODO: remove relay peer exchange + relayPeerExchange*: bool + rendezvous*: bool + circuitRelayClient*: bool + + discv5Conf*: Option[Discv5Conf] + dnsDiscoveryConf*: Option[DnsDiscoveryConf] + filterServiceConf*: Option[FilterServiceConf] + storeServiceConf*: Option[StoreServiceConf] + rlnRelayConf*: Option[RlnRelayConf] + restServerConf*: Option[RestServerConf] + metricsServerConf*: Option[MetricsServerConf] + webSocketConf*: Option[WebSocketConf] + mixConf*: Option[MixConf] + + portsShift*: uint16 + dnsAddrsNameServers*: seq[IpAddress] + endpointConf*: EndpointConf + wakuFlags*: CapabilitiesBitfield + + # TODO: could probably make it a `PeerRemoteInfo` + staticNodes*: seq[string] + remoteStoreNode*: Option[string] + remoteLightPushNode*: Option[string] + remoteFilterNode*: Option[string] + remotePeerExchangeNode*: Option[string] + + maxMessageSizeBytes*: uint64 + + logLevel*: logging.LogLevel + logFormat*: logging.LogFormat + + peerPersistence*: bool + # TODO: should clearly be a uint + peerStoreCapacity*: Option[int] + # TODO: should clearly be a uint + maxConnections*: int + + agentString*: string + + colocationLimit*: int + + rateLimit*: ProtocolRateLimitSettings + + # TODO: those could be in a relay conf object + maxRelayPeers*: Option[int] + relayShardedPeerManagement*: bool + # TODO: use proper type + relayServiceRatio*: string + + p2pReliability*: bool + +proc logConf*(conf: WakuConf) = + info 
"Configuration: Enabled protocols", + relay = conf.relay, + rlnRelay = conf.rlnRelayConf.isSome(), + store = conf.storeServiceConf.isSome(), + filter = conf.filterServiceConf.isSome(), + lightPush = conf.lightPush, + peerExchange = conf.peerExchangeService + + info "Configuration. Network", cluster = conf.clusterId + + for shard in conf.subscribeShards: + info "Configuration. Active Relay Shards", shard = shard + + if conf.discv5Conf.isSome(): + for i in conf.discv5Conf.get().bootstrapNodes: + info "Configuration. Bootstrap nodes", node = i.string + + if conf.rlnRelayConf.isSome(): + var rlnRelayConf = conf.rlnRelayConf.get() + if rlnRelayConf.dynamic: + info "Configuration. Validation", + mechanism = "onchain rln", + contract = rlnRelayConf.ethContractAddress.string, + maxMessageSize = conf.maxMessageSizeBytes, + rlnEpochSizeSec = rlnRelayConf.epochSizeSec, + rlnRelayUserMessageLimit = rlnRelayConf.userMessageLimit + +proc validateNodeKey(wakuConf: WakuConf): Result[void, string] = + wakuConf.nodeKey.getPublicKey().isOkOr: + return err("nodekey param is invalid") + return ok() + +proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] = + if wakuConf.endpointConf.dns4DomainName.isSome() and + isEmptyOrWhiteSpace(wakuConf.endpointConf.dns4DomainName.get().string): + return err("dns4-domain-name is an empty string, set it to none(string) instead") + + if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio): + return err("relay-service-ratio is an empty string") + + for sn in wakuConf.staticNodes: + if isEmptyOrWhiteSpace(sn): + return err("staticnode contain an empty string") + + if wakuConf.remoteStoreNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remoteStoreNode.get()): + return err("storenode is an empty string, set it to none(string) instead") + + if wakuConf.remoteLightPushNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remoteLightPushNode.get()): + return err("lightpushnode is an empty string, set it to none(string) instead") + + if 
wakuConf.remotePeerExchangeNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remotePeerExchangeNode.get()): + return err("peer-exchange-node is an empty string, set it to none(string) instead") + + if wakuConf.remoteFilterNode.isSome() and + isEmptyOrWhiteSpace(wakuConf.remoteFilterNode.get()): + return err("filternode is an empty string, set it to none(string) instead") + + if wakuConf.dnsDiscoveryConf.isSome() and + isEmptyOrWhiteSpace(wakuConf.dnsDiscoveryConf.get().enrTreeUrl): + return err("dns-discovery-url is an empty string") + + # TODO: rln relay config should validate itself + if wakuConf.rlnRelayConf.isSome(): + let rlnRelayConf = wakuConf.rlnRelayConf.get() + + if rlnRelayConf.ethClientUrls.len == 0: + return err("rln-relay-eth-client-address is empty") + if isEmptyOrWhiteSpace(rlnRelayConf.ethContractAddress): + return err("rln-relay-eth-contract-address is an empty string") + + if rlnRelayConf.creds.isSome(): + let creds = rlnRelayConf.creds.get() + if isEmptyOrWhiteSpace(creds.path): + return err ("rln-relay-cred-path is an empty string") + if isEmptyOrWhiteSpace(creds.password): + return err ("rln-relay-cred-password is an empty string") + + return ok() + +proc validate*(wakuConf: WakuConf): Result[void, string] = + ?wakuConf.validateNodeKey() + ?wakuConf.shardingConf.validateShards(wakuConf.subscribeShards) + ?wakuConf.validateNoEmptyStrings() + return ok() diff --git a/third-party/nwaku/waku/incentivization/common.nim b/third-party/nwaku/waku/incentivization/common.nim new file mode 100644 index 0000000..071b4c1 --- /dev/null +++ b/third-party/nwaku/waku/incentivization/common.nim @@ -0,0 +1,9 @@ +import std/options + +import waku/incentivization/rpc + +proc init*(T: type EligibilityStatus, isEligible: bool): T = + if isEligible: + EligibilityStatus(statusCode: uint32(200), statusDesc: some("OK")) + else: + EligibilityStatus(statusCode: uint32(402), statusDesc: some("Payment Required")) diff --git 
a/third-party/nwaku/waku/incentivization/eligibility_manager.nim b/third-party/nwaku/waku/incentivization/eligibility_manager.nim new file mode 100644 index 0000000..b10b293 --- /dev/null +++ b/third-party/nwaku/waku/incentivization/eligibility_manager.nim @@ -0,0 +1,98 @@ +import std/[options, sets], chronos, web3, stew/byteutils, stint, results, chronicles + +import waku/incentivization/rpc, tests/waku_rln_relay/utils_onchain + +const SimpleTransferGasUsed = Quantity(21000) +const TxReceiptQueryTimeout = 3.seconds + +type EligibilityManager* = ref object # FIXME: make web3 private? + web3*: Web3 + seenTxIds*: HashSet[TxHash] + +# Initialize the eligibilityManager with a web3 instance +proc init*( + T: type EligibilityManager, ethClient: string +): Future[EligibilityManager] {.async.} = + return + EligibilityManager(web3: await newWeb3(ethClient), seenTxIds: initHashSet[TxHash]()) + # TODO: handle error if web3 instance is not established + +# Clean up the web3 instance +proc close*(eligibilityManager: EligibilityManager) {.async.} = + await eligibilityManager.web3.close() + +proc getTransactionByHash( + eligibilityManager: EligibilityManager, txHash: TxHash +): Future[TransactionObject] {.async.} = + await eligibilityManager.web3.provider.eth_getTransactionByHash(txHash) + +proc getMinedTransactionReceipt( + eligibilityManager: EligibilityManager, txHash: TxHash +): Future[Result[ReceiptObject, string]] {.async.} = + let txReceipt = eligibilityManager.web3.getMinedTransactionReceipt(txHash) + if (await txReceipt.withTimeout(TxReceiptQueryTimeout)): + return ok(txReceipt.value()) + else: + return err("Timeout on tx receipt query, tx hash: " & $txHash) + +proc getTxAndTxReceipt( + eligibilityManager: EligibilityManager, txHash: TxHash +): Future[Result[(TransactionObject, ReceiptObject), string]] {.async.} = + let txFuture = eligibilityManager.getTransactionByHash(txHash) + let receiptFuture = eligibilityManager.getMinedTransactionReceipt(txHash) + await 
proc isEligibleTxId*(
    eligibilityManager: EligibilityManager,
    eligibilityProof: EligibilityProof,
    expectedToAddress: Address,
    expectedValueWei: UInt256,
): Future[Result[void, string]] {.async.} =
  ## We consider a tx eligible, in the context of service incentivization PoC,
  ## if it is confirmed, is a simple transfer (neither a contract creation nor a
  ## contract call), pays the expected amount to the server's address, and its
  ## tx id has not been presented before (double-spend protection).
  ## See spec: https://github.com/waku-org/specs/blob/master/standards/core/incentivization.md
  if eligibilityProof.proofOfPayment.isNone():
    return err("Eligibility proof is empty")
  let txHash = TxHash.fromHex(byteutils.toHex(eligibilityProof.proofOfPayment.get()))
  # Mark the tx id as seen *before* any await so a concurrent check of the same
  # tx id cannot slip through while we query the chain.
  if txHash in eligibilityManager.seenTxIds:
    return err("TxHash " & $txHash & " was already checked (double-spend attempt)")
  eligibilityManager.seenTxIds.incl(txHash)
  var tx: TransactionObject
  var txReceipt: ReceiptObject
  try:
    let txAndTxReceipt = await eligibilityManager.getTxAndTxReceipt(txHash)
    if txAndTxReceipt.isErr():
      # fix: transient RPC failure (e.g. receipt timeout) used to leave the tx id
      # marked as seen forever, turning any honest retry into a false
      # "double-spend attempt"; forget it so the same proof can be retried
      eligibilityManager.seenTxIds.excl(txHash)
      return err("Failed to fetch tx or tx receipt")
    (tx, txReceipt) = txAndTxReceipt.value()
  except CancelledError as exc:
    eligibilityManager.seenTxIds.excl(txHash)
    raise exc
  except CatchableError:
    # fix: previously only ValueError was caught; any other transport/RPC error
    # would escape this Result-returning proc
    eligibilityManager.seenTxIds.excl(txHash)
    let errorMsg = "Failed to fetch tx or tx receipt: " & getCurrentExceptionMsg()
    error "exception in isEligibleTxId", error = $errorMsg
    return err($errorMsg)
  # A contract-creation tx has no `to` address and is not eligible
  let toAddressOption = txReceipt.to
  if toAddressOption.isNone():
    return err("A contract creation tx is not eligible")
  # A simple transfer uses exactly 21000 gas; anything else is a contract call
  if txReceipt.gasUsed != SimpleTransferGasUsed:
    return err("A contract call tx is not eligible")
  # The payment must go to the server's address...
  let toAddress = toAddressOption.get()
  if toAddress != expectedToAddress:
    return err("Wrong destination address: " & $toAddress)
  # ...and carry the expected amount
  let txValueWei = tx.value
  if txValueWei != expectedValueWei:
    return err("Wrong tx value: got " & $txValueWei & ", expected " & $expectedValueWei)
  return ok()
proc encode*(epRpc: EligibilityProof): ProtoBuffer =
  ## Serializes an EligibilityProof; an absent proof encodes as an empty buffer.
  result = initProtoBuffer()
  let maybeProof = epRpc.proofOfPayment
  if maybeProof.isSome():
    result.write3(1, maybeProof.get())

proc decode*(T: type EligibilityProof, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes an EligibilityProof; a missing field 1 maps to none(seq[byte]).
  let pb = initProtoBuffer(buffer)
  var proofBytes = newSeq[byte]()
  let proof =
    if ?pb.getField(1, proofBytes):
      some(proofBytes)
    else:
      none(seq[byte])
  ok(EligibilityProof(proofOfPayment: proof))

# Codec for EligibilityStatus

proc encode*(esRpc: EligibilityStatus): ProtoBuffer =
  ## Serializes an EligibilityStatus; the description is only written when present.
  result = initProtoBuffer()
  result.write3(1, esRpc.statusCode)
  if esRpc.statusDesc.isSome():
    result.write3(2, esRpc.statusDesc.get())

proc decode*(T: type EligibilityStatus, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes an EligibilityStatus. The status code (field 1) is mandatory;
  ## the description (field 2) is optional.
  let pb = initProtoBuffer(buffer)
  var code = uint32(0)
  if not ?pb.getField(1, code):
    return err(ProtobufError.missingRequiredField("status_code"))
  var description = ""
  let desc =
    if ?pb.getField(2, description):
      some(description)
    else:
      none(string)
  ok(EligibilityStatus(statusCode: code, statusDesc: desc))
const TargetSchemaVersion* = 1
  ## Increase this when there is an update in the database schema.

template projectRoot(): string =
  ## Repository root, derived from this source file's on-disk location.
  currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".."

const SentMsgsMigrationPath: string = projectRoot / "migrations" / "sent_msgs"
  ## Directory holding the migration scripts for the sent-messages database.

proc migrate*(db: SqliteDatabase): DatabaseResult[void] =
  ## Brings the sent-messages sqlite database up to TargetSchemaVersion by
  ## running the scripts under SentMsgsMigrationPath.
  # fix: the log lines previously said "peer store's sqlite database migration",
  # copy-pasted from the peer-store module; this module migrates the
  # sent-messages database
  debug "starting sqlite database migration for sent messages"

  let migrationRes =
    migrate(db, TargetSchemaVersion, migrationsScriptsDir = SentMsgsMigrationPath)
  if migrationRes.isErr():
    return err("failed to execute migration scripts: " & migrationRes.error)

  debug "finished sqlite database migration for sent messages"
  ok()
proc getMissingMsgsFromStore(
    self: RecvMonitor, msgHashes: seq[WakuMessageHash]
): Future[Result[seq[TupleHashAndMsg], string]] {.async.} =
  ## Retrieves the full messages for the given hashes from any store node.
  let queryRes = await self.storeClient.queryToAny(
    StoreQueryRequest(includeData: true, messageHashes: msgHashes)
  )
  if queryRes.isErr():
    return err("getMissingMsgsFromStore: " & $queryRes.error)

  # placeholder returned when a store entry unexpectedly carries no message
  let fallbackMsg = WakuMessage()
  var found: seq[TupleHashAndMsg]
  for storedMsg in queryRes.get().messages:
    found.add((hash: storedMsg.messageHash, msg: storedMsg.message.get(fallbackMsg)))
  return ok(found)
method onSubscribe(
    self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
) {.gcsafe, raises: [].} =
  ## Tracks a filter subscription so msgChecker knows which topics to verify.
  debug "onSubscribe", pubsubTopic, contentTopics
  self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest):
    # fix: deduplicate so that re-subscribing to the same content topic does not
    # accumulate duplicates (which would needlessly inflate the store queries
    # built in msgChecker)
    for cTopic in contentTopics:
      if cTopic notin contentTopicsOfInterest[]:
        contentTopicsOfInterest[].add(cTopic)
  do:
    self.topicsInterest[pubsubTopic] = deduplicate(contentTopics)

method onUnsubscribe(
    self: RecvMonitor, pubsubTopic: string, contentTopics: seq[string]
) {.gcsafe, raises: [].} =
  ## Stops tracking the given content topics; drops the pubsub topic entirely
  ## once no content topics of interest remain.
  debug "onUnsubscribe", pubsubTopic, contentTopics

  self.topicsInterest.withValue(pubsubTopic, contentTopicsOfInterest):
    let remainingCTopics =
      contentTopicsOfInterest[].filterIt(not contentTopics.contains(it))
    contentTopicsOfInterest[] = remainingCTopics

    if remainingCTopics.len == 0:
      self.topicsInterest.del(pubsubTopic)
  do:
    error "onUnsubscribe unsubscribing from wrong topic", pubsubTopic, contentTopics
proc new*(
    T: type SendMonitor,
    storeClient: WakuStoreClient,
    wakuRelay: protocol.WakuRelay,
    wakuLightpushClient: WakuLightPushClient,
): Result[T, string] =
  ## Builds a SendMonitor and registers it as publish observer on whichever
  ## transports are available. At least one of relay / lightpush client must be set.
  if wakuRelay.isNil() and wakuLightpushClient.isNil():
    return err(
      "Could not create SendMonitor. wakuRelay or wakuLightpushClient should be set"
    )

  let notDeliveredStorage = ?NotDeliveredStorage.new()

  let monitor = SendMonitor(
    notDeliveredStorage: notDeliveredStorage,
    storeClient: storeClient,
    wakuRelay: wakuRelay,
    wakuLightpushClient: wakuLightpushClient,
  )

  if not wakuRelay.isNil():
    wakuRelay.addPublishObserver(monitor)
  if not wakuLightpushClient.isNil():
    wakuLightpushClient.addPublishObserver(monitor)

  return ok(monitor)
proc processMessages(self: SendMonitor) {.async.} =
  ## One monitoring pass over the cache of published messages:
  ## - discards messages older than MaxTimeInCache (negative feedback),
  ## - confirms messages older than ArchiveTime against a store node,
  ## - re-publishes messages a store node has not seen yet.
  var msgsToValidate: Table[WakuMessageHash, DeliveryInfo]
  var msgsToDiscard: Table[WakuMessageHash, DeliveryInfo]

  let now = getNowInNanosecondTime()
  let timeToCheckThreshold = now - ArchiveTime.nanos
  let maxLifeTime = now - MaxTimeInCache.nanos

  for hash, deliveryInfo in self.publishedMessages.pairs:
    if deliveryInfo.msg.timestamp < maxLifeTime:
      ## message is too old; give up on it
      msgsToDiscard[hash] = deliveryInfo
    elif deliveryInfo.msg.timestamp < timeToCheckThreshold:
      # fix: this branch used to be an independent `if`, so an expired message
      # (timestamp < maxLifeTime implies < timeToCheckThreshold) landed in BOTH
      # tables: the client was told publication failed and yet the message could
      # still be validated and re-published below
      msgsToValidate[hash] = deliveryInfo

  ## Discard the messages that are too old
  self.performFeedbackAndCleanup(
    msgsToDiscard, DeliverySuccess.UNSUCCESSFUL, DeliveryDirection.PUBLISHING,
    "Could not publish messages. Please try again.",
  )

  let (publishedCorrectly, notYetPublished) = (
    await self.checkMsgsInStore(msgsToValidate)
  ).valueOr:
    return ## the error log is printed in checkMsgsInStore

  ## Give positive feedback for the correctly published messages
  self.performFeedbackAndCleanup(
    publishedCorrectly, DeliverySuccess.SUCCESSFUL, DeliveryDirection.PUBLISHING,
    "messages published correctly",
  )

  ## Try to publish again
  for msgHash, deliveryInfo in notYetPublished.pairs:
    let pubsubTopic = deliveryInfo.pubsubTopic
    let msg = deliveryInfo.msg
    if not self.wakuRelay.isNil():
      debug "trying to publish again with wakuRelay", msgHash, pubsubTopic
      (await self.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
        error "could not publish with wakuRelay.publish",
          msgHash, pubsubTopic, error = $error
        continue

    if not self.wakuLightpushClient.isNil():
      debug "trying to publish again with wakuLightpushClient", msgHash, pubsubTopic
      (await self.wakuLightpushClient.publishToAny(pubsubTopic, msg)).isOkOr:
        error "could not publish with publishToAny", error = $error
        continue
proc startSendMonitor*(self: SendMonitor) =
  ## Spawns the async task that periodically confirms archival of sent messages.
  self.msgStoredCheckerHandle = self.checkIfMessagesStored()

proc stopSendMonitor*(self: SendMonitor) =
  ## Cancels the monitoring task, if it was started.
  # fix: guard against a nil handle (stop called before start), mirroring the
  # nil check already done in stopRecvMonitor; previously this dereferenced nil
  if not self.msgStoredCheckerHandle.isNil():
    # NOTE(review): the cancellation future is discarded because this proc is
    # not async; consider making it async and awaiting, as stopRecvMonitor does
    discard self.msgStoredCheckerHandle.cancelAndWait()

proc setDeliveryCallback*(self: SendMonitor, deliveryCb: DeliveryFeedbackCallback) =
  ## Registers the API-client callback that receives delivery feedback.
  self.deliveryCb = deliveryCb
proc init*(t: typedesc[HealthStatus], strRep: string): Result[HealthStatus, string] =
  ## Parses the textual representation of a HealthStatus.
  ## Returns err(...) when the string matches no enum member.
  try:
    ok(parseEnum[HealthStatus](strRep))
  except ValueError:
    err("Invalid HealthStatus string representation: " & strRep)
proc getRlnRelayHealth(hm: NodeHealthMonitor): Future[ProtocolHealth] {.async.} =
  ## Health of the RLN Relay protocol: probes wakuRlnRelay.isReady() with a
  ## bounded wait and maps the outcome to ready / synchronizing / not-ready.
  var p = ProtocolHealth.init("Rln Relay")
  if hm.node.isNil():
    warn "WakuNode is not set, cannot check health", protocol_health_instance = $p
    return p.notMounted()

  if hm.node.wakuRlnRelay.isNil():
    return p.notMounted()

  const FutIsReadyTimout = 5.seconds

  let isReadyStateFut = hm.node.wakuRlnRelay.isReady()
  if not await isReadyStateFut.withTimeout(FutIsReadyTimout):
    return p.notReady("Ready state check timed out")

  try:
    if not isReadyStateFut.completed():
      # fix: the future finished within the timeout but not successfully
      # (failed or cancelled); the previous message falsely claimed a timeout
      return p.notReady("Ready state check did not complete")
    elif isReadyStateFut.read():
      return p.ready()

    return p.synchronizing()
  except CatchableError:
    # fix: was a bare `except:`, which also swallows Defects (assertion
    # failures, etc.) — catch only recoverable errors
    error "exception reading state: " & getCurrentExceptionMsg()
    return p.notReady("State cannot be determined")
proc getFilterClientHealth(
    hm: NodeHealthMonitor, relayHealth: HealthStatus
): ProtocolHealth =
  ## Filter client is ready as soon as a filter service peer is known.
  ## (relayHealth is accepted for signature parity with the other checks.)
  var p = ProtocolHealth.init("Filter Client")
  checkWakuNodeNotNil(hm.node, p)

  if hm.node.wakuFilterClient == nil:
    return p.notMounted()

  let hasServicePeer = hm.node.peerManager.selectPeer(WakuFilterSubscribeCodec).isSome()
  if not hasServicePeer:
    return p.notReady("No Filter service peer available yet")

  return p.ready()
getStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Store Client") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuStoreClient == nil: + return p.notMounted() + + if hm.node.peerManager.selectPeer(WakuStoreCodec).isSome() or hm.node.wakuStore != nil: + return p.ready() + + return p.notReady( + "No Store service peer available yet, neither Store service set up for the node" + ) + +proc getLegacyStoreHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Legacy Store") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLegacyStore == nil: + return p.notMounted() + + return p.ready() + +proc getLegacyStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Legacy Store Client") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuLegacyStoreClient == nil: + return p.notMounted() + + if hm.node.peerManager.selectPeer(WakuLegacyStoreCodec).isSome() or + hm.node.wakuLegacyStore != nil: + return p.ready() + + return p.notReady( + "No Legacy Store service peers are available yet, neither Store service set up for the node" + ) + +proc getPeerExchangeHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Peer Exchange") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuPeerExchange == nil: + return p.notMounted() + + return p.ready() + +proc getRendezvousHealth(hm: NodeHealthMonitor): ProtocolHealth = + var p = ProtocolHealth.init("Rendezvous") + checkWakuNodeNotNil(hm.node, p) + + if hm.node.wakuRendezvous == nil: + return p.notMounted() + + if hm.node.peerManager.switch.peerStore.peers(RendezVousCodec).len() == 0: + return p.notReady("No Rendezvous peers are available yet") + + return p.ready() + +proc selectRandomPeersForKeepalive( + node: WakuNode, outPeers: seq[PeerId], numRandomPeers: int +): Future[seq[PeerId]] {.async.} = + ## Select peers for random keepalive, prioritizing mesh peers + + if node.wakuRelay.isNil(): + return 
selectRandomPeers(outPeers, numRandomPeers) + + let meshPeers = node.wakuRelay.getPeersInMesh().valueOr: + error "Failed getting peers in mesh for ping", error = error + # Fallback to random selection from all outgoing peers + return selectRandomPeers(outPeers, numRandomPeers) + + trace "Mesh peers for keepalive", meshPeers = meshPeers + + # Get non-mesh peers and shuffle them + var nonMeshPeers = outPeers.filterIt(it notin meshPeers) + shuffle(nonMeshPeers) + + # Combine mesh peers + random non-mesh peers up to numRandomPeers total + let numNonMeshPeers = max(0, numRandomPeers - len(meshPeers)) + let selectedNonMeshPeers = nonMeshPeers[0 ..< min(len(nonMeshPeers), numNonMeshPeers)] + + let selectedPeers = meshPeers & selectedNonMeshPeers + trace "Selected peers for keepalive", selected = selectedPeers + return selectedPeers + +proc keepAliveLoop( + node: WakuNode, + randomPeersKeepalive: chronos.Duration, + allPeersKeepAlive: chronos.Duration, + numRandomPeers = 10, +) {.async.} = + # Calculate how many random peer cycles before pinging all peers + let randomToAllRatio = + int(allPeersKeepAlive.seconds() / randomPeersKeepalive.seconds()) + var countdownToPingAll = max(0, randomToAllRatio - 1) + + # Sleep detection configuration + let sleepDetectionInterval = 3 * randomPeersKeepalive + + # Failure tracking + var consecutiveIterationFailures = 0 + const maxAllowedConsecutiveFailures = 2 + + var lastTimeExecuted = Moment.now() + + while true: + trace "Running keepalive loop" + await sleepAsync(randomPeersKeepalive) + + if not node.started: + continue + + let currentTime = Moment.now() + + # Check for sleep detection + if currentTime - lastTimeExecuted > sleepDetectionInterval: + warn "Keep alive hasn't been executed recently. 
Killing all connections" + await node.peerManager.disconnectAllPeers() + lastTimeExecuted = currentTime + consecutiveIterationFailures = 0 + continue + + # Check for consecutive failures + if consecutiveIterationFailures > maxAllowedConsecutiveFailures: + warn "Too many consecutive ping failures, node likely disconnected. Killing all connections", + consecutiveIterationFailures, maxAllowedConsecutiveFailures + await node.peerManager.disconnectAllPeers() + consecutiveIterationFailures = 0 + lastTimeExecuted = currentTime + continue + + # Determine which peers to ping + let outPeers = node.peerManager.connectedPeers()[1] + let peersToPing = + if countdownToPingAll > 0: + await selectRandomPeersForKeepalive(node, outPeers, numRandomPeers) + else: + outPeers + + let numPeersToPing = len(peersToPing) + + if countdownToPingAll > 0: + trace "Pinging random peers", + count = numPeersToPing, countdownToPingAll = countdownToPingAll + countdownToPingAll.dec() + else: + trace "Pinging all peers", count = numPeersToPing + countdownToPingAll = max(0, randomToAllRatio - 1) + + # Execute keepalive pings + let successfulPings = await parallelPings(node, peersToPing) + + if successfulPings != numPeersToPing: + waku_node_errors.inc( + amount = numPeersToPing - successfulPings, labelValues = ["keep_alive_failure"] + ) + + trace "Keepalive results", + attemptedPings = numPeersToPing, successfulPings = successfulPings + + # Update failure tracking + if numPeersToPing > 0 and successfulPings == 0: + consecutiveIterationFailures.inc() + error "All pings failed", consecutiveFailures = consecutiveIterationFailures + else: + consecutiveIterationFailures = 0 + + lastTimeExecuted = currentTime + +# 2 minutes default - 20% of the default chronosstream timeout duration +proc startKeepalive*( + hm: NodeHealthMonitor, + randomPeersKeepalive = 10.seconds, + allPeersKeepalive = 2.minutes, +): Result[void, string] = + # Validate input parameters + if randomPeersKeepalive.isZero() or 
allPeersKeepAlive.isZero(): + error "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0", + randomPeersKeepalive = $randomPeersKeepalive, + allPeersKeepAlive = $allPeersKeepAlive + return err( + "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0" + ) + + if allPeersKeepAlive < randomPeersKeepalive: + error "startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive", + allPeersKeepAlive = $allPeersKeepAlive, + randomPeersKeepalive = $randomPeersKeepalive + return + err("startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive") + + info "starting keepalive", + randomPeersKeepalive = randomPeersKeepalive, allPeersKeepalive = allPeersKeepalive + + hm.keepAliveFut = hm.node.keepAliveLoop(randomPeersKeepalive, allPeersKeepalive) + return ok() + +proc getNodeHealthReport*(hm: NodeHealthMonitor): Future[HealthReport] {.async.} = + var report: HealthReport + report.nodeHealth = hm.nodeHealth + + if not hm.node.isNil(): + let relayHealth = hm.getRelayHealth() + report.protocolsHealth.add(relayHealth) + report.protocolsHealth.add(await hm.getRlnRelayHealth()) + report.protocolsHealth.add(hm.getLightpushHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getLegacyLightpushHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getFilterHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getStoreHealth()) + report.protocolsHealth.add(hm.getLegacyStoreHealth()) + report.protocolsHealth.add(hm.getPeerExchangeHealth()) + report.protocolsHealth.add(hm.getRendezvousHealth()) + + report.protocolsHealth.add(hm.getLightpushClientHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getLegacyLightpushClientHealth(relayHealth.health)) + report.protocolsHealth.add(hm.getStoreClientHealth()) + report.protocolsHealth.add(hm.getLegacyStoreClientHealth()) + report.protocolsHealth.add(hm.getFilterClientHealth(relayHealth.health)) + return report + +proc 
setNodeToHealthMonitor*(hm: NodeHealthMonitor, node: WakuNode) = + hm.node = node + +proc setOverallHealth*(hm: NodeHealthMonitor, health: HealthStatus) = + hm.nodeHealth = health + +proc startHealthMonitor*(hm: NodeHealthMonitor): Result[void, string] = + hm.onlineMonitor.startOnlineMonitor() + hm.startKeepalive().isOkOr: + return err("startHealthMonitor: failed starting keep alive: " & error) + return ok() + +proc stopHealthMonitor*(hm: NodeHealthMonitor) {.async.} = + if not hm.onlineMonitor.isNil(): + await hm.onlineMonitor.stopOnlineMonitor() + + if not hm.keepAliveFut.isNil(): + await hm.keepAliveFut.cancelAndWait() + +proc new*( + T: type NodeHealthMonitor, + dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], +): T = + T( + nodeHealth: INITIALIZING, + node: nil, + onlineMonitor: OnlineMonitor.init(dnsNameServers), + ) diff --git a/third-party/nwaku/waku/node/health_monitor/online_monitor.nim b/third-party/nwaku/waku/node/health_monitor/online_monitor.nim new file mode 100644 index 0000000..27bd53b --- /dev/null +++ b/third-party/nwaku/waku/node/health_monitor/online_monitor.nim @@ -0,0 +1,77 @@ +import std/sequtils +import chronos, chronicles, libp2p/nameresolving/dnsresolver, libp2p/peerstore + +import ../peer_manager/waku_peer_store, waku/waku_core/peers + +type + OnOnlineStateChange* = proc(online: bool) {.gcsafe, raises: [].} + + OnlineMonitor* = ref object + onOnlineStateChange: OnOnlineStateChange + dnsNameServers*: seq[IpAddress] + onlineStateObservers: seq[OnOnlineStateChange] + networkConnLoopHandle: Future[void] # node: WakuNode + peerStore: PeerStore + online: bool + +proc checkInternetConnectivity( + nameServerIps: seq[IpAddress], timeout = 2.seconds +): Future[bool] {.async.} = + const DNSCheckDomain = "one.one.one.one" + let nameServers = nameServerIps.mapIt(initTAddress(it, Port(53))) + let dnsResolver = DnsResolver.new(nameServers) + + # Resolve domain IP + let resolved = await dnsResolver.resolveIp(DNSCheckDomain, 
# NOTE(review): this chunk opens inside checkInternetConnectivity — the
# resolveIp(...) call is split across the chunk boundary.
      0.Port, Domain.AF_UNSPEC)
  # fix: collapsed `if resolved.len > 0: return true else: return false`
  # into the equivalent boolean expression.
  return resolved.len > 0

proc updateOnlineState(self: OnlineMonitor) {.async.} =
  ## Recomputes `self.online` and notifies every registered observer.
  ## A node is considered online if it has at least one connected peer;
  ## otherwise a DNS resolution against the configured resolvers is attempted.
  let numConnectedPeers =
    if self.peerStore.isNil():
      0
    else:
      self.peerStore.peers().countIt(it.connectedness == Connected)

  self.online =
    if numConnectedPeers > 0:
      true
    else:
      await checkInternetConnectivity(self.dnsNameServers)

  # fix: the state is now refreshed even when nobody listens, so `amIOnline`
  # does not report a stale value; previously this proc returned before
  # updating `self.online` whenever no observers were registered.
  # (Trade-off: a periodic DNS lookup may now happen with zero observers.)
  if self.onlineStateObservers.len == 0:
    trace "No online state observers registered, cannot notify about online state change"
    return

  for onlineStateObserver in self.onlineStateObservers:
    onlineStateObserver(self.online)

proc networkConnectivityLoop(self: OnlineMonitor): Future[void] {.async.} =
  ## Checks periodically whether the node is online or not
  ## and triggers any change that depends on the network connectivity state
  while true:
    await self.updateOnlineState()
    await sleepAsync(5.seconds)

proc startOnlineMonitor*(self: OnlineMonitor) =
  ## Spawns the periodic connectivity-check loop.
  self.networkConnLoopHandle = self.networkConnectivityLoop()

proc stopOnlineMonitor*(self: OnlineMonitor) {.async.} =
  ## Cancels the connectivity-check loop, if it was ever started.
  if not self.networkConnLoopHandle.isNil():
    await self.networkConnLoopHandle.cancelAndWait()

proc setPeerStoreToOnlineMonitor*(self: OnlineMonitor, peerStore: PeerStore) =
  ## Injects the peer store used to count connected peers.
  self.peerStore = peerStore

proc addOnlineStateObserver*(self: OnlineMonitor, observer: OnOnlineStateChange) =
  ## Adds an observer that will be called when the online state changes
  if observer notin self.onlineStateObservers:
    self.onlineStateObservers.add(observer)

proc amIOnline*(self: OnlineMonitor): bool =
  ## Last computed online state (refreshed on every loop iteration).
  return self.online

proc init*(T: type OnlineMonitor, dnsNameServers: seq[IpAddress]): OnlineMonitor =
  T(dnsNameServers: dnsNameServers, onlineStateObservers: @[])
diff --git a/third-party/nwaku/waku/node/health_monitor/protocol_health.nim b/third-party/nwaku/waku/node/health_monitor/protocol_health.nim
new file mode 100644
index 0000000..7bacea9
--- /dev/null
+++
b/third-party/nwaku/waku/node/health_monitor/protocol_health.nim
@@ -0,0 +1,46 @@
import std/[options, strformat]
import ./health_status

type ProtocolHealth* = object
  protocol*: string
  health*: HealthStatus
  desc*: Option[string] ## describes why a certain protocol is considered `NOT_READY`

proc applyState(
    p: var ProtocolHealth, status: HealthStatus, desc = none[string]()
): ProtocolHealth =
  ## Private helper: records a status/description pair and hands the record back,
  ## so every public state-setter below stays a one-liner.
  p.health = status
  p.desc = desc
  p

proc notReady*(p: var ProtocolHealth, desc: string): ProtocolHealth =
  ## Marks the protocol NOT_READY, keeping `desc` as the human-readable reason.
  p.applyState(HealthStatus.NOT_READY, some(desc))

proc ready*(p: var ProtocolHealth): ProtocolHealth =
  p.applyState(HealthStatus.READY)

proc notMounted*(p: var ProtocolHealth): ProtocolHealth =
  p.applyState(HealthStatus.NOT_MOUNTED)

proc synchronizing*(p: var ProtocolHealth): ProtocolHealth =
  p.applyState(HealthStatus.SYNCHRONIZING)

proc initializing*(p: var ProtocolHealth): ProtocolHealth =
  p.applyState(HealthStatus.INITIALIZING)

proc shuttingDown*(p: var ProtocolHealth): ProtocolHealth =
  p.applyState(HealthStatus.SHUTTING_DOWN)

proc `$`*(p: ProtocolHealth): string =
  fmt"protocol: {p.protocol}, health: {p.health}, description: {p.desc}"

proc init*(p: typedesc[ProtocolHealth], protocol: string): ProtocolHealth =
  ## Fresh record for `protocol`, starting out as NOT_MOUNTED with no description.
  ProtocolHealth(
    protocol: protocol, health: HealthStatus.NOT_MOUNTED, desc: none[string]()
  )
diff --git a/third-party/nwaku/waku/node/net_config.nim b/third-party/nwaku/waku/node/net_config.nim
new file mode 100644
index 0000000..4802694
--- /dev/null
+++ b/third-party/nwaku/waku/node/net_config.nim
@@ -0,0 +1,189 @@
{.push raises: [].}

import
  std/[options, sequtils, strutils, net], results, libp2p/[multiaddress, multicodec]
import ../../waku/waku_core/peers
import ../waku_enr

type NetConfig* = object
  hostAddress*: MultiAddress
  clusterId*: uint16
  wsHostAddress*: Option[MultiAddress]
  hostExtAddress*: Option[MultiAddress]
wsExtAddress*: Option[MultiAddress] + wssEnabled*: bool + extIp*: Option[IpAddress] + extPort*: Option[Port] + dns4DomainName*: Option[string] + dnsNameServers*: seq[IpAddress] + announcedAddresses*: seq[MultiAddress] + extMultiAddrs*: seq[MultiAddress] + enrMultiAddrs*: seq[MultiAddress] + enrIp*: Option[IpAddress] + enrPort*: Option[Port] + discv5UdpPort*: Option[Port] + wakuFlags*: Option[CapabilitiesBitfield] + bindIp*: IpAddress + bindPort*: Port + +type NetConfigResult* = Result[NetConfig, string] + +template ip4TcpEndPoint(address, port): MultiAddress = + MultiAddress.init(address, tcpProtocol, port) + +template dns4Ma(dns4DomainName: string): MultiAddress = + MultiAddress.init("/dns4/" & dns4DomainName).tryGet() + +template tcpPortMa(port: Port): MultiAddress = + MultiAddress.init("/tcp/" & $port).tryGet() + +template dns4TcpEndPoint(dns4DomainName: string, port: Port): MultiAddress = + dns4Ma(dns4DomainName) & tcpPortMa(port) + +template wsFlag(wssEnabled: bool): MultiAddress = + if wssEnabled: + MultiAddress.init("/wss").tryGet() + else: + MultiAddress.init("/ws").tryGet() + +proc formatListenAddress(inputMultiAdd: MultiAddress): MultiAddress = + let inputStr = $inputMultiAdd + # If MultiAddress contains "0.0.0.0", replace it for "127.0.0.1" + return MultiAddress.init(inputStr.replace("0.0.0.0", "127.0.0.1")).get() + +proc isWsAddress*(ma: MultiAddress): bool = + let + isWs = ma.contains(multiCodec("ws")).get() + isWss = ma.contains(multiCodec("wss")).get() + + return isWs or isWss + +proc containsWsAddress(extMultiAddrs: seq[MultiAddress]): bool = + return extMultiAddrs.filterIt(it.isWsAddress()).len > 0 + +const DefaultWsBindPort = static(Port(8000)) +# TODO: migrate to builder pattern with nested configs +proc init*( + T: type NetConfig, + bindIp: IpAddress, + bindPort: Port, + extIp = none(IpAddress), + extPort = none(Port), + extMultiAddrs = newSeq[MultiAddress](), + extMultiAddrsOnly: bool = false, + wsBindPort: Option[Port] = 
some(DefaultWsBindPort), + wsEnabled: bool = false, + wssEnabled: bool = false, + dns4DomainName = none(string), + discv5UdpPort = none(Port), + clusterId: uint16 = 0, + wakuFlags = none(CapabilitiesBitfield), + dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")], +): NetConfigResult = + ## Initialize and validate waku node network configuration + + # Bind addresses + let hostAddress = ip4TcpEndPoint(bindIp, bindPort) + + var wsHostAddress = none(MultiAddress) + if wsEnabled or wssEnabled: + try: + wsHostAddress = some( + ip4TcpEndPoint(bindIp, wsbindPort.get(DefaultWsBindPort)) & wsFlag(wssEnabled) + ) + except CatchableError: + return err(getCurrentExceptionMsg()) + + let enrIp = + if extIp.isSome(): + extIp + else: + some(bindIp) + let enrPort = + if extPort.isSome(): + extPort + else: + some(bindPort) + + # Setup external addresses, if available + var hostExtAddress, wsExtAddress = none(MultiAddress) + + if dns4DomainName.isSome(): + # Use dns4 for externally announced addresses + try: + hostExtAddress = some(dns4TcpEndPoint(dns4DomainName.get(), extPort.get())) + except CatchableError: + return err(getCurrentExceptionMsg()) + + if wsHostAddress.isSome(): + try: + wsExtAddress = some( + dns4TcpEndPoint(dns4DomainName.get(), wsBindPort.get(DefaultWsBindPort)) & + wsFlag(wssEnabled) + ) + except CatchableError: + return err(getCurrentExceptionMsg()) + else: + # No public domain name, use ext IP if available + if extIp.isSome() and extPort.isSome(): + hostExtAddress = some(ip4TcpEndPoint(extIp.get(), extPort.get())) + + if wsHostAddress.isSome(): + try: + wsExtAddress = some( + ip4TcpEndPoint(extIp.get(), wsBindPort.get(DefaultWsBindPort)) & + wsFlag(wssEnabled) + ) + except CatchableError: + return err(getCurrentExceptionMsg()) + + var announcedAddresses = newSeq[MultiAddress]() + + if not extMultiAddrsOnly: + if hostExtAddress.isSome(): + announcedAddresses.add(hostExtAddress.get()) + else: + 
announcedAddresses.add(formatListenAddress(hostAddress)) + # We always have at least a bind address for the host + + if wsExtAddress.isSome(): + announcedAddresses.add(wsExtAddress.get()) + elif wsHostAddress.isSome() and not containsWsAddress(extMultiAddrs): + # Only publish wsHostAddress if a WS address is not set in extMultiAddrs + announcedAddresses.add(wsHostAddress.get()) + + # External multiaddrs that the operator may have configured + if extMultiAddrs.len > 0: + announcedAddresses.add(extMultiAddrs) + + let + # enrMultiaddrs are just addresses which cannot be represented in ENR, as described in + # https://rfc.vac.dev/spec/31/#many-connection-types + enrMultiaddrs = announcedAddresses.filterIt( + it.hasProtocol("dns4") or it.hasProtocol("dns6") or it.hasProtocol("ws") or + it.hasProtocol("wss") + ) + + ok( + NetConfig( + hostAddress: hostAddress, + clusterId: clusterId, + wsHostAddress: wsHostAddress, + hostExtAddress: hostExtAddress, + wsExtAddress: wsExtAddress, + extIp: extIp, + extPort: extPort, + wssEnabled: wssEnabled, + dns4DomainName: dns4DomainName, + dnsNameServers: dnsNameServers, + announcedAddresses: announcedAddresses, + extMultiAddrs: extMultiAddrs, + enrMultiaddrs: enrMultiaddrs, + enrIp: enrIp, + enrPort: enrPort, + discv5UdpPort: discv5UdpPort, + bindIp: bindIp, + bindPort: bindPort, + wakuFlags: wakuFlags, + ) + ) diff --git a/third-party/nwaku/waku/node/peer_manager.nim b/third-party/nwaku/waku/node/peer_manager.nim new file mode 100644 index 0000000..16cb06c --- /dev/null +++ b/third-party/nwaku/waku/node/peer_manager.nim @@ -0,0 +1,3 @@ +import ./peer_manager/peer_manager + +export peer_manager diff --git a/third-party/nwaku/waku/node/peer_manager/peer_manager.nim b/third-party/nwaku/waku/node/peer_manager/peer_manager.nim new file mode 100644 index 0000000..ca7b2f0 --- /dev/null +++ b/third-party/nwaku/waku/node/peer_manager/peer_manager.nim @@ -0,0 +1,1127 @@ +{.push raises: [].} + +import + std/[options, sets, sequtils, times, 
strformat, strutils, math, random, tables], + chronos, + chronicles, + metrics, + libp2p/multistream, + libp2p/muxers/muxer, + libp2p/nameresolving/nameresolver, + libp2p/peerstore + +import + ../../common/nimchronos, + ../../common/enr, + ../../common/callbacks, + ../../common/utils/parse_size_units, + ../../waku_core, + ../../waku_relay, + ../../waku_relay/protocol, + ../../waku_enr/sharding, + ../../waku_enr/capabilities, + ../../waku_metadata, + ../health_monitor/online_monitor, + ./peer_store/peer_storage, + ./waku_peer_store + +export waku_peer_store, peer_storage, peers + +declareCounter waku_peers_dials, "Number of peer dials", ["outcome"] +# TODO: Populate from PeerStore.Source when ready +declarePublicCounter waku_node_conns_initiated, + "Number of connections initiated", ["source"] +declarePublicCounter waku_peers_errors, "Number of peer manager errors", ["type"] +declarePublicGauge waku_connected_peers, + "Number of physical connections per direction and protocol", + labels = ["direction", "protocol"] +declarePublicGauge waku_connected_peers_per_shard, + "Number of physical connections per shard", labels = ["shard"] +declarePublicGauge waku_connected_peers_per_agent, + "Number of physical connections per agent", labels = ["agent"] +declarePublicGauge waku_streams_peers, + "Number of streams per direction and protocol", labels = ["direction", "protocol"] +declarePublicGauge waku_peer_store_size, "Number of peers managed by the peer store" +declarePublicGauge waku_service_peers, + "Service peer protocol and multiaddress ", labels = ["protocol", "peerId"] +declarePublicGauge waku_total_unique_peers, "total number of unique peers" + +logScope: + topics = "waku node peer_manager" + +randomize() + +const + # TODO: Make configurable + DefaultDialTimeout* = chronos.seconds(10) + + # Max attempts before removing the peer + MaxFailedAttempts = 5 + + # Time to wait before attempting to dial again is calculated as: + # 
initialBackoffInSec*(backoffFactor^(failedAttempts-1)) + # 120s, 480s, 1920, 7680s + InitialBackoffInSec = 120 + BackoffFactor = 4 + + # Limit the amount of paralel dials + MaxParallelDials = 10 + + # Delay between consecutive relayConnectivityLoop runs + ConnectivityLoopInterval = chronos.seconds(30) + + # How often the peer store is pruned + PrunePeerStoreInterval = chronos.minutes(10) + + # How often metrics and logs are shown/updated + LogAndMetricsInterval = chronos.minutes(5) + + # Max peers that we allow from the same IP + DefaultColocationLimit* = 5 + +type ConnectionChangeHandler* = proc( + peerId: PeerId, peerEvent: PeerEventKind +): Future[void] {.gcsafe, raises: [Defect].} + +type PeerManager* = ref object of RootObj + switch*: Switch + wakuMetadata*: WakuMetadata + initialBackoffInSec*: int + backoffFactor*: int + maxFailedAttempts*: int + storage*: PeerStorage + serviceSlots*: Table[string, RemotePeerInfo] + relayServiceRatio*: string + maxRelayPeers*: int + maxServicePeers*: int + outRelayPeersTarget: int + inRelayPeersTarget: int + ipTable*: Table[string, seq[PeerId]] + colocationLimit*: int + started: bool + shardedPeerManagement: bool # temp feature flag + onConnectionChange*: ConnectionChangeHandler + online: bool ## state managed by online_monitor module + getShards: GetShards + +#~~~~~~~~~~~~~~~~~~~# +# Helper Functions # +#~~~~~~~~~~~~~~~~~~~# + +proc calculateBackoff( + initialBackoffInSec: int, backoffFactor: int, failedAttempts: int +): timer.Duration = + if failedAttempts == 0: + return chronos.seconds(0) + return chronos.seconds(initialBackoffInSec * (backoffFactor ^ (failedAttempts - 1))) + +proc protocolMatcher*(codec: string): Matcher = + ## Returns a protocol matcher function for the provided codec + proc match(proto: string): bool {.gcsafe.} = + ## Matches a proto with any postfix to the provided codec. + ## E.g. 
if the codec is `/vac/waku/filter/2.0.0` it matches the protos: + ## `/vac/waku/filter/2.0.0`, `/vac/waku/filter/2.0.0-beta3`, `/vac/waku/filter/2.0.0-actualnonsense` + return proto.startsWith(codec) + + return match + +#~~~~~~~~~~~~~~~~~~~~~~~~~~# +# Peer Storage Management # +#~~~~~~~~~~~~~~~~~~~~~~~~~~# + +proc insertOrReplace(ps: PeerStorage, remotePeerInfo: RemotePeerInfo) {.gcsafe.} = + ## Insert peer entry into persistent storage, or replace existing entry with updated info + ps.put(remotePeerInfo).isOkOr: + warn "failed to store peers", err = error + waku_peers_errors.inc(labelValues = ["storage_failure"]) + return + +proc addPeer*( + pm: PeerManager, remotePeerInfo: RemotePeerInfo, origin = UnknownOrigin +) {.gcsafe.} = + ## Adds peer to manager for the specified protocol + + if remotePeerInfo.peerId == pm.switch.peerInfo.peerId: + trace "skipping to manage our unmanageable self" + return + + pm.switch.peerStore.addPeer(remotePeerInfo, origin) + + trace "Adding peer to manager", + peerId = remotePeerInfo.peerId, addresses = remotePeerInfo.addrs, origin + + waku_total_unique_peers.inc() + + # Add peer to storage. Entry will subsequently be updated with connectedness information + if not pm.storage.isNil: + # Reading from the db (pm.storage) is only done on startup, hence you need to connect to all saved peers. + # `remotePeerInfo.connectedness` should already be `NotConnected`, but both we reset it to `NotConnected` just in case. + # This reset is also done when reading from storage, I believe, to ensure the `connectedness` state is the correct one. + # So many resets are likely redudant, but I haven't verified whether this is the case or not. 
+ remotePeerInfo.connectedness = NotConnected + + pm.storage.insertOrReplace(remotePeerInfo) + +proc getPeer*(pm: PeerManager, peerId: PeerId): RemotePeerInfo = + return pm.switch.peerStore.getPeer(peerId) + +proc loadFromStorage(pm: PeerManager) {.gcsafe.} = + ## Load peers from storage, if available + + trace "loading peers from storage" + + var amount = 0 + + proc onData(remotePeerInfo: RemotePeerInfo) = + let peerId = remotePeerInfo.peerId + + if pm.switch.peerInfo.peerId == peerId: + # Do not manage self + return + + trace "loading peer", + peerId = peerId, + address = remotePeerInfo.addrs, + protocols = remotePeerInfo.protocols, + agent = remotePeerInfo.agent, + version = remotePeerInfo.protoVersion + + # nim-libp2p books + pm.switch.peerStore[AddressBook][peerId] = remotePeerInfo.addrs + pm.switch.peerStore[ProtoBook][peerId] = remotePeerInfo.protocols + pm.switch.peerStore[KeyBook][peerId] = remotePeerInfo.publicKey + pm.switch.peerStore[AgentBook][peerId] = remotePeerInfo.agent + pm.switch.peerStore[ProtoVersionBook][peerId] = remotePeerInfo.protoVersion + + # custom books + pm.switch.peerStore[ConnectionBook][peerId] = NotConnected + # Reset connectedness state + pm.switch.peerStore[DisconnectBook][peerId] = remotePeerInfo.disconnectTime + pm.switch.peerStore[SourceBook][peerId] = remotePeerInfo.origin + + if remotePeerInfo.enr.isSome(): + pm.switch.peerStore[ENRBook][peerId] = remotePeerInfo.enr.get() + + amount.inc() + + pm.storage.getAll(onData).isOkOr: + warn "loading peers from storage failed", err = error + waku_peers_errors.inc(labelValues = ["storage_load_failure"]) + return + + trace "recovered peers from storage", amount = amount + +proc selectPeer*( + pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic) +): Option[RemotePeerInfo] = + # Selects the best peer for a given protocol + + var peers = pm.switch.peerStore.getPeersByProtocol(proto) + trace "Selecting peer from peerstore", + protocol = proto, peers, address = 
cast[uint](pm.switch.peerStore) + + if shard.isSome(): + peers.keepItIf((it.enr.isSome() and it.enr.get().containsShard(shard.get()))) + + shuffle(peers) + + # No criteria for selecting a peer for WakuRelay, random one + if proto == WakuRelayCodec: + # TODO: proper heuristic here that compares peer scores and selects "best" one. For now the first peer for the given protocol is returned + if peers.len > 0: + trace "Got peer from peerstore", + peerId = peers[0].peerId, multi = peers[0].addrs[0], protocol = proto + return some(peers[0]) + trace "No peer found for protocol", protocol = proto + return none(RemotePeerInfo) + + # For other protocols, we select the peer that is slotted for the given protocol + pm.serviceSlots.withValue(proto, serviceSlot): + trace "Got peer from service slots", + peerId = serviceSlot[].peerId, multi = serviceSlot[].addrs[0], protocol = proto + return some(serviceSlot[]) + + # If not slotted, we select a random peer for the given protocol + if peers.len > 0: + trace "Got peer from peerstore", + peerId = peers[0].peerId, multi = peers[0].addrs[0], protocol = proto + return some(peers[0]) + trace "No peer found for protocol", protocol = proto + return none(RemotePeerInfo) + +# Adds a peer to the service slots, which is a list of peers that are slotted for a given protocol +proc addServicePeer*(pm: PeerManager, remotePeerInfo: RemotePeerInfo, proto: string) = + # Do not add relay peers + if proto == WakuRelayCodec: + warn "Can't add relay peer to service peers slots" + return + + # Check if the number of service peers has reached the maximum limit + if pm.serviceSlots.len >= pm.maxServicePeers: + warn "Maximum number of service peers reached. 
Cannot add more.", + peerId = remotePeerInfo.peerId, service = proto + return + + info "Adding peer to service slots", + peerId = remotePeerInfo.peerId, addr = remotePeerInfo.addrs[0], service = proto + waku_service_peers.set(1, labelValues = [$proto, $remotePeerInfo.addrs[0]]) + + # Set peer for service slot + pm.serviceSlots[proto] = remotePeerInfo + + pm.addPeer(remotePeerInfo) + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# +# Connection Lifecycle Management # +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# + +# require pre-connection +proc pruneInRelayConns(pm: PeerManager, amount: int) {.async.} + +# Connects to a given node. Note that this function uses `connect` and +# does not provide a protocol. Streams for relay (gossipsub) are created +# automatically without the needing to dial. +proc connectPeer*( + pm: PeerManager, + peer: RemotePeerInfo, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[bool] {.async.} = + let peerId = peer.peerId + + var peerStore = pm.switch.peerStore + + # Do not attempt to dial self + if peerId == pm.switch.peerInfo.peerId: + return false + + if not peerStore.peerExists(peerId): + pm.addPeer(peer) + + let failedAttempts = peerStore[NumberFailedConnBook][peerId] + trace "Connecting to peer", + wireAddr = peer.addrs, peerId = peerId, failedAttempts = failedAttempts + + var deadline = sleepAsync(dialTimeout) + let workfut = pm.switch.connect(peerId, peer.addrs) + + # Can't use catch: with .withTimeout() in this case + let res = catch: + await workfut or deadline + + let reasonFailed = + if not workfut.finished(): + await workfut.cancelAndWait() + "timed out" + elif res.isErr(): + res.error.msg + else: + if not deadline.finished(): + await deadline.cancelAndWait() + + waku_peers_dials.inc(labelValues = ["successful"]) + waku_node_conns_initiated.inc(labelValues = [source]) + + peerStore[NumberFailedConnBook][peerId] = 0 + + return true + + # Dial failed + peerStore[NumberFailedConnBook][peerId] = peerStore[NumberFailedConnBook][peerId] + 
1 + peerStore[LastFailedConnBook][peerId] = Moment.init(getTime().toUnix, Second) + peerStore[ConnectionBook][peerId] = CannotConnect + + trace "Connecting peer failed", + peerId = peerId, + reason = reasonFailed, + failedAttempts = peerStore[NumberFailedConnBook][peerId] + waku_peers_dials.inc(labelValues = [reasonFailed]) + + return false + +proc connectToNodes*( + pm: PeerManager, + nodes: seq[string] | seq[RemotePeerInfo], + dialTimeout = DefaultDialTimeout, + source = "api", +) {.async.} = + if nodes.len == 0: + return + + info "Dialing multiple peers", numOfPeers = nodes.len, nodes = $nodes + + var futConns: seq[Future[bool]] + var connectedPeers: seq[RemotePeerInfo] + for node in nodes: + let node = parsePeerInfo(node) + if node.isOk(): + futConns.add(pm.connectPeer(node.value)) + connectedPeers.add(node.value) + else: + error "Couldn't parse node info", error = node.error + + await allFutures(futConns) + + # Filtering successful connectedPeers based on futConns + let combined = zip(connectedPeers, futConns) + connectedPeers = combined.filterIt(it[1].read() == true).mapIt(it[0]) + + when defined(debugDiscv5): + let peerIds = connectedPeers.mapIt(it.peerId) + let origin = connectedPeers.mapIt(it.origin) + if peerIds.len > 0: + notice "established connections with found peers", + peerIds = peerIds.mapIt(shortLog(it)), origin = origin + else: + notice "could not connect to new peers", attempted = nodes.len + + info "Finished dialing multiple peers", + successfulConns = connectedPeers.len, attempted = nodes.len + +proc disconnectNode*(pm: PeerManager, peerId: PeerId) {.async.} = + await pm.switch.disconnect(peerId) + +proc disconnectNode*(pm: PeerManager, peer: RemotePeerInfo) {.async.} = + let peerId = peer.peerId + await pm.disconnectNode(peerId) + +# Dialing should be used for just protocols that require a stream to write and read +# This shall not be used to dial Relay protocols, since that would create +# unneccesary unused streams. 
+proc dialPeer( + pm: PeerManager, + peerId: PeerID, + addrs: seq[MultiAddress], + proto: string, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[Option[Connection]] {.async.} = + if peerId == pm.switch.peerInfo.peerId: + error "could not dial self" + return none(Connection) + + if proto == WakuRelayCodec: + error "dial shall not be used to connect to relays" + return none(Connection) + + trace "Dialing peer", wireAddr = addrs, peerId = peerId, proto = proto + + # Dial Peer + let dialFut = pm.switch.dial(peerId, addrs, proto) + + let res = catch: + if await dialFut.withTimeout(dialTimeout): + return some(dialFut.read()) + else: + await cancelAndWait(dialFut) + + let reasonFailed = if res.isOk: "timed out" else: res.error.msg + + trace "Dialing peer failed", peerId = peerId, reason = reasonFailed, proto = proto + + return none(Connection) + +proc dialPeer*( + pm: PeerManager, + remotePeerInfo: RemotePeerInfo, + proto: string, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[Option[Connection]] {.async.} = + # Dial a given peer and add it to the list of known peers + # TODO: check peer validity and score before continuing. Limit number of peers to be managed. + + # First add dialed peer info to peer store, if it does not exist yet.. + # TODO: nim libp2p peerstore already adds them + if not pm.switch.peerStore.hasPeer(remotePeerInfo.peerId, proto): + trace "Adding newly dialed peer to manager", + peerId = $remotePeerInfo.peerId, address = $remotePeerInfo.addrs[0], proto = proto + pm.addPeer(remotePeerInfo) + + return await pm.dialPeer( + remotePeerInfo.peerId, remotePeerInfo.addrs, proto, dialTimeout, source + ) + +proc dialPeer*( + pm: PeerManager, + peerId: PeerID, + proto: string, + dialTimeout = DefaultDialTimeout, + source = "api", +): Future[Option[Connection]] {.async.} = + # Dial an existing peer by looking up it's existing addrs in the switch's peerStore + # TODO: check peer validity and score before continuing. 
Limit number of peers to be managed. + + let addrs = pm.switch.peerStore[AddressBook][peerId] + return await pm.dialPeer(peerId, addrs, proto, dialTimeout, source) + +proc canBeConnected*(pm: PeerManager, peerId: PeerId): bool = + # Returns if we can try to connect to this peer, based on past failed attempts + # It uses an exponential backoff. Each connection attempt makes us + # wait more before trying again. + let peerStore = pm.switch.peerStore + let failedAttempts = peerStore[NumberFailedConnBook][peerId] + + # if it never errored, we can try to connect + if failedAttempts == 0: + return true + + # if there are too many failed attempts, do not reconnect + if failedAttempts >= pm.maxFailedAttempts: + return false + + # If it errored we wait an exponential backoff from last connection + # the more failed attempts, the greater the backoff since last attempt + let now = Moment.init(getTime().toUnix, Second) + let lastFailed = peerStore[LastFailedConnBook][peerId] + let backoff = + calculateBackoff(pm.initialBackoffInSec, pm.backoffFactor, failedAttempts) + + return now >= (lastFailed + backoff) + +proc connectedPeers*( + pm: PeerManager, protocol: string = "" +): (seq[PeerId], seq[PeerId]) = + ## Returns the peerIds of physical connections (in and out) + ## If a protocol is specified, only returns peers with at least one stream of that protocol + + var inPeers: seq[PeerId] + var outPeers: seq[PeerId] + + for peerId, muxers in pm.switch.connManager.getConnections(): + for peerConn in muxers: + let streams = peerConn.getStreams() + if protocol.len == 0 or streams.anyIt(it.protocol == protocol): + if peerConn.connection.transportDir == Direction.In: + inPeers.add(peerId) + elif peerConn.connection.transportDir == Direction.Out: + outPeers.add(peerId) + + return (inPeers, outPeers) + +proc disconnectAllPeers*(pm: PeerManager) {.async.} = + let (inPeerIds, outPeerIds) = pm.connectedPeers() + let connectedPeers = concat(inPeerIds, outPeerIds) + + let futs = 
connectedPeers.mapIt(pm.disconnectNode(it)) + await allFutures(futs) + +proc getStreamByPeerIdAndProtocol*( + pm: PeerManager, peerId: PeerId, protocol: string +): Future[Result[Connection, string]] {.async.} = + ## Establishes a new stream to the given peer and protocol or returns the existing stream, if any. + ## Notice that the "Connection" type represents a stream within a transport connection + ## (we will need to adapt this term.) + + let peerIdsMuxers: Table[PeerId, seq[Muxer]] = pm.switch.connManager.getConnections() + if not peerIdsMuxers.contains(peerId): + return err("peerId not found in connManager: " & $peerId) + + let muxers = peerIdsMuxers[peerId] + + var streams = newSeq[Connection](0) + for m in muxers: + for s in m.getStreams(): + ## getStreams is defined in nim-libp2p + streams.add(s) + + ## Try to get the opened streams for the given protocol + let streamsOfInterest = streams.filterIt( + it.protocol == protocol and not LPStream(it).isClosed and + not LPStream(it).isClosedRemotely + ) + + if streamsOfInterest.len > 0: + ## In theory there should be one stream per protocol. Then we just pick up the 1st + return ok(streamsOfInterest[0]) + + ## There isn't still a stream. 
Let's dial to create one + let streamRes = await pm.dialPeer(peerId, protocol) + if streamRes.isNone(): + return err("getStreamByPeerIdProto no connection to peer: " & $peerId) + + return ok(streamRes.get()) + +proc connectToRelayPeers*(pm: PeerManager) {.async.} = + # only attempt if current node is online + if not pm.online: + error "connectToRelayPeers: won't attempt new connections - node is offline" + return + + var (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) + let totalRelayPeers = inRelayPeers.len + outRelayPeers.len + + if inRelayPeers.len > pm.inRelayPeersTarget: + await pm.pruneInRelayConns(inRelayPeers.len - pm.inRelayPeersTarget) + + if outRelayPeers.len >= pm.outRelayPeersTarget: + return + + let notConnectedPeers = pm.switch.peerStore.getDisconnectedPeers() + + var outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId)) + + shuffle(outsideBackoffPeers) + + var index = 0 + var numPendingConnReqs = + min(outsideBackoffPeers.len, pm.outRelayPeersTarget - outRelayPeers.len) + ## number of outstanding connection requests + + while numPendingConnReqs > 0 and outRelayPeers.len < pm.outRelayPeersTarget: + let numPeersToConnect = min(numPendingConnReqs, MaxParallelDials) + await pm.connectToNodes(outsideBackoffPeers[index ..< (index + numPeersToConnect)]) + + (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) + + index += numPeersToConnect + numPendingConnReqs -= numPeersToConnect + +proc reconnectPeers*( + pm: PeerManager, proto: string, backoffTime: chronos.Duration = chronos.seconds(0) +) {.async.} = + ## Reconnect to peers registered for this protocol. This will update connectedness. + ## Especially useful to resume connections from persistent storage after a restart. + + debug "Reconnecting peers", proto = proto + + # Proto is not persisted, we need to iterate over all peers. 
+ for peerInfo in pm.switch.peerStore.peers(protocolMatcher(proto)): + # Check that the peer can be connected + if peerInfo.connectedness == CannotConnect: + error "Not reconnecting to unreachable or non-existing peer", + peerId = peerInfo.peerId + continue + + if backoffTime > ZeroDuration: + debug "Backing off before reconnect", + peerId = peerInfo.peerId, backoffTime = backoffTime + # We disconnected recently and still need to wait for a backoff period before connecting + await sleepAsync(backoffTime) + + await pm.connectToNodes(@[peerInfo]) + +proc getNumStreams*(pm: PeerManager, protocol: string): (int, int) = + var + numStreamsIn = 0 + numStreamsOut = 0 + for peerId, muxers in pm.switch.connManager.getConnections(): + for peerConn in muxers: + for stream in peerConn.getStreams(): + if stream.protocol == protocol: + if stream.dir == Direction.In: + numStreamsIn += 1 + elif stream.dir == Direction.Out: + numStreamsOut += 1 + return (numStreamsIn, numStreamsOut) + +proc getPeerIp(pm: PeerManager, peerId: PeerId): Option[string] = + if not pm.switch.connManager.getConnections().hasKey(peerId): + return none(string) + + let conns = pm.switch.connManager.getConnections().getOrDefault(peerId) + if conns.len == 0: + return none(string) + + let obAddr = conns[0].connection.observedAddr.valueOr: + return none(string) + + # TODO: think if circuit relay ips should be handled differently + + return some(obAddr.getHostname()) + +#~~~~~~~~~~~~~~~~~# +# Event Handling # +#~~~~~~~~~~~~~~~~~# + +proc onPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} = + let res = catch: + await pm.switch.dial(peerId, WakuMetadataCodec) + + var reason: string + block guardClauses: + let conn = res.valueOr: + reason = "dial failed: " & error.msg + break guardClauses + + let metadata = (await pm.wakuMetadata.request(conn)).valueOr: + reason = "waku metatdata request failed: " & error + break guardClauses + + let clusterId = metadata.clusterId.valueOr: + reason = "empty cluster-id 
reported" + break guardClauses + + if pm.wakuMetadata.clusterId != clusterId: + reason = + "different clusterId reported: " & $pm.wakuMetadata.clusterId & " vs " & + $clusterId + break guardClauses + + return + + info "disconnecting from peer", peerId = peerId, reason = reason + asyncSpawn(pm.switch.disconnect(peerId)) + pm.switch.peerStore.delete(peerId) + +# called when a peer i) first connects to us ii) disconnects all connections from us +proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} = + if not pm.wakuMetadata.isNil() and event.kind == PeerEventKind.Joined: + await pm.onPeerMetadata(peerId) + + var peerStore = pm.switch.peerStore + var direction: PeerDirection + var connectedness: Connectedness + + case event.kind + of Joined: + direction = if event.initiator: Outbound else: Inbound + connectedness = Connected + + ## Check max allowed in-relay peers + let inRelayPeers = pm.connectedPeers(WakuRelayCodec)[0] + if inRelayPeers.len > pm.inRelayPeersTarget and + peerStore.hasPeer(peerId, WakuRelayCodec): + debug "disconnecting relay peer because reached max num in-relay peers", + peerId = peerId, + inRelayPeers = inRelayPeers.len, + inRelayPeersTarget = pm.inRelayPeersTarget + await pm.switch.disconnect(peerId) + + ## Apply max ip colocation limit + if (let ip = pm.getPeerIp(peerId); ip.isSome()): + pm.ipTable.mgetOrPut(ip.get, newSeq[PeerId]()).add(peerId) + + # in theory this should always be one, but just in case + let peersBehindIp = pm.ipTable[ip.get] + + # pm.colocationLimit == 0 disables the ip colocation limit + if pm.colocationLimit != 0 and peersBehindIp.len > pm.colocationLimit: + for peerId in peersBehindIp[0 ..< (peersBehindIp.len - pm.colocationLimit)]: + debug "Pruning connection due to ip colocation", peerId = peerId, ip = ip + asyncSpawn(pm.switch.disconnect(peerId)) + peerStore.delete(peerId) + + if not pm.onConnectionChange.isNil(): + # we don't want to await for the callback to finish + asyncSpawn 
pm.onConnectionChange(peerId, Joined) + of Left: + direction = UnknownDirection + connectedness = CanConnect + + # note we cant access the peerId ip here as the connection was already closed + for ip, peerIds in pm.ipTable.pairs: + if peerIds.contains(peerId): + pm.ipTable[ip] = pm.ipTable[ip].filterIt(it != peerId) + if pm.ipTable[ip].len == 0: + pm.ipTable.del(ip) + break + + if not pm.onConnectionChange.isNil(): + # we don't want to await for the callback to finish + asyncSpawn pm.onConnectionChange(peerId, Left) + of Identified: + debug "event identified", peerId = peerId + + peerStore[ConnectionBook][peerId] = connectedness + peerStore[DirectionBook][peerId] = direction + + if not pm.storage.isNil: + var remotePeerInfo = peerStore.getPeer(peerId) + + if event.kind == PeerEventKind.Left: + remotePeerInfo.disconnectTime = getTime().toUnix + + pm.storage.insertOrReplace(remotePeerInfo) + +#~~~~~~~~~~~~~~~~~# +# Metrics Logging # +#~~~~~~~~~~~~~~~~~# + +proc logAndMetrics(pm: PeerManager) {.async.} = + heartbeat "Scheduling log and metrics run", LogAndMetricsInterval: + var peerStore = pm.switch.peerStore + # log metrics + let (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) + let maxConnections = pm.switch.connManager.inSema.size + let notConnectedPeers = + peerStore.getDisconnectedPeers().mapIt(RemotePeerInfo.init(it.peerId, it.addrs)) + let outsideBackoffPeers = notConnectedPeers.filterIt(pm.canBeConnected(it.peerId)) + let connections = pm.switch.connManager.getConnections() + let totalConnections = connections.len + + info "Relay peer connections", + inRelayConns = $inRelayPeers.len & "/" & $pm.inRelayPeersTarget, + outRelayConns = $outRelayPeers.len & "/" & $pm.outRelayPeersTarget, + totalConnections = $totalConnections & "/" & $maxConnections, + notConnectedPeers = notConnectedPeers.len, + outsideBackoffPeers = outsideBackoffPeers.len + + # update prometheus metrics + for proto in peerStore.getWakuProtos(): + let (protoConnsIn, 
protoConnsOut) = pm.connectedPeers(proto) + let (protoStreamsIn, protoStreamsOut) = pm.getNumStreams(proto) + waku_connected_peers.set( + protoConnsIn.len.float64, labelValues = [$Direction.In, proto] + ) + waku_connected_peers.set( + protoConnsOut.len.float64, labelValues = [$Direction.Out, proto] + ) + waku_streams_peers.set( + protoStreamsIn.float64, labelValues = [$Direction.In, proto] + ) + waku_streams_peers.set( + protoStreamsOut.float64, labelValues = [$Direction.Out, proto] + ) + + var agentCounts = initTable[string, int]() + var connectedPeerIds: HashSet[PeerId] + for peerId, muxers in connections: + connectedPeerIds.incl(peerId) + if peerStore[AgentBook].contains(peerId): + let agent = peerStore[AgentBook][peerId] + agentCounts[agent] = agentCounts.getOrDefault(agent, 0) + 1 + for agent, count in agentCounts: + waku_connected_peers_per_agent.set(count.float64, labelValues = [$agent]) + + for shard in pm.getShards().items: + # peers known for this shard + let shardPeers = + peerStore.getPeersByShard(uint16(pm.wakuMetadata.clusterId), shard) + + # keep only those that are physically connected right now + let connectedInShard = shardPeers.filterIt(connectedPeerIds.contains(it.peerId)) + + waku_connected_peers_per_shard.set( + connectedInShard.len.float64, labelValues = [$shard] + ) + +proc getOnlineStateObserver*(pm: PeerManager): OnOnlineStateChange = + return proc(online: bool) {.gcsafe, raises: [].} = + pm.online = online + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# +# Pruning and Maintenance (Stale Peers Management) # +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# + +proc manageRelayPeers*(pm: PeerManager) {.async.} = + let shardsCount = pm.getShards().len + #TODO: this check should not be based on whether shards are present, but rather if relay is mounted + if shardsCount == 0: + return + + if not pm.online: + error "manageRelayPeers: won't attempt new connections - node is offline" + return + + var peersToConnect: 
HashSet[PeerId] # Can't use RemotePeerInfo as they are ref objects + var peersToDisconnect: int + + # Get all connected peers for Waku Relay + var (inPeers, outPeers) = pm.connectedPeers(WakuRelayCodec) + + # Calculate in/out target number of peers for each shards + let inTarget = pm.inRelayPeersTarget div shardsCount + let outTarget = pm.outRelayPeersTarget div shardsCount + + var peerStore = pm.switch.peerStore + + for shard in pm.getShards().items: + # Filter out peer not on this shard + let connectedInPeers = + inPeers.filterIt(peerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), shard)) + + let connectedOutPeers = outPeers.filterIt( + peerStore.hasShard(it, uint16(pm.wakuMetadata.clusterId), shard) + ) + + # Calculate the difference between current values and targets + let inPeerDiff = connectedInPeers.len - inTarget + let outPeerDiff = outTarget - connectedOutPeers.len + + if inPeerDiff > 0: + peersToDisconnect += inPeerDiff + + if outPeerDiff <= 0: + continue + + # Get all peers for this shard + var connectablePeers = + peerStore.getPeersByShard(uint16(pm.wakuMetadata.clusterId), uint16(shard)) + + let shardCount = connectablePeers.len + + connectablePeers.keepItIf( + not peerStore.isConnected(it.peerId) and pm.canBeConnected(it.peerId) + ) + + let connectableCount = connectablePeers.len + + connectablePeers.keepItIf(peerStore.hasCapability(it.peerId, Relay)) + + let relayCount = connectablePeers.len + + debug "Sharded Peer Management", + shard = shard, + connectable = $connectableCount & "/" & $shardCount, + relayConnectable = $relayCount & "/" & $shardCount, + relayInboundTarget = $connectedInPeers.len & "/" & $inTarget, + relayOutboundTarget = $connectedOutPeers.len & "/" & $outTarget + + # Always pick random connectable relay peers + shuffle(connectablePeers) + + let length = min(outPeerDiff, connectablePeers.len) + for peer in connectablePeers[0 ..< length]: + trace "Peer To Connect To", peerId = $peer.peerId + peersToConnect.incl(peer.peerId) + + 
await pm.pruneInRelayConns(peersToDisconnect) + + if peersToConnect.len == 0: + return + + let uniquePeers = toSeq(peersToConnect).mapIt(peerStore.getPeer(it)) + + # Connect to all nodes + for i in countup(0, uniquePeers.len, MaxParallelDials): + let stop = min(i + MaxParallelDials, uniquePeers.len) + trace "Connecting to Peers", peerIds = $uniquePeers[i ..< stop] + await pm.connectToNodes(uniquePeers[i ..< stop]) + +proc prunePeerStore*(pm: PeerManager) = + let peerStore = pm.switch.peerStore + let numPeers = peerStore[AddressBook].book.len + let capacity = peerStore.getCapacity() + if numPeers <= capacity: + return + + trace "Peer store capacity exceeded", numPeers = numPeers, capacity = capacity + let pruningCount = numPeers - capacity + var peersToPrune: HashSet[PeerId] + + # prune failed connections + for peerId, count in peerStore[NumberFailedConnBook].book.pairs: + if count < pm.maxFailedAttempts: + continue + + if peersToPrune.len >= pruningCount: + break + + peersToPrune.incl(peerId) + + var notConnected = peerStore.getDisconnectedPeers().mapIt(it.peerId) + + # Always pick random non-connected peers + shuffle(notConnected) + + var shardlessPeers: seq[PeerId] + var peersByShard = initTable[uint16, seq[PeerId]]() + + for peer in notConnected: + if not peerStore[ENRBook].contains(peer): + shardlessPeers.add(peer) + continue + + let record = peerStore[ENRBook][peer] + + let rec = record.toTyped().valueOr: + shardlessPeers.add(peer) + continue + + let rs = rec.relaySharding().valueOr: + shardlessPeers.add(peer) + continue + + for shard in rs.shardIds: + peersByShard.mgetOrPut(shard, @[]).add(peer) + + # prune not connected peers without shard + for peer in shardlessPeers: + if peersToPrune.len >= pruningCount: + break + + peersToPrune.incl(peer) + + # calculate the avg peers per shard + let total = sum(toSeq(peersByShard.values).mapIt(it.len)) + let avg = min(1, total div max(1, peersByShard.len)) + + # prune peers from shard with higher than avg count + for 
shard, peers in peersByShard.pairs: + let count = max(peers.len - avg, 0) + for peer in peers[0 .. count]: + if peersToPrune.len >= pruningCount: + break + + peersToPrune.incl(peer) + + for peer in peersToPrune: + peerStore.delete(peer) + + let afterNumPeers = peerStore[AddressBook].book.len + + trace "Finished pruning peer store", + beforeNumPeers = numPeers, + afterNumPeers = afterNumPeers, + capacity = capacity, + pruned = peersToPrune.len + +# Prunes peers from peerstore to remove old/stale ones +proc prunePeerStoreLoop(pm: PeerManager) {.async.} = + trace "Starting prune peerstore loop" + while pm.started: + pm.prunePeerStore() + await sleepAsync(PrunePeerStoreInterval) + +# Ensures a healthy amount of connected relay peers +proc relayConnectivityLoop*(pm: PeerManager) {.async.} = + trace "Starting relay connectivity loop" + while pm.started: + if pm.shardedPeerManagement: + await pm.manageRelayPeers() + else: + await pm.connectToRelayPeers() + let + (inRelayPeers, outRelayPeers) = pm.connectedPeers(WakuRelayCodec) + excessInConns = max(inRelayPeers.len - pm.inRelayPeersTarget, 0) + + # One minus the percentage of excess connections relative to the target, limited to 100% + # We calculate one minus this percentage because we want the factor to be inversely proportional to the number of excess peers + inFactor = 1 - min(excessInConns / pm.inRelayPeersTarget, 1) + # Percentage of out relay peers relative to the target + outFactor = min(outRelayPeers.len / pm.outRelayPeersTarget, 1) + factor = min(outFactor, inFactor) + dynamicSleepInterval = + chronos.seconds(int(float(ConnectivityLoopInterval.seconds()) * factor)) + + # Shorten the connectivity loop interval dynamically based on percentage of peers to fill or connections to prune + await sleepAsync(max(dynamicSleepInterval, chronos.seconds(1))) + +proc pruneInRelayConns(pm: PeerManager, amount: int) {.async.} = + if amount <= 0: + return + + let (inRelayPeers, _) = pm.connectedPeers(WakuRelayCodec) + let 
connsToPrune = min(amount, inRelayPeers.len) + + for p in inRelayPeers[0 ..< connsToPrune]: + trace "Pruning Peer", Peer = $p + asyncSpawn(pm.switch.disconnect(p)) + +proc addExtPeerEventHandler*( + pm: PeerManager, eventHandler: PeerEventHandler, eventKind: PeerEventKind +) = + pm.switch.addPeerEventHandler(eventHandler, eventKind) + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# +# Initialization and Constructor # +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# + +proc setShardGetter*(pm: PeerManager, c: GetShards) = + pm.getShards = c + +proc start*(pm: PeerManager) = + pm.started = true + asyncSpawn pm.relayConnectivityLoop() + asyncSpawn pm.prunePeerStoreLoop() + asyncSpawn pm.logAndMetrics() + +proc stop*(pm: PeerManager) = + pm.started = false + +proc new*( + T: type PeerManager, + switch: Switch, + wakuMetadata: WakuMetadata = nil, + maxRelayPeers: Option[int] = none(int), + maxServicePeers: Option[int] = none(int), + relayServiceRatio: string = "60:40", + storage: PeerStorage = nil, + initialBackoffInSec = InitialBackoffInSec, + backoffFactor = BackoffFactor, + maxFailedAttempts = MaxFailedAttempts, + colocationLimit = DefaultColocationLimit, + shardedPeerManagement = false, +): PeerManager {.gcsafe.} = + let capacity = switch.peerStore.capacity + let maxConnections = switch.connManager.inSema.size + if maxConnections > capacity: + error "Max number of connections can't be greater than PeerManager capacity", + capacity = capacity, maxConnections = maxConnections + raise newException( + Defect, "Max number of connections can't be greater than PeerManager capacity" + ) + + var relayRatio: float64 + var serviceRatio: float64 + (relayRatio, serviceRatio) = parseRelayServiceRatio(relayServiceRatio).get() + + var relayPeers = int(ceil(float(maxConnections) * relayRatio)) + var servicePeers = int(floor(float(maxConnections) * serviceRatio)) + + let minRelayPeers = WakuRelay.getDHigh() + + if relayPeers < minRelayPeers: + let errorMsg = + fmt"""Doesn't fulfill minimum criteria for relay 
(which increases the chance of the node becoming isolated.) + relayPeers: {relayPeers}, should be greater or equal than minRelayPeers: {minRelayPeers} + relayServiceRatio: {relayServiceRatio} + maxConnections: {maxConnections}""" + error "Wrong relay peers config", error = errorMsg + return + + let outRelayPeersTarget = relayPeers div 3 + let inRelayPeersTarget = relayPeers - outRelayPeersTarget + + # attempt to calculate max backoff to prevent potential overflows or unreasonably high values + let backoff = calculateBackoff(initialBackoffInSec, backoffFactor, maxFailedAttempts) + if backoff.weeks() > 1: + error "Max backoff time can't be over 1 week", maxBackoff = backoff + raise newException(Defect, "Max backoff time can't be over 1 week") + + let pm = PeerManager( + switch: switch, + wakuMetadata: wakuMetadata, + storage: storage, + initialBackoffInSec: initialBackoffInSec, + backoffFactor: backoffFactor, + maxRelayPeers: relayPeers, + maxServicePeers: servicePeers, + outRelayPeersTarget: outRelayPeersTarget, + inRelayPeersTarget: inRelayPeersTarget, + maxFailedAttempts: maxFailedAttempts, + colocationLimit: colocationLimit, + shardedPeerManagement: shardedPeerManagement, + online: true, + ) + + proc peerHook( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = + try: + await onPeerEvent(pm, peerId, event) + except CatchableError: + error "exception in onPeerEvent", error = getCurrentExceptionMsg() + + var peerStore = pm.switch.peerStore + + proc peerStoreChanged(peerId: PeerId) {.gcsafe.} = + waku_peer_store_size.set(toSeq(peerStore[AddressBook].book.keys).len.int64) + + pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Joined) + pm.switch.addPeerEventHandler(peerHook, PeerEventKind.Left) + + # called every time the peerstore is updated + peerStore[AddressBook].addHandler(peerStoreChanged) + + pm.serviceSlots = initTable[string, RemotePeerInfo]() + pm.ipTable = initTable[string, seq[PeerId]]() + + if not 
storage.isNil(): + trace "found persistent peer storage" + pm.loadFromStorage() # Load previously managed peers. + else: + trace "no peer storage found" + + return pm diff --git a/third-party/nwaku/waku/node/peer_manager/peer_store/migrations.nim b/third-party/nwaku/waku/node/peer_manager/peer_store/migrations.nim new file mode 100644 index 0000000..cd43a3d --- /dev/null +++ b/third-party/nwaku/waku/node/peer_manager/peer_store/migrations.nim @@ -0,0 +1,33 @@ +{.push raises: [].} + +import std/[tables, strutils, os], results, chronicles +import ../../../common/databases/db_sqlite, ../../../common/databases/common + +logScope: + topics = "waku node peer_manager" + +const SchemaVersion* = 1 # increase this when there is an update in the database schema + +template projectRoot(): string = + currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".." / ".." + +const PeerStoreMigrationPath: string = projectRoot / "migrations" / "peer_store" + +proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult[void] = + ## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then + ## it runs migration scripts if the `user_version` is outdated. The `migrationScriptsDir` path + ## points to the directory holding the migrations scripts once the db is updated, it sets the + ## `user_version` to the `tragetVersion`. + ## + ## If not `targetVersion` is provided, it defaults to `SchemaVersion`. 
+ ## + ## NOTE: Down migration it is not currently supported + debug "starting peer store's sqlite database migration" + + let migrationRes = + migrate(db, targetVersion, migrationsScriptsDir = PeerStoreMigrationPath) + if migrationRes.isErr(): + return err("failed to execute migration scripts: " & migrationRes.error) + + debug "finished peer store's sqlite database migration" + ok() diff --git a/third-party/nwaku/waku/node/peer_manager/peer_store/peer_storage.nim b/third-party/nwaku/waku/node/peer_manager/peer_store/peer_storage.nim new file mode 100644 index 0000000..2aa1eae --- /dev/null +++ b/third-party/nwaku/waku/node/peer_manager/peer_store/peer_storage.nim @@ -0,0 +1,25 @@ +{.push raises: [].} + +import results +import ../../../waku_core, ../waku_peer_store + +## This module defines a peer storage interface. Implementations of +## PeerStorage are used to store and retrieve peers + +type + PeerStorage* = ref object of RootObj + + PeerStorageResult*[T] = Result[T, string] + + DataProc* = proc(remotePeerInfo: RemotePeerInfo) {.closure, gcsafe, raises: [Defect].} + +# PeerStorage interface +method put*( + db: PeerStorage, remotePeerInfo: RemotePeerInfo +): PeerStorageResult[void] {.base, gcsafe.} = + return err("Unimplemented") + +method getAll*( + db: PeerStorage, onData: DataProc +): PeerStorageResult[void] {.base, gcsafe.} = + return err("Unimplemented") diff --git a/third-party/nwaku/waku/node/peer_manager/peer_store/waku_peer_storage.nim b/third-party/nwaku/waku/node/peer_manager/peer_store/waku_peer_storage.nim new file mode 100644 index 0000000..876e8e2 --- /dev/null +++ b/third-party/nwaku/waku/node/peer_manager/peer_store/waku_peer_storage.nim @@ -0,0 +1,172 @@ +{.push raises: [].} + +import + std/[sets, options], + results, + sqlite3_abi, + eth/p2p/discoveryv5/enr, + libp2p/protobuf/minprotobuf +import + ../../../common/databases/db_sqlite, + ../../../waku_core, + ../waku_peer_store, + ./peer_storage + +export db_sqlite + +type WakuPeerStorage* = ref 
object of PeerStorage + database*: SqliteDatabase + replaceStmt: SqliteStmt[(seq[byte], seq[byte]), void] + +########################## +# Protobuf Serialisation # +########################## + +proc decode*(T: type RemotePeerInfo, buffer: seq[byte]): ProtoResult[T] = + var + multiaddrSeq: seq[MultiAddress] + protoSeq: seq[string] + storedInfo = RemotePeerInfo() + rlpBytes: seq[byte] + connectedness: uint32 + disconnectTime: uint64 + + var pb = initProtoBuffer(buffer) + + discard ?pb.getField(1, storedInfo.peerId) + discard ?pb.getRepeatedField(2, multiaddrSeq) + discard ?pb.getRepeatedField(3, protoSeq) + discard ?pb.getField(4, storedInfo.publicKey) + discard ?pb.getField(5, connectedness) + discard ?pb.getField(6, disconnectTime) + let hasENR = ?pb.getField(7, rlpBytes) + + storedInfo.addrs = multiaddrSeq + storedInfo.protocols = protoSeq + storedInfo.connectedness = Connectedness(connectedness) + storedInfo.disconnectTime = int64(disconnectTime) + + if hasENR: + var record: Record + + if record.fromBytes(rlpBytes): + storedInfo.enr = some(record) + + ok(storedInfo) + +proc encode*(remotePeerInfo: RemotePeerInfo): PeerStorageResult[ProtoBuffer] = + var pb = initProtoBuffer() + + pb.write(1, remotePeerInfo.peerId) + + for multiaddr in remotePeerInfo.addrs.items: + pb.write(2, multiaddr) + + for proto in remotePeerInfo.protocols.items: + pb.write(3, proto) + + let catchRes = catch: + pb.write(4, remotePeerInfo.publicKey) + if catchRes.isErr(): + return err("Enncoding public key failed: " & catchRes.error.msg) + + pb.write(5, uint32(ord(remotePeerInfo.connectedness))) + + pb.write(6, uint64(remotePeerInfo.disconnectTime)) + + if remotePeerInfo.enr.isSome(): + pb.write(7, remotePeerInfo.enr.get().raw) + + return ok(pb) + +########################## +# Storage implementation # +########################## + +proc new*(T: type WakuPeerStorage, db: SqliteDatabase): PeerStorageResult[T] = + # Misconfiguration can lead to nil DB + if db.isNil(): + return err("db not 
initialized") + + # Create the "Peer" table + # It contains: + # - peer id as primary key, stored as a blob + # - stored info (serialised protobuf), stored as a blob + let createStmt = db + .prepareStmt( + """ + CREATE TABLE IF NOT EXISTS Peer ( + peerId BLOB PRIMARY KEY, + storedInfo BLOB + ) WITHOUT ROWID; + """, + NoParams, void, + ) + .expect("Valid statement") + + createStmt.exec(()).isOkOr: + return err("failed to exec") + + # We dispose of this prepared statement here, as we never use it again + createStmt.dispose() + + # Reusable prepared statements + let replaceStmt = db + .prepareStmt( + "REPLACE INTO Peer (peerId, storedInfo) VALUES (?, ?);", + (seq[byte], seq[byte]), + void, + ) + .expect("Valid statement") + + # General initialization + let ps = WakuPeerStorage(database: db, replaceStmt: replaceStmt) + + return ok(ps) + +method put*( + db: WakuPeerStorage, remotePeerInfo: RemotePeerInfo +): PeerStorageResult[void] {.gcsafe.} = + ## Adds a peer to storage or replaces existing entry if it already exists + + let encoded = remotePeerInfo.encode().valueOr: + return err("peer info encoding failed: " & error) + + db.replaceStmt.exec((remotePeerInfo.peerId.data, encoded.buffer)).isOkOr: + return err("DB operation failed: " & error) + + return ok() + +method getAll*( + db: WakuPeerStorage, onData: peer_storage.DataProc +): PeerStorageResult[void] = + ## Retrieves all peers from storage + + proc peer(s: ptr sqlite3_stmt) {.gcsafe, raises: [ResultError[ProtoError]].} = + let + # Stored Info + sTo = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, 1)) + sToL = sqlite3_column_bytes(s, 1) + storedInfo = RemotePeerInfo.decode(@(toOpenArray(sTo, 0, sToL - 1))).tryGet() + + onData(storedInfo) + + let catchRes = catch: + db.database.query("SELECT peerId, storedInfo FROM Peer", peer) + + let queryRes = + if catchRes.isErr(): + return err("failed to extract peer from query result: " & catchRes.error.msg) + else: + catchRes.get() + + if queryRes.isErr(): + return 
err("peer storage query failed: " & queryRes.error) + + return ok() + +proc close*(db: WakuPeerStorage) = + ## Closes the database. + + db.replaceStmt.dispose() + db.database.close() diff --git a/third-party/nwaku/waku/node/peer_manager/waku_peer_store.nim b/third-party/nwaku/waku/node/peer_manager/waku_peer_store.nim new file mode 100644 index 0000000..0098c16 --- /dev/null +++ b/third-party/nwaku/waku/node/peer_manager/waku_peer_store.nim @@ -0,0 +1,204 @@ +{.push raises: [].} + +import + std/[tables, sequtils, sets, options, strutils], + chronos, + chronicles, + eth/p2p/discoveryv5/enr, + libp2p/builders, + libp2p/peerstore + +import + ../../waku_core, + ../../waku_enr/sharding, + ../../waku_enr/capabilities, + ../../common/utils/sequence, + ../../waku_core/peers + +export peerstore, builders + +type + # Keeps track of the Connectedness state of a peer + ConnectionBook* = ref object of PeerBook[Connectedness] + + # Keeps track of the timestamp of the last failed connection attempt + LastFailedConnBook* = ref object of PeerBook[Moment] + + # Keeps track of the number of failed connection attempts + NumberFailedConnBook* = ref object of PeerBook[int] + + # Keeps track of when peers were disconnected in Unix timestamps + DisconnectBook* = ref object of PeerBook[int64] + + # Keeps track of the origin of a peer + SourceBook* = ref object of PeerBook[PeerOrigin] + + # Keeps track of the direction of a peer connection + DirectionBook* = ref object of PeerBook[PeerDirection] + + # Keeps track of the ENR (Ethereum Node Record) of a peer + ENRBook* = ref object of PeerBook[enr.Record] + +proc getPeer*(peerStore: PeerStore, peerId: PeerId): RemotePeerInfo = + let addresses = + if peerStore[LastSeenBook][peerId].isSome(): + @[peerStore[LastSeenBook][peerId].get()] & peerStore[AddressBook][peerId] + else: + peerStore[AddressBook][peerId] + + RemotePeerInfo( + peerId: peerId, + addrs: addresses, + enr: + if peerStore[ENRBook][peerId] != default(enr.Record): + 
some(peerStore[ENRBook][peerId]) + else: + none(enr.Record), + protocols: peerStore[ProtoBook][peerId], + agent: peerStore[AgentBook][peerId], + protoVersion: peerStore[ProtoVersionBook][peerId], + publicKey: peerStore[KeyBook][peerId], + connectedness: peerStore[ConnectionBook][peerId], + disconnectTime: peerStore[DisconnectBook][peerId], + origin: peerStore[SourceBook][peerId], + direction: peerStore[DirectionBook][peerId], + lastFailedConn: peerStore[LastFailedConnBook][peerId], + numberFailedConn: peerStore[NumberFailedConnBook][peerId], + ) + +proc delete*(peerStore: PeerStore, peerId: PeerId) = + # Delete all the information of a given peer. + peerStore.del(peerId) + +proc peers*(peerStore: PeerStore): seq[RemotePeerInfo] = + let allKeys = concat( + toSeq(peerStore[LastSeenBook].book.keys()), + toSeq(peerStore[AddressBook].book.keys()), + toSeq(peerStore[ProtoBook].book.keys()), + toSeq(peerStore[KeyBook].book.keys()), + ) + .toHashSet() + + return allKeys.mapIt(peerStore.getPeer(it)) + +proc addPeer*(peerStore: PeerStore, peer: RemotePeerInfo, origin = UnknownOrigin) = + ## Notice that the origin parameter is used to manually override the given peer origin. + ## At the time of writing, this is used in waku_discv5 or waku_node (peer exchange.) 
+ if peerStore[AddressBook][peer.peerId] == peer.addrs and + peerStore[KeyBook][peer.peerId] == peer.publicKey and + peerStore[ENRBook][peer.peerId].raw.len > 0: + let incomingEnr = peer.enr.valueOr: + trace "peer already managed and incoming ENR is empty", + remote_peer_id = $peer.peerId + return + + if peerStore[ENRBook][peer.peerId].raw == incomingEnr.raw or + peerStore[ENRBook][peer.peerId].seqNum > incomingEnr.seqNum: + trace "peer already managed and ENR info is already saved", + remote_peer_id = $peer.peerId + return + + peerStore[AddressBook][peer.peerId] = peer.addrs + + var protos = peerStore[ProtoBook][peer.peerId] + for new_proto in peer.protocols: + ## append new discovered protocols to the current known protocols set + if not protos.contains(new_proto): + protos.add($new_proto) + peerStore[ProtoBook][peer.peerId] = protos + + ## We don't care whether the item was already present in the table or not. Hence, we always discard the hasKeyOrPut's bool returned value + discard peerStore[AgentBook].book.hasKeyOrPut(peer.peerId, peer.agent) + discard peerStore[ProtoVersionBook].book.hasKeyOrPut(peer.peerId, peer.protoVersion) + discard peerStore[KeyBook].book.hasKeyOrPut(peer.peerId, peer.publicKey) + + discard peerStore[ConnectionBook].book.hasKeyOrPut(peer.peerId, peer.connectedness) + discard peerStore[DisconnectBook].book.hasKeyOrPut(peer.peerId, peer.disconnectTime) + if origin != UnknownOrigin: + peerStore[SourceBook][peer.peerId] = origin + else: + discard peerStore[SourceBook].book.hasKeyOrPut(peer.peerId, peer.origin) + + discard peerStore[DirectionBook].book.hasKeyOrPut(peer.peerId, peer.direction) + discard + peerStore[LastFailedConnBook].book.hasKeyOrPut(peer.peerId, peer.lastFailedConn) + discard + peerStore[NumberFailedConnBook].book.hasKeyOrPut(peer.peerId, peer.numberFailedConn) + if peer.enr.isSome(): + peerStore[ENRBook][peer.peerId] = peer.enr.get() + +proc peers*(peerStore: PeerStore, proto: string): seq[RemotePeerInfo] = + 
peerStore.peers().filterIt(it.protocols.contains(proto)) + +proc peers*(peerStore: PeerStore, protocolMatcher: Matcher): seq[RemotePeerInfo] = + peerStore.peers().filterIt(it.protocols.anyIt(protocolMatcher(it))) + +proc connectedness*(peerStore: PeerStore, peerId: PeerId): Connectedness = + peerStore[ConnectionBook].book.getOrDefault(peerId, NotConnected) + +proc hasShard*(peerStore: PeerStore, peerId: PeerID, cluster, shard: uint16): bool = + peerStore[ENRBook].book.getOrDefault(peerId).containsShard(cluster, shard) + +proc hasCapability*(peerStore: PeerStore, peerId: PeerID, cap: Capabilities): bool = + peerStore[ENRBook].book.getOrDefault(peerId).supportsCapability(cap) + +proc peerExists*(peerStore: PeerStore, peerId: PeerId): bool = + peerStore[AddressBook].contains(peerId) + +proc isConnected*(peerStore: PeerStore, peerId: PeerID): bool = + # Returns `true` if the peer is connected + peerStore.connectedness(peerId) == Connected + +proc hasPeer*(peerStore: PeerStore, peerId: PeerID, proto: string): bool = + # Returns `true` if peer is included in manager for the specified protocol + # TODO: What if peer does not exist in the peerStore? 
+ peerStore.getPeer(peerId).protocols.contains(proto) + +proc hasPeers*(peerStore: PeerStore, proto: string): bool = + # Returns `true` if the peerstore has any peer for the specified protocol + toSeq(peerStore[ProtoBook].book.values()).anyIt(it.anyIt(it == proto)) + +proc hasPeers*(peerStore: PeerStore, protocolMatcher: Matcher): bool = + # Returns `true` if the peerstore has any peer matching the protocolMatcher + toSeq(peerStore[ProtoBook].book.values()).anyIt(it.anyIt(protocolMatcher(it))) + +proc getCapacity*(peerStore: PeerStore): int = + peerStore.capacity + +proc setCapacity*(peerStore: PeerStore, capacity: int) = + peerStore.capacity = capacity + +proc getWakuProtos*(peerStore: PeerStore): seq[string] = + toSeq(peerStore[ProtoBook].book.values()).flatten().deduplicate().filterIt( + it.startsWith("/vac/waku") + ) + +proc getPeersByDirection*( + peerStore: PeerStore, direction: PeerDirection +): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.direction == direction) + +proc getDisconnectedPeers*(peerStore: PeerStore): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.connectedness != Connected) + +proc getConnectedPeers*(peerStore: PeerStore): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.connectedness == Connected) + +proc getPeersByProtocol*(peerStore: PeerStore, proto: string): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.protocols.contains(proto)) + +proc getReachablePeers*(peerStore: PeerStore): seq[RemotePeerInfo] = + return peerStore.peers.filterIt(it.connectedness != CannotConnect) + +proc getPeersByShard*( + peerStore: PeerStore, cluster, shard: uint16 +): seq[RemotePeerInfo] = + return peerStore.peers.filterIt( + it.enr.isSome() and it.enr.get().containsShard(cluster, shard) + ) + +proc getPeersByCapability*( + peerStore: PeerStore, cap: Capabilities +): seq[RemotePeerInfo] = + return + peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap)) diff --git 
a/third-party/nwaku/waku/node/waku_metrics.nim b/third-party/nwaku/waku/node/waku_metrics.nim new file mode 100644 index 0000000..8d38624 --- /dev/null +++ b/third-party/nwaku/waku/node/waku_metrics.nim @@ -0,0 +1,90 @@ +{.push raises: [].} + +import chronicles, chronos, metrics, metrics/chronos_httpserver +import + ../waku_rln_relay/protocol_metrics as rln_metrics, + ../utils/collector, + ./peer_manager, + ./waku_node + +const LogInterval = 10.minutes + +logScope: + topics = "waku node metrics" + +type MetricsServerConf* = object + httpAddress*: IpAddress + httpPort*: Port + logging*: bool + +proc startMetricsLog*() = + var logMetrics: CallbackFunc + + var cumulativeErrors = 0.float64 + var cumulativeConns = 0.float64 + + let logRlnMetrics = getRlnMetricsLogger() + + logMetrics = CallbackFunc( + proc(udata: pointer) {.gcsafe.} = + # TODO: libp2p_pubsub_peers is not public, so we need to make this either + # public in libp2p or do our own peer counting after all. + + # track cumulative values + let freshErrorCount = parseAndAccumulate(waku_node_errors, cumulativeErrors) + let freshConnCount = + parseAndAccumulate(waku_node_conns_initiated, cumulativeConns) + + let totalMessages = collectorAsF64(waku_node_messages) + let storePeers = collectorAsF64(waku_store_peers) + let pxPeers = collectorAsF64(waku_px_peers) + let lightpushPeers = collectorAsF64(waku_lightpush_peers) + let filterPeers = collectorAsF64(waku_filter_peers) + + info "Total connections initiated", count = $freshConnCount + info "Total messages", count = totalMessages + info "Total store peers", count = storePeers + info "Total peer exchange peers", count = pxPeers + info "Total lightpush peers", count = lightpushPeers + info "Total filter peers", count = filterPeers + info "Total errors", count = $freshErrorCount + + # Start protocol specific metrics logging + logRlnMetrics() + + discard setTimer(Moment.fromNow(LogInterval), logMetrics) + ) + + discard setTimer(Moment.fromNow(LogInterval), logMetrics) 
+ +proc startMetricsServer( + serverIp: IpAddress, serverPort: Port +): Future[Result[MetricsHttpServerRef, string]] {.async.} = + info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort + + let server = MetricsHttpServerRef.new($serverIp, serverPort).valueOr: + return err("metrics HTTP server start failed: " & $error) + + try: + await server.start() + except CatchableError: + return err("metrics HTTP server start failed: " & getCurrentExceptionMsg()) + + info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort + return ok(server) + +proc startMetricsServerAndLogging*( + conf: MetricsServerConf, portsShift: uint16 +): Future[Result[MetricsHttpServerRef, string]] {.async.} = + var metricsServer: MetricsHttpServerRef + metricsServer = ( + await ( + startMetricsServer(conf.httpAddress, Port(conf.httpPort.uint16 + portsShift)) + ) + ).valueOr: + return err("Starting metrics server failed. Continuing in current state:" & $error) + + if conf.logging: + startMetricsLog() + + return ok(metricsServer) diff --git a/third-party/nwaku/waku/node/waku_node.nim b/third-party/nwaku/waku/node/waku_node.nim new file mode 100644 index 0000000..d47ac31 --- /dev/null +++ b/third-party/nwaku/waku/node/waku_node.nim @@ -0,0 +1,1675 @@ +{.push raises: [].} + +import + std/[hashes, options, sugar, tables, strutils, sequtils, os, net, random], + chronos, + chronicles, + metrics, + results, + stew/byteutils, + eth/keys, + nimcrypto, + bearssl/rand, + eth/p2p/discoveryv5/enr, + libp2p/crypto/crypto, + libp2p/crypto/curve25519, + libp2p/[multiaddress, multicodec], + libp2p/protocols/ping, + libp2p/protocols/pubsub/gossipsub, + libp2p/protocols/pubsub/rpc/messages, + libp2p/builders, + libp2p/transports/transport, + libp2p/transports/tcptransport, + libp2p/transports/wstransport, + libp2p/utility, + mix, + mix/mix_node, + mix/mix_protocol + +import + ../waku_core, + ../waku_core/topics/sharding, + ../waku_relay, + ../waku_archive, + 
../waku_archive_legacy, + ../waku_store_legacy/protocol as legacy_store, + ../waku_store_legacy/client as legacy_store_client, + ../waku_store_legacy/common as legacy_store_common, + ../waku_store/protocol as store, + ../waku_store/client as store_client, + ../waku_store/common as store_common, + ../waku_store/resume, + ../waku_store_sync, + ../waku_filter_v2, + ../waku_filter_v2/client as filter_client, + ../waku_filter_v2/subscriptions as filter_subscriptions, + ../waku_metadata, + ../waku_rendezvous/protocol, + ../waku_lightpush_legacy/client as legacy_ligntpuhs_client, + ../waku_lightpush_legacy as legacy_lightpush_protocol, + ../waku_lightpush/client as ligntpuhs_client, + ../waku_lightpush as lightpush_protocol, + ../waku_enr, + ../waku_peer_exchange, + ../waku_rln_relay, + ./net_config, + ./peer_manager, + ../common/rate_limit/setting, + ../common/callbacks, + ../common/nimchronos, + ../waku_mix + +declarePublicCounter waku_node_messages, "number of messages received", ["type"] +declarePublicHistogram waku_histogram_message_size, + "message size histogram in kB", + buckets = [ + 0.0, 1.0, 3.0, 5.0, 15.0, 50.0, 75.0, 100.0, 125.0, 150.0, 500.0, 700.0, 1000.0, Inf + ] + +declarePublicGauge waku_version, + "Waku version info (in git describe format)", ["version"] +declarePublicCounter waku_node_errors, "number of wakunode errors", ["type"] +declarePublicGauge waku_lightpush_peers, "number of lightpush peers" +declarePublicGauge waku_filter_peers, "number of filter peers" +declarePublicGauge waku_store_peers, "number of store peers" +declarePublicGauge waku_px_peers, + "number of peers (in the node's peerManager) supporting the peer exchange protocol" + +logScope: + topics = "waku node" + +# randomize initializes sdt/random's random number generator +# if not called, the outcome of randomization procedures will be the same in every run +randomize() + +# TODO: Move to application instance (e.g., `WakuNode2`) +# Git version in git describe format (defined compile 
time) +const git_version* {.strdefine.} = "n/a" + +# Default clientId +const clientId* = "Nimbus Waku v2 node" + +const WakuNodeVersionString* = "version / git commit hash: " & git_version + +# key and crypto modules different +type + # TODO: Move to application instance (e.g., `WakuNode2`) + WakuInfo* = object # NOTE One for simplicity, can extend later as needed + listenAddresses*: seq[string] + enrUri*: string #multiaddrStrings*: seq[string] + + # NOTE based on Eth2Node in NBC eth2_network.nim + WakuNode* = ref object + peerManager*: PeerManager + switch*: Switch + wakuRelay*: WakuRelay + wakuArchive*: waku_archive.WakuArchive + wakuLegacyArchive*: waku_archive_legacy.WakuArchive + wakuLegacyStore*: legacy_store.WakuStore + wakuLegacyStoreClient*: legacy_store_client.WakuStoreClient + wakuStore*: store.WakuStore + wakuStoreClient*: store_client.WakuStoreClient + wakuStoreResume*: StoreResume + wakuStoreReconciliation*: SyncReconciliation + wakuStoreTransfer*: SyncTransfer + wakuFilter*: waku_filter_v2.WakuFilter + wakuFilterClient*: filter_client.WakuFilterClient + wakuRlnRelay*: WakuRLNRelay + wakuLegacyLightPush*: WakuLegacyLightPush + wakuLegacyLightpushClient*: WakuLegacyLightPushClient + wakuLightPush*: WakuLightPush + wakuLightpushClient*: WakuLightPushClient + wakuPeerExchange*: WakuPeerExchange + wakuPeerExchangeClient*: WakuPeerExchangeClient + wakuMetadata*: WakuMetadata + wakuAutoSharding*: Option[Sharding] + enr*: enr.Record + libp2pPing*: Ping + rng*: ref rand.HmacDrbgContext + wakuRendezvous*: WakuRendezVous + announcedAddresses*: seq[MultiAddress] + started*: bool # Indicates that node has started listening + topicSubscriptionQueue*: AsyncEventQueue[SubscriptionEvent] + rateLimitSettings*: ProtocolRateLimitSettings + wakuMix*: WakuMix + +proc getShardsGetter(node: WakuNode): GetShards = + return proc(): seq[uint16] {.closure, gcsafe, raises: [].} = + # fetch pubsubTopics subscribed to relay and convert them to shards + if node.wakuRelay.isNil(): + 
return @[] + let subscribedTopics = node.wakuRelay.subscribedTopics() + let relayShards = topicsToRelayShards(subscribedTopics).valueOr: + error "could not convert relay topics to shards", + error = $error, topics = subscribedTopics + return @[] + if relayShards.isSome(): + let shards = relayShards.get().shardIds + return shards + return @[] + +proc getCapabilitiesGetter(node: WakuNode): GetCapabilities = + return proc(): seq[Capabilities] {.closure, gcsafe, raises: [].} = + if node.wakuRelay.isNil(): + return @[] + return node.enr.getCapabilities() + +proc new*( + T: type WakuNode, + netConfig: NetConfig, + enr: enr.Record, + switch: Switch, + peerManager: PeerManager, + rateLimitSettings: ProtocolRateLimitSettings = DefaultProtocolRateLimit, + # TODO: make this argument required after tests are updated + rng: ref HmacDrbgContext = crypto.newRng(), +): T {.raises: [Defect, LPError, IOError, TLSStreamProtocolError].} = + ## Creates a Waku Node instance. + + info "Initializing networking", addrs = $netConfig.announcedAddresses + + let queue = newAsyncEventQueue[SubscriptionEvent](0) + let node = WakuNode( + peerManager: peerManager, + switch: switch, + rng: rng, + enr: enr, + announcedAddresses: netConfig.announcedAddresses, + topicSubscriptionQueue: queue, + rateLimitSettings: rateLimitSettings, + ) + + peerManager.setShardGetter(node.getShardsGetter()) + + return node + +proc peerInfo*(node: WakuNode): PeerInfo = + node.switch.peerInfo + +proc peerId*(node: WakuNode): PeerId = + node.peerInfo.peerId + +# TODO: Move to application instance (e.g., `WakuNode2`) +# TODO: Extend with more relevant info: topics, peers, memory usage, online time, etc +proc info*(node: WakuNode): WakuInfo = + ## Returns information about the Node, such as what multiaddress it can be reached at. 
+ + let peerInfo = node.switch.peerInfo + + var listenStr: seq[string] + for address in node.announcedAddresses: + var fulladdr = $address & "/p2p/" & $peerInfo.peerId + listenStr &= fulladdr + let enrUri = node.enr.toUri() + let wakuInfo = WakuInfo(listenAddresses: listenStr, enrUri: enrUri) + return wakuInfo + +proc connectToNodes*( + node: WakuNode, nodes: seq[RemotePeerInfo] | seq[string], source = "api" +) {.async.} = + ## `source` indicates source of node addrs (static config, api call, discovery, etc) + # NOTE Connects to the node without a give protocol, which automatically creates streams for relay + await peer_manager.connectToNodes(node.peerManager, nodes, source = source) + +proc disconnectNode*(node: WakuNode, remotePeer: RemotePeerInfo) {.async.} = + await peer_manager.disconnectNode(node.peerManager, remotePeer) + +proc mountMetadata*( + node: WakuNode, clusterId: uint32, shards: seq[uint16] +): Result[void, string] = + if not node.wakuMetadata.isNil(): + return err("Waku metadata already mounted, skipping") + + let metadata = WakuMetadata.new(clusterId, node.getShardsGetter()) + + node.wakuMetadata = metadata + node.peerManager.wakuMetadata = metadata + + let catchRes = catch: + node.switch.mount(node.wakuMetadata, protocolMatcher(WakuMetadataCodec)) + if catchRes.isErr(): + return err(catchRes.error.msg) + + return ok() + +## Waku AutoSharding +proc mountAutoSharding*( + node: WakuNode, clusterId: uint16, shardCount: uint32 +): Result[void, string] = + info "mounting auto sharding", clusterId = clusterId, shardCount = shardCount + node.wakuAutoSharding = + some(Sharding(clusterId: clusterId, shardCountGenZero: shardCount)) + return ok() + +proc getMixNodePoolSize*(node: WakuNode): int = + return node.wakuMix.getNodePoolSize() + +proc mountMix*( + node: WakuNode, clusterId: uint16, mixPrivKey: Curve25519Key +): Future[Result[void, string]] {.async.} = + info "mounting mix protocol", nodeId = node.info #TODO log the config used + + if 
node.announcedAddresses.len == 0: + return err("Trying to mount mix without having announced addresses") + + let localaddrStr = node.announcedAddresses[0].toString().valueOr: + return err("Failed to convert multiaddress to string.") + info "local addr", localaddr = localaddrStr + + let nodeAddr = localaddrStr & "/p2p/" & $node.peerId + # TODO: Pass bootnodes from config, + node.wakuMix = WakuMix.new(nodeAddr, node.peerManager, clusterId, mixPrivKey).valueOr: + error "Waku Mix protocol initialization failed", err = error + return + node.wakuMix.registerDestReadBehavior(WakuLightPushCodec, readLp(int(-1))) + let catchRes = catch: + node.switch.mount(node.wakuMix) + if catchRes.isErr(): + return err(catchRes.error.msg) + return ok() + +## Waku Sync + +proc mountStoreSync*( + node: WakuNode, + cluster: uint16, + shards: seq[uint16], + contentTopics: seq[string], + storeSyncRange: uint32, + storeSyncInterval: uint32, + storeSyncRelayJitter: uint32, +): Future[Result[void, string]] {.async.} = + let idsChannel = newAsyncQueue[(SyncID, PubsubTopic, ContentTopic)](0) + let wantsChannel = newAsyncQueue[(PeerId)](0) + let needsChannel = newAsyncQueue[(PeerId, WakuMessageHash)](0) + + let pubsubTopics = shards.mapIt($RelayShard(clusterId: cluster, shardId: it)) + + let recon = + ?await SyncReconciliation.new( + pubsubTopics, contentTopics, node.peerManager, node.wakuArchive, + storeSyncRange.seconds, storeSyncInterval.seconds, storeSyncRelayJitter.seconds, + idsChannel, wantsChannel, needsChannel, + ) + + node.wakuStoreReconciliation = recon + + let reconMountRes = catch: + node.switch.mount( + node.wakuStoreReconciliation, protocolMatcher(WakuReconciliationCodec) + ) + if reconMountRes.isErr(): + return err(reconMountRes.error.msg) + + let transfer = SyncTransfer.new( + node.peerManager, node.wakuArchive, idsChannel, wantsChannel, needsChannel + ) + + node.wakuStoreTransfer = transfer + + let transMountRes = catch: + node.switch.mount(node.wakuStoreTransfer, 
protocolMatcher(WakuTransferCodec)) + if transMountRes.isErr(): + return err(transMountRes.error.msg) + + return ok() + +## Waku relay + +proc registerRelayHandler( + node: WakuNode, topic: PubsubTopic, appHandler: WakuRelayHandler +) = + ## Registers the only handler for the given topic. + ## Notice that this handler internally calls other handlers, such as filter, + ## archive, etc, plus the handler provided by the application. + + if node.wakuRelay.isSubscribed(topic): + return + + proc traceHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + let msgSizeKB = msg.payload.len / 1000 + + waku_node_messages.inc(labelValues = ["relay"]) + waku_histogram_message_size.observe(msgSizeKB) + + proc filterHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + if node.wakuFilter.isNil(): + return + + await node.wakuFilter.handleMessage(topic, msg) + + proc archiveHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + if not node.wakuLegacyArchive.isNil(): + ## we try to store with legacy archive + await node.wakuLegacyArchive.handleMessage(topic, msg) + return + + if node.wakuArchive.isNil(): + return + + await node.wakuArchive.handleMessage(topic, msg) + + proc syncHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} = + if node.wakuStoreReconciliation.isNil(): + return + + node.wakuStoreReconciliation.messageIngress(topic, msg) + + let uniqueTopicHandler = proc( + topic: PubsubTopic, msg: WakuMessage + ): Future[void] {.async, gcsafe.} = + await traceHandler(topic, msg) + await filterHandler(topic, msg) + await archiveHandler(topic, msg) + await syncHandler(topic, msg) + await appHandler(topic, msg) + + node.wakuRelay.subscribe(topic, uniqueTopicHandler) + +proc subscribe*( + node: WakuNode, subscription: SubscriptionEvent, handler: WakuRelayHandler +): Result[void, string] = + ## Subscribes to a PubSub or Content topic. Triggers handler when receiving messages on + ## this topic. 
WakuRelayHandler is a method that takes a topic and a Waku message. + + if node.wakuRelay.isNil(): + error "Invalid API call to `subscribe`. WakuRelay not mounted." + return err("Invalid API call to `subscribe`. WakuRelay not mounted.") + + let (pubsubTopic, contentTopicOp) = + case subscription.kind + of ContentSub: + if node.wakuAutoSharding.isSome(): + let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr: + error "Autosharding error", error = error + return err("Autosharding error: " & error) + ($shard, some(subscription.topic)) + else: + return err( + "Static sharding is used, relay subscriptions must specify a pubsub topic" + ) + of PubsubSub: + (subscription.topic, none(ContentTopic)) + else: + return err("Unsupported subscription type in relay subscribe") + + if node.wakuRelay.isSubscribed(pubsubTopic): + warn "No-effect API call to subscribe. Already subscribed to topic", pubsubTopic + return ok() + + node.registerRelayHandler(pubsubTopic, handler) + node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic)) + + return ok() + +proc unsubscribe*( + node: WakuNode, subscription: SubscriptionEvent +): Result[void, string] = + ## Unsubscribes from a specific PubSub or Content topic. + + if node.wakuRelay.isNil(): + error "Invalid API call to `unsubscribe`. WakuRelay not mounted." + return err("Invalid API call to `unsubscribe`. 
WakuRelay not mounted.") + + let (pubsubTopic, contentTopicOp) = + case subscription.kind + of ContentUnsub: + if node.wakuAutoSharding.isSome(): + let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr: + error "Autosharding error", error = error + return err("Autosharding error: " & error) + ($shard, some(subscription.topic)) + else: + return err( + "Static sharding is used, relay subscriptions must specify a pubsub topic" + ) + of PubsubUnsub: + (subscription.topic, none(ContentTopic)) + else: + return err("Unsupported subscription type in relay unsubscribe") + + if not node.wakuRelay.isSubscribed(pubsubTopic): + warn "No-effect API call to `unsubscribe`. Was not subscribed", pubsubTopic + return ok() + + debug "unsubscribe", pubsubTopic, contentTopicOp + node.wakuRelay.unsubscribe(pubsubTopic) + node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic)) + + return ok() + +proc publish*( + node: WakuNode, pubsubTopicOp: Option[PubsubTopic], message: WakuMessage +): Future[Result[void, string]] {.async, gcsafe.} = + ## Publish a `WakuMessage`. Pubsub topic contains; none, a named or static shard. + ## `WakuMessage` should contain a `contentTopic` field for light node functionality. + ## It is also used to determine the shard. + + if node.wakuRelay.isNil(): + let msg = + "Invalid API call to `publish`. WakuRelay not mounted. Try `lightpush` instead." 
+ error "publish error", err = msg + # TODO: Improve error handling + return err(msg) + + let pubsubTopic = pubsubTopicOp.valueOr: + if node.wakuAutoSharding.isNone(): + return err("Pubsub topic must be specified when static sharding is enabled.") + node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr: + let msg = "Autosharding error: " & error + return err(msg) + + #TODO instead of discard return error when 0 peers received the message + discard await node.wakuRelay.publish(pubsubTopic, message) + + notice "waku.relay published", + peerId = node.peerId, + pubsubTopic = pubsubTopic, + msg_hash = pubsubTopic.computeMessageHash(message).to0xHex(), + publishTime = getNowInNanosecondTime() + + return ok() + +proc startRelay*(node: WakuNode) {.async.} = + ## Setup and start relay protocol + info "starting relay protocol" + + if node.wakuRelay.isNil(): + error "Failed to start relay. Not mounted." + return + + ## Setup relay protocol + + # Resume previous relay connections + if node.peerManager.switch.peerStore.hasPeers(protocolMatcher(WakuRelayCodec)): + info "Found previous WakuRelay peers. Reconnecting." + + # Reconnect to previous relay peers. 
This will respect a backoff period, if necessary + let backoffPeriod = + node.wakuRelay.parameters.pruneBackoff + chronos.seconds(BackoffSlackTime) + + await node.peerManager.reconnectPeers(WakuRelayCodec, backoffPeriod) + + # Start the WakuRelay protocol + await node.wakuRelay.start() + + info "relay started successfully" + +proc mountRelay*( + node: WakuNode, + peerExchangeHandler = none(RoutingRecordsHandler), + maxMessageSize = int(DefaultMaxWakuMessageSize), +): Future[Result[void, string]] {.async.} = + if not node.wakuRelay.isNil(): + error "wakuRelay already mounted, skipping" + return err("wakuRelay already mounted, skipping") + + ## The default relay topics is the union of all configured topics plus default PubsubTopic(s) + info "mounting relay protocol" + + node.wakuRelay = WakuRelay.new(node.switch, maxMessageSize).valueOr: + error "failed mounting relay protocol", error = error + return err("failed mounting relay protocol: " & error) + + ## Add peer exchange handler + if peerExchangeHandler.isSome(): + node.wakuRelay.parameters.enablePX = true + # Feature flag for peer exchange in nim-libp2p + node.wakuRelay.routingRecordsHandler.add(peerExchangeHandler.get()) + + if node.started: + await node.startRelay() + + node.switch.mount(node.wakuRelay, protocolMatcher(WakuRelayCodec)) + + info "relay mounted successfully" + return ok() + +## Waku filter + +proc mountFilter*( + node: WakuNode, + subscriptionTimeout: Duration = + filter_subscriptions.DefaultSubscriptionTimeToLiveSec, + maxFilterPeers: uint32 = filter_subscriptions.MaxFilterPeers, + maxFilterCriteriaPerPeer: uint32 = filter_subscriptions.MaxFilterCriteriaPerPeer, + messageCacheTTL: Duration = filter_subscriptions.MessageCacheTTL, + rateLimitSetting: RateLimitSetting = FilterDefaultPerPeerRateLimit, +) {.async: (raises: []).} = + ## Mounting filter v2 protocol + + info "mounting filter protocol" + node.wakuFilter = WakuFilter.new( + node.peerManager, + subscriptionTimeout, + maxFilterPeers, + 
maxFilterCriteriaPerPeer, + messageCacheTTL, + some(rateLimitSetting), + ) + + try: + await node.wakuFilter.start() + except CatchableError: + error "failed to start wakuFilter", error = getCurrentExceptionMsg() + + try: + node.switch.mount(node.wakuFilter, protocolMatcher(WakuFilterSubscribeCodec)) + except LPError: + error "failed to mount wakuFilter", error = getCurrentExceptionMsg() + +proc filterHandleMessage*( + node: WakuNode, pubsubTopic: PubsubTopic, message: WakuMessage +) {.async.} = + if node.wakuFilter.isNil(): + error "cannot handle filter message", error = "waku filter is required" + return + + await node.wakuFilter.handleMessage(pubsubTopic, message) + +proc mountFilterClient*(node: WakuNode) {.async: (raises: []).} = + ## Mounting both filter + ## Giving option for application level to choose btw own push message handling or + ## rely on node provided cache. - This only applies for v2 filter client + info "mounting filter client" + + if not node.wakuFilterClient.isNil(): + trace "Filter client already mounted." + return + + node.wakuFilterClient = WakuFilterClient.new(node.peerManager, node.rng) + + try: + await node.wakuFilterClient.start() + except CatchableError: + error "failed to start wakuFilterClient", error = getCurrentExceptionMsg() + + try: + node.switch.mount(node.wakuFilterClient, protocolMatcher(WakuFilterSubscribeCodec)) + except LPError: + error "failed to mount wakuFilterClient", error = getCurrentExceptionMsg() + +proc filterSubscribe*( + node: WakuNode, + pubsubTopic: Option[PubsubTopic], + contentTopics: ContentTopic | seq[ContentTopic], + peer: RemotePeerInfo | string, +): Future[FilterSubscribeResult] {.async: (raises: []).} = + ## Registers for messages that match a specific filter. Triggers the handler whenever a message is received. 
+ if node.wakuFilterClient.isNil(): + error "cannot register filter subscription to topic", + error = "waku filter client is not set up" + return err(FilterSubscribeError.serviceUnavailable()) + + let remotePeerRes = parsePeerInfo(peer) + if remotePeerRes.isErr(): + error "Couldn't parse the peer info properly", error = remotePeerRes.error + return err(FilterSubscribeError.serviceUnavailable("No peers available")) + + let remotePeer = remotePeerRes.value + + if pubsubTopic.isSome(): + info "registering filter subscription to content", + pubsubTopic = pubsubTopic.get(), + contentTopics = contentTopics, + peer = remotePeer.peerId + + when (contentTopics is ContentTopic): + let contentTopics = @[contentTopics] + let subRes = await node.wakuFilterClient.subscribe( + remotePeer, pubsubTopic.get(), contentTopics + ) + if subRes.isOk(): + info "v2 subscribed to topic", + pubsubTopic = pubsubTopic, contentTopics = contentTopics + + # Purpose is to update Waku Metadata + node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: pubsubTopic.get())) + else: + error "failed filter v2 subscription", error = subRes.error + waku_node_errors.inc(labelValues = ["subscribe_filter_failure"]) + + return subRes + elif node.wakuAutoSharding.isNone(): + error "Failed filter subscription, pubsub topic must be specified with static sharding" + waku_node_errors.inc(labelValues = ["subscribe_filter_failure"]) + else: + # No pubsub topic, autosharding is used to deduce it + # but content topics must be well-formed for this + let topicMapRes = + node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics) + + let topicMap = + if topicMapRes.isErr(): + error "can't get shard", error = topicMapRes.error + return err(FilterSubscribeError.badResponse("can't get shard")) + else: + topicMapRes.get() + + var futures = collect(newSeq): + for shard, topics in topicMap.pairs: + info "registering filter subscription to content", + shard = shard, contentTopics = topics, peer = remotePeer.peerId + 
let content = topics.mapIt($it) + node.wakuFilterClient.subscribe(remotePeer, $shard, content) + + var subRes: FilterSubscribeResult = FilterSubscribeResult.ok() + try: + let finished = await allFinished(futures) + + for fut in finished: + let res = fut.read() + + if res.isErr(): + error "failed filter subscription", error = res.error + waku_node_errors.inc(labelValues = ["subscribe_filter_failure"]) + subRes = FilterSubscribeResult.err(res.error) + + for pubsub, topics in topicMap.pairs: + info "subscribed to topic", pubsubTopic = pubsub, contentTopics = topics + + # Purpose is to update Waku Metadata + node.topicSubscriptionQueue.emit((kind: PubsubSub, topic: $pubsub)) + except CatchableError: + let errMsg = "exception in filterSubscribe: " & getCurrentExceptionMsg() + error "exception in filterSubscribe", error = getCurrentExceptionMsg() + waku_node_errors.inc(labelValues = ["subscribe_filter_failure"]) + subRes = + FilterSubscribeResult.err(FilterSubscribeError.serviceUnavailable(errMsg)) + + # return the last error or ok + return subRes + +proc filterUnsubscribe*( + node: WakuNode, + pubsubTopic: Option[PubsubTopic], + contentTopics: ContentTopic | seq[ContentTopic], + peer: RemotePeerInfo | string, +): Future[FilterSubscribeResult] {.async: (raises: []).} = + ## Unsubscribe from a content filter V2". 
+ + let remotePeerRes = parsePeerInfo(peer) + if remotePeerRes.isErr(): + error "couldn't parse remotePeerInfo", error = remotePeerRes.error + return err(FilterSubscribeError.serviceUnavailable("No peers available")) + + let remotePeer = remotePeerRes.value + + if pubsubTopic.isSome(): + info "deregistering filter subscription to content", + pubsubTopic = pubsubTopic.get(), + contentTopics = contentTopics, + peer = remotePeer.peerId + + let unsubRes = await node.wakuFilterClient.unsubscribe( + remotePeer, pubsubTopic.get(), contentTopics + ) + if unsubRes.isOk(): + info "unsubscribed from topic", + pubsubTopic = pubsubTopic.get(), contentTopics = contentTopics + + # Purpose is to update Waku Metadata + node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: pubsubTopic.get())) + else: + error "failed filter unsubscription", error = unsubRes.error + waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) + + return unsubRes + elif node.wakuAutoSharding.isNone(): + error "Failed filter un-subscription, pubsub topic must be specified with static sharding" + waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) + else: # pubsubTopic.isNone + let topicMapRes = + node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics) + + let topicMap = + if topicMapRes.isErr(): + error "can't get shard", error = topicMapRes.error + return err(FilterSubscribeError.badResponse("can't get shard")) + else: + topicMapRes.get() + + var futures = collect(newSeq): + for shard, topics in topicMap.pairs: + info "deregistering filter subscription to content", + shard = shard, contentTopics = topics, peer = remotePeer.peerId + let content = topics.mapIt($it) + node.wakuFilterClient.unsubscribe(remotePeer, $shard, content) + + var unsubRes: FilterSubscribeResult = FilterSubscribeResult.ok() + try: + let finished = await allFinished(futures) + + for fut in finished: + let res = fut.read() + + if res.isErr(): + error "failed filter unsubscription", error = 
res.error + waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) + unsubRes = FilterSubscribeResult.err(res.error) + + for pubsub, topics in topicMap.pairs: + info "unsubscribed from topic", pubsubTopic = pubsub, contentTopics = topics + + # Purpose is to update Waku Metadata + node.topicSubscriptionQueue.emit((kind: PubsubUnsub, topic: $pubsub)) + except CatchableError: + let errMsg = "exception in filterUnsubscribe: " & getCurrentExceptionMsg() + error "exception in filterUnsubscribe", error = getCurrentExceptionMsg() + waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) + unsubRes = + FilterSubscribeResult.err(FilterSubscribeError.serviceUnavailable(errMsg)) + + # return the last error or ok + return unsubRes + +proc filterUnsubscribeAll*( + node: WakuNode, peer: RemotePeerInfo | string +): Future[FilterSubscribeResult] {.async: (raises: []).} = + ## Unsubscribe from a content filter V2". + + let remotePeerRes = parsePeerInfo(peer) + if remotePeerRes.isErr(): + error "couldn't parse remotePeerInfo", error = remotePeerRes.error + return err(FilterSubscribeError.serviceUnavailable("No peers available")) + + let remotePeer = remotePeerRes.value + + info "deregistering all filter subscription to content", peer = remotePeer.peerId + + let unsubRes = await node.wakuFilterClient.unsubscribeAll(remotePeer) + if unsubRes.isOk(): + info "unsubscribed from all content-topic", peerId = remotePeer.peerId + else: + error "failed filter unsubscription from all content-topic", error = unsubRes.error + waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"]) + + return unsubRes + +# NOTICE: subscribe / unsubscribe methods are removed - they were already depricated +# yet incompatible to handle both type of filters - use specific filter registration instead + +## Waku archive +proc mountArchive*( + node: WakuNode, + driver: waku_archive.ArchiveDriver, + retentionPolicy = none(waku_archive.RetentionPolicy), +): Result[void, string] = + 
node.wakuArchive = waku_archive.WakuArchive.new( + driver = driver, retentionPolicy = retentionPolicy + ).valueOr: + return err("error in mountArchive: " & error) + + node.wakuArchive.start() + + return ok() + +proc mountLegacyArchive*( + node: WakuNode, driver: waku_archive_legacy.ArchiveDriver +): Result[void, string] = + node.wakuLegacyArchive = waku_archive_legacy.WakuArchive.new(driver = driver).valueOr: + return err("error in mountLegacyArchive: " & error) + + return ok() + +## Legacy Waku Store + +# TODO: Review this mapping logic. Maybe, move it to the appplication code +proc toArchiveQuery( + request: legacy_store_common.HistoryQuery +): waku_archive_legacy.ArchiveQuery = + waku_archive_legacy.ArchiveQuery( + pubsubTopic: request.pubsubTopic, + contentTopics: request.contentTopics, + cursor: request.cursor.map( + proc(cursor: HistoryCursor): waku_archive_legacy.ArchiveCursor = + waku_archive_legacy.ArchiveCursor( + pubsubTopic: cursor.pubsubTopic, + senderTime: cursor.senderTime, + storeTime: cursor.storeTime, + digest: cursor.digest, + ) + ), + startTime: request.startTime, + endTime: request.endTime, + pageSize: request.pageSize.uint, + direction: request.direction, + requestId: request.requestId, + ) + +# TODO: Review this mapping logic. 
# TODO: Review this mapping logic. Maybe, move it to the application code
proc toHistoryResult*(
    res: waku_archive_legacy.ArchiveResult
): legacy_store_common.HistoryResult =
  ## Map a legacy archive result onto a legacy store HistoryResult.
  ## Driver/invalid-query errors surface as BAD_REQUEST; everything else is UNKNOWN.
  if res.isErr():
    case res.error.kind
    of waku_archive_legacy.ArchiveErrorKind.DRIVER_ERROR,
        waku_archive_legacy.ArchiveErrorKind.INVALID_QUERY:
      err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: res.error.cause))
    else:
      err(HistoryError(kind: HistoryErrorKind.UNKNOWN))
  else:
    let response = res.get()
    ok(
      HistoryResponse(
        messages: response.messages,
        cursor: response.cursor.map(
          proc(cursor: waku_archive_legacy.ArchiveCursor): HistoryCursor =
            HistoryCursor(
              pubsubTopic: cursor.pubsubTopic,
              senderTime: cursor.senderTime,
              storeTime: cursor.storeTime,
              digest: cursor.digest,
            )
        ),
      )
    )

proc mountLegacyStore*(
    node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
  ## Mount the legacy store protocol, backed by the legacy archive.
  info "mounting waku legacy store protocol"

  if node.wakuLegacyArchive.isNil():
    error "failed to mount waku legacy store protocol", error = "waku archive not set"
    return

  # TODO: Review this handler logic. Maybe, move it to the application code
  let queryHandler: HistoryQueryHandler = proc(
      request: HistoryQuery
  ): Future[legacy_store_common.HistoryResult] {.async.} =
    if request.cursor.isSome():
      request.cursor.get().checkHistCursor().isOkOr:
        return err(error)

    let request = request.toArchiveQuery()
    let response = await node.wakuLegacyArchive.findMessagesV2(request)
    return response.toHistoryResult()

  node.wakuLegacyStore = legacy_store.WakuStore.new(
    node.peerManager, node.rng, queryHandler, some(rateLimit)
  )

  if node.started:
    # Node has started already. Let's start store too.
    await node.wakuLegacyStore.start()

  node.switch.mount(
    node.wakuLegacyStore, protocolMatcher(legacy_store_common.WakuLegacyStoreCodec)
  )

proc mountLegacyStoreClient*(node: WakuNode) =
  ## Mount the legacy store client (query side only).
  info "mounting legacy store client"

  node.wakuLegacyStoreClient =
    legacy_store_client.WakuStoreClient.new(node.peerManager, node.rng)

proc query*(
    node: WakuNode, query: legacy_store_common.HistoryQuery, peer: RemotePeerInfo
): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {.
    async, gcsafe
.} =
  ## Queries known nodes for historical messages
  if node.wakuLegacyStoreClient.isNil():
    return err("waku legacy store client is nil")

  let queryRes = await node.wakuLegacyStoreClient.query(query, peer)
  if queryRes.isErr():
    return err("legacy store client query error: " & $queryRes.error)

  let response = queryRes.get()

  return ok(response)

# TODO: Move to application module (e.g., wakunode2.nim)
proc query*(
    node: WakuNode, query: legacy_store_common.HistoryQuery
): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {.
    async, gcsafe, deprecated: "Use 'node.query()' with peer destination instead"
.} =
  ## Queries known nodes for historical messages, picking a suitable peer
  ## from the peer manager.
  if node.wakuLegacyStoreClient.isNil():
    return err("waku legacy store client is nil")

  let peerOpt = node.peerManager.selectPeer(legacy_store_common.WakuLegacyStoreCodec)
  if peerOpt.isNone():
    error "no suitable remote peers"
    return err("peer_not_found_failure")

  return await node.query(query, peerOpt.get())

when defined(waku_exp_store_resume):
  # TODO: Move to application module (e.g., wakunode2.nim)
  proc resume*(
      node: WakuNode, peerList: Option[seq[RemotePeerInfo]] = none(seq[RemotePeerInfo])
  ) {.async, gcsafe.} =
    ## Retrieves the history of waku messages published on the default waku pubsub
    ## topic since the last time the waku node has been online. Requires the store
    ## protocol mounted in full mode (i.e., persisting messages).
    ## The offline time window is the difference between the current time and the
    ## timestamp of the most recent persisted message, plus a 20 s offset to
    ## account for node asynchrony.
    ## `peerList` is the candidate peers to fetch from (first available wins);
    ## when empty, a random store peer from the peer manager is used.
    if node.wakuLegacyStoreClient.isNil():
      return

    let retrievedMessages = await node.wakuLegacyStoreClient.resume(peerList)
    if retrievedMessages.isErr():
      error "failed to resume store", error = retrievedMessages.error
      return

    info "the number of retrieved messages since the last online time: ",
      number = retrievedMessages.value

## Waku Store

proc toArchiveQuery(request: StoreQueryRequest): waku_archive.ArchiveQuery =
  ## Map a store v3 query request onto an archive query.
  var query = waku_archive.ArchiveQuery()

  query.includeData = request.includeData
  query.pubsubTopic = request.pubsubTopic
  query.contentTopics = request.contentTopics
  query.startTime = request.startTime
  query.endTime = request.endTime
  query.hashes = request.messageHashes
  query.cursor = request.paginationCursor
  query.direction = request.paginationForward
  query.requestId = request.requestId

  if request.paginationLimit.isSome():
    query.pageSize = uint(request.paginationLimit.get())

  return query

proc toStoreResult(res: waku_archive.ArchiveResult): StoreQueryResult =
  ## Map an archive result onto a store v3 response (status 200 on success,
  ## 300 on archive error).
  let response = res.valueOr:
    return err(StoreError.new(300, "archive error: " & $error))

  var res = StoreQueryResponse()

  res.statusCode = 200
  res.statusDesc = "OK"

  # One key-value entry per hash; message payload/topic are attached below for
  # the (possibly shorter) list of materialized messages.
  for i in 0 ..< response.hashes.len:
    let hash = response.hashes[i]

    let kv = store_common.WakuMessageKeyValue(messageHash: hash)

    res.messages.add(kv)

  for i in 0 ..< response.messages.len:
    res.messages[i].message = some(response.messages[i])
    res.messages[i].pubsubTopic = some(response.topics[i])

  res.paginationCursor = response.cursor

  return ok(res)

proc mountStore*(
    node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
  ## Mount the store v3 protocol, backed by the (new) archive.
  if node.wakuArchive.isNil():
    error "failed to mount waku store protocol", error = "waku archive not set"
    return

  info "mounting waku store protocol"

  let requestHandler: StoreQueryRequestHandler = proc(
      request: StoreQueryRequest
  ): Future[StoreQueryResult] {.async.} =
    let request = request.toArchiveQuery()
    let response = await node.wakuArchive.findMessages(request)

    return response.toStoreResult()

  node.wakuStore =
    store.WakuStore.new(node.peerManager, node.rng, requestHandler, some(rateLimit))

  if node.started:
    await node.wakuStore.start()

  node.switch.mount(node.wakuStore, protocolMatcher(store_common.WakuStoreCodec))
proc mountStoreClient*(node: WakuNode) =
  ## Mount the store v3 client (query side only).
  info "mounting store client"

  node.wakuStoreClient = store_client.WakuStoreClient.new(node.peerManager, node.rng)

proc query*(
    node: WakuNode, request: store_common.StoreQueryRequest, peer: RemotePeerInfo
): Future[store_common.WakuStoreResult[store_common.StoreQueryResponse]] {.
    async, gcsafe
.} =
  ## Queries known nodes for historical messages (store v3).
  ## A client-side error is folded into the response status rather than
  ## returned as err().
  if node.wakuStoreClient.isNil():
    return err("waku store v3 client is nil")

  let response = (await node.wakuStoreClient.query(request, peer)).valueOr:
    var res = StoreQueryResponse()
    res.statusCode = uint32(error.kind)
    res.statusDesc = $error

    return ok(res)

  return ok(response)

proc setupStoreResume*(node: WakuNode) =
  ## Wire up Store Resume over the archive and store client; only logs on failure.
  node.wakuStoreResume = StoreResume.new(
    node.peerManager, node.wakuArchive, node.wakuStoreClient
  ).valueOr:
    error "Failed to setup Store Resume", error = $error
    return

## Waku lightpush
proc mountLegacyLightPush*(
    node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
  ## Mount the legacy lightpush service; relays via wakuRelay (with optional
  ## RLN validation) when available, otherwise uses a nil handler.
  info "mounting legacy light push"

  let pushHandler =
    if node.wakuRelay.isNil:
      debug "mounting legacy lightpush without relay (nil)"
      legacy_lightpush_protocol.getNilPushHandler()
    else:
      debug "mounting legacy lightpush with relay"
      let rlnPeer =
        if isNil(node.wakuRlnRelay):
          debug "mounting legacy lightpush without rln-relay"
          none(WakuRLNRelay)
        else:
          debug "mounting legacy lightpush with rln-relay"
          some(node.wakuRlnRelay)
      legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)

  node.wakuLegacyLightPush =
    WakuLegacyLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit))

  if node.started:
    # Node has started already. Let's start lightpush too.
    await node.wakuLegacyLightPush.start()

  node.switch.mount(node.wakuLegacyLightPush, protocolMatcher(WakuLegacyLightPushCodec))

proc mountLegacyLightPushClient*(node: WakuNode) =
  info "mounting legacy light push client"

  if node.wakuLegacyLightpushClient.isNil():
    node.wakuLegacyLightpushClient =
      WakuLegacyLightPushClient.new(node.peerManager, node.rng)

proc legacyLightpushPublish*(
    node: WakuNode,
    pubsubTopic: Option[PubsubTopic],
    message: WakuMessage,
    peer: RemotePeerInfo,
): Future[legacy_lightpush_protocol.WakuLightPushResult[string]] {.async, gcsafe.} =
  ## Pushes a `WakuMessage` to a node which relays it further on PubSub topic.
  ## Returns whether relaying was successful or not.
  ## `WakuMessage` should contain a `contentTopic` field for light node
  ## functionality.
  if node.wakuLegacyLightpushClient.isNil() and node.wakuLegacyLightPush.isNil():
    error "failed to publish message as legacy lightpush not available"
    return err("Waku lightpush not available")

  let internalPublish = proc(
      node: WakuNode,
      pubsubTopic: PubsubTopic,
      message: WakuMessage,
      peer: RemotePeerInfo,
  ): Future[legacy_lightpush_protocol.WakuLightPushResult[string]] {.async, gcsafe.} =
    let msgHash = pubsubTopic.computeMessageHash(message).to0xHex()
    # Prefer the client; fall back to the self-hosted service.
    if not node.wakuLegacyLightpushClient.isNil():
      notice "publishing message with legacy lightpush",
        pubsubTopic = pubsubTopic,
        contentTopic = message.contentTopic,
        target_peer_id = peer.peerId,
        msg_hash = msgHash
      return await node.wakuLegacyLightpushClient.publish(pubsubTopic, message, peer)

    if not node.wakuLegacyLightPush.isNil():
      notice "publishing message with self hosted legacy lightpush",
        pubsubTopic = pubsubTopic,
        contentTopic = message.contentTopic,
        target_peer_id = peer.peerId,
        msg_hash = msgHash
      return
        await node.wakuLegacyLightPush.handleSelfLightPushRequest(pubsubTopic, message)

  try:
    if pubsubTopic.isSome():
      return await internalPublish(node, pubsubTopic.get(), message, peer)

    if node.wakuAutoSharding.isNone():
      return err("Pubsub topic must be specified when static sharding is enabled")
    let topicMapRes =
      node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic)

    let topicMap =
      if topicMapRes.isErr():
        return err(topicMapRes.error)
      else:
        topicMapRes.get()

    for pubsub, _ in topicMap.pairs: # There's only one pair anyway
      return await internalPublish(node, $pubsub, message, peer)

    # FIX: previously fell through with an uninitialized result when the
    # autosharded topic map came back empty.
    return err("could not derive a pubsub topic from the message content topic")
  except CatchableError:
    return err(getCurrentExceptionMsg())

# TODO: Move to application module (e.g., wakunode2.nim)
proc legacyLightpushPublish*(
    node: WakuNode, pubsubTopic: Option[PubsubTopic], message: WakuMessage
): Future[legacy_lightpush_protocol.WakuLightPushResult[string]] {.
    async, gcsafe, deprecated: "Use 'node.legacyLightpushPublish()' instead"
.} =
  ## Convenience overload that picks a peer from the peer manager (client mode)
  ## or targets self (service mode).
  if node.wakuLegacyLightpushClient.isNil() and node.wakuLegacyLightPush.isNil():
    error "failed to publish message as legacy lightpush not available"
    return err("waku legacy lightpush not available")

  var peerOpt: Option[RemotePeerInfo] = none(RemotePeerInfo)
  if not node.wakuLegacyLightpushClient.isNil():
    peerOpt = node.peerManager.selectPeer(WakuLegacyLightPushCodec)
    if peerOpt.isNone():
      let msg = "no suitable remote peers"
      error "failed to publish message", err = msg
      return err(msg)
  elif not node.wakuLegacyLightPush.isNil():
    peerOpt = some(RemotePeerInfo.init($node.switch.peerInfo.peerId))

  return await node.legacyLightpushPublish(pubsubTopic, message, peer = peerOpt.get())

proc mountLightPush*(
    node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
  ## Mount the lightpush v2 service; relays via wakuRelay (with optional RLN
  ## validation) when available, otherwise uses a nil handler.
  info "mounting light push"

  let pushHandler =
    if node.wakuRelay.isNil():
      debug "mounting lightpush v2 without relay (nil)"
      lightpush_protocol.getNilPushHandler()
    else:
      debug "mounting lightpush with relay"
      let rlnPeer =
        if isNil(node.wakuRlnRelay):
          debug "mounting lightpush without rln-relay"
          none(WakuRLNRelay)
        else:
          debug "mounting lightpush with rln-relay"
          some(node.wakuRlnRelay)
      lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)

  node.wakuLightPush = WakuLightPush.new(
    node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit)
  )

  if node.started:
    # Node has started already. Let's start lightpush too.
    await node.wakuLightPush.start()

  node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec))
proc mountLightPushClient*(node: WakuNode) =
  info "mounting light push client"

  if node.wakuLightpushClient.isNil():
    node.wakuLightpushClient = WakuLightPushClient.new(node.peerManager, node.rng)

proc lightpushPublishHandler(
    node: WakuNode,
    pubsubTopic: PubsubTopic,
    message: WakuMessage,
    peer: RemotePeerInfo | PeerInfo,
    mixify: bool = false,
): Future[lightpush_protocol.WakuLightPushResult] {.async.} =
  ## Internal dispatch: publish via the lightpush client (optionally over a mix
  ## connection) or, failing that, via the self-hosted lightpush service.
  let msgHash = pubsubTopic.computeMessageHash(message).to0xHex()

  if not node.wakuLightpushClient.isNil():
    notice "publishing message with lightpush",
      pubsubTopic = pubsubTopic,
      contentTopic = message.contentTopic,
      target_peer_id = peer.peerId,
      msg_hash = msgHash,
      mixify = mixify
    if mixify: #indicates we want to use mix to send the message
      #TODO: How to handle multiple addresses?
      let conn = node.wakuMix.toConnection(
        MixDestination.init(peer.peerId, peer.addrs[0]),
        WakuLightPushCodec,
        Opt.some(
          MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1)))
          # indicating we only want a single path to be used for reply hence numSurbs = 1
        ),
      ).valueOr:
        error "could not create mix connection"
        return lighpushErrorResult(
          LightPushErrorCode.SERVICE_NOT_AVAILABLE,
          "Waku lightpush with mix not available",
        )

      return await node.wakuLightpushClient.publishWithConn(
        pubsubTopic, message, conn, peer.peerId
      )
    else:
      return await node.wakuLightpushClient.publish(some(pubsubTopic), message, peer)

  if not node.wakuLightPush.isNil():
    if mixify:
      error "mixify is not supported with self hosted lightpush"
      return lighpushErrorResult(
        LightPushErrorCode.SERVICE_NOT_AVAILABLE,
        "Waku lightpush with mix not available",
      )
    notice "publishing message with self hosted lightpush",
      pubsubTopic = pubsubTopic,
      contentTopic = message.contentTopic,
      target_peer_id = peer.peerId,
      msg_hash = msgHash
    return
      await node.wakuLightPush.handleSelfLightPushRequest(some(pubsubTopic), message)

proc lightpushPublish*(
    node: WakuNode,
    pubsubTopic: Option[PubsubTopic],
    message: WakuMessage,
    peerOpt: Option[RemotePeerInfo] = none(RemotePeerInfo),
    mixify: bool = false,
): Future[lightpush_protocol.WakuLightPushResult] {.async.} =
  ## Publish via lightpush v2. When `pubsubTopic` is none, the topic is derived
  ## from the message content topic via autosharding. When `peerOpt` is none,
  ## targets self (service mode) or a selected remote peer (client mode).
  if node.wakuLightpushClient.isNil() and node.wakuLightPush.isNil():
    error "failed to publish message as lightpush not available"
    return lighpushErrorResult(
      LightPushErrorCode.SERVICE_NOT_AVAILABLE, "Waku lightpush not available"
    )
  if mixify and node.wakuMix.isNil():
    error "failed to publish message using mix as mix protocol is not mounted"
    return lighpushErrorResult(
      LightPushErrorCode.SERVICE_NOT_AVAILABLE, "Waku lightpush with mix not available"
    )
  let toPeer: RemotePeerInfo = peerOpt.valueOr:
    if not node.wakuLightPush.isNil():
      RemotePeerInfo.init(node.peerId())
    elif not node.wakuLightpushClient.isNil():
      node.peerManager.selectPeer(WakuLightPushCodec).valueOr:
        let msg = "no suitable remote peers"
        error "failed to publish message", msg = msg
        return lighpushErrorResult(LightPushErrorCode.NO_PEERS_TO_RELAY, msg)
    else:
      return lighpushErrorResult(
        LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
      )

  let pubsubForPublish = pubsubTopic.valueOr:
    if node.wakuAutoSharding.isNone():
      let msg = "Pubsub topic must be specified when static sharding is enabled"
      error "lightpush publish error", error = msg
      return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)

    let parsedTopic = NsContentTopic.parse(message.contentTopic).valueOr:
      let msg = "Invalid content-topic:" & $error
      error "lightpush request handling error", error = msg
      return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)

    node.wakuAutoSharding.get().getShard(parsedTopic).valueOr:
      let msg = "Autosharding error: " & error
      error "lightpush publish error", error = msg
      return lighpushErrorResult(LightPushErrorCode.INTERNAL_SERVER_ERROR, msg)

  return await lightpushPublishHandler(node, pubsubForPublish, message, toPeer, mixify)

## Waku RLN Relay
proc mountRlnRelay*(
    node: WakuNode,
    rlnConf: WakuRlnConfig,
    spamHandler = none(SpamHandler),
    registrationHandler = none(RegistrationHandler),
) {.async.} =
  ## Mount RLN relay on top of an already-mounted WakuRelay and register the
  ## RLN message validator. Raises CatchableError on setup failure.
  info "mounting rln relay"

  if node.wakuRelay.isNil():
    raise newException(
      CatchableError, "WakuRelay protocol is not mounted, cannot mount WakuRlnRelay"
    )

  let rlnRelayRes = await WakuRlnRelay.new(rlnConf, registrationHandler)
  if rlnRelayRes.isErr():
    raise
      newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error)
  let rlnRelay = rlnRelayRes.get()
  # NOTE(review): this limit violation is only logged; mounting still proceeds —
  # confirm that is intended.
  if (rlnConf.userMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit):
    error "rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract"
  let validator = generateRlnValidator(rlnRelay, spamHandler)

  # register rln validator as default validator
  debug "Registering RLN validator"
  node.wakuRelay.addValidator(validator, "RLN validation failed")

  node.wakuRlnRelay = rlnRelay
## Waku peer-exchange

proc mountPeerExchange*(
    node: WakuNode,
    cluster: Option[uint16] = none(uint16),
    rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit,
) {.async: (raises: []).} =
  ## Mount the peer-exchange service protocol; failures are logged, not raised.
  info "mounting waku peer exchange"

  node.wakuPeerExchange =
    WakuPeerExchange.new(node.peerManager, cluster, some(rateLimit))

  if node.started:
    try:
      await node.wakuPeerExchange.start()
    except CatchableError:
      error "failed to start wakuPeerExchange", error = getCurrentExceptionMsg()

  try:
    node.switch.mount(node.wakuPeerExchange, protocolMatcher(WakuPeerExchangeCodec))
  except LPError:
    error "failed to mount wakuPeerExchange", error = getCurrentExceptionMsg()

proc mountPeerExchangeClient*(node: WakuNode) {.async: (raises: []).} =
  info "mounting waku peer exchange client"
  if node.wakuPeerExchangeClient.isNil():
    node.wakuPeerExchangeClient = WakuPeerExchangeClient.new(node.peerManager)

proc fetchPeerExchangePeers*(
    node: WakuNode, amount = DefaultPXNumPeersReq
): Future[Result[int, PeerExchangeResponseStatus]] {.async: (raises: []).} =
  ## Request up to `amount` peers via peer exchange and add the ones carrying a
  ## decodable ENR to the peer manager. Returns the number of peers added.
  if node.wakuPeerExchangeClient.isNil():
    error "could not get peers from px, waku peer-exchange-client is nil"
    return err(
      (
        status_code: PeerExchangeResponseStatusCode.SERVICE_UNAVAILABLE,
        status_desc: some("PeerExchangeClient is not mounted"),
      )
    )

  info "Retrieving peer info via peer exchange protocol", amount
  let pxPeersRes = await node.wakuPeerExchangeClient.request(amount)
  if pxPeersRes.isOk():
    var validPeers = 0
    let peers = pxPeersRes.get().peerInfos
    for pi in peers:
      var record: enr.Record
      if enr.fromBytes(record, pi.enr):
        # FIX: avoid a raising `.get` on a failed ENR-to-peer-info conversion
        # inside a `raises: []` proc; skip such records instead.
        let remoteInfo = record.toRemotePeerInfo()
        if remoteInfo.isOk():
          node.peerManager.addPeer(remoteInfo.get(), PeerExchange)
          validPeers += 1
    info "Retrieved peer info via peer exchange protocol",
      validPeers = validPeers, totalPeers = peers.len
    return ok(validPeers)
  else:
    warn "failed to retrieve peer info via peer exchange protocol",
      error = pxPeersRes.error
    return err(pxPeersRes.error)

proc peerExchangeLoop(node: WakuNode) {.async.} =
  ## Periodically (every minute once started) refresh peers via peer exchange.
  while true:
    if not node.started:
      await sleepAsync(5.seconds)
      continue
    (await node.fetchPeerExchangePeers()).isOkOr:
      warn "Cannot fetch peers from peer exchange", cause = error
    await sleepAsync(1.minutes)

proc startPeerExchangeLoop*(node: WakuNode) =
  if node.wakuPeerExchangeClient.isNil():
    error "startPeerExchangeLoop: Peer Exchange is not mounted"
    return
  info "Starting peer exchange loop"
  node.wakuPeerExchangeClient.pxLoopHandle = node.peerExchangeLoop()

# TODO: Move to application module (e.g., wakunode2.nim)
proc setPeerExchangePeer*(
    node: WakuNode, peer: RemotePeerInfo | MultiAddress | string
) =
  ## Manually add a peer-exchange peer to the peer manager.
  if node.wakuPeerExchange.isNil():
    error "could not set peer, waku peer-exchange is nil"
    return

  info "Set peer-exchange peer", peer = peer

  let remotePeerRes = parsePeerInfo(peer)
  if remotePeerRes.isErr():
    error "could not parse peer info", error = remotePeerRes.error
    return

  node.peerManager.addPeer(remotePeerRes.value, PeerExchange)
  waku_px_peers.inc()

## Other protocols

proc mountLibp2pPing*(node: WakuNode) {.async: (raises: []).} =
  ## Mount the libp2p ping protocol; failures are logged, not raised.
  info "mounting libp2p ping protocol"

  try:
    node.libp2pPing = Ping.new(rng = node.rng)
  except Exception:
    error "failed to create ping", error = getCurrentExceptionMsg()

  if node.started:
    # Node has started already. Let's start ping too.
    try:
      await node.libp2pPing.start()
    except CatchableError:
      error "failed to start libp2pPing", error = getCurrentExceptionMsg()

  try:
    node.switch.mount(node.libp2pPing)
  except LPError:
    error "failed to mount libp2pPing", error = getCurrentExceptionMsg()

proc pingPeer(node: WakuNode, peerId: PeerId): Future[Result[void, string]] {.async.} =
  ## Ping a single peer and return the result

  try:
    # Establish a stream
    let stream = (await node.peerManager.dialPeer(peerId, PingCodec)).valueOr:
      error "pingPeer: failed dialing peer", peerId = peerId
      return err("pingPeer failed dialing peer peerId: " & $peerId)
    defer:
      # Always close the stream
      try:
        await stream.close()
      except CatchableError as e:
        debug "Error closing ping connection", peerId = peerId, error = e.msg

    # Perform ping
    let pingDuration = await node.libp2pPing.ping(stream)

    trace "Ping successful", peerId = peerId, duration = pingDuration
    return ok()
  except CatchableError as e:
    error "pingPeer: exception raised pinging peer", peerId = peerId, error = e.msg
    return err("pingPeer: exception raised pinging peer: " & e.msg)

proc selectRandomPeers*(peers: seq[PeerId], numRandomPeers: int): seq[PeerId] =
  ## Return up to `numRandomPeers` peers drawn uniformly without replacement.
  var randomPeers = peers
  shuffle(randomPeers)
  return randomPeers[0 ..< min(len(randomPeers), numRandomPeers)]

# Returns the number of succesful pings performed
proc parallelPings*(node: WakuNode, peerIds: seq[PeerId]): Future[int] {.async.} =
  if len(peerIds) == 0:
    return 0

  var pingFuts: seq[Future[Result[void, string]]]

  # Create ping futures for each peer
  for i, peerId in peerIds:
    let fut = pingPeer(node, peerId)
    pingFuts.add(fut)

  # Wait for all pings to complete (bounded by a 5 s overall timeout)
  discard await allFutures(pingFuts).withTimeout(5.seconds)

  var successCount = 0
  for fut in pingFuts:
    if not fut.completed() or fut.failed():
      continue

    let res = fut.read()
    if res.isOk():
      successCount.inc()

  return successCount

proc mountRendezvous*(node: WakuNode, clusterId: uint16) {.async: (raises: []).} =
  ## Mount the rendezvous discovery protocol; failures are logged, not raised.
  info "mounting rendezvous discovery protocol"

  node.wakuRendezvous = WakuRendezVous.new(
    node.switch,
    node.peerManager,
    clusterId,
    node.getShardsGetter(),
    node.getCapabilitiesGetter(),
  ).valueOr:
    error "initializing waku rendezvous failed", error = error
    return

  if node.started:
    await node.wakuRendezvous.start()
proc isBindIpWithZeroPort(inputMultiAdd: MultiAddress): bool =
  ## True when the multiaddress binds an any/loopback IP with an OS-assigned
  ## (zero) TCP port, i.e. the final listen port is not yet known.
  let inputStr = $inputMultiAdd
  return inputStr.contains("0.0.0.0/tcp/0") or inputStr.contains("127.0.0.1/tcp/0")

proc updateAnnouncedAddrWithPrimaryIpAddr*(node: WakuNode): Result[void, string] =
  ## Rewrite announced (and switch) addresses, replacing 0.0.0.0/127.0.0.1
  ## placeholders with the host's primary IP, and log the final listen and
  ## announced addresses.
  let peerInfo = node.switch.peerInfo
  var announcedStr = ""
  var listenStr = ""
  var localIp = "0.0.0.0"

  try:
    localIp = $getPrimaryIPAddr()
  except Exception as e:
    warn "Could not retrieve localIp", msg = e.msg

  info "PeerInfo", peerId = peerInfo.peerId, addrs = peerInfo.addrs

  ## Update the WakuNode addresses
  var newAnnouncedAddresses = newSeq[MultiAddress](0)
  for address in node.announcedAddresses:
    ## Replace "0.0.0.0" or "127.0.0.1" with the localIp
    let newAddr = ($address).replace("0.0.0.0", localIp).replace("127.0.0.1", localIp)
    let fulladdr = "[" & $newAddr & "/p2p/" & $peerInfo.peerId & "]"
    announcedStr &= fulladdr
    let newMultiAddr = MultiAddress.init(newAddr).valueOr:
      return err("error in updateAnnouncedAddrWithPrimaryIpAddr: " & $error)
    newAnnouncedAddresses.add(newMultiAddr)

  node.announcedAddresses = newAnnouncedAddresses

  ## Update the Switch addresses
  node.switch.peerInfo.addrs = newAnnouncedAddresses

  for transport in node.switch.transports:
    for address in transport.addrs:
      let fulladdr = "[" & $address & "/p2p/" & $peerInfo.peerId & "]"
      listenStr &= fulladdr

  info "Listening on",
    full = listenStr, localIp = localIp, switchAddress = $(node.switch.peerInfo.addrs)
  info "Announcing addresses", full = announcedStr
  info "DNS: discoverable ENR ", enr = node.enr.toUri()

  return ok()

proc start*(node: WakuNode) {.async.} =
  ## Starts a created Waku Node and
  ## all its mounted protocols.

  waku_version.set(1, labelValues = [git_version])
  info "Starting Waku node", version = git_version

  var zeroPortPresent = false
  for address in node.announcedAddresses:
    if isBindIpWithZeroPort(address):
      zeroPortPresent = true

  # Perform relay-specific startup tasks TODO: this should be rethought
  if not node.wakuRelay.isNil():
    await node.startRelay()

  if not node.wakuMix.isNil():
    node.wakuMix.start()

  if not node.wakuMetadata.isNil():
    node.wakuMetadata.start()

  if not node.wakuStoreResume.isNil():
    await node.wakuStoreResume.start()

  if not node.wakuRendezvous.isNil():
    await node.wakuRendezvous.start()

  if not node.wakuStoreReconciliation.isNil():
    node.wakuStoreReconciliation.start()

  if not node.wakuStoreTransfer.isNil():
    node.wakuStoreTransfer.start()

  ## The switch uses this mapper to update peer info addrs
  ## with announced addrs after start
  let addressMapper = proc(
      listenAddrs: seq[MultiAddress]
  ): Future[seq[MultiAddress]] {.gcsafe, async: (raises: [CancelledError]).} =
    return node.announcedAddresses
  node.switch.peerInfo.addressMappers.add(addressMapper)

  ## The switch will update addresses after start using the addressMapper
  await node.switch.start()

  node.started = true

  if not zeroPortPresent:
    updateAnnouncedAddrWithPrimaryIpAddr(node).isOkOr:
      error "failed update announced addr", error = $error
  else:
    info "Listening port is dynamically allocated, address and ENR generation postponed"

  info "Node started successfully"

proc stop*(node: WakuNode) {.async.} =
  ## By stopping the switch we are stopping all the underlying mounted protocols
  await node.switch.stop()

  node.peerManager.stop()

  if not node.wakuRlnRelay.isNil():
    try:
      await node.wakuRlnRelay.stop() ##
this can raise an exception + except Exception: + error "exception stopping the node", error = getCurrentExceptionMsg() + + if not node.wakuArchive.isNil(): + await node.wakuArchive.stopWait() + + if not node.wakuStoreResume.isNil(): + await node.wakuStoreResume.stopWait() + + if not node.wakuStoreReconciliation.isNil(): + node.wakuStoreReconciliation.stop() + + if not node.wakuStoreTransfer.isNil(): + node.wakuStoreTransfer.stop() + + if not node.wakuPeerExchange.isNil() and not node.wakuPeerExchange.pxLoopHandle.isNil(): + await node.wakuPeerExchange.pxLoopHandle.cancelAndWait() + + if not node.wakuPeerExchangeClient.isNil() and + not node.wakuPeerExchangeClient.pxLoopHandle.isNil(): + await node.wakuPeerExchangeClient.pxLoopHandle.cancelAndWait() + + if not node.wakuRendezvous.isNil(): + await node.wakuRendezvous.stopWait() + + node.started = false + +proc isReady*(node: WakuNode): Future[bool] {.async: (raises: [Exception]).} = + if node.wakuRlnRelay == nil: + return true + return await node.wakuRlnRelay.isReady() + ## TODO: add other protocol `isReady` checks diff --git a/third-party/nwaku/waku/node/waku_switch.nim b/third-party/nwaku/waku/node/waku_switch.nim new file mode 100644 index 0000000..4f3642e --- /dev/null +++ b/third-party/nwaku/waku/node/waku_switch.nim @@ -0,0 +1,117 @@ +# Waku Switch utils. 
+{.push raises: [].} + +import + std/options, + chronos, + chronicles, + eth/keys, + libp2p/crypto/crypto, + libp2p/protocols/pubsub/gossipsub, + libp2p/protocols/rendezvous, + libp2p/protocols/connectivity/relay/relay, + libp2p/nameresolving/nameresolver, + libp2p/builders, + libp2p/switch, + libp2p/transports/[transport, tcptransport, wstransport] + +# override nim-libp2p default value (which is also 1) +const MaxConnectionsPerPeer* = 1 + +proc withWsTransport*(b: SwitchBuilder): SwitchBuilder = + b.withTransport( + proc(upgr: Upgrade, privateKey: crypto.PrivateKey): Transport = + WsTransport.new(upgr) + ) + +proc getSecureKey(path: string): TLSPrivateKey {.raises: [Defect, IOError].} = + trace "Key path is.", path = path + let stringkey: string = readFile(path) + try: + let key = TLSPrivateKey.init(stringkey) + return key + except TLSStreamProtocolError as exc: + debug "exception raised from getSecureKey", err = exc.msg + +proc getSecureCert(path: string): TLSCertificate {.raises: [Defect, IOError].} = + trace "Certificate path is.", path = path + let stringCert: string = readFile(path) + try: + let cert = TLSCertificate.init(stringCert) + return cert + except TLSStreamProtocolError as exc: + debug "exception raised from getSecureCert", err = exc.msg + +proc withWssTransport*( + b: SwitchBuilder, secureKeyPath: string, secureCertPath: string +): SwitchBuilder {.raises: [Defect, IOError].} = + let key: TLSPrivateKey = getSecureKey(secureKeyPath) + let cert: TLSCertificate = getSecureCert(secureCertPath) + b.withWsTransport( + tlsPrivateKey = key, + tlsCertificate = cert, + {TLSFlags.NoVerifyHost, TLSFlags.NoVerifyServerName}, # THIS IS INSECURE, NO? 
+ ) + +proc newWakuSwitch*( + privKey = none(crypto.PrivateKey), + address = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet(), + wsAddress = none(MultiAddress), + secureManagers: openarray[SecureProtocol] = [SecureProtocol.Noise], + transportFlags: set[ServerFlags] = {}, + rng: ref HmacDrbgContext, + inTimeout: Duration = 5.minutes, + outTimeout: Duration = 5.minutes, + maxConnections = MaxConnections, + maxIn = -1, + maxOut = -1, + maxConnsPerPeer = MaxConnectionsPerPeer, + nameResolver: NameResolver = nil, + sendSignedPeerRecord = false, + wssEnabled: bool = false, + secureKeyPath: string = "", + secureCertPath: string = "", + agentString = none(string), # defaults to nim-libp2p version + peerStoreCapacity = none(int), # defaults to 1.25 maxConnections + rendezvous: RendezVous = nil, + circuitRelay: Relay, +): Switch {.raises: [Defect, IOError, LPError].} = + var b = SwitchBuilder + .new() + .withRng(rng) + .withMaxConnections(maxConnections) + .withMaxIn(maxIn) + .withMaxOut(maxOut) + .withMaxConnsPerPeer(maxConnsPerPeer) + .withYamux() + .withMplex(inTimeout, outTimeout) + .withNoise() + .withTcpTransport(transportFlags) + .withNameResolver(nameResolver) + .withSignedPeerRecord(sendSignedPeerRecord) + .withCircuitRelay(circuitRelay) + .withAutonat() + + if peerStoreCapacity.isSome(): + b = b.withPeerStore(peerStoreCapacity.get()) + else: + let defaultPeerStoreCapacity = int(maxConnections) * 5 + b = b.withPeerStore(defaultPeerStoreCapacity) + if agentString.isSome(): + b = b.withAgentVersion(agentString.get()) + if privKey.isSome(): + b = b.withPrivateKey(privKey.get()) + if wsAddress.isSome(): + b = b.withAddresses(@[wsAddress.get(), address]) + + if wssEnabled: + b = b.withWssTransport(secureKeyPath, secureCertPath) + else: + b = b.withWsTransport() + else: + b = b.withAddress(address) + + if not rendezvous.isNil(): + b = b.withRendezVous(rendezvous) + + b.build() diff --git a/third-party/nwaku/waku/utils/DEPRECATION_NOTICE.md 
b/third-party/nwaku/waku/utils/DEPRECATION_NOTICE.md new file mode 100644 index 0000000..391ec92 --- /dev/null +++ b/third-party/nwaku/waku/utils/DEPRECATION_NOTICE.md @@ -0,0 +1,6 @@ +# :warning: DEPRECATION NOTICE :warning: + +The `utils` module has been marked as deprecated. +This package submodules are planned to be moved to different modules. + +**No new sub-modules must be added to this folder.** diff --git a/third-party/nwaku/waku/utils/collector.nim b/third-party/nwaku/waku/utils/collector.nim new file mode 100644 index 0000000..3586a2d --- /dev/null +++ b/third-party/nwaku/waku/utils/collector.nim @@ -0,0 +1,40 @@ +{.push raises: [].} + +import metrics + +proc parseCollectorIntoF64(collector: SimpleCollector): float64 {.gcsafe, raises: [].} = + {.gcsafe.}: + var total = 0.float64 + for metrics in collector.metrics: + for metric in metrics: + try: + total = total + metric.value + except KeyError: + discard + return total + +template parseAndAccumulate*(collector: Collector, cumulativeValue: float64): float64 = + ## This template is used to get metrics in a window + ## according to a cumulative value passed in + {.gcsafe.}: + let total = parseCollectorIntoF64(collector) + let freshCount = total - cumulativeValue + cumulativeValue = total + freshCount + +template parseAndAccumulate*( + collector: typedesc[IgnoredCollector], cumulativeValue: float64 +): float64 = + ## Used when metrics are disabled (undefined `metrics` compilation flag) + 0.0 + +template collectorAsF64*(collector: Collector): float64 = + ## This template is used to get metrics from 0 + ## Serves as a wrapper for parseCollectorIntoF64 which is gcsafe + {.gcsafe.}: + let total = parseCollectorIntoF64(collector) + total + +template collectorAsF64*(collector: typedesc[IgnoredCollector]): float64 = + ## Used when metrics are disabled (undefined `metrics` compilation flag) + 0.0 diff --git a/third-party/nwaku/waku/utils/noise.nim b/third-party/nwaku/waku/utils/noise.nim new file mode 100644 index 
0000000..2e01592 --- /dev/null +++ b/third-party/nwaku/waku/utils/noise.nim @@ -0,0 +1,40 @@ +{.push raises: [].} + +import results +import ../waku_core, ../waku_noise/noise_types, ../waku_noise/noise_utils + +# Decodes a WakuMessage to a PayloadV2 +# Currently, this is just a wrapper over deserializePayloadV2 and encryption/decryption is done on top (no KeyInfo) +proc decodePayloadV2*( + message: WakuMessage +): Result[PayloadV2, cstring] {.raises: [NoiseMalformedHandshake, NoisePublicKeyError].} = + # We check message version (only 2 is supported in this proc) + case message.version + of 2: + # We attempt to decode the WakuMessage payload + let deserializedPayload2 = deserializePayloadV2(message.payload) + if deserializedPayload2.isOk(): + return ok(deserializedPayload2.get()) + else: + return err("Failed to decode WakuMessage") + else: + return err("Wrong message version while decoding payload") + +# Encodes a PayloadV2 to a WakuMessage +# Currently, this is just a wrapper over serializePayloadV2 and encryption/decryption is done on top (no KeyInfo) +proc encodePayloadV2*( + payload2: PayloadV2, contentTopic: ContentTopic = default(ContentTopic) +): Result[WakuMessage, cstring] {. + raises: [NoiseMalformedHandshake, NoisePublicKeyError] +.} = + # We attempt to encode the PayloadV2 + let serializedPayload2 = serializePayloadV2(payload2) + if not serializedPayload2.isOk(): + return err("Failed to encode PayloadV2") + + # If successful, we create and return a WakuMessage + let msg = WakuMessage( + payload: serializedPayload2.get(), version: 2, contentTopic: contentTopic + ) + + return ok(msg) diff --git a/third-party/nwaku/waku/utils/requests.nim b/third-party/nwaku/waku/utils/requests.nim new file mode 100644 index 0000000..5e5b9d9 --- /dev/null +++ b/third-party/nwaku/waku/utils/requests.nim @@ -0,0 +1,10 @@ +# Request utils. 
+ +{.push raises: [].} + +import bearssl/rand, stew/byteutils + +proc generateRequestId*(rng: ref HmacDrbgContext): string = + var bytes: array[10, byte] + hmacDrbgGenerate(rng[], bytes) + return toHex(bytes) diff --git a/third-party/nwaku/waku/utils/tableutils.nim b/third-party/nwaku/waku/utils/tableutils.nim new file mode 100644 index 0000000..dacea7d --- /dev/null +++ b/third-party/nwaku/waku/utils/tableutils.nim @@ -0,0 +1,33 @@ +import std/tables, stew/templateutils + +template keepItIf*[A, B](tableParam: var Table[A, B], itPredicate: untyped) = + bind evalTemplateParamOnce + evalTemplateParamOnce(tableParam, t): + var itemsToDelete: seq[A] + var key {.inject.}: A + var val {.inject.}: B + + for k, v in t.mpairs(): + key = k + val = v + if not itPredicate: + itemsToDelete.add(key) + + for item in itemsToDelete: + t.del(item) + +template keepItIf*[A, B](tableParam: var TableRef[A, B], itPredicate: untyped) = + bind evalTemplateParamOnce + evalTemplateParamOnce(tableParam, t): + var itemsToDelete: seq[A] + let key {.inject.}: A + let val {.inject.}: B + + for k, v in t[].mpairs(): + key = k + val = v + if not itPredicate: + itemsToDelete.add(key) + + for item in itemsToDelete: + t[].del(item) diff --git a/third-party/nwaku/waku/waku_api.nim b/third-party/nwaku/waku/waku_api.nim new file mode 100644 index 0000000..b584bfa --- /dev/null +++ b/third-party/nwaku/waku/waku_api.nim @@ -0,0 +1,3 @@ +import ./waku_api/message_cache, ./waku_api/rest, ./waku_api/json_rpc + +export message_cache, rest diff --git a/third-party/nwaku/waku/waku_api/handlers.nim b/third-party/nwaku/waku/waku_api/handlers.nim new file mode 100644 index 0000000..4fc922f --- /dev/null +++ b/third-party/nwaku/waku/waku_api/handlers.nim @@ -0,0 +1,37 @@ +{.push raises: [].} + +import chronos, std/[options, sequtils], results +import ../discovery/waku_discv5, ../waku_relay, ../waku_core, ./message_cache + +### Discovery + +type DiscoveryHandler* = + proc(): Future[Result[Option[RemotePeerInfo], 
string]] {.async, closure.} + +proc defaultDiscoveryHandler*( + discv5: WakuDiscoveryV5, cap: Capabilities +): DiscoveryHandler = + proc(): Future[Result[Option[RemotePeerInfo], string]] {.async, closure.} = + #Discv5 is already filtering peers by shards no need to pass a predicate. + let findPeers = discv5.findRandomPeers() + + if not await findPeers.withTimeout(60.seconds): + return err("discovery process timed out!") + + var peers = findPeers.read() + + peers.keepItIf(it.supportsCapability(cap)) + + if peers.len == 0: + return ok(none(RemotePeerInfo)) + + let remotePeerInfo = peers[0].toRemotePeerInfo().valueOr: + return err($error) + + return ok(some(remotePeerInfo)) + +### Message Cache + +proc messageCacheHandler*(cache: MessageCache): WakuRelayHandler = + return proc(pubsubTopic: string, msg: WakuMessage): Future[void] {.async, closure.} = + cache.addMessage(pubsubTopic, msg) diff --git a/third-party/nwaku/waku/waku_api/message_cache.nim b/third-party/nwaku/waku/waku_api/message_cache.nim new file mode 100644 index 0000000..acef5a7 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/message_cache.nim @@ -0,0 +1,278 @@ +{.push raises: [].} + +import + std/[sequtils, sugar, algorithm, options], + results, + chronicles, + chronos, + libp2p/protocols/pubsub +import ../waku_core + +logScope: + topics = "waku node message_cache" + +const DefaultMessageCacheCapacity: int = 50 + +type MessageCache* = ref object + pubsubTopics: seq[PubsubTopic] + contentTopics: seq[ContentTopic] + + pubsubIndex: seq[tuple[pubsubIdx: int, msgIdx: int]] + contentIndex: seq[tuple[contentIdx: int, msgIdx: int]] + + messages: seq[WakuMessage] + + capacity: int + +func `$`*(self: MessageCache): string = + "Messages: " & $self.messages.len & " \nPubsubTopics: " & $self.pubsubTopics & + " \nContentTopics: " & $self.contentTopics & " \nPubsubIndex: " & $self.pubsubIndex & + " \nContentIndex: " & $self.contentIndex + +func init*(T: type MessageCache, capacity = DefaultMessageCacheCapacity): T 
= + MessageCache(capacity: capacity) + +proc messagesCount*(self: MessageCache): int = + self.messages.len + +proc pubsubTopicCount*(self: MessageCache): int = + self.pubsubTopics.len + +proc contentTopicCount*(self: MessageCache): int = + self.contentTopics.len + +proc pubsubSearch(self: MessageCache, pubsubTopic: PubsubTopic): Option[int] = + # Return some with the index if found none otherwise. + + for i, topic in self.pubsubTopics: + if topic == pubsubTopic: + return some(i) + + return none(int) + +proc contentSearch(self: MessageCache, contentTopic: ContentTopic): Option[int] = + # Return some with the index if found none otherwise. + + for i, topic in self.contentTopics: + if topic == contentTopic: + return some(i) + + return none(int) + +proc isPubsubSubscribed*(self: MessageCache, pubsubTopic: PubsubTopic): bool = + self.pubsubSearch(pubsubTopic).isSome() + +proc isContentSubscribed*(self: MessageCache, contentTopic: ContentTopic): bool = + self.contentSearch(contentTopic).isSome() + +proc pubsubSubscribe*(self: MessageCache, pubsubTopic: PubsubTopic) = + if self.pubsubSearch(pubsubTopic).isNone(): + self.pubsubTopics.add(pubsubTopic) + +proc contentSubscribe*(self: MessageCache, contentTopic: ContentTopic) = + if self.contentSearch(contentTopic).isNone(): + self.contentTopics.add(contentTopic) + +proc removeMessage(self: MessageCache, idx: int) = + # get last index because del() is a swap + let lastIndex = self.messages.high + + self.messages.del(idx) + + # update indices + var j = self.pubsubIndex.high + while j > -1: + let (pId, mId) = self.pubsubIndex[j] + + if mId == idx: + self.pubsubIndex.del(j) + elif mId == lastIndex: + self.pubsubIndex[j] = (pId, idx) + + dec(j) + + j = self.contentIndex.high + while j > -1: + let (cId, mId) = self.contentIndex[j] + + if mId == idx: + self.contentIndex.del(j) + elif mId == lastIndex: + self.contentIndex[j] = (cId, idx) + + dec(j) + +proc pubsubUnsubscribe*(self: MessageCache, pubsubTopic: PubsubTopic) = + let 
pubsubIdxOp = self.pubsubSearch(pubsubTopic) + + let pubsubIdx = + if pubsubIdxOp.isSome(): + pubsubIdxOp.get() + else: + return + + let lastIndex = self.pubsubTopics.high + self.pubsubTopics.del(pubsubIdx) + + var msgIndices = newSeq[int](0) + + var j = self.pubsubIndex.high + while j > -1: + let (pId, mId) = self.pubsubIndex[j] + + if pId == pubsubIdx: + # remove index for this topic + self.pubsubIndex.del(j) + msgIndices.add(mId) + elif pId == lastIndex: + # swap the index because pubsubTopics.del() is a swap + self.pubsubIndex[j] = (pubsubIdx, mId) + + dec(j) + + # check if messages on this pubsub topic are indexed by any content topic, if not remove them. + for mId in msgIndices.sorted(SortOrder.Descending): + if not self.contentIndex.anyIt(it.msgIdx == mId): + self.removeMessage(mId) + +proc contentUnsubscribe*(self: MessageCache, contentTopic: ContentTopic) = + let contentIdxOP = self.contentSearch(contentTopic) + + let contentIdx = + if contentIdxOP.isSome(): + contentIdxOP.get() + else: + return + + let lastIndex = self.contentTopics.high + self.contentTopics.del(contentIdx) + + var msgIndices = newSeq[int](0) + + var j = self.contentIndex.high + while j > -1: + let (cId, mId) = self.contentIndex[j] + + if cId == contentIdx: + # remove indices for this topic + self.contentIndex.del(j) + msgIndices.add(mId) + elif cId == lastIndex: + # swap the indices because contentTopics.del() is a swap + self.contentIndex[j] = (contentIdx, mId) + + dec(j) + + # check if messages on this content topic are indexed by any pubsub topic, if not remove them. 
+ for mId in msgIndices.sorted(SortOrder.Descending): + if not self.pubsubIndex.anyIt(it.msgIdx == mId): + self.removeMessage(mId) + +proc reset*(self: MessageCache) = + self.messages.setLen(0) + self.pubsubTopics.setLen(0) + self.contentTopics.setLen(0) + self.pubsubIndex.setLen(0) + self.contentIndex.setLen(0) + +proc addMessage*(self: MessageCache, pubsubTopic: PubsubTopic, msg: WakuMessage) = + ## Idempotent message addition. + + var oldestTime = int64.high + var oldestMsg = int.high + for i, message in self.messages.reversed: + if message == msg: + return + + if message.timestamp < oldestTime: + oldestTime = message.timestamp + oldestMsg = i + + # reverse index + oldestMsg = self.messages.high - oldestMsg + + var pubsubIdxOp = self.pubsubSearch(pubsubTopic) + var contentIdxOp = self.contentSearch(msg.contentTopic) + + if pubsubIdxOp.isNone() and contentIdxOp.isNone(): + return + + let pubsubIdx = + if pubsubIdxOp.isNone(): + self.pubsubTopics.add(pubsubTopic) + self.pubsubTopics.high + else: + pubsubIdxOp.get() + + let contentIdx = + if contentIdxOp.isNone(): + self.contentTopics.add(msg.contentTopic) + self.contentTopics.high + else: + contentIdxOp.get() + + # add the message, make space if needed + if self.messages.len >= self.capacity: + self.removeMessage(oldestMsg) + + let msgIdx = self.messages.len + self.messages.add(msg) + + self.pubsubIndex.add((pubsubIdx, msgIdx)) + self.contentIndex.add((contentIdx, msgIdx)) + +proc getMessages*( + self: MessageCache, pubsubTopic: PubsubTopic, clear = false +): Result[seq[WakuMessage], string] = + ## Return all messages on this pubsub topic + + if self.pubsubTopics.len == 0: + return err("not subscribed to any pubsub topics") + + let pubsubIdxOp = self.pubsubSearch(pubsubTopic) + let pubsubIdx = + if pubsubIdxOp.isNone: + return err("not subscribed to this pubsub topic") + else: + pubsubIdxOp.get() + + let msgIndices = collect: + for (pId, mId) in self.pubsubIndex: + if pId == pubsubIdx: + mId + + let messages = 
msgIndices.mapIt(self.messages[it]) + + if clear: + for idx in msgIndices.reversed: + self.removeMessage(idx) + + return ok(messages) + +proc getAutoMessages*( + self: MessageCache, contentTopic: ContentTopic, clear = false +): Result[seq[WakuMessage], string] = + ## Return all messages on this content topic + + if self.contentTopics.len == 0: + return err("not subscribed to any content topics") + + let contentIdxOp = self.contentSearch(contentTopic) + let contentIdx = + if contentIdxOp.isNone(): + return err("not subscribed to this content topic") + else: + contentIdxOp.get() + + let msgIndices = collect: + for (cId, mId) in self.contentIndex: + if cId == contentIdx: + mId + + let messages = msgIndices.mapIt(self.messages[it]) + + if clear: + for idx in msgIndices.sorted(SortOrder.Descending): + self.removeMessage(idx) + + return ok(messages) diff --git a/third-party/nwaku/waku/waku_api/rest/admin/client.nim b/third-party/nwaku/waku/waku_api/rest/admin/client.nim new file mode 100644 index 0000000..87d46dd --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/admin/client.nim @@ -0,0 +1,75 @@ +{.push raises: [].} + +import chronicles, json_serialization, presto/[route, client], stew/byteutils + +import ../serdes, ../rest_serdes, ./types + +export types + +logScope: + topics = "waku node rest admin api" + +proc encodeBytes*(value: seq[string], contentType: string): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc getPeers*(): RestResponse[seq[WakuPeer]] {. + rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodGet +.} + +proc postPeers*( + body: seq[string] +): RestResponse[string] {. + rest, endpoint: "/admin/v1/peers", meth: HttpMethod.MethodPost +.} + +proc getPeerById*( + peerId: string +): RestResponse[WakuPeer] {. + rest, endpoint: "/admin/v1/peer/{peerId}", meth: HttpMethod.MethodGet +.} + +proc getServicePeers*(): RestResponse[seq[WakuPeer]] {. 
+ rest, endpoint: "/admin/v1/peers/service", meth: HttpMethod.MethodGet +.} + +proc getConnectedPeers*(): RestResponse[seq[WakuPeer]] {. + rest, endpoint: "/admin/v1/peers/connected", meth: HttpMethod.MethodGet +.} + +proc getConnectedPeersByShard*( + shardId: uint16 +): RestResponse[seq[WakuPeer]] {. + rest, endpoint: "/admin/v1/peers/connected/on/{shardId}", meth: HttpMethod.MethodGet +.} + +proc getRelayPeers*(): RestResponse[PeersOfShards] {. + rest, endpoint: "/admin/v1/peers/relay", meth: HttpMethod.MethodGet +.} + +proc getRelayPeersByShard*( + shardId: uint16 +): RestResponse[PeersOfShard] {. + rest, endpoint: "/admin/v1/peers/relay/on/{shardId}", meth: HttpMethod.MethodGet +.} + +proc getMeshPeers*(): RestResponse[PeersOfShards] {. + rest, endpoint: "/admin/v1/peers/mesh", meth: HttpMethod.MethodGet +.} + +proc getMeshPeersByShard*( + shardId: uint16 +): RestResponse[PeersOfShard] {. + rest, endpoint: "/admin/v1/peers/mesh/on/{shardId}", meth: HttpMethod.MethodGet +.} + +proc getPeersStats*(): RestResponse[PeerStats] {. + rest, endpoint: "/admin/v1/peers/stats", meth: HttpMethod.MethodGet +.} + +proc getFilterSubscriptions*(): RestResponse[seq[FilterSubscription]] {. + rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet +.} + +proc getFilterSubscriptionsFilterNotMounted*(): RestResponse[string] {. 
+ rest, endpoint: "/admin/v1/filter/subscriptions", meth: HttpMethod.MethodGet +.} diff --git a/third-party/nwaku/waku/waku_api/rest/admin/handlers.nim b/third-party/nwaku/waku/waku_api/rest/admin/handlers.nim new file mode 100644 index 0000000..1721723 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/admin/handlers.nim @@ -0,0 +1,474 @@ +{.push raises: [].} + +import + std/[sets, strformat, sequtils, tables], + chronicles, + chronicles/topics_registry, + json_serialization, + presto/route, + libp2p/[peerinfo, switch, peerid, protocols/pubsub/pubsubpeer] + +import + waku/[ + waku_core, + waku_core/topics/pubsub_topic, + waku_store_legacy/common, + waku_store/common, + waku_filter_v2, + waku_lightpush_legacy/common, + waku_relay, + waku_peer_exchange, + waku_node, + node/peer_manager, + waku_enr/sharding, + ], + ../responses, + ../serdes, + ../rest_serdes, + ./types + +export types + +logScope: + topics = "waku node rest admin api" + +const ROUTE_ADMIN_V1_PEERS_STATS* = "/admin/v1/peers/stats" # provides peer statistics + +const ROUTE_ADMIN_V1_PEERS* = "/admin/v1/peers" # returns all peers +const ROUTE_ADMIN_V1_SINGLE_PEER* = "/admin/v1/peer/{peerId}" + +const ROUTE_ADMIN_V1_SERVICE_PEERS* = "/admin/v1/peers/service" # returns all peers + +const ROUTE_ADMIN_V1_CONNECTED_PEERS* = "/admin/v1/peers/connected" +const ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD* = + "/admin/v1/peers/connected/on/{shardId}" +const ROUTE_ADMIN_V1_RELAY_PEERS* = "/admin/v1/peers/relay" +const ROUTE_ADMIN_V1_RELAY_PEERS_ON_SHARD* = "/admin/v1/peers/relay/on/{shardId}" +const ROUTE_ADMIN_V1_MESH_PEERS* = "/admin/v1/peers/mesh" +const ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD* = "/admin/v1/peers/mesh/on/{shardId}" + +const ROUTE_ADMIN_V1_FILTER_SUBS* = "/admin/v1/filter/subscriptions" + +const ROUTE_ADMIN_V1_POST_LOG_LEVEL* = "/admin/v1/log-level/{logLevel}" + # sets the new log level for the node + +type PeerProtocolTuple = + tuple[ + multiaddr: string, + protocol: string, + shards: seq[uint16], 
+ connected: Connectedness, + agent: string, + origin: PeerOrigin, + ] + +proc tuplesToWakuPeers(peers: var WakuPeers, peersTup: seq[PeerProtocolTuple]) = + for peer in peersTup: + peers.add( + peer.multiaddr, peer.protocol, peer.shards, peer.connected, peer.agent, + peer.origin, + ) + +proc populateAdminPeerInfo( + peers: var WakuPeers, node: WakuNode, codec: Option[string] = none[string]() +) = + if codec.isNone(): + peers = node.peerManager.switch.peerStore.peers().mapIt(WakuPeer.init(it)) + else: + let peersTuples = node.peerManager.switch.peerStore.peers(codec.get()).mapIt( + ( + multiaddr: constructMultiaddrStr(it), + protocol: codec.get(), + shards: it.getShards(), + connected: it.connectedness, + agent: it.agent, + origin: it.origin, + ) + ) + tuplesToWakuPeers(peers, peersTuples) + +proc populateAdminPeerInfoForAll(node: WakuNode): WakuPeers = + var peers: WakuPeers = @[] + populateAdminPeerInfo(peers, node) + return peers + +proc populateAdminPeerInfoForCodecs(node: WakuNode, codecs: seq[string]): WakuPeers = + var peers: WakuPeers = @[] + + for codec in codecs: + populateAdminPeerInfo(peers, node, some(codec)) + + return peers + +proc getRelayPeers(node: WakuNode): PeersOfShards = + var relayPeers: PeersOfShards = @[] + if not node.wakuRelay.isNil(): + for topic in node.wakuRelay.getSubscribedTopics(): + let relayShard = RelayShard.parse(topic).valueOr: + error "Invalid subscribed topic", error = error, topic = topic + continue + let pubsubPeers = + node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0)) + relayPeers.add( + PeersOfShard( + shard: relayShard.shardId, + peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)), + ) + ) + return relayPeers + +proc getMeshPeers(node: WakuNode): PeersOfShards = + var meshPeers: PeersOfShards = @[] + if not node.wakuRelay.isNil(): + for topic in node.wakuRelay.getSubscribedTopics(): + let relayShard = RelayShard.parse(topic).valueOr: + error "Invalid subscribed topic", error = 
error, topic = topic + continue + let peers = + node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0)) + meshPeers.add( + PeersOfShard( + shard: relayShard.shardId, + peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)), + ) + ) + return meshPeers + +proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) = + router.api(MethodGet, ROUTE_ADMIN_V1_PEERS) do() -> RestApiResponse: + let peers = populateAdminPeerInfoForAll(node) + + let resp = RestApiResponse.jsonResponse(peers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_SINGLE_PEER) do( + peerId: string + ) -> RestApiResponse: + let peerIdString = peerId.valueOr: + return RestApiResponse.badRequest("Invalid argument:" & $error) + + let peerIdVal: PeerId = PeerId.init(peerIdString).valueOr: + return RestApiResponse.badRequest("Invalid argument:" & $error) + + if node.peerManager.switch.peerStore.peerExists(peerIdVal): + let peerInfo = node.peerManager.switch.peerStore.getPeer(peerIdVal) + let peer = WakuPeer.init(peerInfo) + let resp = RestApiResponse.jsonResponse(peer, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + else: + return RestApiResponse.notFound(fmt("Peer with ID {peerId} not found")) + + router.api(MethodGet, ROUTE_ADMIN_V1_SERVICE_PEERS) do() -> RestApiResponse: + let peers = populateAdminPeerInfoForCodecs( + node, + @[ + WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec, + WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec, + WakuReconciliationCodec, WakuTransferCodec, + ], + ) + + let 
resp = RestApiResponse.jsonResponse(peers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS) do() -> RestApiResponse: + let allPeers = populateAdminPeerInfoForAll(node) + + let connectedPeers = allPeers.filterIt(it.connected == Connectedness.Connected) + + let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_CONNECTED_PEERS_ON_SHARD) do( + shardId: uint16 + ) -> RestApiResponse: + let shard = shardId.valueOr: + return RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) + + let allPeers = populateAdminPeerInfoForAll(node) + + let connectedPeers = allPeers.filterIt( + it.connected == Connectedness.Connected and it.shards.contains(shard) + ) + + let resp = RestApiResponse.jsonResponse(connectedPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_RELAY_PEERS) do() -> RestApiResponse: + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + var relayPeers: PeersOfShards = getRelayPeers(node) + + let resp = RestApiResponse.jsonResponse(relayPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building 
the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_RELAY_PEERS_ON_SHARD) do( + shardId: uint16 + ) -> RestApiResponse: + let shard = shardId.valueOr: + return RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) + + if node.wakuMetadata.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Metadata Protocol is not mounted to the node" + ) + + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + # TODO: clusterId and shards should be uint16 across all codebase and probably be defined as a type + let topic = toPubsubTopic( + RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard) + ) + let pubsubPeers = + node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0)) + let relayPeer = PeersOfShard( + shard: shard, peers: toSeq(pubsubPeers).mapIt(WakuPeer.init(it, node.peerManager)) + ) + + let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_MESH_PEERS) do() -> RestApiResponse: + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + var meshPeers: PeersOfShards = getMeshPeers(node) + + let resp = RestApiResponse.jsonResponse(meshPeers, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_MESH_PEERS_ON_SHARD) do( + shardId: uint16 + ) -> RestApiResponse: + let shard = shardId.valueOr: + return 
RestApiResponse.badRequest(fmt("Invalid shardId: {error}")) + + if node.wakuMetadata.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Metadata Protocol is not mounted to the node" + ) + + if node.wakuRelay.isNil(): + return RestApiResponse.serviceUnavailable( + "Error: Relay Protocol is not mounted to the node" + ) + + let topic = toPubsubTopic( + RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard) + ) + let peers = + node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0)) + let relayPeer = PeersOfShard( + shard: shard, peers: toSeq(peers).mapIt(WakuPeer.init(it, node.peerManager)) + ) + + let resp = RestApiResponse.jsonResponse(relayPeer, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + + router.api(MethodGet, ROUTE_ADMIN_V1_PEERS_STATS) do() -> RestApiResponse: + let peers = populateAdminPeerInfoForAll(node) + + var stats: PeerStats = initOrderedTable[string, OrderedTable[string, int]]() + + stats["Sum"] = {"Total peers": peers.len()}.toOrderedTable() + + # stats of connectedness + var connectednessStats = initOrderedTable[string, int]() + connectednessStats[$Connectedness.Connected] = + peers.countIt(it.connected == Connectedness.Connected) + connectednessStats[$Connectedness.NotConnected] = + peers.countIt(it.connected == Connectedness.NotConnected) + connectednessStats[$Connectedness.CannotConnect] = + peers.countIt(it.connected == Connectedness.CannotConnect) + connectednessStats[$Connectedness.CanConnect] = + peers.countIt(it.connected == Connectedness.CanConnect) + stats["By Connectedness"] = connectednessStats + + # stats of relay peers + var totalRelayPeers = 0 + stats["Relay peers"] = block: + let relayPeers = getRelayPeers(node) + var stat = initOrderedTable[string, int]() + for ps in relayPeers: + 
totalRelayPeers += ps.peers.len + stat[$ps.shard] = ps.peers.len + stat["Total relay peers"] = relayPeers.len + stat + + # stats of mesh peers + stats["Mesh peers"] = block: + let meshPeers = getMeshPeers(node) + var totalMeshPeers = 0 + var stat = initOrderedTable[string, int]() + for ps in meshPeers: + totalMeshPeers += ps.peers.len + stat[$ps.shard] = ps.peers.len + stat["Total mesh peers"] = meshPeers.len + stat + + var protoStats = initOrderedTable[string, int]() + protoStats[WakuRelayCodec] = peers.countIt(it.protocols.contains(WakuRelayCodec)) + protoStats[WakuFilterSubscribeCodec] = + peers.countIt(it.protocols.contains(WakuFilterSubscribeCodec)) + protoStats[WakuFilterPushCodec] = + peers.countIt(it.protocols.contains(WakuFilterPushCodec)) + protoStats[WakuStoreCodec] = peers.countIt(it.protocols.contains(WakuStoreCodec)) + protoStats[WakuLegacyStoreCodec] = + peers.countIt(it.protocols.contains(WakuLegacyStoreCodec)) + protoStats[WakuLightPushCodec] = + peers.countIt(it.protocols.contains(WakuLightPushCodec)) + protoStats[WakuLegacyLightPushCodec] = + peers.countIt(it.protocols.contains(WakuLegacyLightPushCodec)) + protoStats[WakuPeerExchangeCodec] = + peers.countIt(it.protocols.contains(WakuPeerExchangeCodec)) + protoStats[WakuReconciliationCodec] = + peers.countIt(it.protocols.contains(WakuReconciliationCodec)) + + stats["By Protocols"] = protoStats + + let resp = RestApiResponse.jsonResponse(stats, status = Http200).valueOr: + error "An error occurred while building the json response: ", error = error + return RestApiResponse.internalServerError( + fmt("An error occurred while building the json response: {error}") + ) + + return resp + +proc installAdminV1PostPeersHandler(router: var RestRouter, node: WakuNode) = + router.api(MethodPost, ROUTE_ADMIN_V1_PEERS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + let peers: seq[string] = decodeRequestBody[seq[string]](contentBody).valueOr: + let e = $error + return 
RestApiResponse.badRequest(fmt("Failed to decode request: {e}")) + + for i, peer in peers: + let peerInfo = parsePeerInfo(peer).valueOr: + let e = $error + return RestApiResponse.badRequest(fmt("Couldn't parse remote peer info: {e}")) + + if not (await node.peerManager.connectPeer(peerInfo, source = "rest")): + return RestApiResponse.badRequest( + fmt("Failed to connect to peer at index: {i} - {peer}") + ) + + return RestApiResponse.ok() + +proc installAdminV1GetFilterSubsHandler(router: var RestRouter, node: WakuNode) = + router.api(MethodGet, ROUTE_ADMIN_V1_FILTER_SUBS) do() -> RestApiResponse: + if node.wakuFilter.isNil(): + return + RestApiResponse.badRequest("Error: Filter Protocol is not mounted to the node") + + var + subscriptions: seq[FilterSubscription] = @[] + filterCriteria: seq[FilterTopic] + + for peerId in node.wakuFilter.subscriptions.peersSubscribed.keys: + filterCriteria = node.wakuFilter.subscriptions.getPeerSubscriptions(peerId).mapIt( + FilterTopic(pubsubTopic: it[0], contentTopic: it[1]) + ) + + subscriptions.add( + FilterSubscription(peerId: $peerId, filterCriteria: filterCriteria) + ) + + let resp = RestApiResponse.jsonResponse(subscriptions, status = Http200) + if resp.isErr(): + error "An error ocurred while building the json respose: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error ocurred while building the json respose: {resp.error}") + ) + + return resp.get() + +proc installAdminV1PostLogLevelHandler(router: var RestRouter, node: WakuNode) = + router.api(MethodPost, ROUTE_ADMIN_V1_POST_LOG_LEVEL) do( + logLevel: string + ) -> RestApiResponse: + when runtimeFilteringEnabled: + if logLevel.isErr() or logLevel.value().isEmptyOrWhitespace(): + return RestApiResponse.badRequest("Invalid log-level, it can’t be empty") + + try: + let newLogLevel = parseEnum[LogLevel](logLevel.value().capitalizeAscii()) + + if newLogLevel < enabledLogLevel: + return RestApiResponse.badRequest( + fmt( + "Log level 
{newLogLevel} is lower than the lowest log level - {enabledLogLevel} - the binary is compiled with." + ) + ) + + setLogLevel(newLogLevel) + except ValueError: + return RestApiResponse.badRequest( + fmt( + "Invalid log-level: {logLevel.value()}. Please specify one of TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL" + ) + ) + + return RestApiResponse.ok() + else: + return RestApiResponse.serviceUnavailable( + "Dynamic Log level management is not enabled in this build. Please recompile with `-d:chronicles_runtime_filtering:on`." + ) + +proc installAdminApiHandlers*(router: var RestRouter, node: WakuNode) = + installAdminV1GetPeersHandler(router, node) + installAdminV1PostPeersHandler(router, node) + installAdminV1GetFilterSubsHandler(router, node) + installAdminV1PostLogLevelHandler(router, node) diff --git a/third-party/nwaku/waku/waku_api/rest/admin/types.nim b/third-party/nwaku/waku/waku_api/rest/admin/types.nim new file mode 100644 index 0000000..483acf8 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/admin/types.nim @@ -0,0 +1,324 @@ +{.push raises: [].} + +import + chronicles, + json_serialization, + json_serialization/std/options, + json_serialization/lexer, + results, + libp2p/protocols/pubsub/pubsubpeer +import waku/[waku_core, node/peer_manager], ../serdes + +#### Types +type WakuPeer* = object + multiaddr*: string + protocols*: seq[string] + shards*: seq[uint16] + connected*: Connectedness + agent*: string + origin*: PeerOrigin + score*: Option[float64] + +type WakuPeers* = seq[WakuPeer] + +type PeersOfShard* = object + shard*: uint16 + peers*: WakuPeers + +type PeersOfShards* = seq[PeersOfShard] + +type FilterTopic* = object + pubsubTopic*: string + contentTopic*: string + +type FilterSubscription* = object + peerId*: string + filterCriteria*: seq[FilterTopic] + +type PeerStats* = OrderedTable[string, OrderedTable[string, int]] + # maps high level grouping to low level grouping of counters + +#### Serialization and deserialization +proc 
writeValue*( + writer: var JsonWriter[RestJson], value: WakuPeer +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("multiaddr", value.multiaddr) + writer.writeField("protocols", value.protocols) + writer.writeField("shards", value.shards) + writer.writeField("connected", value.connected) + writer.writeField("agent", value.agent) + writer.writeField("origin", value.origin) + writer.writeField("score", value.score) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: PeersOfShard +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("shard", value.shard) + writer.writeField("peers", value.peers) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterTopic +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("pubsubTopic", value.pubsubTopic) + writer.writeField("contentTopic", value.contentTopic) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterSubscription +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("peerId", value.peerId) + writer.writeField("filterCriteria", value.filterCriteria) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: OrderedTable[string, int] +) {.raises: [IOError].} = + writer.beginRecord() + for key, value in value.pairs: + writer.writeField(key, value) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], + value: OrderedTable[string, OrderedTable[string, int]], +) {.raises: [IOError].} = + writer.beginRecord() + for group, subTab in value.pairs: + writer.writeField(group, subTab) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var WakuPeer +) {.gcsafe, raises: [SerializationError, IOError].} = + var + multiaddr: Option[string] + protocols: Option[seq[string]] + shards: Option[seq[uint16]] + connected: Option[Connectedness] + agent: Option[string] + origin: 
Option[PeerOrigin] + score: Option[float64] + + for fieldName in readObjectFields(reader): + case fieldName + of "multiaddr": + if multiaddr.isSome(): + reader.raiseUnexpectedField("Multiple `multiaddr` fields found", "WakuPeer") + multiaddr = some(reader.readValue(string)) + of "protocols": + if protocols.isSome(): + reader.raiseUnexpectedField("Multiple `protocols` fields found", "WakuPeer") + protocols = some(reader.readValue(seq[string])) + of "shards": + if shards.isSome(): + reader.raiseUnexpectedField("Multiple `shards` fields found", "WakuPeer") + shards = some(reader.readValue(seq[uint16])) + of "connected": + if connected.isSome(): + reader.raiseUnexpectedField("Multiple `connected` fields found", "WakuPeer") + connected = some(reader.readValue(Connectedness)) + of "agent": + if agent.isSome(): + reader.raiseUnexpectedField("Multiple `agent` fields found", "WakuPeer") + agent = some(reader.readValue(string)) + of "origin": + if origin.isSome(): + reader.raiseUnexpectedField("Multiple `origin` fields found", "WakuPeer") + origin = some(reader.readValue(PeerOrigin)) + of "score": + if score.isSome(): + reader.raiseUnexpectedField("Multiple `score` fields found", "WakuPeer") + score = some(reader.readValue(float64)) + else: + unrecognizedFieldWarning(value) + + if multiaddr.isNone(): + reader.raiseUnexpectedValue("Field `multiaddr` is missing") + + if protocols.isNone(): + reader.raiseUnexpectedValue("Field `protocols` are missing") + + if shards.isNone(): + reader.raiseUnexpectedValue("Field `shards` is missing") + + if connected.isNone(): + reader.raiseUnexpectedValue("Field `connected` is missing") + + if agent.isNone(): + reader.raiseUnexpectedValue("Field `agent` is missing") + + if origin.isNone(): + reader.raiseUnexpectedValue("Field `origin` is missing") + + value = WakuPeer( + multiaddr: multiaddr.get(), + protocols: protocols.get(), + shards: shards.get(), + connected: connected.get(), + agent: agent.get(), + origin: origin.get(), + score: score, + 
) + +proc readValue*( + reader: var JsonReader[RestJson], value: var PeersOfShard +) {.gcsafe, raises: [SerializationError, IOError].} = + var + shard: Option[uint16] + peers: Option[WakuPeers] + + for fieldName in readObjectFields(reader): + case fieldName + of "shard": + if shard.isSome(): + reader.raiseUnexpectedField("Multiple `shard` fields found", "PeersOfShard") + shard = some(reader.readValue(uint16)) + of "peers": + if peers.isSome(): + reader.raiseUnexpectedField("Multiple `peers` fields found", "PeersOfShard") + peers = some(reader.readValue(WakuPeers)) + else: + unrecognizedFieldWarning(value) + + if shard.isNone(): + reader.raiseUnexpectedValue("Field `shard` is missing") + + if peers.isNone(): + reader.raiseUnexpectedValue("Field `peers` are missing") + + value = PeersOfShard(shard: shard.get(), peers: peers.get()) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterTopic +) {.gcsafe, raises: [SerializationError, IOError].} = + var + pubsubTopic: Option[string] + contentTopic: Option[string] + + for fieldName in readObjectFields(reader): + case fieldName + of "pubsubTopic": + if pubsubTopic.isSome(): + reader.raiseUnexpectedField( + "Multiple `pubsubTopic` fields found", "FilterTopic" + ) + pubsubTopic = some(reader.readValue(string)) + of "contentTopic": + if contentTopic.isSome(): + reader.raiseUnexpectedField( + "Multiple `contentTopic` fields found", "FilterTopic" + ) + contentTopic = some(reader.readValue(string)) + else: + unrecognizedFieldWarning(value) + + if pubsubTopic.isNone(): + reader.raiseUnexpectedValue("Field `pubsubTopic` is missing") + + if contentTopic.isNone(): + reader.raiseUnexpectedValue("Field `contentTopic` are missing") + + value = FilterTopic(pubsubTopic: pubsubTopic.get(), contentTopic: contentTopic.get()) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterSubscription +) {.gcsafe, raises: [SerializationError, IOError].} = + var + peerId: Option[string] + filterCriteria: 
Option[seq[FilterTopic]] + + for fieldName in readObjectFields(reader): + case fieldName + of "peerId": + if peerId.isSome(): + reader.raiseUnexpectedField( + "Multiple `peerId` fields found", "FilterSubscription" + ) + peerId = some(reader.readValue(string)) + of "filterCriteria": + if filterCriteria.isSome(): + reader.raiseUnexpectedField( + "Multiple `filterCriteria` fields found", "FilterSubscription" + ) + filterCriteria = some(reader.readValue(seq[FilterTopic])) + else: + unrecognizedFieldWarning(value) + + if peerId.isNone(): + reader.raiseUnexpectedValue("Field `peerId` is missing") + + if filterCriteria.isNone(): + reader.raiseUnexpectedValue("Field `filterCriteria` are missing") + + value = FilterSubscription(peerId: peerId.get(), filterCriteria: filterCriteria.get()) + +proc readValue*( + reader: var JsonReader[RestJson], value: var OrderedTable[string, int] +) {.gcsafe, raises: [SerializationError, IOError].} = + for fieldName in readObjectFields(reader): + let fieldValue = reader.readValue(int) + value[fieldName] = fieldValue + +proc readValue*( + reader: var JsonReader[RestJson], + value: var OrderedTable[string, OrderedTable[string, int]], +) {.gcsafe, raises: [SerializationError, IOError].} = + for fieldName in readObjectFields(reader): + let fieldValue = reader.readValue(OrderedTable[string, int]) + value[fieldName] = fieldValue + +func `==`*(a, b: WakuPeer): bool {.inline.} = + return a.multiaddr == b.multiaddr + +proc init*(T: type WakuPeer, peerInfo: RemotePeerInfo): WakuPeer = + result = WakuPeer( + multiaddr: constructMultiaddrStr(peerInfo), + protocols: peerInfo.protocols, + shards: peerInfo.getShards(), + connected: peerInfo.connectedness, + agent: peerInfo.agent, + origin: peerInfo.origin, + score: none(float64), + ) + +proc init*(T: type WakuPeer, pubsubPeer: PubSubPeer, pm: PeerManager): WakuPeer = + let peerInfo = pm.getPeer(pubsubPeer.peerId) + result = WakuPeer( + multiaddr: constructMultiaddrStr(peerInfo), + protocols: 
peerInfo.protocols, + shards: peerInfo.getShards(), + connected: peerInfo.connectedness, + agent: peerInfo.agent, + origin: peerInfo.origin, + score: some(pubsubPeer.score), + ) + +proc add*( + peers: var WakuPeers, + multiaddr: string, + protocol: string, + shards: seq[uint16], + connected: Connectedness, + agent: string, + origin: PeerOrigin, +) = + var peer: WakuPeer = WakuPeer( + multiaddr: multiaddr, + protocols: @[protocol], + shards: shards, + connected: connected, + agent: agent, + origin: origin, + ) + let idx = peers.find(peer) + + if idx < 0: + peers.add(peer) + else: + peers[idx].protocols.add(protocol) diff --git a/third-party/nwaku/waku/waku_api/rest/builder.nim b/third-party/nwaku/waku/waku_api/rest/builder.nim new file mode 100644 index 0000000..eb51443 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/builder.nim @@ -0,0 +1,219 @@ +{.push raises: [].} + +import net, tables +import presto +import + waku/waku_node, + waku/discovery/waku_discv5, + waku/waku_api/message_cache, + waku/waku_api/handlers, + waku/waku_api/rest/server, + waku/waku_api/rest/debug/handlers as rest_debug_api, + waku/waku_api/rest/relay/handlers as rest_relay_api, + waku/waku_api/rest/filter/handlers as rest_filter_api, + waku/waku_api/rest/legacy_lightpush/handlers as rest_legacy_lightpush_api, + waku/waku_api/rest/lightpush/handlers as rest_lightpush_api, + waku/waku_api/rest/store/handlers as rest_store_api, + waku/waku_api/rest/legacy_store/handlers as rest_store_legacy_api, + waku/waku_api/rest/health/handlers as rest_health_api, + waku/waku_api/rest/admin/handlers as rest_admin_api, + waku/waku_core/topics, + waku/waku_relay/protocol + +## Monitoring and external interfaces + +# Used to register api endpoints that are not currently installed as keys, +# values are holding error messages to be returned to the client +# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it +# It will always be called from main thread anyway. 
+# Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety +var restServerNotInstalledTab {.threadvar.}: TableRef[string, string] +restServerNotInstalledTab = newTable[string, string]() + +export WakuRestServerRef + +type RestServerConf* = object + allowOrigin*: seq[string] + listenAddress*: IpAddress + port*: Port + admin*: bool + relayCacheCapacity*: uint32 + +proc startRestServerEssentials*( + nodeHealthMonitor: NodeHealthMonitor, conf: RestServerConf, portsShift: uint16 +): Result[WakuRestServerRef, string] = + let requestErrorHandler: RestRequestErrorHandler = proc( + error: RestRequestError, request: HttpRequestRef + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + try: + case error + of RestRequestError.Invalid: + return await request.respond(Http400, "Invalid request", HttpTable.init()) + of RestRequestError.NotFound: + let paths = request.rawPath.split("/") + let rootPath = + if len(paths) > 1: + paths[1] + else: + "" + restServerNotInstalledTab[].withValue(rootPath, errMsg): + return await request.respond(Http404, errMsg[], HttpTable.init()) + do: + return await request.respond( + Http400, + "Bad request initiated. 
Invalid path or method used.", + HttpTable.init(), + ) + of RestRequestError.InvalidContentBody: + return await request.respond(Http400, "Invalid content body", HttpTable.init()) + of RestRequestError.InvalidContentType: + return await request.respond(Http400, "Invalid content type", HttpTable.init()) + of RestRequestError.Unexpected: + return defaultResponse() + except HttpWriteError: + error "Failed to write response to client", error = getCurrentExceptionMsg() + discard + + return defaultResponse() + + let allowedOrigin = + if len(conf.allowOrigin) > 0: + some(conf.allowOrigin.join(",")) + else: + none(string) + + let address = conf.listenAddress + let port = Port(conf.port.uint16 + portsShift) + let server = + ?newRestHttpServer( + address, + port, + allowedOrigin = allowedOrigin, + requestErrorHandler = requestErrorHandler, + ) + + ## Health REST API + installHealthApiHandler(server.router, nodeHealthMonitor) + + restServerNotInstalledTab["admin"] = + "/admin endpoints are not available while initializing." + restServerNotInstalledTab["debug"] = + "/debug endpoints are not available while initializing." + restServerNotInstalledTab["relay"] = + "/relay endpoints are not available while initializing." + restServerNotInstalledTab["filter"] = + "/filter endpoints are not available while initializing." + restServerNotInstalledTab["lightpush"] = + "/lightpush endpoints are not available while initializing." + restServerNotInstalledTab["store"] = + "/store endpoints are not available while initializing." 
+ + server.start() + info "Starting REST HTTP server", url = "http://" & $address & ":" & $port & "/" + + ok(server) + +proc startRestServerProtocolSupport*( + restServer: WakuRestServerRef, + node: WakuNode, + wakuDiscv5: WakuDiscoveryV5, + conf: RestServerConf, + relayEnabled: bool, + lightPushEnabled: bool, + clusterId: uint16, + shards: seq[uint16], + contentTopics: seq[string], +): Result[void, string] = + var router = restServer.router + ## Admin REST API + if conf.admin: + installAdminApiHandlers(router, node) + else: + restServerNotInstalledTab["admin"] = + "/admin endpoints are not available. Please check your configuration: --rest-admin=true" + + ## Debug REST API + installDebugApiHandlers(router, node) + + ## Relay REST API + if relayEnabled: + ## This MessageCache is used, f.e., in js-waku<>nwaku interop tests. + ## js-waku tests asks nwaku-docker through REST whether a message is properly received. + let cache = MessageCache.init(int(conf.relayCacheCapacity)) + + let handler: WakuRelayHandler = messageCacheHandler(cache) + + for shard in shards: + let pubsubTopic = $RelayShard(clusterId: clusterId, shardId: shard) + cache.pubsubSubscribe(pubsubTopic) + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr: + error "Could not subscribe", pubsubTopic, error + continue + + if node.wakuAutoSharding.isSome(): + # Only deduce pubsub topics to subscribe to from content topics if autosharding is enabled + for contentTopic in contentTopics: + cache.contentSubscribe(contentTopic) + + let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr: + error "Autosharding error in REST", error = error + continue + let pubsubTopic = $shard + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr: + error "Could not subscribe", pubsubTopic, error + continue + + installRelayApiHandlers(router, node, cache) + else: + restServerNotInstalledTab["relay"] = + "/relay endpoints are not available. 
Please check your configuration: --relay" + + ## Filter REST API + if node.wakuFilterClient != nil: + let filterCache = MessageCache.init() + + let filterDiscoHandler = + if not wakuDiscv5.isNil(): + some(defaultDiscoveryHandler(wakuDiscv5, Filter)) + else: + none(DiscoveryHandler) + + rest_filter_api.installFilterRestApiHandlers( + router, node, filterCache, filterDiscoHandler + ) + else: + restServerNotInstalledTab["filter"] = "/filter endpoints are not available." + + ## Store REST API + let storeDiscoHandler = + if not wakuDiscv5.isNil(): + some(defaultDiscoveryHandler(wakuDiscv5, Store)) + else: + none(DiscoveryHandler) + + rest_store_api.installStoreApiHandlers(router, node, storeDiscoHandler) + rest_store_legacy_api.installStoreApiHandlers(router, node, storeDiscoHandler) + + ## Light push API + ## Install it either if client is mounted) + ## or install it to be used with self-hosted lightpush service + ## We either get lightpushnode (lightpush service node) from config or discovered or self served + if (node.wakuLegacyLightpushClient != nil) or + (lightPushEnabled and node.wakuLegacyLightPush != nil and node.wakuRelay != nil): + let lightDiscoHandler = + if not wakuDiscv5.isNil(): + some(defaultDiscoveryHandler(wakuDiscv5, Lightpush)) + else: + none(DiscoveryHandler) + + rest_legacy_lightpush_api.installLightPushRequestHandler( + router, node, lightDiscoHandler + ) + rest_lightpush_api.installLightPushRequestHandler(router, node, lightDiscoHandler) + else: + restServerNotInstalledTab["lightpush"] = "/lightpush endpoints are not available." 
+ + info "REST services are installed" + return ok() diff --git a/third-party/nwaku/waku/waku_api/rest/client.nim b/third-party/nwaku/waku/waku_api/rest/client.nim new file mode 100644 index 0000000..2f61eaa --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/client.nim @@ -0,0 +1,6 @@ +{.push raises: [].} + +import presto/client + +proc newRestHttpClient*(address: TransportAddress): RestClientRef = + RestClientRef.new(address, HttpClientScheme.NonSecure) diff --git a/third-party/nwaku/waku/waku_api/rest/debug/client.nim b/third-party/nwaku/waku/waku_api/rest/debug/client.nim new file mode 100644 index 0000000..c2d5881 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/debug/client.nim @@ -0,0 +1,20 @@ +{.push raises: [].} + +import + chronicles, json_serialization, json_serialization/std/options, presto/[route, client] +import ../serdes, ../rest_serdes, ./types + +export types + +logScope: + topics = "waku node rest debug_api" + +# TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) +proc debugInfoV1*(): RestResponse[DebugWakuInfo] {. + rest, endpoint: "/info", meth: HttpMethod.MethodGet +.} + +# TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) +proc debugVersionV1*(): RestResponse[string] {. 
+ rest, endpoint: "/version", meth: HttpMethod.MethodGet +.} diff --git a/third-party/nwaku/waku/waku_api/rest/debug/handlers.nim b/third-party/nwaku/waku/waku_api/rest/debug/handlers.nim new file mode 100644 index 0000000..4ae90d0 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/debug/handlers.nim @@ -0,0 +1,44 @@ +{.push raises: [].} + +import chronicles, json_serialization, presto/route +import ../../../waku_node, ../responses, ../serdes, ./types + +export types + +logScope: + topics = "waku node rest debug_api" + +const ROUTE_INFOV1* = "/info" +# /debug route is deprecated, will be removed +const ROUTE_DEBUG_INFOV1 = "/debug/v1/info" + +proc installDebugInfoV1Handler(router: var RestRouter, node: WakuNode) = + let getInfo = proc(): RestApiResponse = + let info = node.info().toDebugWakuInfo() + let resp = RestApiResponse.jsonResponse(info, status = Http200) + if resp.isErr(): + debug "An error occurred while building the json respose", error = resp.error + return RestApiResponse.internalServerError() + + return resp.get() + + # /debug route is deprecated, will be removed + router.api(MethodGet, ROUTE_DEBUG_INFOV1) do() -> RestApiResponse: + return getInfo() + router.api(MethodGet, ROUTE_INFOV1) do() -> RestApiResponse: + return getInfo() + +const ROUTE_VERSIONV1* = "/version" +# /debug route is deprecated, will be removed +const ROUTE_DEBUG_VERSIONV1 = "/debug/v1/version" + +proc installDebugVersionV1Handler(router: var RestRouter, node: WakuNode) = + # /debug route is deprecated, will be removed + router.api(MethodGet, ROUTE_DEBUG_VERSIONV1) do() -> RestApiResponse: + return RestApiResponse.textResponse(git_version, status = Http200) + router.api(MethodGet, ROUTE_VERSIONV1) do() -> RestApiResponse: + return RestApiResponse.textResponse(git_version, status = Http200) + +proc installDebugApiHandlers*(router: var RestRouter, node: WakuNode) = + installDebugInfoV1Handler(router, node) + installDebugVersionV1Handler(router, node) diff --git 
a/third-party/nwaku/waku/waku_api/rest/debug/types.nim b/third-party/nwaku/waku/waku_api/rest/debug/types.nim new file mode 100644 index 0000000..8fa1068 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/debug/types.nim @@ -0,0 +1,56 @@ +{.push raises: [].} + +import chronicles, json_serialization, json_serialization/std/options +import ../../../waku_node, ../serdes +import std/typetraits + +#### Types + +type DebugWakuInfo* = object + listenAddresses*: seq[string] + enrUri*: Option[string] + +#### Type conversion + +proc toDebugWakuInfo*(nodeInfo: WakuInfo): DebugWakuInfo = + DebugWakuInfo( + listenAddresses: nodeInfo.listenAddresses, enrUri: some(nodeInfo.enrUri) + ) + +#### Serialization and deserialization + +proc writeValue*( + writer: var JsonWriter[RestJson], value: DebugWakuInfo +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("listenAddresses", value.listenAddresses) + if value.enrUri.isSome(): + writer.writeField("enrUri", value.enrUri.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var DebugWakuInfo +) {.raises: [SerializationError, IOError].} = + var + listenAddresses: Option[seq[string]] + enrUri: Option[string] + + for fieldName in readObjectFields(reader): + case fieldName + of "listenAddresses": + if listenAddresses.isSome(): + reader.raiseUnexpectedField( + "Multiple `listenAddresses` fields found", "DebugWakuInfo" + ) + listenAddresses = some(reader.readValue(seq[string])) + of "enrUri": + if enrUri.isSome(): + reader.raiseUnexpectedField("Multiple `enrUri` fields found", "DebugWakuInfo") + enrUri = some(reader.readValue(string)) + else: + unrecognizedFieldWarning(value) + + if listenAddresses.isNone(): + reader.raiseUnexpectedValue("Field `listenAddresses` is missing") + + value = DebugWakuInfo(listenAddresses: listenAddresses.get, enrUri: enrUri) diff --git a/third-party/nwaku/waku/waku_api/rest/filter/client.nim b/third-party/nwaku/waku/waku_api/rest/filter/client.nim new 
file mode 100644 index 0000000..db1a689 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/filter/client.nim @@ -0,0 +1,71 @@ +{.push raises: [].} + +import + json, + stew/byteutils, + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client, common] +import ../../../common/base64, ../serdes, ../rest_serdes, ./types + +export types + +logScope: + topics = "waku node rest client v2" + +proc encodeBytes*( + value: FilterSubscribeRequest, contentType: string +): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc encodeBytes*( + value: FilterSubscriberPing, contentType: string +): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc encodeBytes*( + value: FilterUnsubscribeRequest, contentType: string +): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc encodeBytes*( + value: FilterUnsubscribeAllRequest, contentType: string +): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc filterSubscriberPing*( + requestId: string +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions/{requestId}", meth: HttpMethod.MethodGet +.} + +proc filterPostSubscriptions*( + body: FilterSubscribeRequest +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodPost +.} + +proc filterPutSubscriptions*( + body: FilterSubscribeRequest +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodPut +.} + +proc filterDeleteSubscriptions*( + body: FilterUnsubscribeRequest +): RestResponse[FilterSubscriptionResponse] {. + rest, endpoint: "/filter/v2/subscriptions", meth: HttpMethod.MethodDelete +.} + +proc filterDeleteAllSubscriptions*( + body: FilterUnsubscribeAllRequest +): RestResponse[FilterSubscriptionResponse] {. 
+ rest, endpoint: "/filter/v2/subscriptions/all", meth: HttpMethod.MethodDelete +.} + +proc filterGetMessagesV1*( + contentTopic: string +): RestResponse[FilterGetMessagesResponse] {. + rest, endpoint: "/filter/v2/messages/{contentTopic}", meth: HttpMethod.MethodGet +.} diff --git a/third-party/nwaku/waku/waku_api/rest/filter/handlers.nim b/third-party/nwaku/waku/waku_api/rest/filter/handlers.nim new file mode 100644 index 0000000..d684335 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/filter/handlers.nim @@ -0,0 +1,432 @@ +{.push raises: [].} + +import + std/strformat, + std/sequtils, + stew/byteutils, + chronicles, + json_serialization, + json_serialization/std/options, + presto/route, + presto/common +import + ../../../waku_core, + ../../../waku_node, + ../../../node/peer_manager, + ../../../waku_filter_v2, + ../../../waku_filter_v2/client as filter_protocol_client, + ../../../waku_filter_v2/common as filter_protocol_type, + ../../message_cache, + ../../handlers, + ../serdes, + ../responses, + ../rest_serdes, + ./types + +export types + +logScope: + topics = "waku node rest filter_api_v2" + +const futTimeoutForSubscriptionProcessing* = 5.seconds + +#### Request handlers + +const ROUTE_FILTER_SUBSCRIPTIONS* = "/filter/v2/subscriptions" + +const ROUTE_FILTER_ALL_SUBSCRIPTIONS* = "/filter/v2/subscriptions/all" + +func decodeRequestBody[T]( + contentBody: Option[ContentBody] +): Result[T, RestApiResponse] = + if contentBody.isNone(): + return err(RestApiResponse.badRequest("Missing content body")) + + let reqBodyContentType = MediaType.init($contentBody.get().contentType) + if reqBodyContentType != MIMETYPE_JSON: + return + err(RestApiResponse.badRequest("Wrong Content-Type, expected application/json")) + + let reqBodyData = contentBody.get().data + + let requestResult = decodeFromJsonBytes(T, reqBodyData) + if requestResult.isErr(): + return err( + RestApiResponse.badRequest( + "Invalid content body, could not decode. 
" & $requestResult.error + ) + ) + + return ok(requestResult.get()) + +proc getStatusDesc( + protocolClientRes: filter_protocol_type.FilterSubscribeResult +): string = + ## Retrieve proper error cause of FilterSubscribeError - due stringify make some parts of text double + if protocolClientRes.isOk: + return "OK" + + let err = protocolClientRes.error + case err.kind + of FilterSubscribeErrorKind.PEER_DIAL_FAILURE: + err.address + of FilterSubscribeErrorKind.BAD_RESPONSE, FilterSubscribeErrorKind.BAD_REQUEST, + FilterSubscribeErrorKind.NOT_FOUND, FilterSubscribeErrorKind.TOO_MANY_REQUESTS, + FilterSubscribeErrorKind.SERVICE_UNAVAILABLE: + err.cause + of FilterSubscribeErrorKind.UNKNOWN: + "UNKNOWN" + +proc convertResponse( + T: type FilterSubscriptionResponse, + requestId: string, + protocolClientRes: filter_protocol_type.FilterSubscribeResult, +): T = + ## Properly convert filter protocol's response to rest response + return FilterSubscriptionResponse( + requestId: requestId, statusDesc: getStatusDesc(protocolClientRes) + ) + +proc convertResponse( + T: type FilterSubscriptionResponse, + requestId: string, + protocolClientRes: filter_protocol_type.FilterSubscribeError, +): T = + ## Properly convert filter protocol's response to rest response in case of error + return + FilterSubscriptionResponse(requestId: requestId, statusDesc: $protocolClientRes) + +proc convertErrorKindToHttpStatus( + kind: filter_protocol_type.FilterSubscribeErrorKind +): HttpCode = + ## Filter protocol's error code is not directly convertible to HttpCodes hence this converter + + case kind + of filter_protocol_type.FilterSubscribeErrorKind.UNKNOWN: + return Http200 + of filter_protocol_type.FilterSubscribeErrorKind.PEER_DIAL_FAILURE: + return Http504 #gateway timout + of filter_protocol_type.FilterSubscribeErrorKind.BAD_RESPONSE: + return Http500 # internal server error + of filter_protocol_type.FilterSubscribeErrorKind.BAD_REQUEST: + return Http400 + of 
filter_protocol_type.FilterSubscribeErrorKind.NOT_FOUND: + return Http404 + of filter_protocol_type.FilterSubscribeErrorKind.TOO_MANY_REQUESTS: + return Http429 + of filter_protocol_type.FilterSubscribeErrorKind.SERVICE_UNAVAILABLE: + return Http503 + +proc makeRestResponse( + requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeResult +): RestApiResponse = + let filterSubscriptionResponse = + FilterSubscriptionResponse.convertResponse(requestId, protocolClientRes) + + var httpStatus: HttpCode = Http200 + + if protocolClientRes.isErr(): + httpStatus = convertErrorKindToHttpStatus(protocolClientRes.error().kind) + # TODO: convert status codes! + + let resp = + RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus) + + if resp.isErr(): + error "An error ocurred while building the json respose: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error ocurred while building the json respose: {resp.error}") + ) + + return resp.get() + +proc makeRestResponse( + requestId: string, protocolClientRes: filter_protocol_type.FilterSubscribeError +): RestApiResponse = + let filterSubscriptionResponse = + FilterSubscriptionResponse.convertResponse(requestId, protocolClientRes) + + let httpStatus = convertErrorKindToHttpStatus(protocolClientRes.kind) + # TODO: convert status codes! 
+ + let resp = + RestApiResponse.jsonResponse(filterSubscriptionResponse, status = httpStatus) + + if resp.isErr(): + error "An error ocurred while building the json respose: ", error = resp.error + return RestApiResponse.internalServerError( + fmt("An error ocurred while building the json respose: {resp.error}") + ) + + return resp.get() + +const NoPeerNoDiscoError = FilterSubscribeError.serviceUnavailable( + "No suitable service peer & no discovery method" +) + +const NoPeerNoneFoundError = + FilterSubscribeError.serviceUnavailable("No suitable service peer & none discovered") + +proc filterPostPutSubscriptionRequestHandler( + node: WakuNode, + contentBody: Option[ContentBody], + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +): Future[RestApiResponse] {.async.} = + ## handles any filter subscription requests, adds or modifies. + + let decodedBody = decodeRequestBody[FilterSubscribeRequest](contentBody) + + if decodedBody.isErr(): + return makeRestResponse( + "unknown", + FilterSubscribeError.badRequest( + fmt("Failed to decode request: {decodedBody.error}") + ), + ) + + let req: FilterSubscribeRequest = decodedBody.value() + + let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let handler = discHandler.valueOr: + return makeRestResponse(req.requestId, NoPeerNoDiscoError) + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError($error) + + peerOp.valueOr: + return makeRestResponse(req.requestId, NoPeerNoneFoundError) + + let subFut = node.filterSubscribe(req.pubsubTopic, req.contentFilters, peer) + + if not await subFut.withTimeout(futTimeoutForSubscriptionProcessing): + error "Failed to subscribe to contentFilters do to timeout!" 
+ return makeRestResponse( + req.requestId, + FilterSubscribeError.serviceUnavailable("Subscription request timed out"), + ) + + # Successfully subscribed to all content filters + for cTopic in req.contentFilters: + cache.contentSubscribe(cTopic) + + return makeRestResponse(req.requestId, subFut.read()) + +proc installFilterPostSubscriptionsHandler( + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPost, ROUTE_FILTER_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Subscribes a node to a list of contentTopics of a pubsubTopic + debug "post", ROUTE_FILTER_SUBSCRIPTIONS, contentBody + + return await filterPostPutSubscriptionRequestHandler( + node, contentBody, cache, discHandler + ) + +proc installFilterPutSubscriptionsHandler( + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPut, ROUTE_FILTER_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Modifies a subscribtion of a node to a list of contentTopics of a pubsubTopic + debug "put", ROUTE_FILTER_SUBSCRIPTIONS, contentBody + + return await filterPostPutSubscriptionRequestHandler( + node, contentBody, cache, discHandler + ) + +proc installFilterDeleteSubscriptionsHandler( + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodDelete, ROUTE_FILTER_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Subscribes a node to a list of contentTopics of a PubSub topic + debug "delete", ROUTE_FILTER_SUBSCRIPTIONS, contentBody + + let decodedBody = decodeRequestBody[FilterUnsubscribeRequest](contentBody) + + if decodedBody.isErr(): + return makeRestResponse( + "unknown", + FilterSubscribeError.badRequest( + fmt("Failed to 
decode request: {decodedBody.error}") + ), + ) + + let req: FilterUnsubscribeRequest = decodedBody.value() + + let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let handler = discHandler.valueOr: + return makeRestResponse(req.requestId, NoPeerNoDiscoError) + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError($error) + + peerOp.valueOr: + return makeRestResponse(req.requestId, NoPeerNoneFoundError) + + let unsubFut = node.filterUnsubscribe(req.pubsubTopic, req.contentFilters, peer) + + if not await unsubFut.withTimeout(futTimeoutForSubscriptionProcessing): + error "Failed to unsubscribe from contentFilters due to timeout!" + return makeRestResponse( + req.requestId, + FilterSubscribeError.serviceUnavailable( + "Failed to unsubscribe from contentFilters due to timeout!" + ), + ) + + # Successfully subscribed to all content filters + for cTopic in req.contentFilters: + cache.contentUnsubscribe(cTopic) + + # Successfully unsubscribed from all requested contentTopics + return makeRestResponse(req.requestId, unsubFut.read()) + +proc installFilterDeleteAllSubscriptionsHandler( + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodDelete, ROUTE_FILTER_ALL_SUBSCRIPTIONS) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Subscribes a node to a list of contentTopics of a PubSub topic + debug "delete", ROUTE_FILTER_ALL_SUBSCRIPTIONS, contentBody + + let decodedBody = decodeRequestBody[FilterUnsubscribeAllRequest](contentBody) + + if decodedBody.isErr(): + return makeRestResponse( + "unknown", + FilterSubscribeError.badRequest( + fmt("Failed to decode request: {decodedBody.error}") + ), + ) + + let req: FilterUnsubscribeAllRequest = decodedBody.value() + + let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let handler = discHandler.valueOr: + return 
makeRestResponse(req.requestId, NoPeerNoDiscoError) + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError($error) + + peerOp.valueOr: + return makeRestResponse(req.requestId, NoPeerNoneFoundError) + + let unsubFut = node.filterUnsubscribeAll(peer) + + if not await unsubFut.withTimeout(futTimeoutForSubscriptionProcessing): + error "Failed to unsubscribe from contentFilters due to timeout!" + return makeRestResponse( + req.requestId, + FilterSubscribeError.serviceUnavailable( + "Failed to unsubscribe from all contentFilters due to timeout!" + ), + ) + + cache.reset() + + # Successfully unsubscribed from all requested contentTopics + return makeRestResponse(req.requestId, unsubFut.read()) + +const ROUTE_FILTER_SUBSCRIBER_PING* = "/filter/v2/subscriptions/{requestId}" + +proc installFilterPingSubscriberHandler( + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodGet, ROUTE_FILTER_SUBSCRIBER_PING) do( + requestId: string + ) -> RestApiResponse: + ## Checks if a node has valid subscription or not. + debug "get", ROUTE_FILTER_SUBSCRIBER_PING, requestId + + let peer = node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr: + let handler = discHandler.valueOr: + return makeRestResponse(requestId.get(), NoPeerNoDiscoError) + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError($error) + + peerOp.valueOr: + return makeRestResponse(requestId.get(), NoPeerNoneFoundError) + + let pingFutRes = node.wakuFilterClient.ping(peer) + + if not await pingFutRes.withTimeout(futTimeoutForSubscriptionProcessing): + error "Failed to ping filter service peer due to timeout!" 
+ return makeRestResponse( + requestId.get(), FilterSubscribeError.serviceUnavailable("Ping timed out") + ) + + return makeRestResponse(requestId.get(), pingFutRes.read()) + +const ROUTE_FILTER_MESSAGES* = "/filter/v2/messages/{contentTopic}" + +proc installFilterGetMessagesHandler( + router: var RestRouter, node: WakuNode, cache: MessageCache +) = + let pushHandler: FilterPushHandler = proc( + pubsubTopic: PubsubTopic, msg: WakuMessage + ) {.async, gcsafe, closure.} = + cache.addMessage(pubsubTopic, msg) + + node.wakuFilterClient.registerPushHandler(pushHandler) + + router.api(MethodGet, ROUTE_FILTER_MESSAGES) do( + contentTopic: string + ) -> RestApiResponse: + ## Returns all WakuMessages received on a specified content topic since the + ## last time this method was called + ## TODO: ability to specify a return message limit, maybe use cursor to control paging response. + debug "get", ROUTE_FILTER_MESSAGES, contentTopic = contentTopic + + if contentTopic.isErr(): + return RestApiResponse.badRequest("Missing contentTopic") + + let contentTopic = contentTopic.get() + + let msgRes = cache.getAutoMessages(contentTopic, clear = true) + if msgRes.isErr(): + return RestApiResponse.badRequest("Not subscribed to topic: " & contentTopic) + + let data = FilterGetMessagesResponse(msgRes.get().map(toFilterWakuMessage)) + let resp = RestApiResponse.jsonResponse(data, status = Http200) + if resp.isErr(): + error "An error ocurred while building the json respose: ", error = resp.error + return RestApiResponse.internalServerError( + "An error ocurred while building the json respose" + ) + + return resp.get() + +proc installFilterRestApiHandlers*( + router: var RestRouter, + node: WakuNode, + cache: MessageCache, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + installFilterPingSubscriberHandler(router, node, discHandler) + installFilterPostSubscriptionsHandler(router, node, cache, discHandler) + installFilterPutSubscriptionsHandler(router, node, cache, 
discHandler) + installFilterDeleteSubscriptionsHandler(router, node, cache, discHandler) + installFilterDeleteAllSubscriptionsHandler(router, node, cache, discHandler) + installFilterGetMessagesHandler(router, node, cache) diff --git a/third-party/nwaku/waku/waku_api/rest/filter/types.nim b/third-party/nwaku/waku/waku_api/rest/filter/types.nim new file mode 100644 index 0000000..6d18e7f --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/filter/types.nim @@ -0,0 +1,426 @@ +{.push raises: [].} + +import + std/[sets, strformat], + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client, common], + libp2p/peerid +import ../../../common/base64, ../../../waku_core, ../serdes + +#### Types + +type FilterWakuMessage* = object + payload*: Base64String + contentTopic*: Option[ContentTopic] + version*: Option[Natural] + timestamp*: Option[int64] + meta*: Option[Base64String] + ephemeral*: Option[bool] + +type FilterGetMessagesResponse* = seq[FilterWakuMessage] + +type FilterLegacySubscribeRequest* = object + # Subscription request for legacy filter support + pubsubTopic*: Option[PubSubTopic] + contentFilters*: seq[ContentTopic] + +type FilterSubscriberPing* = object + requestId*: string + +type FilterSubscribeRequest* = object + requestId*: string + pubsubTopic*: Option[PubSubTopic] + contentFilters*: seq[ContentTopic] + +type FilterUnsubscribeRequest* = object + requestId*: string + pubsubTopic*: Option[PubSubTopic] + contentFilters*: seq[ContentTopic] + +type FilterUnsubscribeAllRequest* = object + requestId*: string + +type FilterSubscriptionResponse* = object + requestId*: string + statusDesc*: string + +#### Type conversion + +proc toFilterWakuMessage*(msg: WakuMessage): FilterWakuMessage = + FilterWakuMessage( + payload: base64.encode(msg.payload), + contentTopic: some(msg.contentTopic), + version: some(Natural(msg.version)), + timestamp: some(msg.timestamp), + meta: + if msg.meta.len > 0: + some(base64.encode(msg.meta)) + else: 
+ none(Base64String), + ephemeral: some(msg.ephemeral), + ) + +proc toWakuMessage*(msg: FilterWakuMessage, version = 0): Result[WakuMessage, string] = + let + payload = ?msg.payload.decode() + contentTopic = msg.contentTopic.get(DefaultContentTopic) + version = uint32(msg.version.get(version)) + timestamp = msg.timestamp.get(0) + meta = ?msg.meta.get(Base64String("")).decode() + ephemeral = msg.ephemeral.get(false) + + ok( + WakuMessage( + payload: payload, + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + meta: meta, + ephemeral: ephemeral, + ) + ) + +#### Serialization and deserialization + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterWakuMessage +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("payload", value.payload) + if value.contentTopic.isSome(): + writer.writeField("contentTopic", value.contentTopic.get()) + if value.version.isSome(): + writer.writeField("version", value.version.get()) + if value.timestamp.isSome(): + writer.writeField("timestamp", value.timestamp.get()) + if value.meta.isSome(): + writer.writeField("meta", value.meta.get()) + if value.ephemeral.isSome(): + writer.writeField("ephemeral", value.ephemeral.get()) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter, value: FilterLegacySubscribeRequest +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("pubsubTopic", value.pubsubTopic) + writer.writeField("contentFilters", value.contentFilters) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterSubscriptionResponse +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("requestId", value.requestId) + writer.writeField("statusDesc", value.statusDesc) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterSubscribeRequest +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("requestId", value.requestId) + if value.pubsubTopic.isSome(): + 
writer.writeField("pubsubTopic", value.pubsubTopic.get()) + writer.writeField("contentFilters", value.contentFilters) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterSubscriberPing +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("requestId", value.requestId) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterUnsubscribeRequest +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("requestId", value.requestId) + if value.pubsubTopic.isSome(): + writer.writeField("pubsubTopic", value.pubsubTopic.get()) + writer.writeField("contentFilters", value.contentFilters) + writer.endRecord() + +proc writeValue*( + writer: var JsonWriter[RestJson], value: FilterUnsubscribeAllRequest +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("requestId", value.requestId) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterWakuMessage +) {.raises: [SerializationError, IOError].} = + var + payload = none(Base64String) + contentTopic = none(ContentTopic) + version = none(Natural) + timestamp = none(int64) + meta = none(Base64String) + ephemeral = none(bool) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "FilterWakuMessage") + + case fieldName + of "payload": + payload = some(reader.readValue(Base64String)) + of "contentTopic": + contentTopic = some(reader.readValue(ContentTopic)) + of "version": + version = some(reader.readValue(Natural)) + of "timestamp": + timestamp = some(reader.readValue(int64)) + of "meta": + meta = some(reader.readValue(Base64String)) + of "ephemeral": + ephemeral = some(reader.readValue(bool)) + else: + 
unrecognizedFieldWarning(value) + + if payload.isNone(): + reader.raiseUnexpectedValue("Field `payload` is missing") + + value = FilterWakuMessage( + payload: payload.get(), + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + meta: meta, + ephemeral: ephemeral, + ) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterLegacySubscribeRequest +) {.raises: [SerializationError, IOError].} = + var + pubsubTopic = none(PubsubTopic) + contentFilters = none(seq[ContentTopic]) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "FilterLegacySubscribeRequest") + + case fieldName + of "pubsubTopic": + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "contentFilters": + contentFilters = some(reader.readValue(seq[ContentTopic])) + else: + unrecognizedFieldWarning(value) + + if contentFilters.isNone(): + reader.raiseUnexpectedValue("Field `contentFilters` is missing") + + if contentFilters.get().len() == 0: + reader.raiseUnexpectedValue("Field `contentFilters` is empty") + + value = FilterLegacySubscribeRequest( + pubsubTopic: + if pubsubTopic.isNone() or pubsubTopic.get() == "": + none(string) + else: + some(pubsubTopic.get()), + contentFilters: contentFilters.get(), + ) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterSubscriberPing +) {.raises: [SerializationError, IOError].} = + var requestId = none(string) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "FilterSubscriberPing") + + 
case fieldName + of "requestId": + requestId = some(reader.readValue(string)) + else: + unrecognizedFieldWarning(value) + + if requestId.isNone(): + reader.raiseUnexpectedValue("Field `requestId` is missing") + + value = FilterSubscriberPing(requestId: requestId.get()) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterSubscribeRequest +) {.raises: [SerializationError, IOError].} = + var + requestId = none(string) + pubsubTopic = none(PubsubTopic) + contentFilters = none(seq[ContentTopic]) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "FilterSubscribeRequest") + + case fieldName + of "requestId": + requestId = some(reader.readValue(string)) + of "pubsubTopic": + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "contentFilters": + contentFilters = some(reader.readValue(seq[ContentTopic])) + else: + unrecognizedFieldWarning(value) + + if requestId.isNone(): + reader.raiseUnexpectedValue("Field `requestId` is missing") + + if contentFilters.isNone(): + reader.raiseUnexpectedValue("Field `contentFilters` is missing") + + if contentFilters.get().len() == 0: + reader.raiseUnexpectedValue("Field `contentFilters` is empty") + + value = FilterSubscribeRequest( + requestId: requestId.get(), + pubsubTopic: + if pubsubTopic.isNone() or pubsubTopic.get() == "": + none(string) + else: + some(pubsubTopic.get()), + contentFilters: contentFilters.get(), + ) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterUnsubscribeRequest +) {.raises: [SerializationError, IOError].} = + var + requestId = none(string) + pubsubTopic = none(PubsubTopic) + contentFilters = none(seq[ContentTopic]) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # 
Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "FilterUnsubscribeRequest") + + case fieldName + of "requestId": + requestId = some(reader.readValue(string)) + of "pubsubTopic": + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "contentFilters": + contentFilters = some(reader.readValue(seq[ContentTopic])) + else: + unrecognizedFieldWarning(value) + + if requestId.isNone(): + reader.raiseUnexpectedValue("Field `requestId` is missing") + + if contentFilters.isNone(): + reader.raiseUnexpectedValue("Field `contentFilters` is missing") + + if contentFilters.get().len() == 0: + reader.raiseUnexpectedValue("Field `contentFilters` is empty") + + value = FilterUnsubscribeRequest( + requestId: requestId.get(), + pubsubTopic: + if pubsubTopic.isNone() or pubsubTopic.get() == "": + none(string) + else: + some(pubsubTopic.get()), + contentFilters: contentFilters.get(), + ) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterUnsubscribeAllRequest +) {.raises: [SerializationError, IOError].} = + var requestId = none(string) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "FilterUnsubscribeAllRequest") + + case fieldName + of "requestId": + requestId = some(reader.readValue(string)) + else: + unrecognizedFieldWarning(value) + + if requestId.isNone(): + reader.raiseUnexpectedValue("Field `requestId` is missing") + + value = FilterUnsubscribeAllRequest(requestId: requestId.get()) + +proc readValue*( + reader: var JsonReader[RestJson], value: var FilterSubscriptionResponse +) {.raises: [SerializationError, IOError].} 
= + var + requestId = none(string) + statusDesc = none(string) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "FilterSubscriptionResponse") + + case fieldName + of "requestId": + requestId = some(reader.readValue(string)) + of "statusDesc": + statusDesc = some(reader.readValue(string)) + else: + unrecognizedFieldWarning(value) + + if requestId.isNone(): + reader.raiseUnexpectedValue("Field `requestId` is missing") + + value = FilterSubscriptionResponse( + requestId: requestId.get(), statusDesc: statusDesc.get("") + ) diff --git a/third-party/nwaku/waku/waku_api/rest/health/client.nim b/third-party/nwaku/waku/waku_api/rest/health/client.nim new file mode 100644 index 0000000..97f4a2c --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/health/client.nim @@ -0,0 +1,11 @@ +{.push raises: [].} + +import chronicles, json_serialization, presto/[route, client] +import ./types, ../serdes, ../rest_serdes, waku/node/health_monitor + +logScope: + topics = "waku node rest health_api" + +proc healthCheck*(): RestResponse[HealthReport] {. 
+ rest, endpoint: "/health", meth: HttpMethod.MethodGet +.} diff --git a/third-party/nwaku/waku/waku_api/rest/health/handlers.nim b/third-party/nwaku/waku/waku_api/rest/health/handlers.nim new file mode 100644 index 0000000..aa6b1e9 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/health/handlers.nim @@ -0,0 +1,39 @@ +{.push raises: [].} + +import chronicles, json_serialization, presto/route +import ../../../waku_node, ../responses, ../serdes, ./types + +logScope: + topics = "waku node rest health_api" + +const ROUTE_HEALTH* = "/health" + +const FutHealthReportTimeout = 5.seconds + +proc installHealthApiHandler*( + router: var RestRouter, nodeHealthMonitor: NodeHealthMonitor +) = + router.api(MethodGet, ROUTE_HEALTH) do() -> RestApiResponse: + let healthReportFut = nodeHealthMonitor.getNodeHealthReport() + if not await healthReportFut.withTimeout(FutHealthReportTimeout): + return RestApiResponse.internalServerError("Health check timed out") + + var msg = "" + var status = Http200 + + try: + if healthReportFut.completed(): + let healthReport = healthReportFut.read() + return RestApiResponse.jsonResponse(healthReport, Http200).valueOr: + debug "An error ocurred while building the json healthReport response", + error = error + return + RestApiResponse.internalServerError("Failed to serialize health report") + else: + msg = "Health check failed" + status = Http503 + except: + msg = "exception reading state: " & getCurrentExceptionMsg() + status = Http500 + + return RestApiResponse.textResponse(msg, status) diff --git a/third-party/nwaku/waku/waku_api/rest/health/types.nim b/third-party/nwaku/waku/waku_api/rest/health/types.nim new file mode 100644 index 0000000..57f8b28 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/health/types.nim @@ -0,0 +1,83 @@ +{.push raises: [].} + +import results +import chronicles, json_serialization, json_serialization/std/options +import ../../../waku_node, ../serdes + +#### Serialization and deserialization + +proc 
writeValue*( + writer: var JsonWriter[RestJson], value: ProtocolHealth +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField(value.protocol, $value.health) + writer.writeField("desc", value.desc) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var ProtocolHealth +) {.gcsafe, raises: [SerializationError, IOError].} = + var protocol = none[string]() + var health = none[HealthStatus]() + var desc = none[string]() + for fieldName in readObjectFields(reader): + if fieldName == "desc": + if desc.isSome(): + reader.raiseUnexpectedField("Multiple `desc` fields found", "ProtocolHealth") + desc = some(reader.readValue(string)) + else: + if protocol.isSome(): + reader.raiseUnexpectedField( + "Multiple `protocol` fields and value found", "ProtocolHealth" + ) + + let fieldValue = reader.readValue(string) + let h = HealthStatus.init(fieldValue).valueOr: + reader.raiseUnexpectedValue("Invalid `health` value: " & $error) + health = some(h) + protocol = some(fieldName) + + value = ProtocolHealth(protocol: protocol.get(), health: health.get(), desc: desc) + +proc writeValue*( + writer: var JsonWriter[RestJson], value: HealthReport +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("nodeHealth", $value.nodeHealth) + writer.writeField("protocolsHealth", value.protocolsHealth) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var HealthReport +) {.raises: [SerializationError, IOError].} = + var + nodeHealth: Option[HealthStatus] + protocolsHealth: Option[seq[ProtocolHealth]] + + for fieldName in readObjectFields(reader): + case fieldName + of "nodeHealth": + if nodeHealth.isSome(): + reader.raiseUnexpectedField( + "Multiple `nodeHealth` fields found", "HealthReport" + ) + + let health = HealthStatus.init(reader.readValue(string)).valueOr: + reader.raiseUnexpectedValue("Invalid `health` value: " & $error) + + nodeHealth = some(health) + of "protocolsHealth": + if 
protocolsHealth.isSome(): + reader.raiseUnexpectedField( + "Multiple `protocolsHealth` fields found", "HealthReport" + ) + + protocolsHealth = some(reader.readValue(seq[ProtocolHealth])) + else: + unrecognizedFieldWarning(value) + + if nodeHealth.isNone(): + reader.raiseUnexpectedValue("Field `nodeHealth` is missing") + + value = + HealthReport(nodeHealth: nodeHealth.get, protocolsHealth: protocolsHealth.get(@[])) diff --git a/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/client.nim b/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/client.nim new file mode 100644 index 0000000..a1e4428 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/client.nim @@ -0,0 +1,15 @@ +{.push raises: [].} + +import chronicles, json_serialization, presto/[route, client, common] +import ../serdes, ../rest_serdes, ./types + +export types + +proc encodeBytes*(value: PushRequest, contentType: string): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc sendPushRequest*( + body: PushRequest +): RestResponse[string] {. 
+ rest, endpoint: "/lightpush/v1/message", meth: HttpMethod.MethodPost +.} diff --git a/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/handlers.nim b/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/handlers.nim new file mode 100644 index 0000000..5d7c66b --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/handlers.nim @@ -0,0 +1,91 @@ +{.push raises: [].} + +import + std/strformat, + stew/byteutils, + chronicles, + json_serialization, + json_serialization/std/options, + presto/route, + presto/common + +import + waku/node/peer_manager, + waku/waku_lightpush_legacy/common, + ../../../waku_node, + ../../handlers, + ../serdes, + ../responses, + ../rest_serdes, + ./types + +export types + +logScope: + topics = "waku node rest legacy lightpush api" + +const FutTimeoutForPushRequestProcessing* = 5.seconds + +const NoPeerNoDiscoError = + RestApiResponse.serviceUnavailable("No suitable service peer & no discovery method") + +const NoPeerNoneFoundError = + RestApiResponse.serviceUnavailable("No suitable service peer & none discovered") + +proc useSelfHostedLightPush(node: WakuNode): bool = + return node.wakuLegacyLightPush != nil and node.wakuLegacyLightPushClient == nil + +#### Request handlers + +const ROUTE_LIGHTPUSH = "/lightpush/v1/message" + +proc installLightPushRequestHandler*( + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPost, ROUTE_LIGHTPUSH) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Send a request to push a waku message + debug "post", ROUTE_LIGHTPUSH, contentBody + + let decodedBody = decodeRequestBody[PushRequest](contentBody) + + if decodedBody.isErr(): + return decodedBody.error() + + let req: PushRequest = decodedBody.value() + + let msg = req.message.toWakuMessage().valueOr: + return RestApiResponse.badRequest("Invalid message: " & $error) + + var peer = RemotePeerInfo.init($node.switch.peerInfo.peerId) + if 
useSelfHostedLightPush(node): + discard + else: + peer = node.peerManager.selectPeer(WakuLegacyLightPushCodec).valueOr: + let handler = discHandler.valueOr: + return NoPeerNoDiscoError + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError("No value in peerOp: " & $error) + + peerOp.valueOr: + return NoPeerNoneFoundError + + let subFut = node.legacyLightpushPublish(req.pubsubTopic, msg, peer) + + if not await subFut.withTimeout(FutTimeoutForPushRequestProcessing): + error "Failed to request a message push due to timeout!" + return RestApiResponse.serviceUnavailable("Push request timed out") + + if subFut.value().isErr(): + if subFut.value().error == TooManyRequestsMessage: + return RestApiResponse.tooManyRequests("Request rate limmit reached") + + return RestApiResponse.serviceUnavailable( + fmt("Failed to request a message push: {subFut.value().error}") + ) + + return RestApiResponse.ok() diff --git a/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/types.nim b/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/types.nim new file mode 100644 index 0000000..6036840 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/legacy_lightpush/types.nim @@ -0,0 +1,67 @@ +{.push raises: [].} + +import + std/[sets, strformat], + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client] + +import ../../../waku_core, ../relay/types as relay_types, ../serdes + +export relay_types + +#### Types + +type PushRequest* = object + pubsubTopic*: Option[PubSubTopic] + message*: RelayWakuMessage + +#### Serialization and deserialization + +proc writeValue*( + writer: var JsonWriter[RestJson], value: PushRequest +) {.raises: [IOError].} = + writer.beginRecord() + if value.pubsubTopic.isSome(): + writer.writeField("pubsubTopic", value.pubsubTopic.get()) + writer.writeField("message", value.message) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var PushRequest +) {.raises: 
[SerializationError, IOError].} = + var + pubsubTopic = none(PubsubTopic) + message = none(RelayWakuMessage) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "PushRequest") + + case fieldName + of "pubsubTopic": + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "message": + message = some(reader.readValue(RelayWakuMessage)) + else: + unrecognizedFieldWarning(value) + + if message.isNone(): + reader.raiseUnexpectedValue("Field `message` is missing") + + value = PushRequest( + pubsubTopic: + if pubsubTopic.isNone() or pubsubTopic.get() == "": + none(string) + else: + some(pubsubTopic.get()), + message: message.get(), + ) diff --git a/third-party/nwaku/waku/waku_api/rest/legacy_store/client.nim b/third-party/nwaku/waku/waku_api/rest/legacy_store/client.nim new file mode 100644 index 0000000..24ad38d --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/legacy_store/client.nim @@ -0,0 +1,75 @@ +{.push raises: [].} + +import + chronicles, json_serialization, json_serialization/std/options, presto/[route, client] +import ../../../waku_store_legacy/common, ../serdes, ../responses, ./types + +export types + +logScope: + topics = "waku node rest legacy store_api" + +proc decodeBytes*( + t: typedesc[StoreResponseRest], + data: openArray[byte], + contentType: Opt[ContentTypeData], +): RestResult[StoreResponseRest] = + if MediaType.init($contentType) == MIMETYPE_JSON: + let decoded = ?decodeFromJsonBytes(StoreResponseRest, data) + return ok(decoded) + + if MediaType.init($contentType) == MIMETYPE_TEXT: + var res: string + if len(data) > 0: + res = newString(len(data)) + copyMem(addr res[0], unsafeAddr data[0], len(data)) + + return ok( + StoreResponseRest( + messages: newSeq[StoreWakuMessage](0), + cursor: 
none(HistoryCursorRest), + # field that contain error information + errorMessage: some(res), + ) + ) + + # If everything goes wrong + return err(cstring("Unsupported contentType " & $contentType)) + +proc getStoreMessagesV1*( + # URL-encoded reference to the store-node + peerAddr: string = "", + pubsubTopic: string = "", + # URL-encoded comma-separated list of content topics + contentTopics: string = "", + startTime: string = "", + endTime: string = "", + + # Optional cursor fields + senderTime: string = "", + storeTime: string = "", + digest: string = "", # base64-encoded digest + pageSize: string = "", + ascending: string = "", +): RestResponse[StoreResponseRest] {. + rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet +.} + +proc getStoreMessagesV1*( + # URL-encoded reference to the store-node + peerAddr: Option[string], + pubsubTopic: string = "", + # URL-encoded comma-separated list of content topics + contentTopics: string = "", + startTime: string = "", + endTime: string = "", + + # Optional cursor fields + senderTime: string = "", + storeTime: string = "", + digest: string = "", # base64-encoded digest + pageSize: string = "", + ascending: string = "", +): RestResponse[StoreResponseRest] {. 
+ rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet +.} diff --git a/third-party/nwaku/waku/waku_api/rest/legacy_store/handlers.nim b/third-party/nwaku/waku/waku_api/rest/legacy_store/handlers.nim new file mode 100644 index 0000000..d960f24 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/legacy_store/handlers.nim @@ -0,0 +1,256 @@ +{.push raises: [].} + +import std/strformat, results, chronicles, uri, json_serialization, presto/route +import + ../../../waku_core, + ../../../waku_store_legacy/common, + ../../../waku_store_legacy/self_req_handler, + ../../../waku_node, + ../../../node/peer_manager, + ../../../common/paging, + ../../handlers, + ../responses, + ../serdes, + ./types + +export types + +logScope: + topics = "waku node rest legacy store_api" + +const futTimeout* = 5.seconds # Max time to wait for futures + +const NoPeerNoDiscError* = + RestApiResponse.preconditionFailed("No suitable service peer & no discovery method") + +# Queries the store-node with the query parameters and +# returns a RestApiResponse that is sent back to the api client. 
+proc performHistoryQuery( + selfNode: WakuNode, histQuery: HistoryQuery, storePeer: RemotePeerInfo +): Future[RestApiResponse] {.async.} = + let queryFut = selfNode.query(histQuery, storePeer) + if not await queryFut.withTimeout(futTimeout): + const msg = "No history response received (timeout)" + error msg + return RestApiResponse.internalServerError(msg) + + let res = queryFut.read() + if res.isErr(): + const msg = "Error occurred in queryFut.read()" + error msg, error = res.error + return RestApiResponse.internalServerError(fmt("{msg} [{res.error}]")) + + let storeResp = res.value.toStoreResponseRest() + let resp = RestApiResponse.jsonResponse(storeResp, status = Http200) + if resp.isErr(): + const msg = "Error building the json respose" + error msg, error = resp.error + return RestApiResponse.internalServerError(fmt("{msg} [{resp.error}]")) + + return resp.get() + +# Converts a string time representation into an Option[Timestamp]. +# Only positive time is considered a valid Timestamp in the request +proc parseTime(input: Option[string]): Result[Option[Timestamp], string] = + if input.isSome() and input.get() != "": + try: + let time = parseInt(input.get()) + if time > 0: + return ok(some(Timestamp(time))) + except ValueError: + return err("Problem parsing time [" & getCurrentExceptionMsg() & "]") + + return ok(none(Timestamp)) + +# Generates a history query cursor as per the given params +proc parseCursor( + parsedPubsubTopic: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], +): Result[Option[HistoryCursor], string] = + # Parse sender time + let parsedSenderTime = parseTime(senderTime) + if not parsedSenderTime.isOk(): + return err(parsedSenderTime.error) + + # Parse store time + let parsedStoreTime = parseTime(storeTime) + if not parsedStoreTime.isOk(): + return err(parsedStoreTime.error) + + # Parse message digest + let parsedMsgDigest = parseMsgDigest(digest) + if not parsedMsgDigest.isOk(): + return 
err(parsedMsgDigest.error) + + # Parse cursor information + if parsedPubsubTopic.isSome() and parsedSenderTime.value.isSome() and + parsedStoreTime.value.isSome() and parsedMsgDigest.value.isSome(): + return ok( + some( + HistoryCursor( + pubsubTopic: parsedPubsubTopic.get(), + senderTime: parsedSenderTime.value.get(), + storeTime: parsedStoreTime.value.get(), + digest: parsedMsgDigest.value.get(), + ) + ) + ) + else: + return ok(none(HistoryCursor)) + +# Creates a HistoryQuery from the given params +proc createHistoryQuery( + pubsubTopic: Option[string], + contentTopics: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], + startTime: Option[string], + endTime: Option[string], + pageSize: Option[string], + direction: Option[string], +): Result[HistoryQuery, string] = + # Parse pubsubTopic parameter + var parsedPubsubTopic = none(string) + if pubsubTopic.isSome(): + let decodedPubsubTopic = decodeUrl(pubsubTopic.get()) + if decodedPubsubTopic != "": + parsedPubsubTopic = some(decodedPubsubTopic) + + # Parse the content topics + var parsedContentTopics = newSeq[ContentTopic](0) + if contentTopics.isSome(): + let ctList = decodeUrl(contentTopics.get()) + if ctList != "": + for ct in ctList.split(','): + parsedContentTopics.add(ct) + + # Parse cursor information + let parsedCursor = ?parseCursor(parsedPubsubTopic, senderTime, storeTime, digest) + + # Parse page size field + var parsedPagedSize = DefaultPageSize + if pageSize.isSome() and pageSize.get() != "": + try: + parsedPagedSize = uint64(parseInt(pageSize.get())) + except CatchableError: + return err("Problem parsing page size [" & getCurrentExceptionMsg() & "]") + + # Parse start time + let parsedStartTime = ?parseTime(startTime) + + # Parse end time + let parsedEndTime = ?parseTime(endTime) + + # Parse ascending field + var parsedDirection = default() + if direction.isSome() and direction.get() != "": + parsedDirection = direction.get().into() + + return ok( + 
HistoryQuery( + pubsubTopic: parsedPubsubTopic, + contentTopics: parsedContentTopics, + startTime: parsedStartTime, + endTime: parsedEndTime, + direction: parsedDirection, + pageSize: parsedPagedSize, + cursor: parsedCursor, + ) + ) + +# Simple type conversion. The "Option[Result[string, cstring]]" +# type is used by the nim-presto library. +proc toOpt(self: Option[Result[string, cstring]]): Option[string] = + if not self.isSome() or self.get().value == "": + return none(string) + if self.isSome() and self.get().value != "": + return some(self.get().value) + +proc retrieveMsgsFromSelfNode( + self: WakuNode, histQuery: HistoryQuery +): Future[RestApiResponse] {.async.} = + ## Performs a "store" request to the local node (self node.) + ## Notice that this doesn't follow the regular store libp2p channel because a node + ## it is not allowed to libp2p-dial a node to itself, by default. + ## + + let selfResp = (await self.wakuLegacyStore.handleSelfStoreRequest(histQuery)).valueOr: + return RestApiResponse.internalServerError($error) + + let storeResp = selfResp.toStoreResponseRest() + let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr: + const msg = "Error building the json respose" + let e = $error + error msg, error = e + return RestApiResponse.internalServerError(fmt("{msg} [{e}]")) + + return resp + +# Subscribes the rest handler to attend "/store/v1/messages" requests +proc installStoreApiHandlers*( + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + # Handles the store-query request according to the passed parameters + router.api(MethodGet, "/store/v1/messages") do( + peerAddr: Option[string], + pubsubTopic: Option[string], + contentTopics: Option[string], + senderTime: Option[string], + storeTime: Option[string], + digest: Option[string], + startTime: Option[string], + endTime: Option[string], + pageSize: Option[string], + ascending: Option[string] + ) -> RestApiResponse: + 
debug "REST-GET /store/v1/messages ", peer_addr = $peerAddr + + # All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding) + # Example: + # /store/v1/messages?peerAddr=%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\&pubsubTopic=my-waku-topic + + # Parse the rest of the parameters and create a HistoryQuery + let histQuery = createHistoryQuery( + pubsubTopic.toOpt(), + contentTopics.toOpt(), + senderTime.toOpt(), + storeTime.toOpt(), + digest.toOpt(), + startTime.toOpt(), + endTime.toOpt(), + pageSize.toOpt(), + ascending.toOpt(), + ) + + if not histQuery.isOk(): + return RestApiResponse.badRequest(histQuery.error) + + if peerAddr.isNone() and not node.wakuLegacyStore.isNil(): + ## The user didn't specify a peer address and self-node is configured as a store node. + ## In this case we assume that the user is willing to retrieve the messages stored by + ## the local/self store node. + return await node.retrieveMsgsFromSelfNode(histQuery.get()) + + # Parse the peer address parameter + let parsedPeerAddr = parseUrlPeerAddr(peerAddr.toOpt()).valueOr: + return RestApiResponse.badRequest(error) + + let peerAddr = parsedPeerAddr.valueOr: + node.peerManager.selectPeer(WakuLegacyStoreCodec).valueOr: + let handler = discHandler.valueOr: + return NoPeerNoDiscError + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError($error) + + peerOp.valueOr: + return RestApiResponse.preconditionFailed( + "No suitable service peer & none discovered" + ) + + return await node.performHistoryQuery(histQuery.value, peerAddr) diff --git a/third-party/nwaku/waku/waku_api/rest/legacy_store/types.nim b/third-party/nwaku/waku/waku_api/rest/legacy_store/types.nim new file mode 100644 index 0000000..eee3ac2 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/legacy_store/types.nim @@ -0,0 +1,379 @@ +{.push raises: [].} + +import + std/[sets, strformat, uri], + stew/byteutils, + chronicles, 
+ json_serialization, + json_serialization/std/options, + presto/[route, client, common] +import + ../../../waku_store_legacy/common as waku_store_common, + ../../../common/base64, + ../../../waku_core, + ../serdes + +#### Types + +type + HistoryCursorRest* = object + pubsubTopic*: PubsubTopic + senderTime*: Timestamp + storeTime*: Timestamp + digest*: waku_store_common.MessageDigest + + StoreRequestRest* = object + # inspired by https://github.com/waku-org/nwaku/blob/f95147f5b7edfd45f914586f2d41cd18fb0e0d18/waku/v2//waku_store/common.nim#L52 + pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + cursor*: Option[HistoryCursorRest] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + pageSize*: uint64 + ascending*: bool + + StoreWakuMessage* = object + payload*: Base64String + contentTopic*: Option[ContentTopic] + version*: Option[uint32] + timestamp*: Option[Timestamp] + ephemeral*: Option[bool] + meta*: Option[Base64String] + + StoreResponseRest* = object # inspired by https://rfc.vac.dev/spec/16/#storeresponse + messages*: seq[StoreWakuMessage] + cursor*: Option[HistoryCursorRest] + # field that contains error information + errorMessage*: Option[string] + +createJsonFlavor RestJson + +Json.setWriter JsonWriter, PreferredOutput = string + +#### Type conversion + +# Converts a URL-encoded-base64 string into a 'MessageDigest' +proc parseMsgDigest*( + input: Option[string] +): Result[Option[waku_store_common.MessageDigest], string] = + if not input.isSome() or input.get() == "": + return ok(none(waku_store_common.MessageDigest)) + + let decodedUrl = decodeUrl(input.get()) + let base64Decoded = base64.decode(Base64String(decodedUrl)) + var messageDigest = waku_store_common.MessageDigest() + + if not base64Decoded.isOk(): + return err(base64Decoded.error) + + let base64DecodedArr = base64Decoded.get() + # Next snippet inspired by "nwaku/waku/waku_archive/archive.nim" + # TODO: Improve coherence of MessageDigest type + messageDigest = 
block: + var data: array[32, byte] + for i in 0 ..< min(base64DecodedArr.len, 32): + data[i] = base64DecodedArr[i] + + waku_store_common.MessageDigest(data: data) + + return ok(some(messageDigest)) + +# Converts a given MessageDigest object into a suitable +# Base64-URL-encoded string suitable to be transmitted in a Rest +# request-response. The MessageDigest is first base64 encoded +# and this result is URL-encoded. +proc toRestStringMessageDigest*(self: waku_store_common.MessageDigest): string = + let base64Encoded = base64.encode(self.data) + encodeUrl($base64Encoded) + +proc toWakuMessage*(message: StoreWakuMessage): WakuMessage = + WakuMessage( + payload: base64.decode(message.payload).get(), + contentTopic: message.contentTopic.get(), + version: message.version.get(), + timestamp: message.timestamp.get(), + ephemeral: message.ephemeral.get(), + meta: message.meta.get(Base64String("")).decode().get(), + ) + +# Converts a 'HistoryResponse' object to an 'StoreResponseRest' +# that can be serialized to a json object. 
+proc toStoreResponseRest*(histResp: HistoryResponse): StoreResponseRest = + proc toStoreWakuMessage(message: WakuMessage): StoreWakuMessage = + StoreWakuMessage( + payload: base64.encode(message.payload), + contentTopic: some(message.contentTopic), + version: some(message.version), + timestamp: some(message.timestamp), + ephemeral: some(message.ephemeral), + meta: + if message.meta.len > 0: + some(base64.encode(message.meta)) + else: + none(Base64String), + ) + + var storeWakuMsgs: seq[StoreWakuMessage] + for m in histResp.messages: + storeWakuMsgs.add(m.toStoreWakuMessage()) + + var cursor = none(HistoryCursorRest) + if histResp.cursor.isSome: + cursor = some( + HistoryCursorRest( + pubsubTopic: histResp.cursor.get().pubsubTopic, + senderTime: histResp.cursor.get().senderTime, + storeTime: histResp.cursor.get().storeTime, + digest: histResp.cursor.get().digest, + ) + ) + + StoreResponseRest(messages: storeWakuMsgs, cursor: cursor) + +## Beginning of StoreWakuMessage serde + +proc writeValue*( + writer: var JsonWriter, value: StoreWakuMessage +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("payload", $value.payload) + if value.contentTopic.isSome(): + writer.writeField("contentTopic", value.contentTopic.get()) + if value.version.isSome(): + writer.writeField("version", value.version.get()) + if value.timestamp.isSome(): + writer.writeField("timestamp", value.timestamp.get()) + if value.ephemeral.isSome(): + writer.writeField("ephemeral", value.ephemeral.get()) + if value.meta.isSome(): + writer.writeField("meta", value.meta.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var StoreWakuMessage +) {.gcsafe, raises: [SerializationError, IOError].} = + var + payload = none(Base64String) + contentTopic = none(ContentTopic) + version = none(uint32) + timestamp = none(Timestamp) + ephemeral = none(bool) + meta = none(Base64String) + + var keys = initHashSet[string]() + for fieldName in 
readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "StoreWakuMessage") + + case fieldName + of "payload": + payload = some(reader.readValue(Base64String)) + of "contentTopic": + contentTopic = some(reader.readValue(ContentTopic)) + of "version": + version = some(reader.readValue(uint32)) + of "timestamp": + timestamp = some(reader.readValue(Timestamp)) + of "ephemeral": + ephemeral = some(reader.readValue(bool)) + of "meta": + meta = some(reader.readValue(Base64String)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if payload.isNone(): + reader.raiseUnexpectedValue("Field `payload` is missing") + + value = StoreWakuMessage( + payload: payload.get(), + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + ephemeral: ephemeral, + meta: meta, + ) + +## End of StoreWakuMessage serde + +## Beginning of MessageDigest serde + +proc writeValue*( + writer: var JsonWriter, value: waku_store_common.MessageDigest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("data", base64.encode(value.data)) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var waku_store_common.MessageDigest +) {.gcsafe, raises: [SerializationError, IOError].} = + var data = none(seq[byte]) + + for fieldName in readObjectFields(reader): + case fieldName + of "data": + if data.isSome(): + reader.raiseUnexpectedField("Multiple `data` fields found", "MessageDigest") + let decoded = base64.decode(reader.readValue(Base64String)) + if not decoded.isOk(): + reader.raiseUnexpectedField("Failed decoding data", "MessageDigest") + data = some(decoded.get()) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if data.isNone(): + reader.raiseUnexpectedValue("Field `data` is 
missing") + + for i in 0 ..< 32: + value.data[i] = data.get()[i] + +## End of MessageDigest serde + +## Beginning of HistoryCursorRest serde + +proc writeValue*( + writer: var JsonWriter, value: HistoryCursorRest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("pubsubTopic", value.pubsubTopic) + writer.writeField("senderTime", value.senderTime) + writer.writeField("storeTime", value.storeTime) + writer.writeField("digest", value.digest) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var HistoryCursorRest +) {.gcsafe, raises: [SerializationError, IOError].} = + var + pubsubTopic = none(PubsubTopic) + senderTime = none(Timestamp) + storeTime = none(Timestamp) + digest = none(waku_store_common.MessageDigest) + + for fieldName in readObjectFields(reader): + case fieldName + of "pubsubTopic": + if pubsubTopic.isSome(): + reader.raiseUnexpectedField( + "Multiple `pubsubTopic` fields found", "HistoryCursorRest" + ) + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "senderTime": + if senderTime.isSome(): + reader.raiseUnexpectedField( + "Multiple `senderTime` fields found", "HistoryCursorRest" + ) + senderTime = some(reader.readValue(Timestamp)) + of "storeTime": + if storeTime.isSome(): + reader.raiseUnexpectedField( + "Multiple `storeTime` fields found", "HistoryCursorRest" + ) + storeTime = some(reader.readValue(Timestamp)) + of "digest": + if digest.isSome(): + reader.raiseUnexpectedField( + "Multiple `digest` fields found", "HistoryCursorRest" + ) + digest = some(reader.readValue(waku_store_common.MessageDigest)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if pubsubTopic.isNone(): + reader.raiseUnexpectedValue("Field `pubsubTopic` is missing") + + if senderTime.isNone(): + reader.raiseUnexpectedValue("Field `senderTime` is missing") + + if storeTime.isNone(): + reader.raiseUnexpectedValue("Field `storeTime` is missing") + + if digest.isNone(): + 
reader.raiseUnexpectedValue("Field `digest` is missing") + + value = HistoryCursorRest( + pubsubTopic: pubsubTopic.get(), + senderTime: senderTime.get(), + storeTime: storeTime.get(), + digest: digest.get(), + ) + +## End of HistoryCursorRest serde + +## Beginning of StoreResponseRest serde + +proc writeValue*( + writer: var JsonWriter, value: StoreResponseRest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + writer.writeField("messages", value.messages) + if value.cursor.isSome(): + writer.writeField("cursor", value.cursor.get()) + if value.errorMessage.isSome(): + writer.writeField("errorMessage", value.errorMessage.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var StoreResponseRest +) {.gcsafe, raises: [SerializationError, IOError].} = + var + messages = none(seq[StoreWakuMessage]) + cursor = none(HistoryCursorRest) + errorMessage = none(string) + + for fieldName in readObjectFields(reader): + case fieldName + of "messages": + if messages.isSome(): + reader.raiseUnexpectedField( + "Multiple `messages` fields found", "StoreResponseRest" + ) + messages = some(reader.readValue(seq[StoreWakuMessage])) + of "cursor": + if cursor.isSome(): + reader.raiseUnexpectedField( + "Multiple `cursor` fields found", "StoreResponseRest" + ) + cursor = some(reader.readValue(HistoryCursorRest)) + of "errorMessage": + if errorMessage.isSome(): + reader.raiseUnexpectedField( + "Multiple `errorMessage` fields found", "StoreResponseRest" + ) + errorMessage = some(reader.readValue(string)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if messages.isNone(): + reader.raiseUnexpectedValue("Field `messages` is missing") + + value = StoreResponseRest( + messages: messages.get(), cursor: cursor, errorMessage: errorMessage + ) + +## End of StoreResponseRest serde + +## Beginning of StoreRequestRest serde + +proc writeValue*( + writer: var JsonWriter, value: StoreRequestRest +) {.gcsafe, raises: [IOError].} = + 
writer.beginRecord() + if value.pubsubTopic.isSome(): + writer.writeField("pubsubTopic", value.pubsubTopic.get()) + writer.writeField("contentTopics", value.contentTopics) + if value.startTime.isSome(): + writer.writeField("startTime", value.startTime.get()) + if value.endTime.isSome(): + writer.writeField("endTime", value.endTime.get()) + writer.writeField("pageSize", value.pageSize) + writer.writeField("ascending", value.ascending) + writer.endRecord() + +## End of StoreRequestRest serde diff --git a/third-party/nwaku/waku/waku_api/rest/lightpush/client.nim b/third-party/nwaku/waku/waku_api/rest/lightpush/client.nim new file mode 100644 index 0000000..abf832a --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/lightpush/client.nim @@ -0,0 +1,23 @@ +{.push raises: [].} + +import + json, + std/sets, + stew/byteutils, + strformat, + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client, common] +import ../../../waku_core, ../serdes, ../responses, ../rest_serdes, ./types + +export types + +proc encodeBytes*(value: PushRequest, contentType: string): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +proc sendPushRequest*( + body: PushRequest +): RestResponse[PushResponse] {. 
+ rest, endpoint: "/lightpush/v3/message", meth: HttpMethod.MethodPost +.} diff --git a/third-party/nwaku/waku/waku_api/rest/lightpush/handlers.nim b/third-party/nwaku/waku/waku_api/rest/lightpush/handlers.nim new file mode 100644 index 0000000..e1f950d --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/lightpush/handlers.nim @@ -0,0 +1,104 @@ +{.push raises: [].} + +import + std/strformat, + stew/byteutils, + chronicles, + json_serialization, + json_serialization/std/options, + presto/route, + presto/common + +import + waku/node/peer_manager, + waku/waku_lightpush/common, + ../../../waku_node, + ../../handlers, + ../serdes, + ../responses, + ../rest_serdes, + ./types + +export types + +logScope: + topics = "waku node rest lightpush api" + +const FutTimeoutForPushRequestProcessing* = 5.seconds + +const NoPeerNoDiscoError = "No suitable service peer & no discovery method" +const NoPeerNoneFoundError = "No suitable service peer & none discovered" + +proc useSelfHostedLightPush(node: WakuNode): bool = + return node.wakuLightPush != nil and node.wakuLightPushClient == nil + +proc convertErrorKindToHttpStatus(statusCode: LightPushStatusCode): HttpCode = + ## Lightpush status codes are matching HTTP status codes by design + return toHttpCode(statusCode.int).get(Http500) + +proc makeRestResponse(response: WakuLightPushResult): RestApiResponse = + var httpStatus: HttpCode = Http200 + var apiResponse: PushResponse + + if response.isOk(): + apiResponse.relayPeerCount = some(response.get()) + else: + httpStatus = convertErrorKindToHttpStatus(response.error().code) + apiResponse.statusDesc = response.error().desc + + let restResp = RestApiResponse.jsonResponse(apiResponse, status = httpStatus).valueOr: + error "An error ocurred while building the json respose: ", error = error + return RestApiResponse.internalServerError( + fmt("An error ocurred while building the json respose: {error}") + ) + + return restResp + +#### Request handlers +const ROUTE_LIGHTPUSH = 
"/lightpush/v3/message" + +proc installLightPushRequestHandler*( + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + router.api(MethodPost, ROUTE_LIGHTPUSH) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Send a request to push a waku message + debug "post received", ROUTE_LIGHTPUSH + trace "content body", ROUTE_LIGHTPUSH, contentBody + + let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr: + return + makeRestResponse(lightpushResultBadRequest("Invalid push request! " & $error)) + + let msg = req.message.toWakuMessage().valueOr: + return makeRestResponse(lightpushResultBadRequest("Invalid message! " & $error)) + + var toPeer = none(RemotePeerInfo) + if useSelfHostedLightPush(node): + discard + else: + let aPeer = node.peerManager.selectPeer(WakuLightPushCodec).valueOr: + let handler = discHandler.valueOr: + return makeRestResponse(lightpushResultServiceUnavailable(NoPeerNoDiscoError)) + + let peerOp = (await handler()).valueOr: + return makeRestResponse( + lightpushResultInternalError("No value in peerOp: " & $error) + ) + + peerOp.valueOr: + return + makeRestResponse(lightpushResultServiceUnavailable(NoPeerNoneFoundError)) + toPeer = some(aPeer) + + let subFut = node.lightpushPublish(req.pubsubTopic, msg, toPeer) + + if not await subFut.withTimeout(FutTimeoutForPushRequestProcessing): + error "Failed to request a message push due to timeout!" 
+ return + makeRestResponse(lightpushResultServiceUnavailable("Push request timed out")) + + return makeRestResponse(subFut.value()) diff --git a/third-party/nwaku/waku/waku_api/rest/lightpush/types.nim b/third-party/nwaku/waku/waku_api/rest/lightpush/types.nim new file mode 100644 index 0000000..1fb87ab --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/lightpush/types.nim @@ -0,0 +1,114 @@ +{.push raises: [].} + +import + std/[sets, strformat], + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client] + +import ../../../waku_core, ../relay/types as relay_types, ../serdes + +export relay_types + +#### Types + +type + PushRequest* = object + pubsubTopic*: Option[PubSubTopic] + message*: RelayWakuMessage + + PushResponse* = object + statusDesc*: Option[string] + relayPeerCount*: Option[uint32] + +#### Serialization and deserialization +proc writeValue*( + writer: var JsonWriter[RestJson], value: PushRequest +) {.raises: [IOError].} = + writer.beginRecord() + if value.pubsubTopic.isSome(): + writer.writeField("pubsubTopic", value.pubsubTopic.get()) + writer.writeField("message", value.message) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var PushRequest +) {.raises: [SerializationError, IOError].} = + var + pubsubTopic = none(PubsubTopic) + message = none(RelayWakuMessage) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "PushRequest") + + case fieldName + of "pubsubTopic": + pubsubTopic = some(reader.readValue(PubsubTopic)) + of "message": + message = some(reader.readValue(RelayWakuMessage)) + else: + unrecognizedFieldWarning(value) + + if message.isNone(): + reader.raiseUnexpectedValue("Field `message` is missing") + + 
value = PushRequest( + pubsubTopic: + if pubsubTopic.isNone() or pubsubTopic.get() == "": + none(string) + else: + some(pubsubTopic.get()), + message: message.get(), + ) + +proc writeValue*( + writer: var JsonWriter[RestJson], value: PushResponse +) {.raises: [IOError].} = + writer.beginRecord() + if value.statusDesc.isSome(): + writer.writeField("statusDesc", value.statusDesc.get()) + if value.relayPeerCount.isSome(): + writer.writeField("relayPeerCount", value.relayPeerCount.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var PushResponse +) {.raises: [SerializationError, IOError].} = + var + statusDesc = none(string) + relayPeerCount = none(uint32) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "PushResponse") + + case fieldName + of "statusDesc": + statusDesc = some(reader.readValue(string)) + of "relayPeerCount": + relayPeerCount = some(reader.readValue(uint32)) + else: + unrecognizedFieldWarning(value) + + if relayPeerCount.isNone() and statusDesc.isNone(): + reader.raiseUnexpectedValue( + "Fields are missing, either `relayPeerCount` or `statusDesc` must be present" + ) + + value = PushResponse(statusDesc: statusDesc, relayPeerCount: relayPeerCount) diff --git a/third-party/nwaku/waku/waku_api/rest/origin_handler.nim b/third-party/nwaku/waku/waku_api/rest/origin_handler.nim new file mode 100644 index 0000000..2317c94 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/origin_handler.nim @@ -0,0 +1,125 @@ +{.push raises: [].} + +import + std/[options, strutils, net], + regex, + results, + chronicles, + chronos, + chronos/apps/http/httpserver + +type OriginHandlerMiddlewareRef* = ref object of HttpServerMiddlewareRef + allowedOriginMatcher: 
Option[Regex2] + everyOriginAllowed: bool + +proc isEveryOriginAllowed(maybeAllowedOrigin: Option[string]): bool = + return maybeAllowedOrigin.isSome() and maybeAllowedOrigin.get() == "*" + +proc compileOriginMatcher(maybeAllowedOrigin: Option[string]): Option[Regex2] = + if maybeAllowedOrigin.isNone(): + return none(Regex2) + + let allowedOrigin = maybeAllowedOrigin.get() + + if (len(allowedOrigin) == 0): + return none(Regex2) + + try: + var matchOrigin: string + + if allowedOrigin == "*": + matchOrigin = r".*" + return some(re2(matchOrigin, {regexCaseless, regexExtended})) + + let allowedOrigins = allowedOrigin.split(",") + + var matchExpressions: seq[string] = @[] + + var prefix: string + for allowedOrigin in allowedOrigins: + if allowedOrigin.startsWith("http://"): + prefix = r"http:\/\/" + matchOrigin = allowedOrigin.substr(7) + elif allowedOrigin.startsWith("https://"): + prefix = r"https:\/\/" + matchOrigin = allowedOrigin.substr(8) + else: + prefix = r"https?:\/\/" + matchOrigin = allowedOrigin + + matchOrigin = matchOrigin.replace(".", r"\.") + matchOrigin = matchOrigin.replace("*", ".*") + matchOrigin = matchOrigin.replace("?", ".?") + + matchExpressions.add("^" & prefix & matchOrigin & "$") + + let finalExpression = matchExpressions.join("|") + + return some(re2(finalExpression, {regexCaseless, regexExtended})) + except RegexError: + var msg = getCurrentExceptionMsg() + error "Failed to compile regex", source = allowedOrigin, err = msg + return none(Regex2) + +proc originsMatch( + originHandler: OriginHandlerMiddlewareRef, requestOrigin: string +): bool = + if originHandler.allowedOriginMatcher.isNone(): + return false + + return requestOrigin.match(originHandler.allowedOriginMatcher.get()) + +proc originMiddlewareProc( + middleware: HttpServerMiddlewareRef, + reqfence: RequestFence, + nextHandler: HttpProcessCallback2, +): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + if reqfence.isErr(): + # Ignore request errors that detected 
before our middleware. + # Let final handler deal with it. + return await nextHandler(reqfence) + + let self = OriginHandlerMiddlewareRef(middleware) + let request = reqfence.get() + var reqHeaders = request.headers + var response = request.getResponse() + + if self.allowedOriginMatcher.isSome(): + let origin = reqHeaders.getList("Origin") + try: + if origin.len == 1: + if self.everyOriginAllowed: + response.addHeader("Access-Control-Allow-Origin", "*") + response.addHeader("Access-Control-Allow-Headers", "Content-Type") + elif self.originsMatch(origin[0]): + # The Vary: Origin header to must be set to prevent + # potential cache poisoning attacks: + # https://textslashplain.com/2018/08/02/cors-and-vary/ + response.addHeader("Vary", "Origin") + response.addHeader("Access-Control-Allow-Origin", origin[0]) + response.addHeader("Access-Control-Allow-Headers", "Content-Type") + else: + return await request.respond(Http403, "Origin not allowed") + elif origin.len == 0: + discard + elif origin.len > 1: + return await request.respond( + Http400, "Only a single Origin header must be specified" + ) + except HttpWriteError as exc: + # We use default error handler if we unable to send response. + return defaultResponse(exc) + + # Calling next handler. 
+ return await nextHandler(reqfence) + +proc new*( + t: typedesc[OriginHandlerMiddlewareRef], + allowedOrigin: Option[string] = none(string), +): HttpServerMiddlewareRef = + let middleware = OriginHandlerMiddlewareRef( + allowedOriginMatcher: compileOriginMatcher(allowedOrigin), + everyOriginAllowed: isEveryOriginAllowed(allowedOrigin), + handler: originMiddlewareProc, + ) + return HttpServerMiddlewareRef(middleware) diff --git a/third-party/nwaku/waku/waku_api/rest/relay/client.nim b/third-party/nwaku/waku/waku_api/rest/relay/client.nim new file mode 100644 index 0000000..6956a95 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/relay/client.nim @@ -0,0 +1,69 @@ +{.push raises: [].} + +import stew/byteutils, chronicles, json_serialization, presto/[route, client, common] +import ../../../waku_core, ../serdes, ../rest_serdes, ./types + +export types + +logScope: + topics = "waku node rest client" + +proc encodeBytes*(value: seq[PubSubTopic], contentType: string): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +# TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) +proc relayPostSubscriptionsV1*( + body: seq[PubsubTopic] +): RestResponse[string] {. + rest, endpoint: "/relay/v1/subscriptions", meth: HttpMethod.MethodPost +.} + +proc relayPostAutoSubscriptionsV1*( + body: seq[ContentTopic] +): RestResponse[string] {. + rest, endpoint: "/relay/v1/auto/subscriptions", meth: HttpMethod.MethodPost +.} + +# TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) +proc relayDeleteSubscriptionsV1*( + body: seq[PubsubTopic] +): RestResponse[string] {. + rest, endpoint: "/relay/v1/subscriptions", meth: HttpMethod.MethodDelete +.} + +proc relayDeleteAutoSubscriptionsV1*( + body: seq[ContentTopic] +): RestResponse[string] {. 
+ rest, endpoint: "/relay/v1/auto/subscriptions", meth: HttpMethod.MethodDelete +.} + +proc encodeBytes*( + value: RelayPostMessagesRequest, contentType: string +): RestResult[seq[byte]] = + return encodeBytesOf(value, contentType) + +# TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) +proc relayGetMessagesV1*( + pubsubTopic: string +): RestResponse[RelayGetMessagesResponse] {. + rest, endpoint: "/relay/v1/messages/{pubsubTopic}", meth: HttpMethod.MethodGet +.} + +proc relayGetAutoMessagesV1*( + contentTopic: string +): RestResponse[RelayGetMessagesResponse] {. + rest, endpoint: "/relay/v1/auto/messages/{contentTopic}", meth: HttpMethod.MethodGet +.} + +# TODO: Check how we can use a constant to set the method endpoint (improve "rest" pragma under nim-presto) +proc relayPostMessagesV1*( + pubsubTopic: string, body: RelayPostMessagesRequest +): RestResponse[string] {. + rest, endpoint: "/relay/v1/messages/{pubsubTopic}", meth: HttpMethod.MethodPost +.} + +proc relayPostAutoMessagesV1*( + body: RelayPostMessagesRequest +): RestResponse[string] {. 
+ rest, endpoint: "/relay/v1/auto/messages", meth: HttpMethod.MethodPost +.} diff --git a/third-party/nwaku/waku/waku_api/rest/relay/handlers.nim b/third-party/nwaku/waku/waku_api/rest/relay/handlers.nim new file mode 100644 index 0000000..ae1f2ff --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/relay/handlers.nim @@ -0,0 +1,326 @@ +{.push raises: [].} + +import + std/sequtils, + stew/byteutils, + results, + chronicles, + json_serialization, + json_serialization/std/options, + presto/route, + presto/common +import + ../../../waku_node, + ../../../waku_relay/protocol, + ../../../waku_rln_relay, + ../../../node/waku_node, + ../../message_cache, + ../../handlers, + ../serdes, + ../responses, + ../rest_serdes, + ./types + +from std/times import getTime +from std/times import toUnix + +export types + +logScope: + topics = "waku node rest relay_api" + +##### Topic cache + +const futTimeout* = 5.seconds # Max time to wait for futures + +#### Request handlers + +const ROUTE_RELAY_SUBSCRIPTIONSV1* = "/relay/v1/subscriptions" +const ROUTE_RELAY_MESSAGESV1* = "/relay/v1/messages/{pubsubTopic}" +const ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1* = "/relay/v1/auto/subscriptions" +const ROUTE_RELAY_AUTO_MESSAGESV1* = "/relay/v1/auto/messages/{contentTopic}" +const ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC* = "/relay/v1/auto/messages" + +proc validatePubSubTopics(topics: seq[PubsubTopic]): Result[void, RestApiResponse] = + let badPubSubTopics = topics.filterIt(RelayShard.parseStaticSharding(it).isErr()) + if badPubSubTopics.len > 0: + error "Invalid pubsub topic(s)", PubSubTopics = $badPubSubTopics + return + err(RestApiResponse.badRequest("Invalid pubsub topic(s): " & $badPubSubTopics)) + + return ok() + +proc installRelayApiHandlers*( + router: var RestRouter, node: WakuNode, cache: MessageCache +) = + router.api(MethodOptions, ROUTE_RELAY_SUBSCRIPTIONSV1) do() -> RestApiResponse: + return RestApiResponse.ok() + + router.api(MethodPost, ROUTE_RELAY_SUBSCRIPTIONSV1) do( + contentBody: 
Option[ContentBody] + ) -> RestApiResponse: + ## Subscribes a node to a list of PubSub topics + + debug "post_waku_v2_relay_v1_subscriptions" + + # Check the request body + if contentBody.isNone(): + return RestApiResponse.badRequest() + + let req: seq[PubsubTopic] = decodeRequestBody[seq[PubsubTopic]](contentBody).valueOr: + return error + + validatePubSubTopics(req).isOkOr: + return error + + # Only subscribe to topics for which we have no subscribed topic handlers yet + let newTopics = req.filterIt(not cache.isPubsubSubscribed(it)) + + for pubsubTopic in newTopics: + cache.pubsubSubscribe(pubsubTopic) + + node.subscribe((kind: PubsubSub, topic: pubsubTopic), messageCacheHandler(cache)).isOkOr: + let errorMsg = "Subscribe failed:" & $error + error "SUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) + + return RestApiResponse.ok() + + router.api(MethodDelete, ROUTE_RELAY_SUBSCRIPTIONSV1) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + # ## Subscribes a node to a list of PubSub topics + # debug "delete_waku_v2_relay_v1_subscriptions" + + # Check the request body + if contentBody.isNone(): + return RestApiResponse.badRequest() + + let req: seq[PubsubTopic] = decodeRequestBody[seq[PubsubTopic]](contentBody).valueOr: + return error + + validatePubSubTopics(req).isOkOr: + return error + + # Unsubscribe all handlers from requested topics + for pubsubTopic in req: + cache.pubsubUnsubscribe(pubsubTopic) + node.unsubscribe((kind: PubsubUnsub, topic: pubsubTopic)).isOkOr: + let errorMsg = "Unsubscribe failed:" & $error + error "UNSUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) + + # Successfully unsubscribed from all requested topics + return RestApiResponse.ok() + + router.api(MethodOptions, ROUTE_RELAY_MESSAGESV1) do( + pubsubTopic: string + ) -> RestApiResponse: + return RestApiResponse.ok() + + router.api(MethodGet, ROUTE_RELAY_MESSAGESV1) do( + pubsubTopic: string + ) -> 
RestApiResponse: + # ## Returns all WakuMessages received on a PubSub topic since the + # ## last time this method was called + # ## TODO: ability to specify a return message limit + # debug "get_waku_v2_relay_v1_messages", topic=topic + + if pubsubTopic.isErr(): + return RestApiResponse.badRequest() + let pubSubTopic = pubsubTopic.get() + + let messages = cache.getMessages(pubSubTopic, clear = true) + if messages.isErr(): + debug "Not subscribed to topic", topic = pubSubTopic + return RestApiResponse.notFound() + + let data = RelayGetMessagesResponse(messages.get().map(toRelayWakuMessage)) + let resp = RestApiResponse.jsonResponse(data, status = Http200) + if resp.isErr(): + debug "An error ocurred while building the json respose", error = resp.error + return RestApiResponse.internalServerError() + + return resp.get() + + router.api(MethodPost, ROUTE_RELAY_MESSAGESV1) do( + pubsubTopic: string, contentBody: Option[ContentBody] + ) -> RestApiResponse: + if pubsubTopic.isErr(): + return RestApiResponse.badRequest() + let pubSubTopic = pubsubTopic.get() + + # ensure the node is subscribed to the topic. 
otherwise it risks publishing + # to a topic with no connected peers + if pubSubTopic notin node.wakuRelay.subscribedTopics(): + return RestApiResponse.badRequest( + "Failed to publish: Node not subscribed to topic: " & pubsubTopic + ) + + # Check the request body + if contentBody.isNone(): + return RestApiResponse.badRequest() + + let reqWakuMessage: RelayWakuMessage = decodeRequestBody[RelayWakuMessage]( + contentBody + ).valueOr: + return error + + var message: WakuMessage = reqWakuMessage.toWakuMessage(version = 0).valueOr: + return RestApiResponse.badRequest($error) + + # if RLN is mounted, append the proof to the message + if not node.wakuRlnRelay.isNil(): + # append the proof to the message + + node.wakuRlnRelay.appendRLNProof(message, float64(getTime().toUnix())).isOkOr: + return RestApiResponse.internalServerError( + "Failed to publish: error appending RLN proof to message: " & $error + ) + + (await node.wakuRelay.validateMessage(pubsubTopic, message)).isOkOr: + return RestApiResponse.badRequest("Failed to publish: " & error) + + # Log for message tracking purposes + logMessageInfo(node.wakuRelay, "rest", pubsubTopic, "none", message, onRecv = true) + + # if we reach here its either a non-RLN message or a RLN message with a valid proof + debug "Publishing message", + pubSubTopic = pubSubTopic, rln = not node.wakuRlnRelay.isNil() + if not (waitFor node.publish(some(pubSubTopic), message).withTimeout(futTimeout)): + error "Failed to publish message to topic", pubSubTopic = pubSubTopic + return RestApiResponse.internalServerError("Failed to publish: timedout") + + return RestApiResponse.ok() + + # Autosharding API + + router.api(MethodOptions, ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1) do() -> RestApiResponse: + return RestApiResponse.ok() + + router.api(MethodPost, ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Subscribes a node to a list of content topics. 
+ + debug "post_waku_v2_relay_v1_auto_subscriptions" + + let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr: + return error + + # Only subscribe to topics for which we have no subscribed topic handlers yet + let newTopics = req.filterIt(not cache.isContentSubscribed(it)) + + for contentTopic in newTopics: + cache.contentSubscribe(contentTopic) + + node.subscribe( + (kind: ContentSub, topic: contentTopic), messageCacheHandler(cache) + ).isOkOr: + let errorMsg = "Subscribe failed:" & $error + error "SUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) + + return RestApiResponse.ok() + + router.api(MethodDelete, ROUTE_RELAY_AUTO_SUBSCRIPTIONSV1) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + ## Unsubscribes a node from a list of content topics. + + debug "delete_waku_v2_relay_v1_auto_subscriptions" + + let req: seq[ContentTopic] = decodeRequestBody[seq[ContentTopic]](contentBody).valueOr: + return error + + for contentTopic in req: + cache.contentUnsubscribe(contentTopic) + node.unsubscribe((kind: ContentUnsub, topic: contentTopic)).isOkOr: + let errorMsg = "Unsubscribe failed:" & $error + error "UNSUBSCRIBE failed", error = errorMsg + return RestApiResponse.internalServerError(errorMsg) + + return RestApiResponse.ok() + + router.api(MethodOptions, ROUTE_RELAY_AUTO_MESSAGESV1) do( + contentTopic: string + ) -> RestApiResponse: + return RestApiResponse.ok() + + router.api(MethodGet, ROUTE_RELAY_AUTO_MESSAGESV1) do( + contentTopic: string + ) -> RestApiResponse: + ## Returns all WakuMessages received on a content topic since the + ## last time this method was called. 
+ + debug "get_waku_v2_relay_v1_auto_messages", contentTopic = contentTopic + + let contentTopic = contentTopic.valueOr: + return RestApiResponse.badRequest($error) + + let messages = cache.getAutoMessages(contentTopic, clear = true).valueOr: + debug "Not subscribed to topic", topic = contentTopic + return RestApiResponse.notFound(contentTopic) + + let data = RelayGetMessagesResponse(messages.map(toRelayWakuMessage)) + + return RestApiResponse.jsonResponse(data, status = Http200).valueOr: + debug "An error ocurred while building the json respose", error = error + return RestApiResponse.internalServerError($error) + + router.api(MethodOptions, ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC) do() -> RestApiResponse: + return RestApiResponse.ok() + + router.api(MethodPost, ROUTE_RELAY_AUTO_MESSAGESV1_NO_TOPIC) do( + contentBody: Option[ContentBody] + ) -> RestApiResponse: + # Check the request body + if contentBody.isNone(): + return RestApiResponse.badRequest() + + let req: RelayWakuMessage = decodeRequestBody[RelayWakuMessage](contentBody).valueOr: + return error + + if req.contentTopic.isNone(): + return RestApiResponse.badRequest() + + var message: WakuMessage = req.toWakuMessage(version = 0).valueOr: + return RestApiResponse.badRequest() + + if node.wakuAutoSharding.isNone(): + let msg = "Autosharding is disabled" + error "publish error", err = msg + return RestApiResponse.badRequest("Failed to publish. " & msg) + + let pubsubTopic = node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr: + let msg = "Autosharding error: " & error + error "publish error", err = msg + return RestApiResponse.badRequest("Failed to publish. 
" & msg) + + # if RLN is mounted, append the proof to the message + if not node.wakuRlnRelay.isNil(): + node.wakuRlnRelay.appendRLNProof(message, float64(getTime().toUnix())).isOkOr: + return RestApiResponse.internalServerError( + "Failed to publish: error appending RLN proof to message: " & $error + ) + + (await node.wakuRelay.validateMessage(pubsubTopic, message)).isOkOr: + return RestApiResponse.badRequest("Failed to publish: " & error) + + # Log for message tracking purposes + logMessageInfo(node.wakuRelay, "rest", pubsubTopic, "none", message, onRecv = true) + + # if we reach here its either a non-RLN message or a RLN message with a valid proof + debug "Publishing message", + contentTopic = message.contentTopic, rln = not node.wakuRlnRelay.isNil() + + var publishFut = node.publish(some($pubsubTopic), message) + if not await publishFut.withTimeout(futTimeout): + return RestApiResponse.internalServerError("Failed to publish: timedout") + + var res = publishFut.read() + + if res.isErr(): + return RestApiResponse.badRequest("Failed to publish. 
" & res.error) + + return RestApiResponse.ok() diff --git a/third-party/nwaku/waku/waku_api/rest/relay/types.nim b/third-party/nwaku/waku/waku_api/rest/relay/types.nim new file mode 100644 index 0000000..ca7d1ff --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/relay/types.nim @@ -0,0 +1,145 @@ +{.push raises: [].} + +import + std/[sets, strformat, times], + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client, common] +import ../../../common/base64, ../../../waku_core, ../serdes + +#### Types + +type RelayWakuMessage* = object + payload*: Base64String + contentTopic*: Option[ContentTopic] + version*: Option[Natural] + timestamp*: Option[int64] + meta*: Option[Base64String] + ephemeral*: Option[bool] + proof*: Option[Base64String] + +type + RelayGetMessagesResponse* = seq[RelayWakuMessage] + RelayPostMessagesRequest* = RelayWakuMessage + +#### Type conversion + +proc toRelayWakuMessage*(msg: WakuMessage): RelayWakuMessage = + RelayWakuMessage( + payload: base64.encode(msg.payload), + contentTopic: some(msg.contentTopic), + version: some(Natural(msg.version)), + timestamp: some(msg.timestamp), + meta: + if msg.meta.len > 0: + some(base64.encode(msg.meta)) + else: + none(Base64String), + ephemeral: some(msg.ephemeral), + proof: some(base64.encode(msg.proof)), + ) + +proc toWakuMessage*(msg: RelayWakuMessage, version = 0): Result[WakuMessage, string] = + let + payload = ?msg.payload.decode() + contentTopic = msg.contentTopic.get(DefaultContentTopic) + version = uint32(msg.version.get(version)) + meta = ?msg.meta.get(Base64String("")).decode() + ephemeral = msg.ephemeral.get(false) + proof = ?msg.proof.get(Base64String("")).decode() + + var timestamp = msg.timestamp.get(0) + + if timestamp == 0: + timestamp = getNanosecondTime(getTime().toUnixFloat()) + + return ok( + WakuMessage( + payload: payload, + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + meta: meta, + ephemeral: ephemeral, + proof: 
proof, + ) + ) + +#### Serialization and deserialization + +proc writeValue*( + writer: var JsonWriter[RestJson], value: RelayWakuMessage +) {.raises: [IOError].} = + writer.beginRecord() + writer.writeField("payload", value.payload) + if value.contentTopic.isSome(): + writer.writeField("contentTopic", value.contentTopic.get()) + if value.version.isSome(): + writer.writeField("version", value.version.get()) + if value.timestamp.isSome(): + writer.writeField("timestamp", value.timestamp.get()) + if value.meta.isSome(): + writer.writeField("meta", value.meta.get()) + if value.ephemeral.isSome(): + writer.writeField("ephemeral", value.ephemeral.get()) + if value.proof.isSome(): + writer.writeField("proof", value.proof.get()) + writer.endRecord() + +proc readValue*( + reader: var JsonReader[RestJson], value: var RelayWakuMessage +) {.raises: [SerializationError, IOError].} = + var + payload = none(Base64String) + contentTopic = none(ContentTopic) + version = none(Natural) + timestamp = none(int64) + meta = none(Base64String) + ephemeral = none(bool) + proof = none(Base64String) + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the same name found" + reader.raiseUnexpectedField(err, "RelayWakuMessage") + + case fieldName + of "payload": + payload = some(reader.readValue(Base64String)) + of "contentTopic": + contentTopic = some(reader.readValue(ContentTopic)) + of "version": + version = some(reader.readValue(Natural)) + of "timestamp": + timestamp = some(reader.readValue(int64)) + of "meta": + meta = some(reader.readValue(Base64String)) + of "ephemeral": + ephemeral = some(reader.readValue(bool)) + of "proof": + proof = some(reader.readValue(Base64String)) + else: + unrecognizedFieldWarning(value) + + if payload.isNone() or 
isEmptyOrWhitespace(string(payload.get())): + reader.raiseUnexpectedValue("Field `payload` is missing or empty") + + if contentTopic.isNone() or contentTopic.get().isEmptyOrWhitespace(): + reader.raiseUnexpectedValue("Field `contentTopic` is missing or empty") + + value = RelayWakuMessage( + payload: payload.get(), + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + meta: meta, + ephemeral: ephemeral, + proof: proof, + ) diff --git a/third-party/nwaku/waku/waku_api/rest/responses.nim b/third-party/nwaku/waku/waku_api/rest/responses.nim new file mode 100644 index 0000000..1827211 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/responses.nim @@ -0,0 +1,45 @@ +{.push raises: [].} + +import std/typetraits, results, chronicles, presto/common +import ./serdes + +const MIMETYPE_JSON* = MediaType.init("application/json") +const MIMETYPE_TEXT* = MediaType.init("text/plain") + +proc ok*(t: typedesc[RestApiResponse]): RestApiResponse = + RestApiResponse.response("OK", Http200, $MIMETYPE_TEXT) + +proc internalServerError*( + t: typedesc[RestApiResponse], msg: string = "" +): RestApiResponse = + RestApiResponse.error(Http500, msg, $MIMETYPE_TEXT) + +proc serviceUnavailable*( + t: typedesc[RestApiResponse], msg: string = "" +): RestApiResponse = + RestApiResponse.error(Http503, msg, $MIMETYPE_TEXT) + +proc badRequest*(t: typedesc[RestApiResponse], msg: string = ""): RestApiResponse = + RestApiResponse.error(Http400, msg, $MIMETYPE_TEXT) + +proc notFound*(t: typedesc[RestApiResponse], msg: string = ""): RestApiResponse = + RestApiResponse.error(Http404, msg, $MIMETYPE_TEXT) + +proc preconditionFailed*( + t: typedesc[RestApiResponse], msg: string = "" +): RestApiResponse = + RestApiResponse.error(Http412, msg, $MIMETYPE_TEXT) + +proc tooManyRequests*(t: typedesc[RestApiResponse], msg: string = ""): RestApiResponse = + RestApiResponse.error(Http429, msg, $MIMETYPE_TEXT) + +proc jsonResponse*( + t: typedesc[RestApiResponse], data: auto, status: 
HttpCode = Http200 +): SerdesResult[RestApiResponse] = + let encoded = ?encodeIntoJsonBytes(data) + ok(RestApiResponse.response(encoded, status, $MIMETYPE_JSON)) + +proc textResponse*( + t: typedesc[RestApiResponse], data: string, status: HttpCode = Http200 +): RestApiResponse = + RestApiResponse.response(data, status, $MIMETYPE_TEXT) diff --git a/third-party/nwaku/waku/waku_api/rest/rest_serdes.nim b/third-party/nwaku/waku/waku_api/rest/rest_serdes.nim new file mode 100644 index 0000000..1b6d5a9 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/rest_serdes.nim @@ -0,0 +1,84 @@ +{.push raises: [].} + +import + std/typetraits, + std/os, + results, + chronicles, + serialization, + json_serialization, + json_serialization/std/options, + json_serialization/std/net, + json_serialization/std/sets, + presto/common + +import ./serdes, ./responses + +logScope: + topics = "waku node rest" + +proc encodeBytesOf*[T](value: T, contentType: string): RestResult[seq[byte]] = + let reqContentType = MediaType.init(contentType) + + if reqContentType != MIMETYPE_JSON: + error "Unsupported contentType value", + contentType = contentType, typ = value.type.name + return err("Unsupported contentType") + + let encoded = ?encodeIntoJsonBytes(value) + return ok(encoded) + +func decodeRequestBody*[T]( + contentBody: Option[ContentBody] +): Result[T, RestApiResponse] = + if contentBody.isNone(): + return err(RestApiResponse.badRequest("Missing content body")) + + let reqBodyContentType = contentBody.get().contentType.mediaType + + if reqBodyContentType != MIMETYPE_JSON and reqBodyContentType != MIMETYPE_TEXT: + return err( + RestApiResponse.badRequest( + "Wrong Content-Type, expected application/json or text/plain" + ) + ) + + let reqBodyData = contentBody.get().data + + let requestResult = decodeFromJsonBytes(T, reqBodyData) + if requestResult.isErr(): + return err( + RestApiResponse.badRequest( + "Invalid content body, could not decode. 
" & $requestResult.error + ) + ) + + return ok(requestResult.get()) + +proc decodeBytes*( + t: typedesc[string], value: openarray[byte], contentType: Opt[ContentTypeData] +): RestResult[string] = + if MediaType.init($contentType) != MIMETYPE_TEXT: + error "Unsupported contentType value", contentType = contentType + return err("Unsupported contentType") + + var res: string + if len(value) > 0: + res = newString(len(value)) + copyMem(addr res[0], unsafeAddr value[0], len(value)) + return ok(res) + +proc decodeBytes*[T]( + t: typedesc[T], data: openArray[byte], contentType: Opt[ContentTypeData] +): RestResult[T] = + let reqContentType = contentType.valueOr: + error "Unsupported response, missing contentType value" + return err("Unsupported response, missing contentType") + + if reqContentType.mediaType != MIMETYPE_JSON and + reqContentType.mediaType != MIMETYPE_TEXT: + error "Unsupported response contentType value", contentType = contentType + return err("Unsupported response contentType") + + let decoded = ?decodeFromJsonBytes(T, data) + return ok(decoded) diff --git a/third-party/nwaku/waku/waku_api/rest/serdes.nim b/third-party/nwaku/waku/waku_api/rest/serdes.nim new file mode 100644 index 0000000..23b9e37 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/serdes.nim @@ -0,0 +1,124 @@ +{.push raises: [].} + +import + std/[typetraits, parseutils], + results, + stew/[byteutils, base10], + chronicles, + serialization, + json_serialization, + json_serialization/std/options, + json_serialization/std/net, + json_serialization/std/sets, + presto/common +import ../../common/base64 + +logScope: + topics = "waku node rest" + +createJsonFlavor RestJson + +Json.setWriter JsonWriter, PreferredOutput = string + +template unrecognizedFieldWarning*(field: typed) = + # TODO: There should be a different notification mechanism for informing the + # caller of a deserialization routine for unexpected fields. + # The chonicles import in this module should be removed. 
+ debug "JSON field not recognized by the current version of nwaku. Consider upgrading", + fieldName, typeName = typetraits.name(typeof field) + +type SerdesResult*[T] = Result[T, cstring] + +proc writeValue*( + writer: var JsonWriter, value: Base64String +) {.gcsafe, raises: [IOError].} = + writer.writeValue(string(value)) + +proc readValue*( + reader: var JsonReader, value: var Base64String +) {.gcsafe, raises: [SerializationError, IOError].} = + value = Base64String(reader.readValue(string)) + +proc decodeFromJsonString*[T]( + t: typedesc[T], data: JsonString, requireAllFields: bool = true +): SerdesResult[T] = + try: + if requireAllFields: + ok( + RestJson.decode( + string(data), T, requireAllFields = true, allowUnknownFields = true + ) + ) + else: + ok( + RestJson.decode( + string(data), T, requireAllFields = false, allowUnknownFields = true + ) + ) + except SerializationError: + # TODO: Do better error reporting here + err("Unable to deserialize data") + +# Internal static implementation +proc decodeFromJsonBytes*[T]( + t: typedesc[T], data: openArray[byte], requireAllFields: bool = true +): SerdesResult[T] = + try: + if requireAllFields: + ok( + RestJson.decode( + string.fromBytes(data), T, requireAllFields = true, allowUnknownFields = true + ) + ) + else: + ok( + RestJson.decode( + string.fromBytes(data), T, requireAllFields = false, allowUnknownFields = true + ) + ) + except SerializationError: + err("Unable to deserialize data: " & getCurrentExceptionMsg()) + +proc encodeIntoJsonString*(value: auto): SerdesResult[string] = + var encoded: string + try: + var stream = memoryOutput() + var writer = JsonWriter[RestJson].init(stream) + writer.writeValue(value) + encoded = stream.getOutput(string) + except SerializationError, IOError: + # TODO: Do better error reporting here + return err("unable to serialize data") + + ok(encoded) + +proc encodeIntoJsonBytes*(value: auto): SerdesResult[seq[byte]] = + var encoded: seq[byte] + try: + var stream = memoryOutput() + 
var writer = JsonWriter[RestJson].init(stream) + writer.writeValue(value) + encoded = stream.getOutput(seq[byte]) + except SerializationError, IOError: + # TODO: Do better error reporting here + return err("unable to serialize data") + + ok(encoded) + +#### helpers + +proc encodeString*(value: string): SerdesResult[string] = + ok(value) + +proc decodeString*(t: typedesc[string], value: string): SerdesResult[string] = + ok(value) + +proc encodeString*(value: SomeUnsignedInt): SerdesResult[string] = + ok(Base10.toString(value)) + +proc decodeString*(T: typedesc[SomeUnsignedInt], value: string): SerdesResult[T] = + let v = Base10.decode(T, value) + if v.isErr(): + return err(v.error()) + else: + return ok(v.get()) diff --git a/third-party/nwaku/waku/waku_api/rest/server.nim b/third-party/nwaku/waku/waku_api/rest/server.nim new file mode 100644 index 0000000..f16dfe8 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/server.nim @@ -0,0 +1,206 @@ +{.push raises: [].} + +import std/net +import + results, + chronicles, + chronos, + chronos/apps/http/httpserver, + presto, + presto/middleware, + presto/servercommon + +import ./origin_handler + +type + RestServerResult*[T] = Result[T, string] + + WakuRestServer* = object of RootObj + router*: RestRouter + httpServer*: HttpServerRef + + WakuRestServerRef* = ref WakuRestServer + +### Configuration + +type RestServerConf* {.requiresInit.} = object + cacheSize*: Natural + ## \ + ## The maximum number of recently accessed states that are kept in \ + ## memory. Speeds up requests obtaining information for consecutive + ## slots or epochs. 
+ + cacheTtl*: Natural + ## \ + ## The number of seconds to keep recently accessed states in memory + + requestTimeout*: Natural + ## \ + ## The number of seconds to wait until complete REST request will be received + + maxRequestBodySize*: Natural + ## \ + ## Maximum size of REST request body (kilobytes) + + maxRequestHeadersSize*: Natural + ## \ + ## Maximum size of REST request headers (kilobytes) + +proc default*(T: type RestServerConf): T = + RestServerConf( + cacheSize: 3, + cacheTtl: 60, + requestTimeout: 0, + maxRequestBodySize: 16_384, + maxRequestHeadersSize: 64, + ) + +### Initialization + +proc new*( + t: typedesc[WakuRestServerRef], + router: RestRouter, + address: TransportAddress, + serverIdent: string = PrestoIdent, + serverFlags = {HttpServerFlags.NotifyDisconnect}, + socketFlags: set[ServerFlags] = {ReuseAddr}, + serverUri = Uri(), + maxConnections: int = -1, + backlogSize: int = DefaultBacklogSize, + bufferSize: int = 4096, + httpHeadersTimeout = 10.seconds, + maxHeadersSize: int = 8192, + maxRequestBodySize: int = 1_048_576, + requestErrorHandler: RestRequestErrorHandler = nil, + dualstack = DualStackType.Auto, + allowedOrigin: Option[string] = none(string), +): RestServerResult[WakuRestServerRef] = + var server = WakuRestServerRef(router: router) + + let restMiddleware = RestServerMiddlewareRef.new( + router = server.router, errorHandler = requestErrorHandler + ) + let originHandlerMiddleware = OriginHandlerMiddlewareRef.new(allowedOrigin) + + let middlewares = [originHandlerMiddleware, restMiddleware] + + ## This must be empty and needed only to confirm original initialization requirements of + ## the RestHttpServer now combining old and new middleware approach. 
+ proc defaultProcessCallback( + rf: RequestFence + ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} = + discard + + let sres = HttpServerRef.new( + address, + defaultProcessCallback, + serverFlags, + socketFlags, + serverUri, + serverIdent, + maxConnections, + bufferSize, + backlogSize, + httpHeadersTimeout, + maxHeadersSize, + maxRequestBodySize, + dualstack = dualstack, + middlewares = middlewares, + ) + if sres.isOk(): + server.httpServer = sres.get() + ok(server) + else: + err(sres.error) + +proc getRouter(): RestRouter = + # TODO: Review this `validate` method. Check in nim-presto what is this used for. + proc validate(pattern: string, value: string): int = + ## This is rough validation procedure which should be simple and fast, + ## because it will be used for query routing. + if pattern.startsWith("{") and pattern.endsWith("}"): 0 else: 1 + + # disable allowed origin handling by presto, we add our own handling as middleware + RestRouter.init(validate, allowedOrigin = none(string)) + +proc init*( + T: type WakuRestServerRef, + ip: IpAddress, + port: Port, + allowedOrigin = none(string), + conf = RestServerConf.default(), + requestErrorHandler: RestRequestErrorHandler = nil, +): RestServerResult[T] = + let address = initTAddress(ip, port) + let serverFlags = + {HttpServerFlags.QueryCommaSeparatedArray, HttpServerFlags.NotifyDisconnect} + + let + headersTimeout = + if conf.requestTimeout == 0: + chronos.InfiniteDuration + else: + seconds(int64(conf.requestTimeout)) + maxHeadersSize = conf.maxRequestHeadersSize * 1024 + maxRequestBodySize = conf.maxRequestBodySize * 1024 + + let router = getRouter() + + try: + return WakuRestServerRef.new( + router, + address, + serverFlags = serverFlags, + httpHeadersTimeout = headersTimeout, + maxHeadersSize = maxHeadersSize, + maxRequestBodySize = maxRequestBodySize, + requestErrorHandler = requestErrorHandler, + allowedOrigin = allowedOrigin, + ) + except CatchableError: + return 
err(getCurrentExceptionMsg()) + +proc newRestHttpServer*( + ip: IpAddress, + port: Port, + allowedOrigin = none(string), + conf = RestServerConf.default(), + requestErrorHandler: RestRequestErrorHandler = nil, +): RestServerResult[WakuRestServerRef] = + WakuRestServerRef.init(ip, port, allowedOrigin, conf, requestErrorHandler) + +proc localAddress*(rs: WakuRestServerRef): TransportAddress = + ## Returns `rs` bound local socket address. + rs.httpServer.instance.localAddress() + +proc state*(rs: WakuRestServerRef): RestServerState = + ## Returns current REST server's state. + case rs.httpServer.state + of HttpServerState.ServerClosed: RestServerState.Closed + of HttpServerState.ServerStopped: RestServerState.Stopped + of HttpServerState.ServerRunning: RestServerState.Running + +proc start*(rs: WakuRestServerRef) = + ## Starts REST server. + rs.httpServer.start() + notice "REST service started", address = $rs.localAddress() + +proc stop*(rs: WakuRestServerRef) {.async: (raises: []).} = + ## Stop REST server from accepting new connections. + await rs.httpServer.stop() + notice "REST service stopped", address = $rs.localAddress() + +proc drop*(rs: WakuRestServerRef): Future[void] {.async: (raw: true, raises: []).} = + ## Drop all pending connections. + rs.httpServer.drop() + +proc closeWait*(rs: WakuRestServerRef) {.async: (raises: []).} = + ## Stop REST server and drop all the pending connections. + await rs.httpServer.closeWait() + notice "REST service closed", address = $rs.localAddress() + +proc join*( + rs: WakuRestServerRef +): Future[void] {.async: (raw: true, raises: [CancelledError]).} = + ## Wait until REST server will not be closed. 
+ rs.httpServer.join() diff --git a/third-party/nwaku/waku/waku_api/rest/store/client.nim b/third-party/nwaku/waku/waku_api/rest/store/client.nim new file mode 100644 index 0000000..80939ee --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/store/client.nim @@ -0,0 +1,63 @@ +{.push raises: [].} + +import + chronicles, json_serialization, json_serialization/std/options, presto/[route, client] +import + ../../../waku_store/common, + ../../../waku_core/message/digest, + ../serdes, + ../responses, + ./types + +export types + +logScope: + topics = "waku node rest store_api" + +proc decodeBytes*( + t: typedesc[StoreQueryResponseHex], + data: openArray[byte], + contentType: Opt[ContentTypeData], +): RestResult[StoreQueryResponseHex] = + if MediaType.init($contentType) == MIMETYPE_JSON: + let decoded = ?decodeFromJsonBytes(StoreQueryResponseHex, data) + return ok(decoded) + + if MediaType.init($contentType) == MIMETYPE_TEXT: + var res: string + if len(data) > 0: + res = newString(len(data)) + copyMem(addr res[0], unsafeAddr data[0], len(data)) + + return ok( + StoreQueryResponseHex( + statusCode: uint32(ErrorCode.BAD_RESPONSE), + statusDesc: res, + messages: newSeq[WakuMessageKeyValueHex](0), + paginationCursor: none(string), + ) + ) + + # If everything goes wrong + return err(cstring("Unsupported contentType " & $contentType)) + +proc getStoreMessagesV3*( + # URL-encoded reference to the store-node + peerAddr: string = "", + includeData: string = "", + pubsubTopic: string = "", + # URL-encoded comma-separated list of content topics + contentTopics: string = "", + startTime: string = "", + endTime: string = "", + + # URL-encoded comma-separated list of message hashes + hashes: string = "", + + # Optional cursor fields + cursor: string = "", # base64-encoded hash + ascending: string = "", + pageSize: string = "", +): RestResponse[StoreQueryResponseHex] {. 
+ rest, endpoint: "/store/v3/messages", meth: HttpMethod.MethodGet +.} diff --git a/third-party/nwaku/waku/waku_api/rest/store/handlers.nim b/third-party/nwaku/waku/waku_api/rest/store/handlers.nim new file mode 100644 index 0000000..10edb68 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/store/handlers.nim @@ -0,0 +1,240 @@ +{.push raises: [].} + +import std/strformat, results, chronicles, uri, json_serialization, presto/route +import + ../../../waku_core, + ../../../waku_store/common, + ../../../waku_store/self_req_handler, + ../../../waku_node, + ../../../node/peer_manager, + ../../../common/paging, + ../../handlers, + ../responses, + ../serdes, + ./types + +export types + +logScope: + topics = "waku node rest store_api" + +const futTimeout* = 5.seconds # Max time to wait for futures + +const NoPeerNoDiscError* = + RestApiResponse.preconditionFailed("No suitable service peer & no discovery method") + +# Queries the store-node with the query parameters and +# returns a RestApiResponse that is sent back to the api client. 
+proc performStoreQuery( + selfNode: WakuNode, storeQuery: StoreQueryRequest, storePeer: RemotePeerInfo +): Future[RestApiResponse] {.async.} = + let queryFut = selfNode.query(storeQuery, storePeer) + + if not await queryFut.withTimeout(futTimeout): + const msg = "No history response received (timeout)" + error msg + return RestApiResponse.internalServerError(msg) + + let futRes = queryFut.read() + + if futRes.isErr(): + const msg = "Error occurred in queryFut.read()" + error msg, error = futRes.error + return RestApiResponse.internalServerError(fmt("{msg} [{futRes.error}]")) + + let res = futRes.get().toHex() + + if res.statusCode == uint32(ErrorCode.TOO_MANY_REQUESTS): + debug "Request rate limit reached on peer ", storePeer + return RestApiResponse.tooManyRequests("Request rate limit reached") + + let resp = RestApiResponse.jsonResponse(res, status = Http200).valueOr: + const msg = "Error building the json respose" + let e = $error + error msg, error = e + return RestApiResponse.internalServerError(fmt("{msg} [{e}]")) + + return resp + +# Converts a string time representation into an Option[Timestamp]. 
+# Only positive time is considered a valid Timestamp in the request +proc parseTime(input: Option[string]): Result[Option[Timestamp], string] = + if input.isSome() and input.get() != "": + try: + let time = parseInt(input.get()) + if time > 0: + return ok(some(Timestamp(time))) + except ValueError: + return err("time parsing error: " & getCurrentExceptionMsg()) + + return ok(none(Timestamp)) + +proc parseIncludeData(input: Option[string]): Result[bool, string] = + var includeData = false + if input.isSome() and input.get() != "": + try: + includeData = parseBool(input.get()) + except ValueError: + return err("include data parsing error: " & getCurrentExceptionMsg()) + + return ok(includeData) + +# Creates a HistoryQuery from the given params +proc createStoreQuery( + includeData: Option[string], + pubsubTopic: Option[string], + contentTopics: Option[string], + startTime: Option[string], + endTime: Option[string], + hashes: Option[string], + cursor: Option[string], + direction: Option[string], + pageSize: Option[string], +): Result[StoreQueryRequest, string] = + var parsedIncludeData = ?parseIncludeData(includeData) + + # Parse pubsubTopic parameter + var parsedPubsubTopic = none(string) + if pubsubTopic.isSome(): + let decodedPubsubTopic = decodeUrl(pubsubTopic.get()) + if decodedPubsubTopic != "": + parsedPubsubTopic = some(decodedPubsubTopic) + + # Parse the content topics + var parsedContentTopics = newSeq[ContentTopic](0) + if contentTopics.isSome(): + let ctList = decodeUrl(contentTopics.get()) + if ctList != "": + for ct in ctList.split(','): + parsedContentTopics.add(ct) + + # Parse start time + let parsedStartTime = ?parseTime(startTime) + + # Parse end time + let parsedEndTime = ?parseTime(endTime) + + var parsedHashes = ?parseHashes(hashes) + + # Parse cursor information + let parsedCursor = ?parseHash(cursor) + + # Parse ascending field + var parsedDirection = default() + if direction.isSome() and direction.get() != "": + parsedDirection = 
direction.get().into() + + # Parse page size field + var parsedPagedSize = none(uint64) + if pageSize.isSome() and pageSize.get() != "": + try: + parsedPagedSize = some(uint64(parseInt(pageSize.get()))) + except CatchableError: + return err("page size parsing error: " & getCurrentExceptionMsg()) + + return ok( + StoreQueryRequest( + includeData: parsedIncludeData, + pubsubTopic: parsedPubsubTopic, + contentTopics: parsedContentTopics, + startTime: parsedStartTime, + endTime: parsedEndTime, + messageHashes: parsedHashes, + paginationCursor: parsedCursor, + paginationForward: parsedDirection, + paginationLimit: parsedPagedSize, + ) + ) + +# Simple type conversion. The "Option[Result[string, cstring]]" +# type is used by the nim-presto library. +proc toOpt(self: Option[Result[string, cstring]]): Option[string] = + if not self.isSome() or self.get().value == "": + return none(string) + if self.isSome() and self.get().value != "": + return some(self.get().value) + +proc retrieveMsgsFromSelfNode( + self: WakuNode, storeQuery: StoreQueryRequest +): Future[RestApiResponse] {.async.} = + ## Performs a "store" request to the local node (self node.) + ## Notice that this doesn't follow the regular store libp2p channel because a node + ## it is not allowed to libp2p-dial a node to itself, by default. 
+ ## + + let storeResp = (await self.wakuStore.handleSelfStoreRequest(storeQuery)).valueOr: + return RestApiResponse.internalServerError($error) + + let resp = RestApiResponse.jsonResponse(storeResp.toHex(), status = Http200).valueOr: + const msg = "Error building the json respose" + let e = $error + error msg, error = e + return RestApiResponse.internalServerError(fmt("{msg} [{e}]")) + + return resp + +# Subscribes the rest handler to attend "/store/v1/messages" requests +proc installStoreApiHandlers*( + router: var RestRouter, + node: WakuNode, + discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler), +) = + # Handles the store-query request according to the passed parameters + router.api(MethodGet, "/store/v3/messages") do( + peerAddr: Option[string], + includeData: Option[string], + pubsubTopic: Option[string], + contentTopics: Option[string], + startTime: Option[string], + endTime: Option[string], + hashes: Option[string], + cursor: Option[string], + ascending: Option[string], + pageSize: Option[string] + ) -> RestApiResponse: + let peer = peerAddr.toOpt() + + debug "REST-GET /store/v3/messages ", peer_addr = $peer + + # All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding) + # Example: + # /store/v1/messages?peerAddr=%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\&pubsubTopic=my-waku-topic + + # Parse the rest of the parameters and create a HistoryQuery + let storeQuery = createStoreQuery( + includeData.toOpt(), + pubsubTopic.toOpt(), + contentTopics.toOpt(), + startTime.toOpt(), + endTime.toOpt(), + hashes.toOpt(), + cursor.toOpt(), + ascending.toOpt(), + pageSize.toOpt(), + ).valueOr: + return RestApiResponse.badRequest(error) + + if peer.isNone() and not node.wakuStore.isNil(): + ## The user didn't specify a peer address and self-node is configured as a store node. 
+ ## In this case we assume that the user is willing to retrieve the messages stored by + ## the local/self store node. + return await node.retrieveMsgsFromSelfNode(storeQuery) + + # Parse the peer address parameter + let parsedPeerAddr = parseUrlPeerAddr(peer).valueOr: + return RestApiResponse.badRequest(error) + + let peerInfo = parsedPeerAddr.valueOr: + node.peerManager.selectPeer(WakuStoreCodec).valueOr: + let handler = discHandler.valueOr: + return NoPeerNoDiscError + + let peerOp = (await handler()).valueOr: + return RestApiResponse.internalServerError($error) + + peerOp.valueOr: + return RestApiResponse.preconditionFailed( + "No suitable service peer & none discovered" + ) + + return await node.performStoreQuery(storeQuery, peerInfo) diff --git a/third-party/nwaku/waku/waku_api/rest/store/types.nim b/third-party/nwaku/waku/waku_api/rest/store/types.nim new file mode 100644 index 0000000..99818b5 --- /dev/null +++ b/third-party/nwaku/waku/waku_api/rest/store/types.nim @@ -0,0 +1,321 @@ +{.push raises: [].} + +import + std/[sets, strformat, uri, options, sequtils], + stew/byteutils, + chronicles, + json_serialization, + json_serialization/std/options, + presto/[route, client, common] +import ../../../waku_store/common, ../../../common/base64, ../../../waku_core, ../serdes + +#### Types + +createJsonFlavor RestJson + +Json.setWriter JsonWriter, PreferredOutput = string + +#### Type conversion + +proc parseHash*(input: Option[string]): Result[Option[WakuMessageHash], string] = + let hexUrlEncoded = + if input.isSome(): + input.get() + else: + return ok(none(WakuMessageHash)) + + if hexUrlEncoded == "": + return ok(none(WakuMessageHash)) + + let hexDecoded = decodeUrl(hexUrlEncoded, false) + + var decodedBytes: seq[byte] + try: + decodedBytes = hexToSeqByte(hexDecoded) + except ValueError as e: + return err("Exception converting hex string to bytes: " & e.msg) + + if decodedBytes.len != 32: + return + err("waku message hash parsing error: invalid hash length: " & 
$decodedBytes.len) + + let hash: WakuMessageHash = fromBytes(decodedBytes) + + return ok(some(hash)) + +proc parseHashes*(input: Option[string]): Result[seq[WakuMessageHash], string] = + var hashes: seq[WakuMessageHash] = @[] + + if not input.isSome() or input.get() == "": + return ok(hashes) + + let decodedUrl = decodeUrl(input.get(), false) + + if decodedUrl != "": + for subString in decodedUrl.split(','): + let hash = ?parseHash(some(subString)) + + if hash.isSome(): + hashes.add(hash.get()) + + return ok(hashes) + +# Converts a given MessageDigest object into a suitable +# Hex-URL-encoded string suitable to be transmitted in a Rest +# request-response. The MessageDigest is first hex encoded +# and this result is URL-encoded. +proc toRestStringWakuMessageHash*(self: WakuMessageHash): string = + let hexEncoded = self.to0xHex() + encodeUrl(hexEncoded, false) + +## WakuMessage serde + +proc writeValue*( + writer: var JsonWriter, msg: WakuMessage +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + + writer.writeField("payload", base64.encode(msg.payload)) + writer.writeField("contentTopic", msg.contentTopic) + + if msg.meta.len > 0: + writer.writeField("meta", base64.encode(msg.meta)) + + writer.writeField("version", msg.version) + writer.writeField("timestamp", msg.timestamp) + writer.writeField("ephemeral", msg.ephemeral) + + if msg.proof.len > 0: + writer.writeField("proof", base64.encode(msg.proof)) + + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var WakuMessage +) {.gcsafe, raises: [SerializationError, IOError].} = + var + payload: seq[byte] + contentTopic: ContentTopic + version: uint32 + timestamp: Timestamp + ephemeral: bool + meta: seq[byte] + proof: seq[byte] + + var keys = initHashSet[string]() + for fieldName in readObjectFields(reader): + # Check for reapeated keys + if keys.containsOrIncl(fieldName): + let err = + try: + fmt"Multiple `{fieldName}` fields found" + except CatchableError: + "Multiple fields with the 
same name found" + reader.raiseUnexpectedField(err, "WakuMessage") + + case fieldName + of "payload": + let base64String = reader.readValue(Base64String) + payload = base64.decode(base64String).valueOr: + reader.raiseUnexpectedField("Failed decoding data", "payload") + of "contentTopic": + contentTopic = reader.readValue(ContentTopic) + of "version": + version = reader.readValue(uint32) + of "timestamp": + timestamp = reader.readValue(Timestamp) + of "ephemeral": + ephemeral = reader.readValue(bool) + of "meta": + let base64String = reader.readValue(Base64String) + meta = base64.decode(base64String).valueOr: + reader.raiseUnexpectedField("Failed decoding data", "meta") + of "proof": + let base64String = reader.readValue(Base64String) + proof = base64.decode(base64String).valueOr: + reader.raiseUnexpectedField("Failed decoding data", "proof") + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if payload.len == 0: + reader.raiseUnexpectedValue("Field `payload` is missing") + + value = WakuMessage( + payload: payload, + contentTopic: contentTopic, + version: version, + timestamp: timestamp, + ephemeral: ephemeral, + meta: meta, + proof: proof, + ) + +## WakuMessageKeyValueHex serde + +proc writeValue*( + writer: var JsonWriter, value: WakuMessageKeyValueHex +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + + writer.writeField("messageHash", value.messageHash) + + if value.message.isSome(): + writer.writeField("message", value.message.get()) + + if value.pubsubTopic.isSome(): + writer.writeField("pubsubTopic", value.pubsubTopic.get()) + + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var WakuMessageKeyValueHex +) {.gcsafe, raises: [SerializationError, IOError].} = + var + messageHash = none(string) + message = none(WakuMessage) + pubsubTopic = none(PubsubTopic) + + for fieldName in readObjectFields(reader): + case fieldName + of "messageHash": + if messageHash.isSome(): + reader.raiseUnexpectedField( 
+ "Multiple `messageHash` fields found", "WakuMessageKeyValueHex" + ) + messageHash = some(reader.readValue(string)) + of "message": + if message.isSome(): + reader.raiseUnexpectedField( + "Multiple `message` fields found", "WakuMessageKeyValueHex" + ) + message = some(reader.readValue(WakuMessage)) + of "pubsubTopic": + if pubsubTopic.isSome(): + reader.raiseUnexpectedField( + "Multiple `pubsubTopic` fields found", "WakuMessageKeyValueHex" + ) + pubsubTopic = some(reader.readValue(string)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if messageHash.isNone(): + reader.raiseUnexpectedValue("Field `messageHash` is missing") + + value = WakuMessageKeyValueHex( + messageHash: messageHash.get(), message: message, pubsubTopic: pubsubTopic + ) + +## StoreQueryResponse serde + +proc writeValue*( + writer: var JsonWriter, value: StoreQueryResponseHex +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + + writer.writeField("requestId", value.requestId) + writer.writeField("statusCode", value.statusCode) + writer.writeField("statusDesc", value.statusDesc) + writer.writeField("messages", value.messages) + + if value.paginationCursor.isSome(): + writer.writeField("paginationCursor", value.paginationCursor.get()) + + writer.endRecord() + +proc readValue*( + reader: var JsonReader, value: var StoreQueryResponseHex +) {.gcsafe, raises: [SerializationError, IOError].} = + var + requestId = none(string) + code = none(uint32) + desc = none(string) + messages = none(seq[WakuMessageKeyValueHex]) + cursor = none(string) + + for fieldName in readObjectFields(reader): + case fieldName + of "requestId": + if requestId.isSome(): + reader.raiseUnexpectedField( + "Multiple `requestId` fields found", "StoreQueryResponseHex" + ) + requestId = some(reader.readValue(string)) + of "statusCode": + if code.isSome(): + reader.raiseUnexpectedField( + "Multiple `statusCode` fields found", "StoreQueryResponseHex" + ) + code = some(reader.readValue(uint32)) + 
of "statusDesc": + if desc.isSome(): + reader.raiseUnexpectedField( + "Multiple `statusDesc` fields found", "StoreQueryResponseHex" + ) + desc = some(reader.readValue(string)) + of "messages": + if messages.isSome(): + reader.raiseUnexpectedField( + "Multiple `messages` fields found", "StoreQueryResponseHex" + ) + messages = some(reader.readValue(seq[WakuMessageKeyValueHex])) + of "paginationCursor": + if cursor.isSome(): + reader.raiseUnexpectedField( + "Multiple `paginationCursor` fields found", "StoreQueryResponseHex" + ) + cursor = some(reader.readValue(string)) + else: + reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName)) + + if requestId.isNone(): + reader.raiseUnexpectedValue("Field `requestId` is missing") + + if code.isNone(): + reader.raiseUnexpectedValue("Field `statusCode` is missing") + + if desc.isNone(): + reader.raiseUnexpectedValue("Field `statusDesc` is missing") + + if messages.isNone(): + reader.raiseUnexpectedValue("Field `messages` is missing") + + value = StoreQueryResponseHex( + requestId: requestId.get(), + statusCode: code.get(), + statusDesc: desc.get(), + messages: messages.get(), + paginationCursor: cursor, + ) + +## StoreRequestRest serde + +proc writeValue*( + writer: var JsonWriter, req: StoreQueryRequest +) {.gcsafe, raises: [IOError].} = + writer.beginRecord() + + writer.writeField("requestId", req.requestId) + writer.writeField("includeData", req.includeData) + + if req.pubsubTopic.isSome(): + writer.writeField("pubsubTopic", req.pubsubTopic.get()) + + writer.writeField("contentTopics", req.contentTopics) + + if req.startTime.isSome(): + writer.writeField("startTime", req.startTime.get()) + + if req.endTime.isSome(): + writer.writeField("endTime", req.endTime.get()) + + writer.writeField("messageHashes", req.messageHashes.mapIt(base64.encode(it))) + + if req.paginationCursor.isSome(): + writer.writeField("paginationCursor", base64.encode(req.paginationCursor.get())) + + writer.writeField("paginationForward", 
req.paginationForward) + + if req.paginationLimit.isSome(): + writer.writeField("paginationLimit", req.paginationLimit.get()) + + writer.endRecord() diff --git a/third-party/nwaku/waku/waku_archive.nim b/third-party/nwaku/waku/waku_archive.nim new file mode 100644 index 0000000..14c7b60 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive.nim @@ -0,0 +1,7 @@ +import + ./waku_archive/common, + ./waku_archive/archive, + ./waku_archive/driver, + ./waku_archive/retention_policy + +export common, archive, driver, retention_policy diff --git a/third-party/nwaku/waku/waku_archive/archive.nim b/third-party/nwaku/waku/waku_archive/archive.nim new file mode 100644 index 0000000..f3112c7 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/archive.nim @@ -0,0 +1,296 @@ +{.push raises: [].} + +import + std/[times, options, sequtils, algorithm], + stew/[byteutils], + chronicles, + chronos, + results, + metrics +import + ../common/paging, + ./driver, + ./retention_policy, + ../waku_core, + ../waku_core/message/digest, + ./common, + ./archive_metrics + +logScope: + topics = "waku archive" + +const + DefaultPageSize*: uint = 20 + MaxPageSize*: uint = 100 + MaxContentTopicsPerQuery*: int = 10 + + # Retention policy + WakuArchiveDefaultRetentionPolicyInterval* = chronos.minutes(30) + WakuArchiveDefaultRetentionPolicyIntervalWhenError* = chronos.minutes(1) + + # Metrics reporting + WakuArchiveDefaultMetricsReportInterval* = chronos.minutes(30) + + # Message validation + # 20 seconds maximum allowable sender timestamp "drift" + MaxMessageTimestampVariance* = getNanoSecondTime(20) + +type MessageValidator* = + proc(msg: WakuMessage): Result[void, string] {.closure, gcsafe, raises: [].} + +## Archive + +type WakuArchive* = ref object + driver: ArchiveDriver + + validator: MessageValidator + + retentionPolicy: Option[RetentionPolicy] + + retentionPolicyHandle: Future[void] + metricsHandle: Future[void] + +proc validate*(msg: WakuMessage): Result[void, string] = + if msg.ephemeral: 
+ # Ephemeral message, do not store + return + + let + now = getNanosecondTime(getTime().toUnixFloat()) + lowerBound = now - MaxMessageTimestampVariance + upperBound = now + MaxMessageTimestampVariance + + if msg.timestamp < lowerBound: + return err(invalidMessageOld) + + if upperBound < msg.timestamp: + return err(invalidMessageFuture) + + return ok() + +proc new*( + T: type WakuArchive, + driver: ArchiveDriver, + validator: MessageValidator = validate, + retentionPolicy = none(RetentionPolicy), +): Result[T, string] = + if driver.isNil(): + return err("archive driver is Nil") + + let archive = + WakuArchive(driver: driver, validator: validator, retentionPolicy: retentionPolicy) + + return ok(archive) + +proc handleMessage*( + self: WakuArchive, pubsubTopic: PubsubTopic, msg: WakuMessage +) {.async.} = + let msgHash = computeMessageHash(pubsubTopic, msg) + let msgHashHex = msgHash.to0xHex() + + trace "handling message", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + msgTimestamp = msg.timestamp + + self.validator(msg).isOkOr: + waku_archive_errors.inc(labelValues = [error]) + trace "invalid message", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + error = error + return + + let insertStartTime = getTime().toUnixFloat() + + (await self.driver.put(msgHash, pubsubTopic, msg)).isOkOr: + waku_archive_errors.inc(labelValues = [insertFailure]) + trace "failed to insert message", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + error = error + return + + let insertDuration = getTime().toUnixFloat() - insertStartTime + waku_archive_insert_duration_seconds.observe(insertDuration) + + let shard = RelayShard.parseStaticSharding(pubsubTopic).valueOr: + DefaultRelayShard + + waku_archive_messages_per_shard.inc(labelValues = [$shard.shardId]) + + trace "message archived", + msg_hash = 
msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + insertDuration = insertDuration + +proc syncMessageIngress*( + self: WakuArchive, + msgHash: WakuMessageHash, + pubsubTopic: PubsubTopic, + msg: WakuMessage, +): Future[Result[void, string]] {.async.} = + if msg.ephemeral: + return err("ephemeral message, will not store") + + let msgHashHex = msgHash.to0xHex() + + trace "handling message in syncMessageIngress", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp + + let insertStartTime = getTime().toUnixFloat() + (await self.driver.put(msgHash, pubsubTopic, msg)).isOkOr: + waku_archive_errors.inc(labelValues = [insertFailure]) + trace "failed to insert message in in syncMessageIngress", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + error = $error + return err(error) + + let insertDuration = getTime().toUnixFloat() - insertStartTime + waku_archive_insert_duration_seconds.observe(insertDuration) + + trace "message archived in syncMessageIngress", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + insertDuration = insertDuration + + return ok() + +proc findMessages*( + self: WakuArchive, query: ArchiveQuery +): Future[ArchiveResult] {.async, gcsafe.} = + ## Search the archive to return a single page of messages matching the query criteria + + if query.cursor.isSome(): + let cursor = query.cursor.get() + + if cursor.len != 32: + return + err(ArchiveError.invalidQuery("invalid cursor hash length: " & $cursor.len)) + + if cursor == EmptyWakuMessageHash: + return err(ArchiveError.invalidQuery("all zeroes cursor hash")) + + if query.contentTopics.len > MaxContentTopicsPerQuery: + return err(ArchiveError.invalidQuery("too many content topics")) + + let maxPageSize = + if query.pageSize <= 0: + DefaultPageSize + 
else: + min(query.pageSize, MaxPageSize) + + let isAscendingOrder = query.direction.into() + + let queryStartTime = getTime().toUnixFloat() + + let rows = ( + await self.driver.getMessages( + includeData = query.includeData, + contentTopics = query.contentTopics, + pubsubTopic = query.pubsubTopic, + cursor = query.cursor, + startTime = query.startTime, + endTime = query.endTime, + hashes = query.hashes, + maxPageSize = maxPageSize + 1, + ascendingOrder = isAscendingOrder, + requestId = query.requestId, + ) + ).valueOr: + return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error)) + let queryDuration = getTime().toUnixFloat() - queryStartTime + waku_archive_query_duration_seconds.observe(queryDuration) + + var hashes = newSeq[WakuMessageHash]() + var messages = newSeq[WakuMessage]() + var topics = newSeq[PubsubTopic]() + var cursor = none(ArchiveCursor) + + if rows.len == 0: + return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor)) + + let pageSize = min(rows.len, int(maxPageSize)) + + hashes = rows[0 ..< pageSize].mapIt(it[0]) + + if query.includeData: + topics = rows[0 ..< pageSize].mapIt(it[1]) + messages = rows[0 ..< pageSize].mapIt(it[2]) + + if rows.len > int(maxPageSize): + ## Build last message cursor + ## The cursor is built from the last message INCLUDED in the response + ## (i.e. 
the second last message in the rows list) + + let (hash, _, _) = rows[^2] + + cursor = some(hash) + + # Messages MUST be returned in chronological order + if not isAscendingOrder: + reverse(hashes) + reverse(topics) + reverse(messages) + + return ok( + ArchiveResponse(cursor: cursor, topics: topics, hashes: hashes, messages: messages) + ) + +proc periodicRetentionPolicy(self: WakuArchive) {.async.} = + let policy = self.retentionPolicy.get() + + while true: + debug "executing message retention policy" + (await policy.execute(self.driver)).isOkOr: + waku_archive_errors.inc(labelValues = [retPolicyFailure]) + error "failed execution of retention policy", error = error + await sleepAsync(WakuArchiveDefaultRetentionPolicyIntervalWhenError) + ## in case of error, let's try again faster + continue + + await sleepAsync(WakuArchiveDefaultRetentionPolicyInterval) + +proc periodicMetricReport(self: WakuArchive) {.async.} = + while true: + let countRes = (await self.driver.getMessagesCount()) + if countRes.isErr(): + error "loopReportStoredMessagesMetric failed to get messages count", + error = countRes.error + else: + let count = countRes.get() + waku_archive_messages.set(count, labelValues = ["stored"]) + + await sleepAsync(WakuArchiveDefaultMetricsReportInterval) + +proc start*(self: WakuArchive) = + if self.retentionPolicy.isSome(): + self.retentionPolicyHandle = self.periodicRetentionPolicy() + + self.metricsHandle = self.periodicMetricReport() + +proc stopWait*(self: WakuArchive) {.async.} = + var futures: seq[Future[void]] + + if self.retentionPolicy.isSome() and not self.retentionPolicyHandle.isNil(): + futures.add(self.retentionPolicyHandle.cancelAndWait()) + + if not self.metricsHandle.isNil: + futures.add(self.metricsHandle.cancelAndWait()) + + await noCancel(allFutures(futures)) diff --git a/third-party/nwaku/waku/waku_archive/archive_metrics.nim b/third-party/nwaku/waku/waku_archive/archive_metrics.nim new file mode 100644 index 0000000..ec97d38 --- /dev/null +++ 
b/third-party/nwaku/waku/waku_archive/archive_metrics.nim @@ -0,0 +1,18 @@ +{.push raises: [].} + +import metrics + +declarePublicGauge waku_archive_messages, "number of historical messages", ["type"] +declarePublicGauge waku_archive_messages_per_shard, + "number of historical messages per shard ", ["shard"] +declarePublicCounter waku_archive_errors, "number of store protocol errors", ["type"] +declarePublicHistogram waku_archive_insert_duration_seconds, + "message insertion duration" +declarePublicHistogram waku_archive_query_duration_seconds, "history query duration" + +# Error types (metric label values) +const + invalidMessageOld* = "invalid_message_too_old" + invalidMessageFuture* = "invalid_message_future_timestamp" + insertFailure* = "insert_failure" + retPolicyFailure* = "retpolicy_failure" diff --git a/third-party/nwaku/waku/waku_archive/common.nim b/third-party/nwaku/waku/waku_archive/common.nim new file mode 100644 index 0000000..f26c73d --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/common.nim @@ -0,0 +1,54 @@ +{.push raises: [].} + +import std/options, results +import ../waku_core, ../common/paging + +## Public API types + +type + ArchiveCursor* = WakuMessageHash + + ArchiveQuery* = object + includeData*: bool + pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + cursor*: Option[ArchiveCursor] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + hashes*: seq[WakuMessageHash] + pageSize*: uint + direction*: PagingDirection + requestId*: string + + ArchiveResponse* = object + hashes*: seq[WakuMessageHash] + messages*: seq[WakuMessage] + topics*: seq[PubsubTopic] + cursor*: Option[ArchiveCursor] + + ArchiveErrorKind* {.pure.} = enum + UNKNOWN = uint32(0) + DRIVER_ERROR = uint32(1) + INVALID_QUERY = uint32(2) + + ArchiveError* = object + case kind*: ArchiveErrorKind + of DRIVER_ERROR, INVALID_QUERY: + # TODO: Add an enum to be able to distinguish between error causes + cause*: string + else: + discard + + 
ArchiveResult* = Result[ArchiveResponse, ArchiveError] + +proc `$`*(err: ArchiveError): string = + case err.kind + of ArchiveErrorKind.DRIVER_ERROR: + "DRIVER_ERROR: " & err.cause + of ArchiveErrorKind.INVALID_QUERY: + "INVALID_QUERY: " & err.cause + of ArchiveErrorKind.UNKNOWN: + "UNKNOWN" + +proc invalidQuery*(T: type ArchiveError, cause: string): T = + ArchiveError(kind: ArchiveErrorKind.INVALID_QUERY, cause: cause) diff --git a/third-party/nwaku/waku/waku_archive/driver.nim b/third-party/nwaku/waku/waku_archive/driver.nim new file mode 100644 index 0000000..4d5cedd --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver.nim @@ -0,0 +1,102 @@ +{.push raises: [].} + +import std/options, results, chronos +import ../waku_core, ./common + +const DefaultPageSize*: uint = 25 + +type + ArchiveDriverResult*[T] = Result[T, string] + ArchiveDriver* = ref object of RootObj + +type ArchiveRow* = (WakuMessageHash, PubsubTopic, WakuMessage) + +# ArchiveDriver interface + +method put*( + driver: ArchiveDriver, + messageHash: WakuMessageHash, + pubsubTopic: PubsubTopic, + message: WakuMessage, +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method getAllMessages*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = + discard + +method getMessages*( + driver: ArchiveDriver, + includeData = true, + contentTopics = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId = "", +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = + discard + +method getMessagesCount*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method getPagesCount*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method getPagesSize*( + 
driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method getDatabaseSize*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method performVacuum*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method getOldestMessageTimestamp*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[Timestamp]] {.base, async.} = + discard + +method getNewestMessageTimestamp*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[Timestamp]] {.base, async.} = + discard + +method deleteMessagesOlderThanTimestamp*( + driver: ArchiveDriver, ts: Timestamp +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method deleteOldestMessagesNotWithinLimit*( + driver: ArchiveDriver, limit: int +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method decreaseDatabaseSize*( + driver: ArchiveDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method close*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method existsTable*( + driver: ArchiveDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.base, async.} = + discard diff --git a/third-party/nwaku/waku/waku_archive/driver/builder.nim b/third-party/nwaku/waku/waku_archive/driver/builder.nim new file mode 100644 index 0000000..ddc4476 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/builder.nim @@ -0,0 +1,124 @@ +{.push raises: [].} + +import results, chronicles, chronos +import + ../driver, + ../../common/databases/dburl, + ../../common/databases/db_sqlite, + ../../common/error_handling, + ./sqlite_driver, + ./sqlite_driver/migrations as archive_driver_sqlite_migrations, + ./queue_driver + +export sqlite_driver, queue_driver + +when defined(postgres): + import ## These imports add dependency with an external libpq library + 
./postgres_driver/migrations as archive_postgres_driver_migrations, + ./postgres_driver + export postgres_driver + +proc new*( + T: type ArchiveDriver, + url: string, + vacuum: bool, + migrate: bool, + maxNumConn: int, + onFatalErrorAction: OnFatalErrorHandler, +): Future[Result[T, string]] {.async.} = + ## url - string that defines the database + ## vacuum - if true, a cleanup operation will be applied to the database + ## migrate - if true, the database schema will be updated + ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres) + ## onFatalErrorAction - called if, e.g., the connection with db got lost + + let dbUrlValidationRes = dburl.validateDbUrl(url) + if dbUrlValidationRes.isErr(): + return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error) + + let engineRes = dburl.getDbEngine(url) + if engineRes.isErr(): + return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error) + + let engine = engineRes.get() + + case engine + of "sqlite": + let pathRes = dburl.getDbPath(url) + if pathRes.isErr(): + return err("error get path in setupWakuArchiveDriver: " & pathRes.error) + + let dbRes = SqliteDatabase.new(pathRes.get()) + if dbRes.isErr(): + return err("error in setupWakuArchiveDriver: " & dbRes.error) + + let db = dbRes.get() + + # SQLite vacuum + let sqliteStatsRes = db.gatherSqlitePageStats() + if sqliteStatsRes.isErr(): + return err("error while gathering sqlite stats: " & $sqliteStatsRes.error) + + let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get() + debug "sqlite database page stats", + pageSize = pageSize, pages = pageCount, freePages = freelistCount + + if vacuum and (pageCount > 0 and freelistCount > 0): + let vacuumRes = db.performSqliteVacuum() + if vacuumRes.isErr(): + return err("error in vacuum sqlite: " & $vacuumRes.error) + + # Database migration + if migrate: + let migrateRes = archive_driver_sqlite_migrations.migrate(db) + if migrateRes.isErr(): + 
return err("error in migrate sqlite: " & $migrateRes.error) + + debug "setting up sqlite waku archive driver" + let res = SqliteDriver.new(db) + if res.isErr(): + return err("failed to init sqlite archive driver: " & res.error) + + return ok(res.get()) + of "postgres": + when defined(postgres): + let res = PostgresDriver.new( + dbUrl = url, + maxConnections = maxNumConn, + onFatalErrorAction = onFatalErrorAction, + ) + if res.isErr(): + return err("failed to init postgres archive driver: " & res.error) + + let driver = res.get() + + # Database migration + if migrate: + let migrateRes = await archive_postgres_driver_migrations.migrate(driver) + if migrateRes.isErr(): + return err("ArchiveDriver build failed in migration: " & $migrateRes.error) + + ## This should be started once we make sure the 'messages' table exists + ## Hence, this should be run after the migration is completed. + asyncSpawn driver.startPartitionFactory(onFatalErrorAction) + + driver.startAnalyzeTableLoop() + + info "waiting for a partition to be created" + for i in 0 ..< 100: + if driver.containsAnyPartition(): + break + await sleepAsync(chronos.milliseconds(100)) + + if not driver.containsAnyPartition(): + onFatalErrorAction("a partition could not be created") + + return ok(driver) + else: + return err( + "Postgres has been configured but not been compiled. Check compiler definitions." 
+ ) + else: + debug "setting up in-memory waku archive driver" + let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages + return ok(driver) diff --git a/third-party/nwaku/waku/waku_archive/driver/postgres_driver.nim b/third-party/nwaku/waku/waku_archive/driver/postgres_driver.nim new file mode 100644 index 0000000..1da5156 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/postgres_driver.nim @@ -0,0 +1,8 @@ +{.push raises: [].} + +import + ./postgres_driver/postgres_driver, + ./postgres_driver/partitions_manager, + ./postgres_driver/postgres_healthcheck + +export postgres_driver, partitions_manager, postgres_healthcheck diff --git a/third-party/nwaku/waku/waku_archive/driver/postgres_driver/migrations.nim b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/migrations.nim new file mode 100644 index 0000000..6409a07 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/migrations.nim @@ -0,0 +1,99 @@ +{.push raises: [].} + +import std/strutils, results, chronicles, chronos +import + ../../../common/databases/common, + ../../../../migrations/message_store_postgres/pg_migration_manager, + ../postgres_driver + +logScope: + topics = "waku archive migration" + +const SchemaVersion* = 7 # increase this when there is an update in the database schema + +proc breakIntoStatements*(script: string): seq[string] = + ## Given a full migration script, that can potentially contain a list + ## of SQL statements, this proc splits it into the contained isolated statements + ## that should be executed one after the other. 
+ var statements = newSeq[string]() + + let lines = script.split('\n') + + var simpleStmt: string + var plSqlStatement: string + var insidePlSqlScript = false + for line in lines: + if line.strip().len == 0: + continue + + if insidePlSqlScript: + if line.contains("END $$"): + ## End of the Pl/SQL script + plSqlStatement &= line + statements.add(plSqlStatement) + plSqlStatement = "" + insidePlSqlScript = false + continue + else: + plSqlStatement &= line & "\n" + + if line.contains("DO $$"): + ## Beginning of the Pl/SQL script + insidePlSqlScript = true + plSqlStatement &= line & "\n" + + if not insidePlSqlScript: + if line.contains(';'): + ## End of simple statement + simpleStmt &= line + statements.add(simpleStmt) + simpleStmt = "" + else: + simpleStmt &= line & "\n" + + return statements + +proc migrate*( + driver: PostgresDriver, targetVersion = SchemaVersion +): Future[DatabaseResult[void]] {.async.} = + debug "starting message store's postgres database migration" + + let currentVersion = (await driver.getCurrentVersion()).valueOr: + return err("migrate error could not retrieve current version: " & $error) + + if currentVersion == targetVersion: + debug "database schema is up to date", + currentVersion = currentVersion, targetVersion = targetVersion + return ok() + + info "database schema is outdated", + currentVersion = currentVersion, targetVersion = targetVersion + + # Load migration scripts + let scripts = pg_migration_manager.getMigrationScripts(currentVersion, targetVersion) + + # Lock the db + (await driver.acquireDatabaseLock()).isOkOr: + error "failed to acquire lock", error = error + return err("failed to lock the db") + + defer: + (await driver.releaseDatabaseLock()).isOkOr: + error "failed to release lock", error = error + return err("failed to unlock the db.") + + # Run the migration scripts + for script in scripts: + for statement in script.breakIntoStatements(): + debug "executing migration statement", statement = statement + + (await 
driver.performWriteQuery(statement)).isOkOr: + error "failed to execute migration statement", + statement = statement, error = error + return err("failed to execute migration statement") + + debug "migration statement executed succesfully", statement = statement + + debug "finished message store's postgres database migration" + + return ok() diff --git a/third-party/nwaku/waku/waku_archive/driver/postgres_driver/partitions_manager.nim b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/partitions_manager.nim new file mode 100644 index 0000000..fe8209d --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/partitions_manager.nim @@ -0,0 +1,139 @@ +## This module is aimed to handle the creation and truncation of partition tables +## in order to limit the space occupied in disk by the database. +## +## The created partitions are referenced by the 'timestamp' field. +## + +import std/[deques, times] +import chronos, chronicles +import ../../../waku_core/time + +logScope: + topics = "waku archive partitions_manager" + +## The time range has seconds resolution +type TimeRange* = tuple[beginning: int64, `end`: int64] + +type + Partition* = object + name: string + timeRange: TimeRange + + PartitionManager* = ref object + partitions: Deque[Partition] + # FIFO of partition table names. The first is the oldest partition + +proc new*(T: type PartitionManager): T = + return PartitionManager() + +proc getPartitionFromDateTime*( + self: PartitionManager, targetMoment: int64 +): Result[Partition, string] = + ## Returns the partition name that might store a message containing the passed timestamp. + ## In order words, it simply returns the partition name which contains the given timestamp. + ## targetMoment - represents the time of interest, measured in seconds since epoch. 
+ + if self.partitions.len == 0: + return err("There are no partitions") + + for partition in self.partitions: + let timeRange = partition.timeRange + + let beginning = timeRange.beginning + let `end` = timeRange.`end` + + if beginning <= targetMoment and targetMoment < `end`: + return ok(partition) + + return err("Couldn't find a partition table for given time: " & $targetMoment) + +proc getNewestPartition*(self: PartitionManager): Result[Partition, string] = + if self.partitions.len == 0: + return err("there are no partitions allocated") + + let newestPartition = self.partitions.peekLast + return ok(newestPartition) + +proc getOldestPartition*(self: PartitionManager): Result[Partition, string] = + if self.partitions.len == 0: + return err("there are no partitions allocated") + + let oldestPartition = self.partitions.peekFirst + return ok(oldestPartition) + +proc addPartitionInfo*( + self: PartitionManager, partitionName: string, beginning: int64, `end`: int64 +) = + ## The given partition range has seconds resolution. + ## We just store information of the new added partition merely to keep track of it. + let partitionInfo = Partition(name: partitionName, timeRange: (beginning, `end`)) + trace "Adding partition info" + self.partitions.addLast(partitionInfo) + +proc clearPartitionInfo*(self: PartitionManager) = + self.partitions.clear() + +proc removeOldestPartitionName*(self: PartitionManager) = + ## Simply removed the partition from the tracked/known partitions queue. + ## Just remove it and ignore it. + discard self.partitions.popFirst() + +proc isEmpty*(self: PartitionManager): bool = + return self.partitions.len == 0 + +proc getLastMoment*(partition: Partition): int64 = + ## Considering the time range covered by the partition, this + ## returns the `end` time (number of seconds since epoch) of such range. 
+ let lastTimeInSec = partition.timeRange.`end` + return lastTimeInSec + +proc getPartitionStartTimeInNanosec*(partition: Partition): int64 = + return partition.timeRange.beginning * 1_000_000_000 + +proc containsMoment*(partition: Partition, time: int64): bool = + ## Returns true if the given moment is contained within the partition window, + ## 'false' otherwise. + ## time - number of seconds since epoch + if partition.timeRange.beginning <= time and time < partition.timeRange.`end`: + return true + + return false + +proc calcEndPartitionTime*(startTime: Timestamp): Timestamp = + ## Each partition has an "startTime" and "end" time. This proc calculates the "end" time so that + ## it precisely matches the next o'clock time. + ## This considers that the partitions should be 1 hour long. + ## For example, if `startTime` == 14:28 , then the returned end time should be 15:00. + ## Notice both `startTime` and returned time are in seconds since Epoch. + ## + ## startTime - seconds from Epoch that represents the partition start time + + let startDateTime: DateTime = times.fromUnix(startTime).utc() + + let maxPartitionDuration: times.Duration = times.initDuration(hours = 1) + ## Max time range covered by each parition + ## It is max because we aim to make the partition times synced to + ## o'clock hours. i.e. 
each partition edge will have min == sec == nanosec == 0 + + let endDateTime = startDateTime + maxPartitionDuration + let endDateTimeOClock = times.dateTime( + year = endDateTime.year, + month = endDateTime.month, + monthday = endDateTime.monthday, + hour = endDateTime.hour, + minute = 0, + second = 0, + nanosecond = 0, + zone = utc(), + ) + + return Timestamp(endDateTimeOClock.toTime().toUnix()) + +proc getName*(partition: Partition): string = + return partition.name + +proc getTimeRange*(partition: Partition): TimeRange = + return partition.timeRange + +func `==`*(a, b: Partition): bool {.inline.} = + return a.name == b.name diff --git a/third-party/nwaku/waku/waku_archive/driver/postgres_driver/postgres_driver.nim b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/postgres_driver.nim new file mode 100644 index 0000000..1518f7a --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/postgres_driver.nim @@ -0,0 +1,1556 @@ +{.push raises: [].} + +import + std/[options, sequtils, strutils, strformat, times, sugar], + stew/[byteutils, arrayops], + results, + chronos, + db_connector/[postgres, db_common], + chronicles +import + ../../../common/error_handling, + ../../../waku_core, + ../../common, + ../../driver, + ../../../common/databases/db_postgres as waku_postgres, + ./postgres_healthcheck, + ./partitions_manager + +type PostgresDriver* = ref object of ArchiveDriver + ## Establish a separate pools for read/write operations + writeConnPool: PgAsyncPool + readConnPool: PgAsyncPool + + ## Partition container + partitionMngr: PartitionManager + futLoopPartitionFactory: Future[void] + + futLoopAnalyzeTable: Future[void] + +const InsertRowStmtName = "InsertRow" +const InsertRowStmtDefinition = + """INSERT INTO messages (id, messageHash, pubsubTopic, contentTopic, payload, + version, timestamp, meta) VALUES ($1, $2, $3, $4, $5, $6, $7, CASE WHEN $8 = '' THEN NULL ELSE $8 END) ON CONFLICT DO NOTHING;""" + +const 
InsertRowInMessagesLookupStmtName = "InsertRowMessagesLookup" +const InsertRowInMessagesLookupStmtDefinition = + """INSERT INTO messages_lookup (messageHash, timestamp) VALUES ($1, $2) ON CONFLICT DO NOTHING;""" + +const SelectClause = + """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta FROM messages """ + +const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc" +const SelectNoCursorAscStmtDef = + SelectClause & + """WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp ASC, messageHash ASC LIMIT $6;""" + +const SelectNoCursorNoDataAscStmtName = "SelectWithoutCursorAndDataAsc" +const SelectNoCursorNoDataAscStmtDef = + """SELECT messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp ASC, messageHash ASC LIMIT $6;""" + +const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc" +const SelectNoCursorDescStmtDef = + SelectClause & + """WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp DESC, messageHash DESC LIMIT $6;""" + +const SelectNoCursorNoDataDescStmtName = "SelectWithoutCursorAndDataDesc" +const SelectNoCursorNoDataDescStmtDef = + """SELECT messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp DESC, messageHash DESC LIMIT $6;""" + +const SelectWithCursorDescStmtName = "SelectWithCursorDesc" +const SelectWithCursorDescStmtDef = + SelectClause & + """WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) < ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp DESC, messageHash DESC LIMIT $8;""" + +const SelectWithCursorNoDataDescStmtName = 
"SelectWithCursorNoDataDesc" +const SelectWithCursorNoDataDescStmtDef = + """SELECT messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) < ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp DESC, messageHash DESC LIMIT $8;""" + +const SelectWithCursorAscStmtName = "SelectWithCursorAsc" +const SelectWithCursorAscStmtDef = + SelectClause & + """WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) > ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp ASC, messageHash ASC LIMIT $8;""" + +const SelectWithCursorNoDataAscStmtName = "SelectWithCursorNoDataAsc" +const SelectWithCursorNoDataAscStmtDef = + """SELECT messageHash FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) > ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp ASC, messageHash ASC LIMIT $8;""" + +const SelectCursorByHashName = "SelectMessageByHashInMessagesLookup" +const SelectCursorByHashDef = + """SELECT timestamp FROM messages_lookup + WHERE messageHash = $1""" + +const + DefaultMaxNumConns = 50 + MaxHashesPerQuery = 100 + +proc new*( + T: type PostgresDriver, + dbUrl: string, + maxConnections = DefaultMaxNumConns, + onFatalErrorAction: OnFatalErrorHandler = nil, +): ArchiveDriverResult[T] = + ## Very simplistic split of max connections + let maxNumConnOnEachPool = int(maxConnections / 2) + + let readConnPool = PgAsyncPool.new(dbUrl, maxNumConnOnEachPool).valueOr: + return err("error creating read conn pool PgAsyncPool") + + let writeConnPool = PgAsyncPool.new(dbUrl, maxNumConnOnEachPool).valueOr: + return err("error creating write conn pool PgAsyncPool") + + if not isNil(onFatalErrorAction): + asyncSpawn checkConnectivity(readConnPool, onFatalErrorAction) + + if not isNil(onFatalErrorAction): + asyncSpawn 
checkConnectivity(writeConnPool, onFatalErrorAction) + + let driver = PostgresDriver( + writeConnPool: writeConnPool, + readConnPool: readConnPool, + partitionMngr: PartitionManager.new(), + ) + return ok(driver) + +proc reset*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} = + ## Clear the database partitions + let targetSize = 0 + let forceRemoval = true + let ret = await s.decreaseDatabaseSize(targetSize, forceRemoval) + return ret + +proc timeCursorCallbackImpl(pqResult: ptr PGresult, timeCursor: var Option[Timestamp]) = + ## Callback to get a timestamp out of the DB. + ## Used to get the cursor timestamp. + + let numFields = pqResult.pqnfields() + if numFields != 1: + error "Wrong number of fields" + return + + let rawTimestamp = $(pqgetvalue(pqResult, 0, 0)) + + trace "db output", rawTimestamp + + if rawTimestamp.len < 1: + return + + let catchable = catch: + parseBiggestInt(rawTimestamp) + + if catchable.isErr(): + error "could not parse correctly", error = catchable.error.msg + return + + timeCursor = some(catchable.get()) + +proc hashCallbackImpl( + pqResult: ptr PGresult, rows: var seq[(WakuMessageHash, PubsubTopic, WakuMessage)] +) = + ## Callback to get a hash out of the DB. 
+ ## Used when queries only ask for hashes + + let numFields = pqResult.pqnfields() + if numFields != 1: + error "Wrong number of fields" + return + + for iRow in 0 ..< pqResult.pqNtuples(): + let rawHash = $(pqgetvalue(pqResult, iRow, 0)) + + trace "db output", rawHash + + if rawHash.len < 1: + return + + let catchable = catch: + parseHexStr(rawHash) + + if catchable.isErr(): + error "could not parse correctly", error = catchable.error.msg + return + + let hashHex = catchable.get() + let msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31)) + + rows.add((msgHash, "", WakuMessage())) + +proc rowCallbackImpl( + pqResult: ptr PGresult, + outRows: var seq[(WakuMessageHash, PubsubTopic, WakuMessage)], +) = + ## Proc aimed to contain the logic of the callback passed to the `psasyncpool`. + ## That callback is used in "SELECT" queries. + ## + ## pqResult - contains the query results + ## outRows - seq of Store-rows. This is populated from the info contained in pqResult + + let numFields = pqResult.pqnfields() + if numFields != 7: + error "Wrong number of fields" + return + + for iRow in 0 ..< pqResult.pqNtuples(): + var + rawHash: string + rawPayload: string + rawVersion: string + rawTimestamp: string + rawMeta: string + + hashHex: string + msgHash: WakuMessageHash + + pubSubTopic: string + + contentTopic: string + payload: string + version: uint + timestamp: Timestamp + meta: string + wakuMessage: WakuMessage + + rawHash = $(pqgetvalue(pqResult, iRow, 0)) + pubSubTopic = $(pqgetvalue(pqResult, iRow, 1)) + contentTopic = $(pqgetvalue(pqResult, iRow, 2)) + rawPayload = $(pqgetvalue(pqResult, iRow, 3)) + rawVersion = $(pqgetvalue(pqResult, iRow, 4)) + rawTimestamp = $(pqgetvalue(pqResult, iRow, 5)) + rawMeta = $(pqgetvalue(pqResult, iRow, 6)) + + trace "db output", + rawHash, pubSubTopic, contentTopic, rawPayload, rawVersion, rawTimestamp, rawMeta + + try: + hashHex = parseHexStr(rawHash) + payload = parseHexStr(rawPayload) + version = parseUInt(rawVersion) + timestamp = 
parseInt(rawTimestamp) + meta = parseHexStr(rawMeta) + except ValueError: + error "could not parse correctly", error = getCurrentExceptionMsg() + + msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31)) + + wakuMessage.contentTopic = contentTopic + wakuMessage.payload = @(payload.toOpenArrayByte(0, payload.high)) + wakuMessage.version = uint32(version) + wakuMessage.timestamp = timestamp + wakuMessage.meta = @(meta.toOpenArrayByte(0, meta.high)) + + outRows.add((msgHash, pubSubTopic, wakuMessage)) + +method put*( + s: PostgresDriver, + messageHash: WakuMessageHash, + pubsubTopic: PubsubTopic, + message: WakuMessage, +): Future[ArchiveDriverResult[void]] {.async.} = + let messageHash = toHex(messageHash) + + let contentTopic = message.contentTopic + let payload = toHex(message.payload) + let version = $message.version + let timestamp = $message.timestamp + let meta = toHex(message.meta) + + trace "put PostgresDriver", + messageHash, contentTopic, payload, version, timestamp, meta + + ## this is not needed for store-v3. 
Nevertheless, we will keep that temporarily + ## until we completely remove the store/archive-v2 logic + let fakeId = "0" + + ( + ## Add the row to the messages table + await s.writeConnPool.runStmt( + InsertRowStmtName, + InsertRowStmtDefinition, + @[ + fakeId, messageHash, pubsubTopic, contentTopic, payload, version, timestamp, + meta, + ], + @[ + int32(fakeId.len), + int32(messageHash.len), + int32(pubsubTopic.len), + int32(contentTopic.len), + int32(payload.len), + int32(version.len), + int32(timestamp.len), + int32(meta.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + ) + ).isOkOr: + return err("could not put msg in messages table: " & $error) + + ## Now add the row to messages_lookup + return await s.writeConnPool.runStmt( + InsertRowInMessagesLookupStmtName, + InsertRowInMessagesLookupStmtDefinition, + @[messageHash, timestamp], + @[int32(messageHash.len), int32(timestamp.len)], + @[int32(0), int32(0)], + ) + +method getAllMessages*( + s: PostgresDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## Retrieve all messages from the store. + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + ( + await s.readConnPool.pgQuery( + """SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta + FROM messages + ORDER BY timestamp ASC, messageHash ASC""", + newSeq[string](0), + rowCallback, + ) + ).isOkOr: + return err("failed in query: " & $error) + + return ok(rows) + +proc getPartitionsList( + s: PostgresDriver +): Future[ArchiveDriverResult[seq[string]]] {.async.} = + ## Retrieves the seq of partition table names. 
+ ## e.g: @["messages_1708534333_1708534393", "messages_1708534273_1708534333"] + var partitions: seq[string] + proc rowCallback(pqResult: ptr PGresult) = + for iRow in 0 ..< pqResult.pqNtuples(): + let partitionName = $(pqgetvalue(pqResult, iRow, 0)) + partitions.add(partitionName) + + ( + await s.readConnPool.pgQuery( + """ + SELECT child.relname AS partition_name + FROM pg_inherits + JOIN pg_class parent ON pg_inherits.inhparent = parent.oid + JOIN pg_class child ON pg_inherits.inhrelid = child.oid + JOIN pg_namespace nmsp_parent ON nmsp_parent.oid = parent.relnamespace + JOIN pg_namespace nmsp_child ON nmsp_child.oid = child.relnamespace + WHERE parent.relname='messages' + ORDER BY partition_name ASC + """, + newSeq[string](0), + rowCallback, + ) + ).isOkOr: + return err("getPartitionsList failed in query: " & $error) + + return ok(partitions) + +proc getTimeCursor( + s: PostgresDriver, hashHex: string +): Future[ArchiveDriverResult[Option[Timestamp]]] {.async.} = + var timeCursor: Option[Timestamp] + + proc cursorCallback(pqResult: ptr PGresult) = + timeCursorCallbackImpl(pqResult, timeCursor) + + ?await s.readConnPool.runStmt( + SelectCursorByHashName, + SelectCursorByHashDef, + @[hashHex], + @[int32(hashHex.len)], + @[int32(0)], + cursorCallback, + ) + + return ok(timeCursor) + +proc getMessagesArbitraryQuery( + s: PostgresDriver, + contentTopics: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hexHashes: seq[string] = @[], + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## This proc allows to handle atypical queries. We don't use prepared statements for those. 
+ + var query = SelectClause + var statements: seq[string] + var args: seq[string] + + if cursor.isSome(): + let hashHex = toHex(cursor.get()) + + let timeCursor = ?await s.getTimeCursor(hashHex) + + if timeCursor.isNone(): + return err("cursor not found") + + let comp = if ascendingOrder: ">" else: "<" + statements.add("(timestamp, messageHash) " & comp & " (?,?)") + + args.add($timeCursor.get()) + args.add(hashHex) + + if contentTopics.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopics.len).join(",") & ")" + statements.add(cstmt) + for t in contentTopics: + args.add(t) + + if hexHashes.len > 0: + let cstmt = "messageHash IN (" & "?".repeat(hexHashes.len).join(",") & ")" + statements.add(cstmt) + for t in hexHashes: + args.add(t) + + if pubsubTopic.isSome(): + statements.add("pubsubTopic = ?") + args.add(pubsubTopic.get()) + + if startTime.isSome(): + statements.add("timestamp >= ?") + args.add($startTime.get()) + + if endTime.isSome(): + statements.add("timestamp <= ?") + args.add($endTime.get()) + + if statements.len > 0: + query &= " WHERE " & statements.join(" AND ") + + var direction: string + if ascendingOrder: + direction = "ASC" + else: + direction = "DESC" + + query &= " ORDER BY timestamp " & direction & ", messageHash " & direction + + query &= " LIMIT ?" 
+ args.add($maxPageSize) + + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + (await s.readConnPool.pgQuery(query, args, rowCallback, requestId)).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +proc getMessageHashesArbitraryQuery( + s: PostgresDriver, + contentTopics: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hexHashes: seq[string] = @[], + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]]] {. + async +.} = + ## This proc allows to handle atypical queries. We don't use prepared statements for those. + var query = """SELECT messageHash FROM messages""" + + var statements: seq[string] + var args: seq[string] + + if cursor.isSome(): + let hashHex = toHex(cursor.get()) + + let timeCursor = ?await s.getTimeCursor(hashHex) + + if timeCursor.isNone(): + return err("cursor not found") + + let comp = if ascendingOrder: ">" else: "<" + statements.add("(timestamp, messageHash) " & comp & " (?,?)") + + args.add($timeCursor.get()) + args.add(hashHex) + + if contentTopics.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopics.len).join(",") & ")" + statements.add(cstmt) + for t in contentTopics: + args.add(t) + + if hexHashes.len > 0: + let cstmt = "messageHash IN (" & "?".repeat(hexHashes.len).join(",") & ")" + statements.add(cstmt) + for t in hexHashes: + args.add(t) + + if pubsubTopic.isSome(): + statements.add("pubsubTopic = ?") + args.add(pubsubTopic.get()) + + if startTime.isSome(): + statements.add("timestamp >= ?") + args.add($startTime.get()) + + if endTime.isSome(): + statements.add("timestamp <= ?") + args.add($endTime.get()) + + if statements.len > 0: + query &= " WHERE " & statements.join(" AND ") + + var 
direction: string
  if ascendingOrder:
    direction = "ASC"
  else:
    direction = "DESC"

  query &= " ORDER BY timestamp " & direction & ", messageHash " & direction

  query &= " LIMIT ?"
  args.add($maxPageSize)

  var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)]
  proc rowCallback(pqResult: ptr PGresult) =
    hashCallbackImpl(pqResult, rows)

  (await s.readConnPool.pgQuery(query, args, rowCallback, requestId)).isOkOr:
    return err("failed to run query: " & $error)

  return ok(rows)

proc getMessagesPreparedStmt(
    s: PostgresDriver,
    contentTopic: string,
    pubsubTopic: PubsubTopic,
    cursor = none(ArchiveCursor),
    startTime: Timestamp,
    endTime: Timestamp,
    hashes: string,
    maxPageSize = DefaultPageSize,
    ascOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## This proc aims to run the most typical queries in a more performant way,
  ## i.e. by means of prepared statements.
  ## contentTopic - comma-separated list of content topics
  ## hashes - comma-separated list of hex-encoded message hashes

  var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)]

  proc rowCallback(pqResult: ptr PGresult) =
    rowCallbackImpl(pqResult, rows)

  let startTimeStr = $startTime
  let endTimeStr = $endTime
  let limit = $maxPageSize

  if cursor.isNone():
    var stmtName =
      if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName
    var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef

    (
      await s.readConnPool.runStmt(
        stmtName,
        stmtDef,
        @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit],
        @[
          int32(contentTopic.len),
          int32(hashes.len), # fix: was missing — lengths must match the 6 params above
          int32(pubsubTopic.len),
          int32(startTimeStr.len),
          int32(endTimeStr.len),
          int32(limit.len),
        ],
        # fix: one format flag per parameter (6, not 5)
        @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
        rowCallback,
        requestId,
      )
    ).isOkOr:
      return err(stmtName & ": " & $error)

    return ok(rows)

  let hashHex = toHex(cursor.get())

  let timeCursor = ?await s.getTimeCursor(hashHex)

  if timeCursor.isNone():
    return err("cursor not found")

  let timeString = 
$timeCursor.get()

  var stmtName =
    if ascOrder: SelectWithCursorAscStmtName else: SelectWithCursorDescStmtName
  var stmtDef =
    if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef

  (
    await s.readConnPool.runStmt(
      stmtName,
      stmtDef,
      @[
        contentTopic, hashes, pubsubTopic, hashHex, timeString, startTimeStr,
        endTimeStr, limit,
      ],
      @[
        int32(contentTopic.len),
        int32(hashes.len),
        int32(pubsubTopic.len),
        # fix: lengths now follow the same order as the values above
        # (hashHex before timeString); they were swapped.
        int32(hashHex.len),
        int32(timeString.len),
        int32(startTimeStr.len),
        int32(endTimeStr.len),
        int32(limit.len),
      ],
      @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
      rowCallback,
      requestId,
    )
  ).isOkOr:
    return err(stmtName & ": " & $error)

  return ok(rows)

proc getMessageHashesPreparedStmt(
    s: PostgresDriver,
    contentTopic: string,
    pubsubTopic: PubsubTopic,
    cursor = none(ArchiveCursor),
    startTime: Timestamp,
    endTime: Timestamp,
    hashes: string,
    maxPageSize = DefaultPageSize,
    ascOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## This proc aims to run the most typical queries in a more performant way,
  ## i.e. by means of prepared statements.
+ + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] + + proc rowCallback(pqResult: ptr PGresult) = + hashCallbackImpl(pqResult, rows) + + let startTimeStr = $startTime + let endTimeStr = $endTime + let limit = $maxPageSize + + if cursor.isNone(): + var stmtName = + if ascOrder: SelectNoCursorNoDataAscStmtName else: SelectNoCursorNoDataDescStmtName + var stmtDef = + if ascOrder: SelectNoCursorNoDataAscStmtDef else: SelectNoCursorNoDataDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(hashes.len), + int32(pubsubTopic.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + requestId, + ) + ).isOkOr: + return err(stmtName & ": " & $error) + + return ok(rows) + + let hashHex = toHex(cursor.get()) + + let timeCursor = ?await s.getTimeCursor(hashHex) + + if timeCursor.isNone(): + return err("cursor not found") + + let timeString = $timeCursor.get() + + var stmtName = + if ascOrder: + SelectWithCursorNoDataAscStmtName + else: + SelectWithCursorNoDataDescStmtName + var stmtDef = + if ascOrder: SelectWithCursorNoDataAscStmtDef else: SelectWithCursorNoDataDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[ + contentTopic, hashes, pubsubTopic, hashHex, timeString, startTimeStr, + endTimeStr, limit, + ], + @[ + int32(contentTopic.len), + int32(hashes.len), + int32(pubsubTopic.len), + int32(timeString.len), + int32(hashHex.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + requestId, + ) + ).isOkOr: + return err(stmtName & ": " & $error) + + return ok(rows) + +proc getMessagesByMessageHashes( + s: PostgresDriver, hashes: string, maxPageSize: uint, requestId: string +): 
Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## Retrieves information only filtering by a given messageHashes list. + ## This proc levarages on the messages_lookup table to have better query performance + ## and only query the desired partitions in the partitioned messages table + var query = + fmt""" + WITH min_timestamp AS ( + SELECT MIN(timestamp) AS min_ts + FROM messages_lookup + WHERE messagehash IN ( + {hashes} + ) + ) + SELECT m.messageHash, pubsubTopic, contentTopic, payload, version, m.timestamp, meta + FROM messages m + INNER JOIN + messages_lookup l + ON + m.timestamp = l.timestamp + AND m.messagehash = l.messagehash + WHERE + l.timestamp >= (SELECT min_ts FROM min_timestamp) + AND l.messagehash IN ( + {hashes} + ) + ORDER BY + m.timestamp DESC, + m.messagehash DESC + LIMIT {maxPageSize}; + """ + + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + ( + await s.readConnPool.pgQuery( + query = query, rowCallback = rowCallback, requestId = requestId + ) + ).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +proc getMessagesWithinLimits( + self: PostgresDriver, + includeData: bool, + contentTopics: seq[ContentTopic], + pubsubTopic: Option[PubsubTopic], + cursor: Option[ArchiveCursor], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + maxPageSize: uint, + ascendingOrder: bool, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + if hashes.len > MaxHashesPerQuery: + return err(fmt"can not attend queries with more than {MaxHashesPerQuery} hashes") + + let hexHashes = hashes.mapIt(toHex(it)) + + if cursor.isNone() and pubsubTopic.isNone() and contentTopics.len == 0 and + startTime.isNone() and endTime.isNone() and hexHashes.len > 0: + return await self.getMessagesByMessageHashes( + "'" & hexHashes.join("','") & "'", maxPageSize, requestId + ) + + if 
contentTopics.len > 0 and hexHashes.len > 0 and pubsubTopic.isSome() and
      startTime.isSome() and endTime.isSome():
    ## Considered the most common query. Therefore, we use prepared statements to optimize it.
    if includeData:
      return await self.getMessagesPreparedStmt(
        contentTopics.join(","),
        PubsubTopic(pubsubTopic.get()),
        cursor,
        startTime.get(),
        endTime.get(),
        hexHashes.join(","),
        maxPageSize,
        ascendingOrder,
        requestId,
      )
    else:
      return await self.getMessageHashesPreparedStmt(
        contentTopics.join(","),
        PubsubTopic(pubsubTopic.get()),
        cursor,
        startTime.get(),
        endTime.get(),
        hexHashes.join(","),
        maxPageSize,
        ascendingOrder,
        requestId,
      )
  else:
    if includeData:
      ## We will run atypical query. In this case we don't use prepared statements
      return await self.getMessagesArbitraryQuery(
        contentTopics, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
        ascendingOrder, requestId,
      )
    else:
      return await self.getMessageHashesArbitraryQuery(
        contentTopics, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
        ascendingOrder, requestId,
      )

method getMessages*(
    s: PostgresDriver,
    includeData = true,
    contentTopics = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes = newSeq[WakuMessageHash](0),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId = "",
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Retrieves messages matching the given filters, splitting the hash list in
  ## batches of at most MaxHashesPerQuery per underlying query.
  ##
  ## fix: the loop upper bound is `max(0, hashes.len - 1)` instead of
  ## `hashes.len`. The clamp keeps exactly one iteration when no hashes are
  ## given (a query without a hash filter must still run once), while the
  ## `- 1` avoids a spurious trailing iteration with an EMPTY hash batch when
  ## hashes.len is a positive exact multiple of MaxHashesPerQuery — that empty
  ## batch used to fall through to an arbitrary query with no hash restriction
  ## and return rows the caller never requested.
  let rows = collect(newSeq):
    for i in countup(0, max(0, hashes.len - 1), MaxHashesPerQuery):
      let stop = min(i + MaxHashesPerQuery, hashes.len)

      let splittedHashes = hashes[i ..< stop]

      let subRows =
        ?await s.getMessagesWithinLimits(
          includeData, contentTopics, pubsubTopic, cursor, startTime, endTime,
          splittedHashes, maxPageSize, ascendingOrder, requestId,
        )

      for row in subRows:
        row

  return ok(rows)

proc getStr(
    s: PostgresDriver, 
query: string +): Future[ArchiveDriverResult[string]] {.async.} = + # Performs a query that is expected to return a single string + + var ret: string + proc rowCallback(pqResult: ptr PGresult) = + if pqResult.pqnfields() != 1: + error "Wrong number of fields in getStr" + return + + if pqResult.pqNtuples() != 1: + error "Wrong number of rows in getStr" + return + + ret = $(pqgetvalue(pqResult, 0, 0)) + + (await s.readConnPool.pgQuery(query, newSeq[string](0), rowCallback)).isOkOr: + return err("failed in getRow: " & $error) + + return ok(ret) + +proc getInt( + s: PostgresDriver, query: string +): Future[ArchiveDriverResult[int64]] {.async.} = + # Performs a query that is expected to return a single numeric value (int64) + + var retInt = 0'i64 + let str = (await s.getStr(query)).valueOr: + return err("could not get str in getInt: " & $error) + + try: + retInt = parseInt(str) + except ValueError: + return err( + "exception in getInt, parseInt, str: " & str & " query: " & query & " exception: " & + getCurrentExceptionMsg() + ) + + return ok(retInt) + +method getDatabaseSize*( + s: PostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let intRes = (await s.getInt("SELECT pg_database_size(current_database())")).valueOr: + return err("error in getDatabaseSize: " & error) + + let databaseSize: int64 = int64(intRes) + return ok(databaseSize) + +method getMessagesCount*( + s: PostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let intRes = await s.getInt("SELECT COUNT(1) FROM messages") + if intRes.isErr(): + return err("error in getMessagesCount: " & intRes.error) + + return ok(intRes.get()) + +method getOldestMessageTimestamp*( + s: PostgresDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + ## In some cases it could happen that we have + ## empty partitions which are older than the current stored rows. + ## In those cases we want to consider those older partitions as the oldest considered timestamp. 
+ let oldestPartition = s.partitionMngr.getOldestPartition().valueOr: + return err("could not get oldest partition: " & $error) + + let oldestPartitionTimeNanoSec = oldestPartition.getPartitionStartTimeInNanosec() + + let intRes = await s.getInt("SELECT MIN(timestamp) FROM messages") + if intRes.isErr(): + ## Just return the oldest partition time considering the partitions set + return ok(Timestamp(oldestPartitionTimeNanoSec)) + + return ok(Timestamp(min(intRes.get(), oldestPartitionTimeNanoSec))) + +method getNewestMessageTimestamp*( + s: PostgresDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + let intRes = await s.getInt("SELECT MAX(timestamp) FROM messages") + + if intRes.isErr(): + return err("error in getNewestMessageTimestamp: " & intRes.error) + + return ok(Timestamp(intRes.get())) + +method deleteOldestMessagesNotWithinLimit*( + s: PostgresDriver, limit: int +): Future[ArchiveDriverResult[void]] {.async.} = + var execRes = await s.writeConnPool.pgQuery( + """DELETE FROM messages WHERE messageHash NOT IN + ( + SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ? + );""", + @[$limit], + ) + if execRes.isErr(): + return err("error in deleteOldestMessagesNotWithinLimit: " & execRes.error) + + execRes = await s.writeConnPool.pgQuery( + """DELETE FROM messages_lookup WHERE messageHash NOT IN + ( + SELECT messageHash FROM messages ORDER BY timestamp DESC LIMIT ? 
+ );""", + @[$limit], + ) + if execRes.isErr(): + return err( + "error in deleteOldestMessagesNotWithinLimit messages_lookup: " & execRes.error + ) + + return ok() + +method close*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} = + ## Cancel the partition factory loop + s.futLoopPartitionFactory.cancelSoon() + + ## Cancel analyze table loop + if not s.futLoopAnalyzeTable.isNil(): + s.futLoopAnalyzeTable.cancelSoon() + + ## Close the database connection + let writeCloseRes = await s.writeConnPool.close() + let readCloseRes = await s.readConnPool.close() + + writeCloseRes.isOkOr: + return err("error closing write pool: " & $error) + + readCloseRes.isOkOr: + return err("error closing read pool: " & $error) + + return ok() + +proc sleep*( + s: PostgresDriver, seconds: int +): Future[ArchiveDriverResult[void]] {.async.} = + # This is for testing purposes only. It is aimed to test the proper + # implementation of asynchronous requests. It merely triggers a sleep in the + # database for the amount of seconds given as a parameter. + + proc rowCallback(result: ptr PGresult) = + ## We are not interested in any value in this case + discard + + try: + let params = @[$seconds] + (await s.writeConnPool.pgQuery("SELECT pg_sleep(?)", params, rowCallback)).isOkOr: + return err("error in postgres_driver sleep: " & $error) + except DbError: + # This always raises an exception although the sleep works + return err("exception sleeping: " & getCurrentExceptionMsg()) + + return ok() + +const EXPECTED_LOCK_ERROR* = "another waku instance is currently executing a migration" +proc acquireDatabaseLock*( + s: PostgresDriver, lockId: int = 841886 +): Future[ArchiveDriverResult[void]] {.async.} = + ## Acquire an advisory lock (useful to avoid more than one application running migrations at the same time) + ## This should only be used in the migrations module because this approach doesn't ensure + ## that the lock is acquired/released by the same connection. 
The preferable "lock" + ## approach is using the "performWriteQueryWithLock" proc. However, we can't use + ## "performWriteQueryWithLock" in the migrations process because we can't nest two PL/SQL + ## scripts. + + let locked = ( + await s.getStr( + fmt""" + SELECT pg_try_advisory_lock({lockId}) + """ + ) + ).valueOr: + return err("error acquiring a lock: " & error) + + if locked == "f": + return err(EXPECTED_LOCK_ERROR) + + return ok() + +proc releaseDatabaseLock*( + s: PostgresDriver, lockId: int = 841886 +): Future[ArchiveDriverResult[void]] {.async.} = + ## Release an advisory lock (useful to avoid more than one application running migrations at the same time) + let unlocked = ( + await s.getStr( + fmt""" + SELECT pg_advisory_unlock({lockId}) + """ + ) + ).valueOr: + return err("error releasing a lock: " & error) + + if unlocked == "f": + return err("could not release advisory lock") + + return ok() + +proc performWriteQuery*( + s: PostgresDriver, query: string +): Future[ArchiveDriverResult[void]] {.async.} = + ## Performs a query that somehow changes the state of the database + + (await s.writeConnPool.pgQuery(query)).isOkOr: + return err("error in performWriteQuery: " & $error) + + return ok() + +const COULD_NOT_ACQUIRE_ADVISORY_LOCK* = "could not acquire advisory lock" + +proc performWriteQueryWithLock( + self: PostgresDriver, queryToProtect: string +): Future[ArchiveDriverResult[void]] {.async.} = + ## This wraps the original query in a script so that we make sure a pg_advisory lock protects it + let query = + fmt""" + DO $$ + DECLARE + lock_acquired boolean; + BEGIN + -- Try to acquire the advisory lock + lock_acquired := pg_try_advisory_lock(123456789); + + IF NOT lock_acquired THEN + RAISE EXCEPTION '{COULD_NOT_ACQUIRE_ADVISORY_LOCK}'; + END IF; + + -- Perform the query + BEGIN + {queryToProtect} + EXCEPTION WHEN OTHERS THEN + -- Ensure the lock is released if an error occurs + PERFORM pg_advisory_unlock(123456789); + RAISE; + END; + + -- Release the 
advisory lock after the query completes successfully + PERFORM pg_advisory_unlock(123456789); + END $$; +""" + (await self.performWriteQuery(query)).isOkOr: + if error.contains(COULD_NOT_ACQUIRE_ADVISORY_LOCK): + ## We don't consider this as an error. Just someone else acquired the advisory lock + debug "skip performWriteQuery because the advisory lock is acquired by other" + return ok() + + if error.contains("already exists"): + ## expected to happen when trying to add a partition table constraint that already exists + ## e.g., constraint "constraint_name" for relation "messages_1720364735_1720364740" already exists + debug "skip already exists error", error = error + return ok() + + if error.contains("is already a partition"): + ## expected to happen when a node tries to add a partition that is already attached, + ## e.g., "messages_1720364735_1720364740" is already a partition + debug "skip is already a partition error", error = error + return ok() + + if error.contains("does not exist"): + ## expected to happen when trying to drop a constraint that has already been dropped by other + ## constraint "constraint_name" of relation "messages_1720364735_1720364740" does not exist + debug "skip does not exist error", error = error + return ok() + + debug "protected query ended with error", error = $error + return err("protected query ended with error:" & $error) + + debug "protected query ended correctly" + return ok() + +proc addPartition( + self: PostgresDriver, startTime: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = + ## Creates a partition table that will store the messages that fall in the range + ## `startTime` <= timestamp < `startTime + duration`. 
+ ## `startTime` is measured in seconds since epoch + let beginning = startTime + let `end` = partitions_manager.calcEndPartitionTime(startTime) + + let fromInSec: string = $beginning + let untilInSec: string = $`end` + + let fromInNanoSec: string = fromInSec & "000000000" + let untilInNanoSec: string = untilInSec & "000000000" + + let partitionName = "messages_" & fromInSec & "_" & untilInSec + + ## Create the partition table but not attach it yet to the main table + let createPartitionQuery = + "CREATE TABLE IF NOT EXISTS " & partitionName & + " (LIKE messages INCLUDING DEFAULTS INCLUDING CONSTRAINTS);" + + (await self.performWriteQueryWithLock(createPartitionQuery)).isOkOr: + return err(fmt"error adding partition [{partitionName}]: " & $error) + + ## Add constraint to the partition table so that EXCLUSIVE ACCESS is not performed when + ## the partition is attached to the main table. + let constraintName = partitionName & "_by_range_check" + let addTimeConstraintQuery = + "ALTER TABLE " & partitionName & " ADD CONSTRAINT " & constraintName & + " CHECK ( timestamp >= " & fromInNanoSec & " AND timestamp < " & untilInNanoSec & + " );" + + (await self.performWriteQueryWithLock(addTimeConstraintQuery)).isOkOr: + return err(fmt"error creating constraint [{partitionName}]: " & $error) + + ## Attaching the new created table as a new partition. That does not require EXCLUSIVE ACCESS. + let attachPartitionQuery = + "ALTER TABLE messages ATTACH PARTITION " & partitionName & " FOR VALUES FROM (" & + fromInNanoSec & ") TO (" & untilInNanoSec & ");" + + (await self.performWriteQueryWithLock(attachPartitionQuery)).isOkOr: + return err(fmt"error attaching partition [{partitionName}]: " & $error) + + ## Dropping the check constraint as it was only necessary to prevent full scan, + ## and EXCLUSIVE ACCESS, to the whole messages table, when the new partition was attached. 
+ let dropConstraint = + "ALTER TABLE " & partitionName & " DROP CONSTRAINT " & constraintName & ";" + + (await self.performWriteQueryWithLock(dropConstraint)).isOkOr: + return err(fmt"error dropping constraint [{partitionName}]: " & $error) + + debug "new partition added", query = createPartitionQuery + + self.partitionMngr.addPartitionInfo(partitionName, beginning, `end`) + return ok() + +proc refreshPartitionsInfo( + self: PostgresDriver +): Future[ArchiveDriverResult[void]] {.async.} = + debug "refreshPartitionsInfo" + self.partitionMngr.clearPartitionInfo() + + let partitionNamesRes = await self.getPartitionsList() + if not partitionNamesRes.isOk(): + return err("Could not retrieve partitions list: " & $partitionNamesRes.error) + else: + let partitionNames = partitionNamesRes.get() + for partitionName in partitionNames: + ## partitionName contains something like 'messages_1708449815_1708449875' + let bothTimes = partitionName.replace("messages_", "") + let times = bothTimes.split("_") + if times.len != 2: + return err(fmt"loopPartitionFactory wrong partition name {partitionName}") + + var beginning: int64 + try: + beginning = parseInt(times[0]) + except ValueError: + return err("Could not parse beginning time: " & getCurrentExceptionMsg()) + + var `end`: int64 + try: + `end` = parseInt(times[1]) + except ValueError: + return err("Could not parse end time: " & getCurrentExceptionMsg()) + + self.partitionMngr.addPartitionInfo(partitionName, beginning, `end`) + + return ok() + +const DefaultDatabasePartitionCheckTimeInterval = timer.minutes(10) + +proc loopPartitionFactory( + self: PostgresDriver, onFatalError: OnFatalErrorHandler +) {.async.} = + ## Loop proc that continuously checks whether we need to create a new partition. + ## Notice that the deletion of partitions is handled by the retention policy modules. 
+ + debug "starting loopPartitionFactory" + + while true: + trace "Check if a new partition is needed" + + ## Let's make the 'partition_manager' aware of the current partitions + (await self.refreshPartitionsInfo()).isOkOr: + onFatalError("issue in loopPartitionFactory: " & $error) + + let now = times.now().toTime().toUnix() + + if self.partitionMngr.isEmpty(): + debug "adding partition because now there aren't more partitions" + (await self.addPartition(now)).isOkOr: + onFatalError("error when creating a new partition from empty state: " & $error) + else: + let newestPartitionRes = self.partitionMngr.getNewestPartition() + if newestPartitionRes.isErr(): + onFatalError("could not get newest partition: " & $newestPartitionRes.error) + + let newestPartition = newestPartitionRes.get() + if newestPartition.containsMoment(now): + debug "creating a new partition for the future" + ## The current used partition is the last one that was created. + ## Thus, let's create another partition for the future. + + (await self.addPartition(newestPartition.getLastMoment())).isOkOr: + onFatalError("could not add the next partition for 'now': " & $error) + elif now >= newestPartition.getLastMoment(): + debug "creating a new partition to contain current messages" + ## There is no partition to contain the current time. + ## This happens if the node has been stopped for quite a long time. + ## Then, let's create the needed partition to contain 'now'. + (await self.addPartition(now)).isOkOr: + onFatalError("could not add the next partition: " & $error) + + await sleepAsync(DefaultDatabasePartitionCheckTimeInterval) + +proc startPartitionFactory*( + self: PostgresDriver, onFatalError: OnFatalErrorHandler +) {.async.} = + self.futLoopPartitionFactory = self.loopPartitionFactory(onFatalError) + +proc getTableSize*( + self: PostgresDriver, tableName: string +): Future[ArchiveDriverResult[string]] {.async.} = + ## Returns a human-readable representation of the size for the requested table. 
+ ## tableName - table of interest. + let tableSize = ( + await self.getStr( + fmt""" + SELECT pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size" + FROM pg_class C + where relname = '{tableName}'""" + ) + ).valueOr: + return err("error in getDatabaseSize: " & error) + + return ok(tableSize) + +proc removePartition( + self: PostgresDriver, partition: Partition +): Future[ArchiveDriverResult[void]] {.async.} = + ## Removes the desired partition and also removes the rows from messages_lookup table + ## whose rows belong to the partition time range + + let partitionName = partition.getName() + debug "beginning of removePartition", partitionName + + var partSize = "" + let partSizeRes = await self.getTableSize(partitionName) + if partSizeRes.isOk(): + partSize = partSizeRes.get() + + ## Detach and remove the partition concurrently to not block the parent table (messages) + let detachPartitionQuery = + "ALTER TABLE messages DETACH PARTITION " & partitionName & " CONCURRENTLY;" + debug "removeOldestPartition", query = detachPartitionQuery + (await self.performWriteQuery(detachPartitionQuery)).isOkOr: + debug "detected error when trying to detach partition", error + + if ($error).contains("FINALIZE") or + ($error).contains("already pending detach in part"): + ## We assume the database is suggesting to use FINALIZE when detaching a partition + let detachPartitionFinalizeQuery = + "ALTER TABLE messages DETACH PARTITION " & partitionName & " FINALIZE;" + debug "removeOldestPartition detaching with FINALIZE", + query = detachPartitionFinalizeQuery + (await self.performWriteQuery(detachPartitionFinalizeQuery)).isOkOr: + return err(fmt"error in FINALIZE {detachPartitionFinalizeQuery}: " & $error) + else: + return err(fmt"error in {detachPartitionQuery}: " & $error) + + ## Drop the partition + let dropPartitionQuery = "DROP TABLE " & partitionName + debug "removeOldestPartition drop partition", query = dropPartitionQuery + (await 
self.performWriteQuery(dropPartitionQuery)).isOkOr: + return err(fmt"error in {dropPartitionQuery}: " & $error) + + debug "removed partition", partition_name = partitionName, partition_size = partSize + self.partitionMngr.removeOldestPartitionName() + + ## Now delete rows from the messages_lookup table + let timeRange = partition.getTimeRange() + let `end` = timeRange.`end` * 1_000_000_000 + let deleteRowsQuery = "DELETE FROM messages_lookup WHERE timestamp < " & $`end` + (await self.performWriteQuery(deleteRowsQuery)).isOkOr: + return err(fmt"error in {deleteRowsQuery}: " & $error) + + return ok() + +proc removePartitionsOlderThan( + self: PostgresDriver, tsInNanoSec: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = + ## Removes old partitions that don't contain the specified timestamp + + let tsInSec = Timestamp(float(tsInNanoSec) / 1_000_000_000) + debug "beginning of removePartitionsOlderThan", tsInSec + + var oldestPartition = self.partitionMngr.getOldestPartition().valueOr: + return err("could not get oldest partition in removePartitionOlderThan: " & $error) + + while not oldestPartition.containsMoment(tsInSec): + (await self.removePartition(oldestPartition)).isOkOr: + return err("issue in removePartitionsOlderThan: " & $error) + + oldestPartition = self.partitionMngr.getOldestPartition().valueOr: + return err( + "could not get partition in removePartitionOlderThan in while loop: " & $error + ) + + ## We reached the partition that contains the target timestamp plus don't want to remove it + return ok() + +proc removeOldestPartition( + self: PostgresDriver, forceRemoval: bool = false, ## To allow cleanup in tests +): Future[ArchiveDriverResult[void]] {.async.} = + ## Indirectly called from the retention policy + let oldestPartition = self.partitionMngr.getOldestPartition().valueOr: + return err("could not remove oldest partition: " & $error) + + if not forceRemoval: + let now = times.now().toTime().toUnix() + let currentPartitionRes = 
self.partitionMngr.getPartitionFromDateTime(now) + if currentPartitionRes.isOk(): + ## The database contains a partition that would store current messages. + + if currentPartitionRes.get() == oldestPartition: + debug "Skipping to remove the current partition" + return ok() + + return await self.removePartition(oldestPartition) + +proc containsAnyPartition*(self: PostgresDriver): bool = + return not self.partitionMngr.isEmpty() + +method decreaseDatabaseSize*( + driver: PostgresDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.async.} = + var dbSize = (await driver.getDatabaseSize()).valueOr: + return err("decreaseDatabaseSize failed to get database size: " & $error) + + ## database size in bytes + var totalSizeOfDB: int64 = int64(dbSize) + + if totalSizeOfDB <= targetSizeInBytes: + return ok() + + debug "start reducing database size", + targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB + + while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition(): + (await driver.removeOldestPartition(forceRemoval)).isOkOr: + return err( + "decreaseDatabaseSize inside loop failed to remove oldest partition: " & $error + ) + + dbSize = (await driver.getDatabaseSize()).valueOr: + return + err("decreaseDatabaseSize inside loop failed to get database size: " & $error) + + let newCurrentSize = int64(dbSize) + if newCurrentSize == totalSizeOfDB: + return err("the previous partition removal didn't clear database size") + + totalSizeOfDB = newCurrentSize + + debug "reducing database size", + targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB + + return ok() + +method existsTable*( + s: PostgresDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = + let query: string = + fmt""" + SELECT EXISTS ( + SELECT FROM + pg_tables + WHERE + tablename = '{tableName}' + ); + """ + + var exists: string + proc rowCallback(pqResult: ptr PGresult) = + if pqResult.pqnfields() != 1: + error 
"Wrong number of fields in existsTable" + return + + if pqResult.pqNtuples() != 1: + error "Wrong number of rows in existsTable" + return + + exists = $(pqgetvalue(pqResult, 0, 0)) + + (await s.readConnPool.pgQuery(query, newSeq[string](0), rowCallback)).isOkOr: + return err("existsTable failed in getRow: " & $error) + + return ok(exists == "t") + +proc getCurrentVersion*( + s: PostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let existsVersionTable = (await s.existsTable("version")).valueOr: + return err("error in getCurrentVersion-existsTable: " & $error) + + if not existsVersionTable: + return ok(0) + + let res = (await s.getInt(fmt"SELECT version FROM version")).valueOr: + return err("error in getMessagesCount: " & $error) + + return ok(res) + +method deleteMessagesOlderThanTimestamp*( + s: PostgresDriver, tsNanoSec: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = + ## First of all, let's remove the older partitions so that we can reduce + ## the database size. + (await s.removePartitionsOlderThan(tsNanoSec)).isOkOr: + return err("error while removing older partitions: " & $error) + + ( + await s.writeConnPool.pgQuery( + "DELETE FROM messages WHERE timestamp < " & $tsNanoSec + ) + ).isOkOr: + return err("error in deleteMessagesOlderThanTimestamp: " & $error) + + return ok() + +############################################ +## TODO: start splitting code better + +const AnalyzeQuery = "ANALYZE messages" +const AnalyzeTableLockId = 111111 ## An arbitrary and different lock id +const RunAnalyzeInterval = timer.days(1) + +proc analyzeTableLoop(self: PostgresDriver) {.async.} = + ## The database stats should be calculated regularly so that the planner + ## picks up the proper indexes and we have better query performance. 
+ while true: + debug "analyzeTableLoop lock db" + (await self.acquireDatabaseLock(AnalyzeTableLockId)).isOkOr: + if error != EXPECTED_LOCK_ERROR: + error "failed to acquire lock in analyzeTableLoop", error = error + await sleepAsync(RunAnalyzeInterval) + continue + + debug "analyzeTableLoop start analysis" + (await self.performWriteQuery(AnalyzeQuery)).isOkOr: + error "failed to run ANALYZE messages", error = error + + debug "analyzeTableLoop unlock db" + (await self.releaseDatabaseLock(AnalyzeTableLockId)).isOkOr: + error "failed to release lock analyzeTableLoop", error = error + + debug "analyzeTableLoop analysis completed" + + await sleepAsync(RunAnalyzeInterval) + +proc startAnalyzeTableLoop*(self: PostgresDriver) = + self.futLoopAnalyzeTable = self.analyzeTableLoop diff --git a/third-party/nwaku/waku/waku_archive/driver/postgres_driver/postgres_healthcheck.nim b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/postgres_healthcheck.nim new file mode 100644 index 0000000..43d205c --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/postgres_driver/postgres_healthcheck.nim @@ -0,0 +1,38 @@ +{.push raises: [].} + +import chronos, results +import ../../../common/databases/db_postgres, ../../../common/error_handling + +## Simple query to validate that the postgres is working and attending requests +const HealthCheckQuery = "SELECT version();" +const CheckConnectivityInterval = 60.seconds +const MaxNumTrials = 20 +const TrialInterval = 1.seconds + +proc checkConnectivity*( + connPool: PgAsyncPool, onFatalErrorAction: OnFatalErrorHandler +) {.async.} = + while true: + (await connPool.pgQuery(HealthCheckQuery)).isOkOr: + ## The connection failed once. Let's try reconnecting for a while. + ## Notice that the 'exec' proc tries to establish a new connection. + + block errorBlock: + ## Force close all the opened connections. No need to close gracefully. 
+ (await connPool.resetConnPool()).isOkOr: + onFatalErrorAction("checkConnectivity resetConnPool error: " & error) + + var numTrial = 0 + while numTrial < MaxNumTrials: + let res = await connPool.pgQuery(HealthCheckQuery) + if res.isOk(): + ## Connection resumed. Let's go back to the normal healthcheck. + break errorBlock + + await sleepAsync(TrialInterval) + numTrial.inc() + + ## The connection couldn't be resumed. Let's inform the upper layers. + onFatalErrorAction("postgres health check error: " & error) + + await sleepAsync(CheckConnectivityInterval) diff --git a/third-party/nwaku/waku/waku_archive/driver/queue_driver.nim b/third-party/nwaku/waku/waku_archive/driver/queue_driver.nim new file mode 100644 index 0000000..df75737 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/queue_driver.nim @@ -0,0 +1,5 @@ +{.push raises: [].} + +import ./queue_driver/queue_driver, ./queue_driver/index + +export queue_driver, index diff --git a/third-party/nwaku/waku/waku_archive/driver/queue_driver/index.nim b/third-party/nwaku/waku/waku_archive/driver/queue_driver/index.nim new file mode 100644 index 0000000..17783eb --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/queue_driver/index.nim @@ -0,0 +1,28 @@ +{.push raises: [].} + +import ../../../waku_core + +type Index* = object + ## This type contains the description of an Index used in the pagination of WakuMessages + time*: Timestamp # the time at which the message is generated + hash*: WakuMessageHash + pubsubTopic*: PubsubTopic + +proc `==`*(x, y: Index): bool = + return x.hash == y.hash + +proc cmp*(x, y: Index): int = + ## compares x and y + ## returns 0 if they are equal + ## returns -1 if x < y + ## returns 1 if x > y + ## + ## Default sorting order priority is: + ## 1. time + ## 2. 
hash + + let timeCMP = cmp(x.time, y.time) + if timeCMP != 0: + return timeCMP + + return cmp(x.hash, y.hash) diff --git a/third-party/nwaku/waku/waku_archive/driver/queue_driver/queue_driver.nim b/third-party/nwaku/waku/waku_archive/driver/queue_driver/queue_driver.nim new file mode 100644 index 0000000..9dbf3c1 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/queue_driver/queue_driver.nim @@ -0,0 +1,358 @@ +{.push raises: [].} + +import std/options, results, stew/sorted_set, chronicles, chronos +import ../../../waku_core, ../../common, ../../driver, ./index + +logScope: + topics = "waku archive queue_store" + +const QueueDriverDefaultMaxCapacity* = 25_000 + +type + QueryFilterMatcher = + proc(index: Index, msg: WakuMessage): bool {.gcsafe, raises: [], closure.} + + QueueDriver* = ref object of ArchiveDriver + ## Bounded repository for indexed messages + ## + ## The store queue will keep messages up to its + ## configured capacity. As soon as this capacity + ## is reached and a new message is added, the oldest + ## item will be removed to make space for the new one. + ## This implies both a `delete` and `add` operation + ## for new items. 
+ + # TODO: a circular/ring buffer may be a more efficient implementation + items: SortedSet[Index, WakuMessage] # sorted set of stored messages + capacity: int # Maximum amount of messages to keep + + QueueDriverErrorKind {.pure.} = enum + INVALID_CURSOR + + QueueDriverGetPageResult = Result[seq[ArchiveRow], QueueDriverErrorKind] + +proc `$`(error: QueueDriverErrorKind): string = + case error + of INVALID_CURSOR: "invalid_cursor" + +### Helpers + +proc walkToCursor( + w: SortedSetWalkRef[Index, WakuMessage], startCursor: Index, forward: bool +): SortedSetResult[Index, WakuMessage] = + ## Walk to util we find the cursor + ## TODO: Improve performance here with a binary/tree search + + var nextItem = + if forward: + w.first() + else: + w.last() + + ## Fast forward until we reach the startCursor + while nextItem.isOk(): + if nextItem.value.key == startCursor: + break + + # Not yet at cursor. Continue advancing + nextItem = + if forward: + w.next() + else: + w.prev() + + return nextItem + +#### API + +proc new*(T: type QueueDriver, capacity: int = QueueDriverDefaultMaxCapacity): T = + var items = SortedSet[Index, WakuMessage].init() + return QueueDriver(items: items, capacity: capacity) + +proc contains*(driver: QueueDriver, index: Index): bool = + ## Return `true` if the store queue already contains the `index`, `false` otherwise. + return driver.items.eq(index).isOk() + +proc len*(driver: QueueDriver): int {.noSideEffect.} = + return driver.items.len + +proc getPage( + driver: QueueDriver, + pageSize: uint = 0, + forward: bool = true, + cursor: Option[Index] = none(Index), + predicate: QueryFilterMatcher = nil, +): QueueDriverGetPageResult {.raises: [].} = + ## Populate a single page in forward direction + ## Start at the `startCursor` (exclusive), or first entry (inclusive) if not defined. 
+ ## Page size must not exceed `maxPageSize` + ## Each entry must match the `pred` + var outSeq: seq[ArchiveRow] + + var w = SortedSetWalkRef[Index, WakuMessage].init(driver.items) + defer: + w.destroy() + + var currentEntry: SortedSetResult[Index, WakuMessage] + + # Find starting entry + if cursor.isSome(): + let cursorEntry = w.walkToCursor(cursor.get(), forward) + if cursorEntry.isErr(): + return err(QueueDriverErrorKind.INVALID_CURSOR) + + # Advance walker once more + currentEntry = + if forward: + w.next() + else: + w.prev() + else: + # Start from the beginning of the queue + currentEntry = + if forward: + w.first() + else: + w.last() + + trace "Starting page query", currentEntry = currentEntry + + ## This loop walks forward over the queue: + ## 1. from the given cursor (or first/last entry, if not provided) + ## 2. adds entries matching the predicate function to output page + ## 3. until either the end of the queue or maxPageSize is reached + var numberOfItems: uint = 0 + while currentEntry.isOk() and numberOfItems < pageSize: + trace "Continuing page query", + currentEntry = currentEntry, numberOfItems = numberOfItems + + let + key = currentEntry.value.key + data = currentEntry.value.data + + if predicate.isNil() or predicate(key, data): + numberOfItems += 1 + + outSeq.add((key.hash, key.pubsubTopic, data)) + + currentEntry = + if forward: + w.next() + else: + w.prev() + + trace "Successfully retrieved page", len = outSeq.len + + return ok(outSeq) + +## --- SortedSet accessors --- + +iterator fwdIterator*(driver: QueueDriver): (Index, WakuMessage) = + ## Forward iterator over the entire store queue + var + w = SortedSetWalkRef[Index, WakuMessage].init(driver.items) + res = w.first() + + while res.isOk(): + yield (res.value.key, res.value.data) + res = w.next() + + w.destroy() + +iterator bwdIterator*(driver: QueueDriver): (Index, WakuMessage) = + ## Backwards iterator over the entire store queue + var + w = SortedSetWalkRef[Index, 
WakuMessage].init(driver.items) + res = w.last() + + while res.isOk(): + yield (res.value.key, res.value.data) + res = w.prev() + + w.destroy() + +proc first*(driver: QueueDriver): ArchiveDriverResult[Index] = + var + w = SortedSetWalkRef[Index, WakuMessage].init(driver.items) + res = w.first() + w.destroy() + + if res.isErr(): + return err("Not found") + + return ok(res.value.key) + +proc last*(driver: QueueDriver): ArchiveDriverResult[Index] = + var + w = SortedSetWalkRef[Index, WakuMessage].init(driver.items) + res = w.last() + w.destroy() + + if res.isErr(): + return err("Not found") + + return ok(res.value.key) + +## --- Queue API --- + +proc add*( + driver: QueueDriver, index: Index, msg: WakuMessage +): ArchiveDriverResult[void] = + ## Add a message to the queue + ## + ## If we're at capacity, we will be removing, the oldest (first) item + if driver.contains(index): + trace "could not add item to store queue. Index already exists", index = index + return err("duplicate") + + # TODO: the below delete block can be removed if we convert to circular buffer + if driver.items.len >= driver.capacity: + var + w = SortedSetWalkRef[Index, WakuMessage].init(driver.items) + firstItem = w.first + + if cmp(index, firstItem.value.key) < 0: + # When at capacity, we won't add if message index is smaller (older) than our oldest item + w.destroy # Clean up walker + return err("too_old") + + discard driver.items.delete(firstItem.value.key) + w.destroy # better to destroy walker after a delete operation + + driver.items.insert(index).value.data = msg + + return ok() + +method put*( + driver: QueueDriver, + messageHash: WakuMessageHash, + pubsubTopic: PubsubTopic, + message: WakuMessage, +): Future[ArchiveDriverResult[void]] {.async.} = + let index = + Index(time: message.timestamp, hash: messageHash, pubsubTopic: pubsubTopic) + + return driver.add(index, message) + +method getAllMessages*( + driver: QueueDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + # 
TODO: Implement this message_store method + return err("interface method not implemented") + +method existsTable*( + driver: QueueDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = + return err("interface method not implemented") + +method getMessages*( + driver: QueueDriver, + includeData = true, + contentTopics: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes: seq[WakuMessageHash] = @[], + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId = "", +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + var index = none(Index) + + if cursor.isSome(): + index = some(Index(hash: cursor.get())) + + let matchesQuery: QueryFilterMatcher = + func (index: Index, msg: WakuMessage): bool = + if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get(): + return false + + if contentTopics.len > 0 and msg.contentTopic notin contentTopics: + return false + + if startTime.isSome() and msg.timestamp < startTime.get(): + return false + + if endTime.isSome() and msg.timestamp > endTime.get(): + return false + + if hashes.len > 0 and index.hash notin hashes: + return false + + return true + + let catchable = catch: + driver.getPage(maxPageSize, ascendingOrder, index, matchesQuery) + + let pageRes: QueueDriverGetPageResult = + if catchable.isErr(): + return err(catchable.error.msg) + else: + catchable.get() + + if pageRes.isErr(): + return err($pageRes.error) + + return ok(pageRes.value) + +method getMessagesCount*( + driver: QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method getPagesCount*( + driver: QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method getPagesSize*( + driver: QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method getDatabaseSize*( + driver: 
QueueDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return ok(int64(driver.len())) + +method performVacuum*( + driver: QueueDriver +): Future[ArchiveDriverResult[void]] {.async.} = + return err("interface method not implemented") + +method getOldestMessageTimestamp*( + driver: QueueDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return driver.first().map( + proc(index: Index): Timestamp = + index.time + ) + +method getNewestMessageTimestamp*( + driver: QueueDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return driver.last().map( + proc(index: Index): Timestamp = + index.time + ) + +method deleteMessagesOlderThanTimestamp*( + driver: QueueDriver, ts: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = + # TODO: Implement this message_store method + return err("interface method not implemented") + +method deleteOldestMessagesNotWithinLimit*( + driver: QueueDriver, limit: int +): Future[ArchiveDriverResult[void]] {.async.} = + # TODO: Implement this message_store method + return err("interface method not implemented") + +method decreaseDatabaseSize*( + driver: QueueDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.async.} = + return err("interface method not implemented") + +method close*(driver: QueueDriver): Future[ArchiveDriverResult[void]] {.async.} = + return ok() diff --git a/third-party/nwaku/waku/waku_archive/driver/sqlite_driver.nim b/third-party/nwaku/waku/waku_archive/driver/sqlite_driver.nim new file mode 100644 index 0000000..da46f6c --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/sqlite_driver.nim @@ -0,0 +1,5 @@ +{.push raises: [].} + +import ./sqlite_driver/sqlite_driver + +export sqlite_driver diff --git a/third-party/nwaku/waku/waku_archive/driver/sqlite_driver/migrations.nim b/third-party/nwaku/waku/waku_archive/driver/sqlite_driver/migrations.nim new file mode 100644 index 0000000..16ef946 --- /dev/null +++ 
b/third-party/nwaku/waku/waku_archive/driver/sqlite_driver/migrations.nim @@ -0,0 +1,74 @@ +{.push raises: [].} + +import + std/[tables, strutils, os], results, chronicles, sqlite3_abi # sqlite3_column_int64 +import ../../../common/databases/db_sqlite, ../../../common/databases/common + +logScope: + topics = "waku archive migration" + +const SchemaVersion* = 10 # increase this when there is an update in the database schema + +template projectRoot(): string = + currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".." + +const MessageStoreMigrationPath: string = projectRoot / "migrations" / "message_store" + +proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] = + ## Temporary proc created to analyse when the table actually belongs to the SchemaVersion 7. + ## + ## During many nwaku versions, 0.14.0 until 0.18.0, the SchemaVersion wasn't set or checked. + ## Docker `nwaku` nodes that start working from these versions, 0.14.0 until 0.18.0, they started + ## with this discrepancy: `user_version`== 0 (not set) but Message table with SchemaVersion 7. + ## + ## We found issues where `user_version` (SchemaVersion) was set to 0 in the database even though + ## its scheme structure reflected SchemaVersion 7. In those cases, when `nwaku` re-started to + ## apply the migration scripts (in 0.19.0) the node didn't start properly because it tried to + ## migrate a database that already had the Schema structure #7, so it failed when changing the PK. + ## + ## TODO: This was added in version 0.20.0. We might remove this in version 0.30.0, as we + ## could consider that many users use +0.20.0. 
+ + var pkColumns = newSeq[string]() + proc queryRowCallback(s: ptr sqlite3_stmt) = + let colName = cstring sqlite3_column_text(s, 0) + pkColumns.add($colName) + + let query = + """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;""" + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err("failed to determine the current SchemaVersion: " & $res.error) + + if pkColumns == @["pubsubTopic", "id", "storedAt"]: + return ok(true) + else: + info "Not considered schema version 7" + return ok(false) + +proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult[void] = + ## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then + ## it runs migration scripts if the `user_version` is outdated. The `migrationScriptsDir` path + ## points to the directory holding the migrations scripts once the db is updated, it sets the + ## `user_version` to the `tragetVersion`. + ## + ## If not `targetVersion` is provided, it defaults to `SchemaVersion`. 
## NOTE: Down migration is not currently supported
sqlite3_column_int64(s, versionCol) + timestamp = sqlite3_column_int64(s, timestampCol) + meta = @(toOpenArray(m, 0, metaLength - 1)) + + return WakuMessage( + contentTopic: ContentTopic(contentTopic), + payload: payload, + version: uint32(version), + timestamp: Timestamp(timestamp), + meta: meta, + ) + +proc queryRowTimestampCallback(s: ptr sqlite3_stmt, timestampCol: cint): Timestamp = + let timestamp = sqlite3_column_int64(s, timestampCol) + return Timestamp(timestamp) + +proc queryRowPubsubTopicCallback( + s: ptr sqlite3_stmt, pubsubTopicCol: cint +): PubsubTopic = + let + pubsubTopicPointer = + cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, pubsubTopicCol)) + pubsubTopicLength = sqlite3_column_bytes(s, pubsubTopicCol) + pubsubTopic = + string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength - 1))) + + return pubsubTopic + +proc queryRowWakuMessageHashCallback( + s: ptr sqlite3_stmt, hashCol: cint +): WakuMessageHash = + let + hashPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, hashCol)) + hashLength = sqlite3_column_bytes(s, hashCol) + hash = fromBytes(toOpenArray(hashPointer, 0, hashLength - 1)) + + return hash + +### SQLite queries + +## Create table + +proc createTableQuery(table: string): SqlQueryStr = + "CREATE TABLE IF NOT EXISTS " & table & " (" & + " messageHash BLOB NOT NULL PRIMARY KEY," & " pubsubTopic BLOB NOT NULL," & + " contentTopic BLOB NOT NULL," & " payload BLOB," & " version INTEGER NOT NULL," & + " timestamp INTEGER NOT NULL," & " meta BLOB" & ") WITHOUT ROWID;" + +proc createTable*(db: SqliteDatabase): DatabaseResult[void] = + let query = createTableQuery(DbTable) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard, + ) + return ok() + +## Create indices + +proc createOldestMessageTimestampIndexQuery(table: string): SqlQueryStr = + "CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (timestamp);" + +proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void] = + 
let query = createOldestMessageTimestampIndexQuery(DbTable) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard, + ) + return ok() + +## Insert message +type InsertMessageParams* = + (seq[byte], seq[byte], seq[byte], seq[byte], int64, Timestamp, seq[byte]) + +proc insertMessageQuery(table: string): SqlQueryStr = + return + "INSERT INTO " & table & + "(messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta)" & + " VALUES (?, ?, ?, ?, ?, ?, ?);" + +proc prepareInsertMessageStmt*( + db: SqliteDatabase +): SqliteStmt[InsertMessageParams, void] = + let query = insertMessageQuery(DbTable) + return + db.prepareStmt(query, InsertMessageParams, void).expect("this is a valid statement") + +## Count table messages + +proc countMessagesQuery(table: string): SqlQueryStr = + return "SELECT COUNT(*) FROM " & table + +proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] = + var count: int64 + proc queryRowCallback(s: ptr sqlite3_stmt) = + count = sqlite3_column_int64(s, 0) + + let query = countMessagesQuery(DbTable) + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err("failed to count number of messages in the database") + + return ok(count) + +## Get oldest message receiver timestamp + +proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr = + return "SELECT MIN(timestamp) FROM " & table + +proc selectOldestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inline.} = + var timestamp: Timestamp + proc queryRowCallback(s: ptr sqlite3_stmt) = + timestamp = queryRowTimestampCallback(s, 0) + + let query = selectOldestMessageTimestampQuery(DbTable) + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err("failed to get the oldest receiver timestamp from the database") + + return ok(timestamp) + +## Get newest message receiver timestamp + +proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr = + return "SELECT MAX(timestamp) FROM " & table + +proc 
selectNewestTimestamp*(db: SqliteDatabase): DatabaseResult[Timestamp] {.inline.} = + var timestamp: Timestamp + proc queryRowCallback(s: ptr sqlite3_stmt) = + timestamp = queryRowTimestampCallback(s, 0) + + let query = selectNewestMessageTimestampQuery(DbTable) + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err("failed to get the newest receiver timestamp from the database") + + return ok(timestamp) + +## Delete messages older than timestamp + +proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr = + return "DELETE FROM " & table & " WHERE timestamp < " & $ts + +proc deleteMessagesOlderThanTimestamp*( + db: SqliteDatabase, ts: int64 +): DatabaseResult[void] = + let query = deleteMessagesOlderThanTimestampQuery(DbTable, ts) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard, + ) + return ok() + +## Delete oldest messages not within limit + +proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr = + return + "DELETE FROM " & table & " WHERE (timestamp, messageHash) NOT IN (" & + " SELECT timestamp, messageHash FROM " & table & + " ORDER BY timestamp DESC, messageHash DESC" & " LIMIT " & $limit & ");" + +proc deleteOldestMessagesNotWithinLimit*( + db: SqliteDatabase, limit: int +): DatabaseResult[void] = + # NOTE: The word `limit` here refers the store capacity/maximum number-of-messages allowed limit + let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit = limit) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard, + ) + return ok() + +## Select all messages + +proc selectAllMessagesQuery(table: string): SqlQueryStr = + return + "SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta" & + " FROM " & table & " ORDER BY timestamp ASC" + +proc selectAllMessages*( + db: SqliteDatabase +): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] = + ## Retrieve all messages from the store. 
+ var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] + proc queryRowCallback(s: ptr sqlite3_stmt) = + let + hash = queryRowWakuMessageHashCallback(s, hashCol = 0) + pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 1) + wakuMessage = queryRowWakuMessageCallback( + s, + contentTopicCol = 2, + payloadCol = 3, + versionCol = 4, + timestampCol = 5, + metaCol = 6, + ) + + rows.add((hash, pubsubTopic, wakuMessage)) + + let query = selectAllMessagesQuery(DbTable) + db.query(query, queryRowCallback).isOkOr: + return err("select all messages failed: " & $error) + + return ok(rows) + +## Select all messages without data + +proc selectAllMessageHashesQuery(table: string): SqlQueryStr = + return "SELECT messageHash" & " FROM " & table & " ORDER BY timestamp ASC" + +proc selectAllMessageHashes*(db: SqliteDatabase): DatabaseResult[seq[WakuMessageHash]] = + ## Retrieve all messages from the store. + var rows: seq[WakuMessageHash] + proc queryRowCallback(s: ptr sqlite3_stmt) = + let hash = queryRowWakuMessageHashCallback(s, hashCol = 0) + rows.add(hash) + + let query = selectAllMessageHashesQuery(DbTable) + db.query(query, queryRowCallback).isOkOr: + return err("select all message hashes failed: " & $error) + + return ok(rows) + +## Select messages by history query with limit + +proc combineClauses(clauses: varargs[Option[string]]): Option[string] = + let whereSeq = @clauses.filterIt(it.isSome()).mapIt(it.get()) + if whereSeq.len <= 0: + return none(string) + + var where: string = whereSeq[0] + for clause in whereSeq[1 ..^ 1]: + where &= " AND " & clause + return some(where) + +proc prepareStmt( + db: SqliteDatabase, stmt: string +): DatabaseResult[SqliteStmt[void, void]] = + var s: RawStmtPtr + checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil) + return ok(SqliteStmt[void, void](s)) + +proc execSelectMessageByHash( + s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc +): DatabaseResult[void] = + let s = RawStmtPtr(s) + + checkErr 
bindParam(s, 1, toSeq(hash)) + + try: + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onRowCallback(s) + of SQLITE_DONE: + return ok() + else: + return err($sqlite3_errstr(v)) + except Exception, CatchableError: + error "exception in execSelectMessageByHash", error = getCurrentExceptionMsg() + + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + +proc selectTimestampByHashQuery(table: string): SqlQueryStr = + return "SELECT timestamp FROM " & table & " WHERE messageHash = (?)" + +proc getCursorTimestamp( + db: SqliteDatabase, hash: WakuMessageHash +): DatabaseResult[Option[Timestamp]] = + var timestamp = none(Timestamp) + + proc queryRowCallback(s: ptr sqlite3_stmt) = + timestamp = some(queryRowTimestampCallback(s, 0)) + + let query = selectTimestampByHashQuery(DbTable) + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessageByHash(hash, queryRowCallback) + dbStmt.dispose() + + return ok(timestamp) + +proc whereClause( + cursor: bool, + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + ascending: bool, +): Option[string] = + let cursorClause = + if cursor: + let comp = if ascending: ">" else: "<" + + some("(timestamp, messageHash) " & comp & " (?, ?)") + else: + none(string) + + let pubsubTopicClause = + if pubsubTopic.isNone(): + none(string) + else: + some("pubsubTopic = (?)") + + let contentTopicClause = + if contentTopic.len <= 0: + none(string) + else: + var where = "contentTopic IN (" + where &= "?" + for _ in 1 ..< contentTopic.len: + where &= ", ?" 
+      where &= ")"
+      some(where)
+
+  let startTimeClause =
+    if startTime.isNone():
+      none(string)
+    else:
+      some("timestamp >= (?)")
+
+  let endTimeClause =
+    if endTime.isNone():
+      none(string)
+    else:
+      some("timestamp <= (?)")
+
+  let hashesClause =
+    if hashes.len <= 0:
+      none(string)
+    else:
+      var where = "messageHash IN ("
+      where &= "?"
+      for _ in 1 ..< hashes.len:
+        where &= ", ?"
+      where &= ")"
+      some(where)
+
+  return combineClauses(
+    cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause,
+    hashesClause,
+  )
+
+proc execSelectMessagesWithLimitStmt(
+    s: SqliteStmt,
+    cursor: Option[(Timestamp, WakuMessageHash)],
+    pubsubTopic: Option[PubsubTopic],
+    contentTopic: seq[ContentTopic],
+    startTime: Option[Timestamp],
+    endTime: Option[Timestamp],
+    hashes: seq[WakuMessageHash],
+    onRowCallback: DataProc,
+): DatabaseResult[void] =
+  let s = RawStmtPtr(s)
+
+  # Bind params. NB: sqlite3 binds by positional index, so the order here MUST
+  # mirror the clause order produced by `whereClause` (cursor, pubsubTopic,
+  # contentTopic, startTime, endTime, hashes).
+  var paramIndex = 1
+
+  if cursor.isSome():
+    let (time, hash) = cursor.get()
+    checkErr bindParam(s, paramIndex, time)
+    paramIndex += 1
+    checkErr bindParam(s, paramIndex, toSeq(hash))
+    paramIndex += 1
+
+  if pubsubTopic.isSome():
+    let pubsubTopic = toBytes(pubsubTopic.get())
+    checkErr bindParam(s, paramIndex, pubsubTopic)
+    paramIndex += 1
+
+  for topic in contentTopic:
+    checkErr bindParam(s, paramIndex, topic.toBytes())
+    paramIndex += 1
+
+  if startTime.isSome(): # time bounds come before hashes in the WHERE clause
+    let time = startTime.get()
+    checkErr bindParam(s, paramIndex, time)
+    paramIndex += 1
+
+  if endTime.isSome():
+    let time = endTime.get()
+    checkErr bindParam(s, paramIndex, time)
+    paramIndex += 1
+
+  for hash in hashes: # hashes clause is emitted last by whereClause
+    checkErr bindParam(s, paramIndex, toSeq(hash))
+    paramIndex += 1
+
+  try:
+    while true:
+      let v = sqlite3_step(s)
+      case v
+      of SQLITE_ROW:
+        onRowCallback(s)
+      of SQLITE_DONE:
+        return ok()
+      else:
+        return err($sqlite3_errstr(v))
+  except Exception, CatchableError:
+    error "exception in execSelectMessagesWithLimitStmt",
+      error = getCurrentExceptionMsg()
+
+  # release 
implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + +proc selectMessagesWithLimitQuery( + table: string, where: Option[string], limit: uint, ascending = true +): SqlQueryStr = + let order = if ascending: "ASC" else: "DESC" + + var query: string + + query = + "SELECT messageHash, pubsubTopic, contentTopic, payload, version, timestamp, meta" + query &= " FROM " & table + + if where.isSome(): + query &= " WHERE " & where.get() + + query &= " ORDER BY timestamp " & order & ", messageHash " & order + + query &= " LIMIT " & $limit & ";" + + return query + +proc selectMessageHashesWithLimitQuery( + table: string, where: Option[string], limit: uint, ascending = true +): SqlQueryStr = + let order = if ascending: "ASC" else: "DESC" + + var query = "SELECT messageHash FROM " & table + + if where.isSome(): + query &= " WHERE " & where.get() + + query &= " ORDER BY timestamp " & order & ", messageHash " & order + + query &= " LIMIT " & $limit & ";" + + return query + +proc selectMessagesByStoreQueryWithLimit*( + db: SqliteDatabase, + contentTopic: seq[ContentTopic], + pubsubTopic: Option[PubsubTopic], + cursor: Option[WakuMessageHash], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + limit: uint, + ascending: bool, +): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] = + var timeCursor = none((Timestamp, WakuMessageHash)) + + if cursor.isSome(): + let hash: WakuMessageHash = cursor.get() + + let timeOpt = ?getCursorTimestamp(db, hash) + + if timeOpt.isNone(): + return err("cursor not found") + + timeCursor = some((timeOpt.get(), hash)) + + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] = @[] + + proc queryRowCallback(s: ptr sqlite3_stmt) = + let + hash = queryRowWakuMessageHashCallback(s, hashCol = 0) + pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 1) + message = queryRowWakuMessageCallback( + s, + 
contentTopicCol = 2, + payloadCol = 3, + versionCol = 4, + timestampCol = 5, + metaCol = 6, + ) + + rows.add((hash, pubsubTopic, message)) + + let where = whereClause( + timeCursor.isSome(), + pubsubTopic, + contentTopic, + startTime, + endTime, + hashes, + ascending, + ) + + let query = selectMessagesWithLimitQuery(DbTable, where, limit, ascending) + + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessagesWithLimitStmt( + timeCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback + ) + dbStmt.dispose() + + return ok(rows) + +proc selectMessageHashesByStoreQueryWithLimit*( + db: SqliteDatabase, + contentTopic: seq[ContentTopic], + pubsubTopic: Option[PubsubTopic], + cursor: Option[WakuMessageHash], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + limit: uint, + ascending: bool, +): DatabaseResult[seq[(WakuMessageHash, PubsubTopic, WakuMessage)]] = + var timeCursor = none((Timestamp, WakuMessageHash)) + + if cursor.isSome(): + let hash: WakuMessageHash = cursor.get() + + let timeOpt = ?getCursorTimestamp(db, hash) + + if timeOpt.isNone(): + return err("cursor not found") + + timeCursor = some((timeOpt.get(), hash)) + + var rows: seq[(WakuMessageHash, PubsubTopic, WakuMessage)] = @[] + + proc queryRowCallback(s: ptr sqlite3_stmt) = + let hash = queryRowWakuMessageHashCallback(s, hashCol = 0) + rows.add((hash, "", WakuMessage())) + + let where = whereClause( + timeCursor.isSome(), + pubsubTopic, + contentTopic, + startTime, + endTime, + hashes, + ascending, + ) + + let query = selectMessageHashesWithLimitQuery(DbTable, where, limit, ascending) + + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessagesWithLimitStmt( + timeCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback + ) + dbStmt.dispose() + + return ok(rows) diff --git a/third-party/nwaku/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim 
b/third-party/nwaku/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim new file mode 100644 index 0000000..173dd3e --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/driver/sqlite_driver/sqlite_driver.nim @@ -0,0 +1,192 @@ +# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth. +# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim +{.push raises: [].} + +import std/options, stew/byteutils, chronicles, chronos, results +import + ../../../common/databases/db_sqlite, + ../../../waku_core, + ../../../waku_core/message/digest, + ../../common, + ../../driver, + ./queries + +logScope: + topics = "waku archive sqlite" + +proc init(db: SqliteDatabase): ArchiveDriverResult[void] = + ## Misconfiguration can lead to nil DB + if db.isNil(): + return err("db not initialized") + + # Create table, if doesn't exist + let resCreate = createTable(db) + if resCreate.isErr(): + return err("failed to create table: " & resCreate.error()) + + # Create indices, if don't exist + let resRtIndex = createOldestMessageTimestampIndex(db) + if resRtIndex.isErr(): + return err("failed to create i_ts index: " & resRtIndex.error()) + + return ok() + +type SqliteDriver* = ref object of ArchiveDriver + db: SqliteDatabase + insertStmt: SqliteStmt[InsertMessageParams, void] + +proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] = + # Database initialization + let resInit = init(db) + if resInit.isErr(): + return err(resInit.error()) + + # General initialization + let insertStmt = db.prepareInsertMessageStmt() + return ok(SqliteDriver(db: db, insertStmt: insertStmt)) + +method put*( + s: SqliteDriver, + messageHash: WakuMessageHash, + pubsubTopic: PubsubTopic, + message: WakuMessage, +): Future[ArchiveDriverResult[void]] {.async.} = + ## Inserts a message into the store + let res = s.insertStmt.exec( + ( + @(messageHash), + toBytes(pubsubTopic), + toBytes(message.contentTopic), + message.payload, + 
int64(message.version), + message.timestamp, + message.meta, + ) + ) + + return res + +method getAllMessages*( + s: SqliteDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## Retrieve all messages from the store. + return s.db.selectAllMessages() + +method getMessages*( + s: SqliteDriver, + includeData = true, + contentTopics = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId = "", +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + if not includeData: + return s.db.selectMessageHashesByStoreQueryWithLimit( + contentTopics, + pubsubTopic, + cursor, + startTime, + endTime, + hashes, + limit = maxPageSize, + ascending = ascendingOrder, + ) + + return s.db.selectMessagesByStoreQueryWithLimit( + contentTopics, + pubsubTopic, + cursor, + startTime, + endTime, + hashes, + limit = maxPageSize, + ascending = ascendingOrder, + ) + +method getMessagesCount*( + s: SqliteDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getMessageCount() + +method getPagesCount*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getPageCount() + +method getPagesSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getPageSize() + +method getDatabaseSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getDatabaseSize() + +method performVacuum*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} = + return s.db.performSqliteVacuum() + +method getOldestMessageTimestamp*( + s: SqliteDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return s.db.selectOldestTimestamp() + +method getNewestMessageTimestamp*( + s: SqliteDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return s.db.selectnewestTimestamp() + +method 
deleteMessagesOlderThanTimestamp*(
+    s: SqliteDriver, ts: Timestamp
+): Future[ArchiveDriverResult[void]] {.async.} =
+  return s.db.deleteMessagesOlderThanTimestamp(ts)
+
+method deleteOldestMessagesNotWithinLimit*(
+    s: SqliteDriver, limit: int
+): Future[ArchiveDriverResult[void]] {.async.} =
+  return s.db.deleteOldestMessagesNotWithinLimit(limit)
+
+method decreaseDatabaseSize*(
+    driver: SqliteDriver, targetSizeInBytes: int64, forceRemoval: bool = false
+): Future[ArchiveDriverResult[void]] {.async.} =
+  ## To remove 20% of the outdated data from database
+  const DeleteLimit = 0.80
+
+  ## when db size overshoots the database limit, shred 20% of outdated messages
+  ## get size of database
+  let dbSize = (await driver.getDatabaseSize()).valueOr:
+    return err("failed to get database size: " & $error)
+
+  ## database size in bytes
+  let totalSizeOfDB: int64 = int64(dbSize)
+
+  if totalSizeOfDB < targetSizeInBytes:
+    return ok()
+
+  ## to shred/delete messages, get the total row/message count
+  let numMessages = (await driver.getMessagesCount()).valueOr:
+    return err("failed to get messages count: " & error)
+
+  ## NOTE: Using SQLite vacuuming is done manually, we delete a percentage of rows
+  ## if vacuuming is done automatically then we aim to check DB size periodically for efficient
+  ## retention policy implementation. 
+ + ## 80% of the total messages are to be kept, delete others + let pageDeleteWindow = int(float(numMessages) * DeleteLimit) + + (await driver.deleteOldestMessagesNotWithinLimit(limit = pageDeleteWindow)).isOkOr: + return err("deleting oldest messages failed: " & error) + + return ok() + +method close*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} = + ## Close the database connection + # Dispose statements + s.insertStmt.dispose() + # Close connection + s.db.close() + return ok() + +method existsTable*( + s: SqliteDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = + return err("existsTable method not implemented in sqlite_driver") diff --git a/third-party/nwaku/waku/waku_archive/retention_policy.nim b/third-party/nwaku/waku/waku_archive/retention_policy.nim new file mode 100644 index 0000000..d4b75ee --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/retention_policy.nim @@ -0,0 +1,13 @@ +{.push raises: [].} + +import results, chronos +import ./driver + +type RetentionPolicyResult*[T] = Result[T, string] + +type RetentionPolicy* = ref object of RootObj + +method execute*( + p: RetentionPolicy, store: ArchiveDriver +): Future[RetentionPolicyResult[void]] {.base, async.} = + discard diff --git a/third-party/nwaku/waku/waku_archive/retention_policy/builder.nim b/third-party/nwaku/waku/waku_archive/retention_policy/builder.nim new file mode 100644 index 0000000..6cb131b --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/retention_policy/builder.nim @@ -0,0 +1,85 @@ +{.push raises: [].} + +import std/[strutils, options], regex, results +import + ../retention_policy, + ./retention_policy_time, + ./retention_policy_capacity, + ./retention_policy_size + +proc new*( + T: type RetentionPolicy, retPolicy: string +): RetentionPolicyResult[Option[RetentionPolicy]] = + let retPolicy = retPolicy.toLower + + # Validate the retention policy format + if retPolicy == "" or retPolicy == "none": + return ok(none(RetentionPolicy)) + + 
const StoreMessageRetentionPolicyRegex = re2"^\w+:\d*\.?\d+((g|m)b)?$"
+  if not retPolicy.match(StoreMessageRetentionPolicyRegex):
+    return err("invalid 'store message retention policy' format: " & retPolicy)
+
+  # Apply the retention policy, if any
+  let retentionPolicyParts = retPolicy.split(":", 1)
+  let
+    policy = retentionPolicyParts[0]
+    policyArgs = retentionPolicyParts[1]
+
+  if policy == "time":
+    var retentionTimeSeconds: int64
+    try:
+      retentionTimeSeconds = parseInt(policyArgs)
+    except ValueError:
+      return err("invalid time retention policy argument")
+
+    let retPolicy: RetentionPolicy = TimeRetentionPolicy.new(retentionTimeSeconds)
+    return ok(some(retPolicy))
+  elif policy == "capacity":
+    var retentionCapacity: int
+    try:
+      retentionCapacity = parseInt(policyArgs)
+    except ValueError:
+      return err("invalid capacity retention policy argument")
+
+    let retPolicy: RetentionPolicy = CapacityRetentionPolicy.new(retentionCapacity)
+    return ok(some(retPolicy))
+  elif policy == "size":
+    var retentionSize: string
+    retentionSize = policyArgs
+
+    # captures the size unit such as GB or MB
+    let sizeUnit = retentionSize.substr(retentionSize.len - 2)
+    # captures the string type number data of the size provided
+    let sizeQuantityStr = retentionSize.substr(0, retentionSize.len - 3)
+    # to hold the numeric value data of size
+    var inputSizeQuantity: float
+    var sizeQuantity: int64
+    var sizeMultiplier: float
+
+    try:
+      inputSizeQuantity = parseFloat(sizeQuantityStr)
+    except ValueError:
+      return err("invalid size retention policy argument: " & getCurrentExceptionMsg())
+
+    case sizeUnit
+    of "gb":
+      sizeMultiplier = 1024.0 * 1024.0 * 1024.0
+    of "mb":
+      sizeMultiplier = 1024.0 * 1024.0
+    else:
+      return err (
+        """invalid size retention value unit: expected "Mb" or "Gb" but got """ &
+        sizeUnit
+      )
+
+    # quantity is converted into bytes for uniform processing
+    sizeQuantity = int64(inputSizeQuantity * sizeMultiplier)
+
+    if sizeQuantity <= 0:
+      return err("invalid 
size retention policy argument: a non-zero value is required")
+
+    let retPolicy: RetentionPolicy = SizeRetentionPolicy.new(sizeQuantity)
+    return ok(some(retPolicy))
+  else:
+    return err("unknown retention policy")
diff --git a/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_capacity.nim b/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_capacity.nim
new file mode 100644
index 0000000..aa7cff7
--- /dev/null
+++ b/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_capacity.nim
@@ -0,0 +1,69 @@
+{.push raises: [].}
+
+import results, chronicles, chronos
+import ../driver, ../retention_policy
+
+logScope:
+  topics = "waku archive retention_policy"
+
+const DefaultCapacity*: int = 25_000
+
+const MaxOverflow = 1.3
+
+type
+  # CapacityRetentionPolicy implements auto deletion as follows:
+  #  - The sqlite DB will store up to `totalCapacity = capacity` * `MaxOverflow` messages,
+  #    giving an overflowWindow of `capacity * (MaxOverflow - 1) = overflowWindow`.
+  #
+  #  - In case of an overflow, messages are sorted by `receiverTimestamp` and the oldest ones are
+  #    deleted. The number of messages that get deleted is `(overflowWindow / 2) = deleteWindow`,
+  #    bringing the total number of stored messages back to `capacity + (overflowWindow / 2)`.
+  #
+  # The rationale for batch deleting is efficiency. We keep half of the overflow window in addition
+  # to `capacity` because we delete the oldest messages with respect to `receiverTimestamp` instead of
+  # `senderTimestamp`. `ReceiverTimestamp` is guaranteed to be set, while senders could omit setting
+  # `senderTimestamp`. However, `receiverTimestamp` can differ from node to node for the same message.
+  # So sorting by `receiverTimestamp` might (slightly) prioritize some actually older messages and we
+  # compensate that by keeping half of the overflow window. 
+ CapacityRetentionPolicy* = ref object of RetentionPolicy + capacity: int + # represents both the number of messages that are persisted in the sqlite DB (excl. the overflow window explained above), and the number of messages that get loaded via `getAll`. + totalCapacity: int # = capacity * MaxOverflow + deleteWindow: int + # = capacity * (MaxOverflow - 1) / 2; half of the overflow window, the amount of messages deleted when overflow occurs + +proc calculateTotalCapacity(capacity: int, overflow: float): int = + int(float(capacity) * overflow) + +proc calculateOverflowWindow(capacity: int, overflow: float): int = + int(float(capacity) * (overflow - 1)) + +proc calculateDeleteWindow(capacity: int, overflow: float): int = + calculateOverflowWindow(capacity, overflow) div 2 + +proc new*(T: type CapacityRetentionPolicy, capacity = DefaultCapacity): T = + let + totalCapacity = calculateTotalCapacity(capacity, MaxOverflow) + deleteWindow = calculateDeleteWindow(capacity, MaxOverflow) + + CapacityRetentionPolicy( + capacity: capacity, totalCapacity: totalCapacity, deleteWindow: deleteWindow + ) + +method execute*( + p: CapacityRetentionPolicy, driver: ArchiveDriver +): Future[RetentionPolicyResult[void]] {.async.} = + debug "beginning executing message retention policy - capacity" + + let numMessages = (await driver.getMessagesCount()).valueOr: + return err("failed to get messages count: " & error) + + if numMessages < p.totalCapacity: + return ok() + + (await driver.deleteOldestMessagesNotWithinLimit(limit = p.capacity + p.deleteWindow)).isOkOr: + return err("deleting oldest messages failed: " & error) + + debug "end executing message retention policy - capacity" + + return ok() diff --git a/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_size.nim b/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_size.nim new file mode 100644 index 0000000..2e3b270 --- /dev/null +++ 
b/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_size.nim @@ -0,0 +1,27 @@ +{.push raises: [].} + +import results, chronicles, chronos +import ../driver, ../retention_policy + +logScope: + topics = "waku archive retention_policy" + +# default size is 30 GiB or 32212254720.0 in bytes +const DefaultRetentionSize*: int64 = 32212254720 + +type SizeRetentionPolicy* = ref object of RetentionPolicy + sizeLimit: int64 + +proc new*(T: type SizeRetentionPolicy, size = DefaultRetentionSize): T = + SizeRetentionPolicy(sizeLimit: size) + +method execute*( + p: SizeRetentionPolicy, driver: ArchiveDriver +): Future[RetentionPolicyResult[void]] {.async.} = + debug "beginning of executing message retention policy - size" + + (await driver.decreaseDatabaseSize(p.sizeLimit)).isOkOr: + return err("decreaseDatabaseSize failed: " & $error) + + debug "end of executing message retention policy - size" + return ok() diff --git a/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_time.nim b/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_time.nim new file mode 100644 index 0000000..6cc3682 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive/retention_policy/retention_policy_time.nim @@ -0,0 +1,39 @@ +{.push raises: [].} + +import std/times, results, chronicles, chronos +import ../../waku_core, ../driver, ../retention_policy + +logScope: + topics = "waku archive retention_policy" + +const DefaultRetentionTime*: int64 = 30.days.seconds + +type TimeRetentionPolicy* = ref object of RetentionPolicy + retentionTime: chronos.Duration + +proc new*(T: type TimeRetentionPolicy, retentionTime = DefaultRetentionTime): T = + TimeRetentionPolicy(retentionTime: retentionTime.seconds) + +method execute*( + p: TimeRetentionPolicy, driver: ArchiveDriver +): Future[RetentionPolicyResult[void]] {.async.} = + ## Delete messages that exceed the retention time by 10% and more (batch delete for efficiency) + debug "beginning of executing message 
retention policy - time" + + let omtRes = await driver.getOldestMessageTimestamp() + if omtRes.isErr(): + return err("failed to get oldest message timestamp: " & omtRes.error) + + let now = getNanosecondTime(getTime().toUnixFloat()) + let retentionTimestamp = now - p.retentionTime.nanoseconds + let thresholdTimestamp = retentionTimestamp - p.retentionTime.nanoseconds div 10 + + if thresholdTimestamp <= omtRes.value: + return ok() + + let res = await driver.deleteMessagesOlderThanTimestamp(ts = retentionTimestamp) + if res.isErr(): + return err("failed to delete oldest messages: " & res.error) + + debug "end of executing message retention policy - time" + return ok() diff --git a/third-party/nwaku/waku/waku_archive_legacy.nim b/third-party/nwaku/waku/waku_archive_legacy.nim new file mode 100644 index 0000000..bcb6b6a --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy.nim @@ -0,0 +1,6 @@ +import + ./waku_archive_legacy/common, + ./waku_archive_legacy/archive, + ./waku_archive_legacy/driver + +export common, archive, driver diff --git a/third-party/nwaku/waku/waku_archive_legacy/archive.nim b/third-party/nwaku/waku/waku_archive_legacy/archive.nim new file mode 100644 index 0000000..5e08aa5 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/archive.nim @@ -0,0 +1,285 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[times, options, sequtils, algorithm], + stew/byteutils, + chronicles, + chronos, + metrics, + results +import + ../common/paging, + ./driver, + ../waku_core, + ../waku_core/message/digest, + ./common, + ./archive_metrics + +logScope: + topics = "waku archive" + +const + DefaultPageSize*: uint = 20 + MaxPageSize*: uint = 100 + + # Retention policy + WakuArchiveDefaultRetentionPolicyInterval* = chronos.minutes(30) + + # Metrics reporting + WakuArchiveDefaultMetricsReportInterval* = chronos.minutes(30) + + # Message validation + # 20 seconds maximum allowable sender 
timestamp "drift" + MaxMessageTimestampVariance* = getNanoSecondTime(20) + +type MessageValidator* = + proc(msg: WakuMessage): Result[void, string] {.closure, gcsafe, raises: [].} + +## Archive + +type WakuArchive* = ref object + driver: ArchiveDriver + + validator: MessageValidator + +proc validate*(msg: WakuMessage): Result[void, string] = + if msg.ephemeral: + # Ephemeral message, do not store + return + + if msg.timestamp == 0: + return ok() + + let + now = getNanosecondTime(getTime().toUnixFloat()) + lowerBound = now - MaxMessageTimestampVariance + upperBound = now + MaxMessageTimestampVariance + + if msg.timestamp < lowerBound: + return err(invalidMessageOld) + + if upperBound < msg.timestamp: + return err(invalidMessageFuture) + + return ok() + +proc new*( + T: type WakuArchive, driver: ArchiveDriver, validator: MessageValidator = validate +): Result[T, string] = + if driver.isNil(): + return err("archive driver is Nil") + + let archive = WakuArchive(driver: driver, validator: validator) + + return ok(archive) + +proc handleMessage*( + self: WakuArchive, pubsubTopic: PubsubTopic, msg: WakuMessage +) {.async.} = + let + msgDigest = computeDigest(msg) + msgDigestHex = msgDigest.data.to0xHex() + msgHash = computeMessageHash(pubsubTopic, msg) + msgHashHex = msgHash.to0xHex() + msgTimestamp = + if msg.timestamp > 0: + msg.timestamp + else: + getNanosecondTime(getTime().toUnixFloat()) + + trace "handling message", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + msgTimestamp = msg.timestamp, + digest = msgDigestHex + + self.validator(msg).isOkOr: + waku_legacy_archive_errors.inc(labelValues = [error]) + trace "invalid message", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + error = error + return + + let insertStartTime = getTime().toUnixFloat() + + (await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr: + 
waku_legacy_archive_errors.inc(labelValues = [insertFailure]) + error "failed to insert message", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + timestamp = msg.timestamp, + error = error + return + + let insertDuration = getTime().toUnixFloat() - insertStartTime + waku_legacy_archive_insert_duration_seconds.observe(insertDuration) + + debug "message archived", + msg_hash = msgHashHex, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + msgTimestamp = msg.timestamp, + digest = msgDigestHex, + insertDuration = insertDuration + +proc findMessages*( + self: WakuArchive, query: ArchiveQuery +): Future[ArchiveResult] {.async, gcsafe.} = + ## Search the archive to return a single page of messages matching the query criteria + + let maxPageSize = + if query.pageSize <= 0: + DefaultPageSize + else: + min(query.pageSize, MaxPageSize) + + let isAscendingOrder = query.direction.into() + + if query.contentTopics.len > 10: + return err(ArchiveError.invalidQuery("too many content topics")) + + if query.cursor.isSome() and query.cursor.get().hash.len != 32: + return err(ArchiveError.invalidQuery("invalid cursor hash length")) + + let queryStartTime = getTime().toUnixFloat() + + let rows = ( + await self.driver.getMessages( + includeData = query.includeData, + contentTopic = query.contentTopics, + pubsubTopic = query.pubsubTopic, + cursor = query.cursor, + startTime = query.startTime, + endTime = query.endTime, + hashes = query.hashes, + maxPageSize = maxPageSize + 1, + ascendingOrder = isAscendingOrder, + requestId = query.requestId, + ) + ).valueOr: + return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error)) + + let queryDuration = getTime().toUnixFloat() - queryStartTime + waku_legacy_archive_query_duration_seconds.observe(queryDuration) + + var hashes = newSeq[WakuMessageHash]() + var messages = newSeq[WakuMessage]() + var topics = newSeq[PubsubTopic]() + var cursor = none(ArchiveCursor) + + if 
rows.len == 0: + return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor)) + + ## Messages + let pageSize = min(rows.len, int(maxPageSize)) + + if query.includeData: + topics = rows[0 ..< pageSize].mapIt(it[0]) + messages = rows[0 ..< pageSize].mapIt(it[1]) + + hashes = rows[0 ..< pageSize].mapIt(it[4]) + + ## Cursor + if rows.len > int(maxPageSize): + ## Build last message cursor + ## The cursor is built from the last message INCLUDED in the response + ## (i.e. the second last message in the rows list) + + let (pubsubTopic, message, digest, storeTimestamp, hash) = rows[^2] + + cursor = some( + ArchiveCursor( + digest: MessageDigest.fromBytes(digest), + storeTime: storeTimestamp, + sendertime: message.timestamp, + pubsubTopic: pubsubTopic, + hash: hash, + ) + ) + + # All messages MUST be returned in chronological order + if not isAscendingOrder: + reverse(hashes) + reverse(messages) + reverse(topics) + + return ok( + ArchiveResponse(hashes: hashes, messages: messages, topics: topics, cursor: cursor) + ) + +proc findMessagesV2*( + self: WakuArchive, query: ArchiveQuery +): Future[ArchiveResult] {.async, deprecated, gcsafe.} = + ## Search the archive to return a single page of messages matching the query criteria + + let maxPageSize = + if query.pageSize <= 0: + DefaultPageSize + else: + min(query.pageSize, MaxPageSize) + + let isAscendingOrder = query.direction.into() + + if query.contentTopics.len > 10: + return err(ArchiveError.invalidQuery("too many content topics")) + + let queryStartTime = getTime().toUnixFloat() + + let rows = ( + await self.driver.getMessagesV2( + contentTopic = query.contentTopics, + pubsubTopic = query.pubsubTopic, + cursor = query.cursor, + startTime = query.startTime, + endTime = query.endTime, + maxPageSize = maxPageSize + 1, + ascendingOrder = isAscendingOrder, + requestId = query.requestId, + ) + ).valueOr: + return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error)) + + let queryDuration = 
getTime().toUnixFloat() - queryStartTime + waku_legacy_archive_query_duration_seconds.observe(queryDuration) + + var messages = newSeq[WakuMessage]() + var cursor = none(ArchiveCursor) + + if rows.len == 0: + return ok(ArchiveResponse(messages: messages, cursor: cursor)) + + ## Messages + let pageSize = min(rows.len, int(maxPageSize)) + + messages = rows[0 ..< pageSize].mapIt(it[1]) + + ## Cursor + if rows.len > int(maxPageSize): + ## Build last message cursor + ## The cursor is built from the last message INCLUDED in the response + ## (i.e. the second last message in the rows list) + + let (pubsubTopic, message, digest, storeTimestamp, _) = rows[^2] + + cursor = some( + ArchiveCursor( + digest: MessageDigest.fromBytes(digest), + storeTime: storeTimestamp, + sendertime: message.timestamp, + pubsubTopic: pubsubTopic, + ) + ) + + # All messages MUST be returned in chronological order + if not isAscendingOrder: + reverse(messages) + + return ok(ArchiveResponse(messages: messages, cursor: cursor)) diff --git a/third-party/nwaku/waku/waku_archive_legacy/archive_metrics.nim b/third-party/nwaku/waku/waku_archive_legacy/archive_metrics.nim new file mode 100644 index 0000000..c3569a1 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/archive_metrics.nim @@ -0,0 +1,22 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import metrics + +declarePublicGauge waku_legacy_archive_messages, + "number of historical messages", ["type"] +declarePublicCounter waku_legacy_archive_errors, + "number of store protocol errors", ["type"] +declarePublicHistogram waku_legacy_archive_insert_duration_seconds, + "message insertion duration" +declarePublicHistogram waku_legacy_archive_query_duration_seconds, + "history query duration" + +# Error types (metric label values) +const + invalidMessageOld* = "invalid_message_too_old" + invalidMessageFuture* = "invalid_message_future_timestamp" + insertFailure* = "insert_failure" + 
retPolicyFailure* = "retpolicy_failure" diff --git a/third-party/nwaku/waku/waku_archive_legacy/common.nim b/third-party/nwaku/waku/waku_archive_legacy/common.nim new file mode 100644 index 0000000..ee45181 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/common.nim @@ -0,0 +1,88 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/options, results, stew/byteutils, stew/arrayops, nimcrypto/sha2 +import ../waku_core, ../common/paging + +## Waku message digest + +type MessageDigest* = MDigest[256] + +proc fromBytes*(T: type MessageDigest, src: seq[byte]): T = + var data: array[32, byte] + + let byteCount = copyFrom[byte](data, src) + + assert byteCount == 32 + + return MessageDigest(data: data) + +proc computeDigest*(msg: WakuMessage): MessageDigest = + var ctx: sha256 + ctx.init() + defer: + ctx.clear() + + ctx.update(msg.contentTopic.toBytes()) + ctx.update(msg.payload) + + # Computes the hash + return ctx.finish() + +## Public API types + +type + #TODO Once Store v2 is removed, the cursor becomes the hash of the last message + ArchiveCursor* = object + digest*: MessageDigest + storeTime*: Timestamp + senderTime*: Timestamp + pubsubTopic*: PubsubTopic + hash*: WakuMessageHash + + ArchiveQuery* = object + includeData*: bool # indicate if messages should be returned in addition to hashes. 
+ pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + cursor*: Option[ArchiveCursor] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + hashes*: seq[WakuMessageHash] + pageSize*: uint + direction*: PagingDirection + requestId*: string + + ArchiveResponse* = object + hashes*: seq[WakuMessageHash] + messages*: seq[WakuMessage] + topics*: seq[PubsubTopic] + cursor*: Option[ArchiveCursor] + + ArchiveErrorKind* {.pure.} = enum + UNKNOWN = uint32(0) + DRIVER_ERROR = uint32(1) + INVALID_QUERY = uint32(2) + + ArchiveError* = object + case kind*: ArchiveErrorKind + of DRIVER_ERROR, INVALID_QUERY: + # TODO: Add an enum to be able to distinguish between error causes + cause*: string + else: + discard + + ArchiveResult* = Result[ArchiveResponse, ArchiveError] + +proc `$`*(err: ArchiveError): string = + case err.kind + of ArchiveErrorKind.DRIVER_ERROR: + "DRIVER_ERROR: " & err.cause + of ArchiveErrorKind.INVALID_QUERY: + "INVALID_QUERY: " & err.cause + of ArchiveErrorKind.UNKNOWN: + "UNKNOWN" + +proc invalidQuery*(T: type ArchiveError, cause: string): T = + ArchiveError(kind: ArchiveErrorKind.INVALID_QUERY, cause: cause) diff --git a/third-party/nwaku/waku/waku_archive_legacy/driver.nim b/third-party/nwaku/waku/waku_archive_legacy/driver.nim new file mode 100644 index 0000000..8ff8df0 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/driver.nim @@ -0,0 +1,121 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/options, results, chronos +import ../waku_core, ./common + +const DefaultPageSize*: uint = 25 + +type + ArchiveDriverResult*[T] = Result[T, string] + ArchiveDriver* = ref object of RootObj + +#TODO Once Store v2 is removed keep only messages and hashes +type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash) + +# ArchiveDriver interface + +method put*( + driver: ArchiveDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: 
MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method getAllMessages*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = + discard + +method getMessagesV2*( + driver: ArchiveDriver, + contentTopic = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, deprecated, async.} = + discard + +method getMessages*( + driver: ArchiveDriver, + includeData = true, + contentTopic = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId = "", +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} = + discard + +method getMessagesCount*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method getPagesCount*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method getPagesSize*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method getDatabaseSize*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[int64]] {.base, async.} = + discard + +method performVacuum*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method getOldestMessageTimestamp*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[Timestamp]] {.base, async.} = + discard + +method getNewestMessageTimestamp*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[Timestamp]] {.base, async.} = + discard + +method deleteMessagesOlderThanTimestamp*( + driver: 
ArchiveDriver, ts: Timestamp +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method deleteOldestMessagesNotWithinLimit*( + driver: ArchiveDriver, limit: int +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method decreaseDatabaseSize*( + driver: ArchiveDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method close*( + driver: ArchiveDriver +): Future[ArchiveDriverResult[void]] {.base, async.} = + discard + +method existsTable*( + driver: ArchiveDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.base, async.} = + discard diff --git a/third-party/nwaku/waku/waku_archive_legacy/driver/builder.nim b/third-party/nwaku/waku/waku_archive_legacy/driver/builder.nim new file mode 100644 index 0000000..77bd46f --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/driver/builder.nim @@ -0,0 +1,104 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import results, chronicles, chronos +import + ../driver, + ../../common/databases/dburl, + ../../common/databases/db_sqlite, + ../../common/error_handling, + ./sqlite_driver, + ./sqlite_driver/migrations as archive_driver_sqlite_migrations, + ./queue_driver + +export sqlite_driver, queue_driver + +when defined(postgres): + import ## These imports add dependency with an external libpq library + ./postgres_driver + export postgres_driver + +proc new*( + T: type ArchiveDriver, + url: string, + vacuum: bool, + migrate: bool, + maxNumConn: int, + onFatalErrorAction: OnFatalErrorHandler, +): Future[Result[T, string]] {.async.} = + ## url - string that defines the database + ## vacuum - if true, a cleanup operation will be applied to the database + ## migrate - if true, the database schema will be updated + ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres) + ## onFatalErrorAction - called if, e.g., the 
connection with db got lost + + let dbUrlValidationRes = dburl.validateDbUrl(url) + if dbUrlValidationRes.isErr(): + return err("DbUrl failure in ArchiveDriver.new: " & dbUrlValidationRes.error) + + let engineRes = dburl.getDbEngine(url) + if engineRes.isErr(): + return err("error getting db engine in setupWakuArchiveDriver: " & engineRes.error) + + let engine = engineRes.get() + + case engine + of "sqlite": + let pathRes = dburl.getDbPath(url) + if pathRes.isErr(): + return err("error get path in setupWakuArchiveDriver: " & pathRes.error) + + let dbRes = SqliteDatabase.new(pathRes.get()) + if dbRes.isErr(): + return err("error in setupWakuArchiveDriver: " & dbRes.error) + + let db = dbRes.get() + + # SQLite vacuum + let sqliteStatsRes = db.gatherSqlitePageStats() + if sqliteStatsRes.isErr(): + return err("error while gathering sqlite stats: " & $sqliteStatsRes.error) + + let (pageSize, pageCount, freelistCount) = sqliteStatsRes.get() + debug "sqlite database page stats", + pageSize = pageSize, pages = pageCount, freePages = freelistCount + + if vacuum and (pageCount > 0 and freelistCount > 0): + let vacuumRes = db.performSqliteVacuum() + if vacuumRes.isErr(): + return err("error in vacuum sqlite: " & $vacuumRes.error) + + # Database migration + if migrate: + let migrateRes = archive_driver_sqlite_migrations.migrate(db) + if migrateRes.isErr(): + return err("error in migrate sqlite: " & $migrateRes.error) + + debug "setting up sqlite waku archive driver" + let res = SqliteDriver.new(db) + if res.isErr(): + return err("failed to init sqlite archive driver: " & res.error) + + return ok(res.get()) + of "postgres": + when defined(postgres): + let res = PostgresDriver.new( + dbUrl = url, + maxConnections = maxNumConn, + onFatalErrorAction = onFatalErrorAction, + ) + if res.isErr(): + return err("failed to init postgres archive driver: " & res.error) + + let driver = res.get() + return ok(driver) + else: + return err( + "Postgres has been configured but not been 
compiled. Check compiler definitions." + ) + else: + debug "setting up in-memory waku archive driver" + let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages + return ok(driver) diff --git a/third-party/nwaku/waku/waku_archive_legacy/driver/postgres_driver.nim b/third-party/nwaku/waku/waku_archive_legacy/driver/postgres_driver.nim new file mode 100644 index 0000000..496005c --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/driver/postgres_driver.nim @@ -0,0 +1,8 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import ./postgres_driver/postgres_driver + +export postgres_driver diff --git a/third-party/nwaku/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim b/third-party/nwaku/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim new file mode 100644 index 0000000..3891184 --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/driver/postgres_driver/postgres_driver.nim @@ -0,0 +1,978 @@ +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import + std/[options, sequtils, strutils, strformat, times], + stew/[byteutils, arrayops], + results, + chronos, + db_connector/[postgres, db_common], + chronicles +import + ../../../common/error_handling, + ../../../waku_core, + ../../common, + ../../driver, + ./postgres_healthcheck, + ../../../common/databases/db_postgres as waku_postgres + +type PostgresDriver* = ref object of ArchiveDriver + ## Establish a separate pools for read/write operations + writeConnPool: PgAsyncPool + readConnPool: PgAsyncPool + +const InsertRowStmtName = "InsertRow" +const InsertRowStmtDefinition = # TODO: get the sql queries from a file + """INSERT INTO messages (id, messageHash, contentTopic, payload, pubsubTopic, + version, timestamp, meta) VALUES ($1, $2, $3, $4, $5, $6, $7, CASE WHEN $8 = '' THEN NULL ELSE $8 END) ON CONFLICT DO NOTHING;""" + +const 
InsertRowInMessagesLookupStmtName = "InsertRowMessagesLookup" +const InsertRowInMessagesLookupStmtDefinition = + """INSERT INTO messages_lookup (messageHash, timestamp) VALUES ($1, $2) ON CONFLICT DO NOTHING;""" + +const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc" +const SelectNoCursorAscStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp ASC, messageHash ASC LIMIT $6;""" + +const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc" +const SelectNoCursorDescStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + timestamp >= $4 AND + timestamp <= $5 + ORDER BY timestamp DESC, messageHash DESC LIMIT $6;""" + +const SelectWithCursorDescStmtName = "SelectWithCursorDesc" +const SelectWithCursorDescStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) < ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp DESC, messageHash DESC LIMIT $8;""" + +const SelectWithCursorAscStmtName = "SelectWithCursorAsc" +const SelectWithCursorAscStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + messageHash IN ($2) AND + pubsubTopic = $3 AND + (timestamp, messageHash) > ($4,$5) AND + timestamp >= $6 AND + timestamp <= $7 + ORDER BY timestamp ASC, messageHash ASC LIMIT $8;""" + +const SelectMessageByHashName = "SelectMessageByHash" +const SelectMessageByHashDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM 
messages WHERE messageHash = $1""" + +const SelectNoCursorV2AscStmtName = "SelectWithoutCursorV2Asc" +const SelectNoCursorV2AscStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + timestamp >= $3 AND + timestamp <= $4 + ORDER BY timestamp ASC LIMIT $5;""" + +const SelectNoCursorV2DescStmtName = "SelectWithoutCursorV2Desc" +const SelectNoCursorV2DescStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + timestamp >= $3 AND + timestamp <= $4 + ORDER BY timestamp DESC LIMIT $5;""" + +const SelectWithCursorV2DescStmtName = "SelectWithCursorV2Desc" +const SelectWithCursorV2DescStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + (timestamp, id) < ($3,$4) AND + timestamp >= $5 AND + timestamp <= $6 + ORDER BY timestamp DESC LIMIT $7;""" + +const SelectWithCursorV2AscStmtName = "SelectWithCursorV2Asc" +const SelectWithCursorV2AscStmtDef = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages + WHERE contentTopic IN ($1) AND + pubsubTopic = $2 AND + (timestamp, id) > ($3,$4) AND + timestamp >= $5 AND + timestamp <= $6 + ORDER BY timestamp ASC LIMIT $7;""" + +const DefaultMaxNumConns = 50 + +proc new*( + T: type PostgresDriver, + dbUrl: string, + maxConnections = DefaultMaxNumConns, + onFatalErrorAction: OnFatalErrorHandler = nil, +): ArchiveDriverResult[T] = + ## Very simplistic split of max connections + let maxNumConnOnEachPool = int(maxConnections / 2) + + let readConnPool = PgAsyncPool.new(dbUrl, maxNumConnOnEachPool).valueOr: + return err("error creating read conn pool PgAsyncPool") + + let writeConnPool = PgAsyncPool.new(dbUrl, maxNumConnOnEachPool).valueOr: + return 
err("error creating write conn pool PgAsyncPool") + + if not isNil(onFatalErrorAction): + asyncSpawn checkConnectivity(readConnPool, onFatalErrorAction) + + if not isNil(onFatalErrorAction): + asyncSpawn checkConnectivity(writeConnPool, onFatalErrorAction) + + let driver = PostgresDriver(writeConnPool: writeConnPool, readConnPool: readConnPool) + return ok(driver) + +proc reset*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} = + ## Clear the database partitions + let targetSize = 0 + let forceRemoval = true + let ret = await s.decreaseDatabaseSize(targetSize, forceRemoval) + return ret + +proc rowCallbackImpl( + pqResult: ptr PGresult, + outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)], +) = + ## Proc aimed to contain the logic of the callback passed to the `psasyncpool`. + ## That callback is used in "SELECT" queries. + ## + ## pqResult - contains the query results + ## outRows - seq of Store-rows. This is populated from the info contained in pqResult + + let numFields = pqResult.pqnfields() + if numFields != 8: + error "Wrong number of fields, expected 8", numFields + return + + for iRow in 0 ..< pqResult.pqNtuples(): + var wakuMessage: WakuMessage + var timestamp: Timestamp + var version: uint + var pubSubTopic: string + var contentTopic: string + var digest: string + var payload: string + var hashHex: string + var msgHash: WakuMessageHash + var meta: string + + try: + contentTopic = $(pqgetvalue(pqResult, iRow, 0)) + payload = parseHexStr($(pqgetvalue(pqResult, iRow, 1))) + pubSubTopic = $(pqgetvalue(pqResult, iRow, 2)) + version = parseUInt($(pqgetvalue(pqResult, iRow, 3))) + timestamp = parseInt($(pqgetvalue(pqResult, iRow, 4))) + digest = parseHexStr($(pqgetvalue(pqResult, iRow, 5))) + hashHex = parseHexStr($(pqgetvalue(pqResult, iRow, 6))) + meta = parseHexStr($(pqgetvalue(pqResult, iRow, 7))) + msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31)) + except ValueError: + error "could not parse correctly", 
error = getCurrentExceptionMsg() + + wakuMessage.timestamp = timestamp + wakuMessage.version = uint32(version) + wakuMessage.contentTopic = contentTopic + wakuMessage.payload = @(payload.toOpenArrayByte(0, payload.high)) + wakuMessage.meta = @(meta.toOpenArrayByte(0, meta.high)) + + outRows.add( + ( + pubSubTopic, + wakuMessage, + @(digest.toOpenArrayByte(0, digest.high)), + timestamp, + msgHash, + ) + ) + +method put*( + s: PostgresDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.async.} = + let digest = toHex(digest.data) + let messageHash = toHex(messageHash) + let contentTopic = message.contentTopic + let payload = toHex(message.payload) + let version = $message.version + let timestamp = $message.timestamp + let meta = toHex(message.meta) + + trace "put PostgresDriver", timestamp = timestamp + + ( + await s.writeConnPool.runStmt( + InsertRowStmtName, + InsertRowStmtDefinition, + @[ + digest, messageHash, contentTopic, payload, pubsubTopic, version, timestamp, + meta, + ], + @[ + int32(digest.len), + int32(messageHash.len), + int32(contentTopic.len), + int32(payload.len), + int32(pubsubTopic.len), + int32(version.len), + int32(timestamp.len), + int32(meta.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + ) + ).isOkOr: + return err("could not put msg in messages table: " & $error) + + ## Now add the row to messages_lookup + return await s.writeConnPool.runStmt( + InsertRowInMessagesLookupStmtName, + InsertRowInMessagesLookupStmtDefinition, + @[messageHash, timestamp], + @[int32(messageHash.len), int32(timestamp.len)], + @[int32(0), int32(0)], + ) + +method getAllMessages*( + s: PostgresDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## Retrieve all messages from the store. 
+ var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + ( + await s.readConnPool.pgQuery( + """SELECT contentTopic, + payload, pubsubTopic, version, timestamp, + id, messageHash, meta FROM messages ORDER BY timestamp ASC""", + newSeq[string](0), + rowCallback, + ) + ).isOkOr: + return err("failed in query: " & $error) + + return ok(rows) + +proc getMessagesArbitraryQuery( + s: PostgresDriver, + contentTopic: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hexHashes: seq[string] = @[], + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## This proc allows to handle atypical queries. We don't use prepared statements for those. + + var query = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages""" + var statements: seq[string] + var args: seq[string] + + if contentTopic.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")" + statements.add(cstmt) + for t in contentTopic: + args.add(t) + + if hexHashes.len > 0: + let cstmt = "messageHash IN (" & "?".repeat(hexHashes.len).join(",") & ")" + statements.add(cstmt) + for t in hexHashes: + args.add(t) + + if pubsubTopic.isSome(): + statements.add("pubsubTopic = ?") + args.add(pubsubTopic.get()) + + if cursor.isSome(): + let hashHex = toHex(cursor.get().hash) + + var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc entreeCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, entree) + + ( + await s.readConnPool.runStmt( + SelectMessageByHashName, + SelectMessageByHashDef, + @[hashHex], + @[int32(hashHex.len)], + @[int32(0)], + entreeCallback, + requestId, + ) + ).isOkOr: + return err("failed to 
run query with cursor: " & $error) + + if entree.len == 0: + return ok(entree) + + let storetime = entree[0][3] + + let comp = if ascendingOrder: ">" else: "<" + statements.add("(timestamp, messageHash) " & comp & " (?,?)") + args.add($storetime) + args.add(hashHex) + + if startTime.isSome(): + statements.add("timestamp >= ?") + args.add($startTime.get()) + + if endTime.isSome(): + statements.add("timestamp <= ?") + args.add($endTime.get()) + + if statements.len > 0: + query &= " WHERE " & statements.join(" AND ") + + var direction: string + if ascendingOrder: + direction = "ASC" + else: + direction = "DESC" + + query &= " ORDER BY timestamp " & direction & ", messageHash " & direction + + query &= " LIMIT ?" + args.add($maxPageSize) + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + (await s.readConnPool.pgQuery(query, args, rowCallback, requestId)).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +proc getMessagesV2ArbitraryQuery( + s: PostgresDriver, + contentTopic: seq[ContentTopic] = @[], + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} = + ## This proc allows to handle atypical queries. We don't use prepared statements for those. 
+ + var query = + """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages""" + var statements: seq[string] + var args: seq[string] + + if contentTopic.len > 0: + let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")" + statements.add(cstmt) + for t in contentTopic: + args.add(t) + + if pubsubTopic.isSome(): + statements.add("pubsubTopic = ?") + args.add(pubsubTopic.get()) + + if cursor.isSome(): + let comp = if ascendingOrder: ">" else: "<" + statements.add("(timestamp, id) " & comp & " (?,?)") + args.add($cursor.get().storeTime) + args.add(toHex(cursor.get().digest.data)) + + if startTime.isSome(): + statements.add("timestamp >= ?") + args.add($startTime.get()) + + if endTime.isSome(): + statements.add("timestamp <= ?") + args.add($endTime.get()) + + if statements.len > 0: + query &= " WHERE " & statements.join(" AND ") + + var direction: string + if ascendingOrder: + direction = "ASC" + else: + direction = "DESC" + + query &= " ORDER BY timestamp " & direction & ", id " & direction + + query &= " LIMIT ?" + args.add($maxPageSize) + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + (await s.readConnPool.pgQuery(query, args, rowCallback, requestId)).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +proc getMessagesPreparedStmt( + s: PostgresDriver, + contentTopic: string, + pubsubTopic: PubsubTopic, + cursor = none(ArchiveCursor), + startTime: Timestamp, + endTime: Timestamp, + hashes: string, + maxPageSize = DefaultPageSize, + ascOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## This proc aims to run the most typical queries in a more performant way, i.e. by means of + ## prepared statements. + ## + ## contentTopic - string with list of conten topics. 
e.g: "'ctopic1','ctopic2','ctopic3'" + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + let startTimeStr = $startTime + let endTimeStr = $endTime + let limit = $maxPageSize + + if cursor.isSome(): + let hash = toHex(cursor.get().hash) + + var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + + proc entreeCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, entree) + + ( + await s.readConnPool.runStmt( + SelectMessageByHashName, + SelectMessageByHashDef, + @[hash], + @[int32(hash.len)], + @[int32(0)], + entreeCallback, + requestId, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + + if entree.len == 0: + return ok(entree) + + let timestamp = $entree[0][3] + + var stmtName = + if ascOrder: SelectWithCursorAscStmtName else: SelectWithCursorDescStmtName + var stmtDef = + if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[ + contentTopic, hashes, pubsubTopic, timestamp, hash, startTimeStr, endTimeStr, + limit, + ], + @[ + int32(contentTopic.len), + int32(hashes.len), + int32(pubsubTopic.len), + int32(timestamp.len), + int32(hash.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[ + int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0) + ], + rowCallback, + requestId, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + else: + var stmtName = + if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName + var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(hashes.len), + int32(pubsubTopic.len), + 
int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + requestId, + ) + ).isOkOr: + return err("failed to run query without cursor: " & $error) + + return ok(rows) + +proc getMessagesV2PreparedStmt( + s: PostgresDriver, + contentTopic: string, + pubsubTopic: PubsubTopic, + cursor = none(ArchiveCursor), + startTime: Timestamp, + endTime: Timestamp, + maxPageSize = DefaultPageSize, + ascOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} = + ## This proc aims to run the most typical queries in a more performant way, i.e. by means of + ## prepared statements. + ## + ## contentTopic - string with list of conten topics. e.g: "'ctopic1','ctopic2','ctopic3'" + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + let startTimeStr = $startTime + let endTimeStr = $endTime + let limit = $maxPageSize + + if cursor.isSome(): + var stmtName = + if ascOrder: SelectWithCursorV2AscStmtName else: SelectWithCursorV2DescStmtName + var stmtDef = + if ascOrder: SelectWithCursorV2AscStmtDef else: SelectWithCursorV2DescStmtDef + + let digest = toHex(cursor.get().digest.data) + let timestamp = $cursor.get().storeTime + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, pubsubTopic, timestamp, digest, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(timestamp.len), + int32(digest.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + requestId, + ) + ).isOkOr: + return err("failed to run query with cursor: " & $error) + else: + var stmtName = + if ascOrder: SelectNoCursorV2AscStmtName else: SelectNoCursorV2DescStmtName + var stmtDef 
= + if ascOrder: SelectNoCursorV2AscStmtDef else: SelectNoCursorV2DescStmtDef + + ( + await s.readConnPool.runStmt( + stmtName, + stmtDef, + @[contentTopic, pubsubTopic, startTimeStr, endTimeStr, limit], + @[ + int32(contentTopic.len), + int32(pubsubTopic.len), + int32(startTimeStr.len), + int32(endTimeStr.len), + int32(limit.len), + ], + @[int32(0), int32(0), int32(0), int32(0), int32(0)], + rowCallback, + requestId, + ) + ).isOkOr: + return err("failed to run query without cursor: " & $error) + + return ok(rows) + +proc getMessagesByMessageHashes( + s: PostgresDriver, hashes: string, maxPageSize: uint, requestId: string +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## Retrieves information only filtering by a given messageHashes list. + ## This proc levarages on the messages_lookup table to have better query performance + ## and only query the desired partitions in the partitioned messages table + var query = + fmt""" + WITH min_timestamp AS ( + SELECT MIN(timestamp) AS min_ts + FROM messages_lookup + WHERE messagehash IN ( + {hashes} + ) + ) + SELECT contentTopic, payload, pubsubTopic, version, m.timestamp, id, m.messageHash, meta + FROM messages m + INNER JOIN + messages_lookup l + ON + m.timestamp = l.timestamp + AND m.messagehash = l.messagehash + WHERE + l.timestamp >= (SELECT min_ts FROM min_timestamp) + AND l.messagehash IN ( + {hashes} + ) + ORDER BY + m.timestamp DESC, + m.messagehash DESC + LIMIT {maxPageSize}; + """ + + var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc rowCallback(pqResult: ptr PGresult) = + rowCallbackImpl(pqResult, rows) + + ( + await s.readConnPool.pgQuery( + query = query, rowCallback = rowCallback, requestId = requestId + ) + ).isOkOr: + return err("failed to run query: " & $error) + + return ok(rows) + +method getMessages*( + s: PostgresDriver, + includeData = true, + contentTopicSeq = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), 
+ startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId = "", +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + let hexHashes = hashes.mapIt(toHex(it)) + + if cursor.isNone() and pubsubTopic.isNone() and contentTopicSeq.len == 0 and + startTime.isNone() and endTime.isNone() and hexHashes.len > 0: + return await s.getMessagesByMessageHashes( + "'" & hexHashes.join("','") & "'", maxPageSize, requestId + ) + + if contentTopicSeq.len == 1 and hexHashes.len == 1 and pubsubTopic.isSome() and + startTime.isSome() and endTime.isSome(): + ## Considered the most common query. Therefore, we use prepared statements to optimize it. + return await s.getMessagesPreparedStmt( + contentTopicSeq.join(","), + PubsubTopic(pubsubTopic.get()), + cursor, + startTime.get(), + endTime.get(), + hexHashes.join(","), + maxPageSize, + ascendingOrder, + requestId, + ) + else: + ## We will run atypical query. In this case we don't use prepared statemets + return await s.getMessagesArbitraryQuery( + contentTopicSeq, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize, + ascendingOrder, requestId, + ) + +method getMessagesV2*( + s: PostgresDriver, + contentTopicSeq = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} = + if contentTopicSeq.len == 1 and pubsubTopic.isSome() and startTime.isSome() and + endTime.isSome(): + ## Considered the most common query. Therefore, we use prepared statements to optimize it. 
+ return await s.getMessagesV2PreparedStmt( + contentTopicSeq.join(","), + PubsubTopic(pubsubTopic.get()), + cursor, + startTime.get(), + endTime.get(), + maxPageSize, + ascendingOrder, + requestId, + ) + else: + ## We will run atypical query. In this case we don't use prepared statemets + return await s.getMessagesV2ArbitraryQuery( + contentTopicSeq, pubsubTopic, cursor, startTime, endTime, maxPageSize, + ascendingOrder, requestId, + ) + +proc getStr( + s: PostgresDriver, query: string +): Future[ArchiveDriverResult[string]] {.async.} = + # Performs a query that is expected to return a single string + + var ret: string + proc rowCallback(pqResult: ptr PGresult) = + if pqResult.pqnfields() != 1: + error "Wrong number of fields in getStr" + return + + if pqResult.pqNtuples() != 1: + error "Wrong number of rows in getStr" + return + + ret = $(pqgetvalue(pqResult, 0, 0)) + + (await s.readConnPool.pgQuery(query, newSeq[string](0), rowCallback)).isOkOr: + return err("failed in getRow: " & $error) + + return ok(ret) + +proc getInt( + s: PostgresDriver, query: string +): Future[ArchiveDriverResult[int64]] {.async.} = + # Performs a query that is expected to return a single numeric value (int64) + + var retInt = 0'i64 + let str = (await s.getStr(query)).valueOr: + return err("could not get str in getInt: " & $error) + + try: + retInt = parseInt(str) + except ValueError: + return err( + "exception in getInt, parseInt, str: " & str & " query: " & query & " exception: " & + getCurrentExceptionMsg() + ) + + return ok(retInt) + +method getDatabaseSize*( + s: PostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let intRes = (await s.getInt("SELECT pg_database_size(current_database())")).valueOr: + return err("error in getDatabaseSize: " & error) + + let databaseSize: int64 = int64(intRes) + return ok(databaseSize) + +method getMessagesCount*( + s: PostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let intRes = await s.getInt("SELECT COUNT(1) FROM 
method close*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} =
  ## Shuts down both connection pools held by the driver.
  ## Both pools are always closed; if either close fails, the write-pool
  ## failure takes precedence in the reported error.
  let
    writeRes = await s.writeConnPool.close()
    readRes = await s.readConnPool.close()

  if writeRes.isErr():
    return err("error closing write pool: " & $writeRes.error)

  if readRes.isErr():
    return err("error closing read pool: " & $readRes.error)

  return ok()
proc performWriteQuery*(
    s: PostgresDriver, query: string
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Runs a state-changing statement (DDL/DML) through the write pool.
  ## Only the success/failure outcome is reported; no rows are returned.
  let queryRes = await s.writeConnPool.pgQuery(query)
  if queryRes.isErr():
    return err("error in performWriteQuery: " & $queryRes.error)

  return ok()
"reducing database size", + # targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB + + return ok() + +method existsTable*( + s: PostgresDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = + let query: string = + fmt""" + SELECT EXISTS ( + SELECT FROM + pg_tables + WHERE + tablename = '{tableName}' + ); + """ + + var exists: string + proc rowCallback(pqResult: ptr PGresult) = + if pqResult.pqnfields() != 1: + error "Wrong number of fields in existsTable" + return + + if pqResult.pqNtuples() != 1: + error "Wrong number of rows in existsTable" + return + + exists = $(pqgetvalue(pqResult, 0, 0)) + + (await s.readConnPool.pgQuery(query, newSeq[string](0), rowCallback)).isOkOr: + return err("existsTable failed in getRow: " & $error) + + return ok(exists == "t") + +proc getCurrentVersion*( + s: PostgresDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + let existsVersionTable = (await s.existsTable("version")).valueOr: + return err("error in getCurrentVersion-existsTable: " & $error) + + if not existsVersionTable: + return ok(0) + + let res = (await s.getInt(fmt"SELECT version FROM version")).valueOr: + return err("error in getMessagesCount: " & $error) + + return ok(res) + +method deleteMessagesOlderThanTimestamp*( + s: PostgresDriver, tsNanoSec: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = + ## First of all, let's remove the older partitions so that we can reduce + ## the database size. 
proc checkConnectivity*(
    connPool: PgAsyncPool, onFatalErrorAction: OnFatalErrorHandler
) {.async.} =
  ## Periodic postgres health check loop. Every `CheckConnectivityInterval`
  ## it runs `HealthCheckQuery`; on failure it resets the pool and retries
  ## up to `MaxNumTrials` times, `TrialInterval` apart, before reporting a
  ## fatal error through `onFatalErrorAction`.
  ##
  ## NOTE(review): this loop never returns (`while true`) — presumably it
  ## is spawned as a background task by the caller; confirm it is not
  ## awaited on a critical path.
  while true:
    (await connPool.pgQuery(HealthCheckQuery)).isOkOr:
      ## The connection failed once. Let's try reconnecting for a while.
      ## Notice that the 'pgQuery' proc tries to establish a new connection.

      block errorBlock:
        ## Force close all the opened connections. No need to close gracefully.
        (await connPool.resetConnPool()).isOkOr:
          onFatalErrorAction("checkConnectivity legacy resetConnPool error: " & error)

        var numTrial = 0
        while numTrial < MaxNumTrials:
          let res = await connPool.pgQuery(HealthCheckQuery)
          if res.isOk():
            ## Connection resumed. Let's go back to the normal healthcheck.
            break errorBlock

          await sleepAsync(TrialInterval)
          numTrial.inc()

        ## The connection couldn't be resumed. Let's inform the upper layers.
        # `error` here is the failure from the outer health-check query.
        onFatalErrorAction("postgres legacy health check error: " & error)

    await sleepAsync(CheckConnectivityInterval)
proc cmp*(x, y: Index): int =
  ## compares x and y
  ## returns 0 if they are equal
  ## returns -1 if x < y
  ## returns 1 if x > y
  ##
  ## Default sorting order priority is:
  ## 1. senderTimestamp
  ## 2. receiverTimestamp (a fallback only if senderTimestamp unset on either side, and all other fields unequal)
  ## 3. message digest
  ## 4. pubsubTopic

  if x == y:
    # Quick exit ensures receiver time does not affect index equality.
    # Note: `==` also matches on hash alone (store v3), so two indices
    # with equal hashes compare as 0 here even if timestamps differ.
    return 0

  # Timestamp has a higher priority for comparison
  let
    # Use receiverTime where senderTime is unset (senderTime == 0)
    xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime
    yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime

  let timecmp = cmp(xTimestamp, yTimestamp)
  if timecmp != 0:
    return timecmp

  # Continue only when timestamps are equal
  let digestcmp = cmp(x.digest.data, y.digest.data)
  if digestcmp != 0:
    return digestcmp

  return cmp(x.pubsubTopic, y.pubsubTopic)
proc walkToCursor(
    w: SortedSetWalkRef[Index, WakuMessage], startCursor: Index, forward: bool
): SortedSetResult[Index, WakuMessage] =
  ## Walk until we find the cursor
  ## TODO: Improve performance here with a binary/tree search
  ##
  ## Linearly advances the walker (from the first item when `forward`,
  ## from the last otherwise) until an item whose key equals `startCursor`
  ## is found. Returns that item, or the walker's error result when the
  ## set is exhausted without a match (cursor not present).

  var nextItem =
    if forward:
      w.first()
    else:
      w.last()

  ## Fast forward until we reach the startCursor
  while nextItem.isOk():
    if nextItem.value.key == startCursor:
      break

    # Not yet at cursor. Continue advancing
    nextItem =
      if forward:
        w.next()
      else:
        w.prev()

  return nextItem
iterator fwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
  ## Forward iterator over the entire store queue
  ## Yields (index, message) pairs in ascending index order.
  ## NOTE(review): `w.destroy()` only runs when the iterator is fully
  ## exhausted; an early `break` in the consumer skips it — confirm the
  ## walker holds no resources that require explicit cleanup.
  var
    w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
    res = w.first()

  while res.isOk():
    yield (res.value.key, res.value.data)
    res = w.next()

  w.destroy()
proc add*(
    driver: QueueDriver, index: Index, msg: WakuMessage
): ArchiveDriverResult[void] =
  ## Add a message to the queue.
  ##
  ## If we're at capacity, the oldest (first) item is removed to make
  ## room — unless the incoming index is older than that item, in which
  ## case the message is rejected with "too_old". Duplicate indices are
  ## rejected with "duplicate".
  if driver.contains(index):
    trace "could not add item to store queue. Index already exists", index = index
    return err("duplicate")

  # TODO: the below delete block can be removed if we convert to circular buffer
  if driver.items.len >= driver.capacity:
    var
      w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
      firstItem = w.first

    if cmp(index, firstItem.value.key) < 0:
      # When at capacity, we won't add if message index is smaller (older) than our oldest item
      w.destroy # Clean up walker
      return err("too_old")

    discard driver.items.delete(firstItem.value.key)
    w.destroy # better to destroy walker after a delete operation

  # Insert the new index and attach the message payload to the node.
  driver.items.insert(index).value.data = msg

  return ok()
method getMessages*(
    driver: QueueDriver,
    includeData = true,
    contentTopic: seq[ContentTopic] = @[],
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes: seq[WakuMessageHash] = @[],
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId = "",
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Returns one page (up to `maxPageSize`) of rows matching ALL of the
  ## provided filters, starting after `cursor` when given.
  ## NOTE(review): `includeData` and `requestId` are accepted only for
  ## interface compatibility — this in-memory driver ignores them.

  # Shadow the Option[ArchiveCursor] with its Option[Index] form.
  let cursor = cursor.map(toIndex)

  # Predicate ANDing every filter; empty seq filters match everything.
  let matchesQuery: QueryFilterMatcher =
    func (index: Index, msg: WakuMessage): bool =
      if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get():
        return false

      if contentTopic.len > 0 and msg.contentTopic notin contentTopic:
        return false

      if startTime.isSome() and msg.timestamp < startTime.get():
        return false

      if endTime.isSome() and msg.timestamp > endTime.get():
        return false

      if hashes.len > 0 and index.hash notin hashes:
        return false

      return true

  var pageRes: QueueDriverGetPageResult
  try:
    pageRes = driver.getPage(maxPageSize, ascendingOrder, cursor, matchesQuery)
  except CatchableError, Exception:
    # Defensive: surface any unexpected failure as a driver error string.
    return err(getCurrentExceptionMsg())

  if pageRes.isErr():
    return err($pageRes.error)

  return ok(pageRes.value)
method getOldestMessageTimestamp*(
    driver: QueueDriver
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
  ## Receiver timestamp of the oldest (smallest-index) queued message,
  ## or an error when the queue is empty.
  let indexRes = driver.first()
  if indexRes.isErr():
    return err(indexRes.error)

  return ok(indexRes.get().receiverTime)
proc toDbCursor*(c: ArchiveCursor): DbCursor =
  ## Maps an ArchiveCursor onto the (storeTime, digest, pubsubTopic)
  ## tuple shape used by the sqlite driver for keyset pagination.
  (c.storeTime, @(c.digest.data), c.pubsubTopic)
proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult[void] =
  ## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then
  ## it runs migration scripts if the `user_version` is outdated. The `migrationScriptsDir` path
  ## points to the directory holding the migrations scripts; once the db is updated, it sets the
  ## `user_version` to the `targetVersion`.
  ##
  ## If no `targetVersion` is provided, it defaults to `SchemaVersion`.
  ##
  ## NOTE: Down migration is not currently supported
  debug "starting message store's sqlite database migration"

  let userVersion = ?db.getUserVersion()
  let isSchemaVersion7 = ?db.isSchemaVersion7()

  # Repair databases created by nwaku 0.14.0–0.18.0, which left
  # user_version at 0 while the schema was already at version 7.
  if userVersion == 0'i64 and isSchemaVersion7:
    info "We found user_version 0 but the database schema reflects the user_version 7"
    ## Force the correct schema version
    ?db.setUserVersion(7)

  let migrationRes =
    migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath)
  if migrationRes.isErr():
    return err("failed to execute migration scripts: " & migrationRes.error)

  debug "finished message store's sqlite database migration"
  return ok()
proc queryRowPubsubTopicCallback(
    s: ptr sqlite3_stmt, pubsubTopicCol: cint
): PubsubTopic =
  ## Reads the pubsub-topic BLOB column at `pubsubTopicCol` from the
  ## current result row and decodes its bytes into a string.
  let
    pubsubTopicPointer =
      cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, pubsubTopicCol))
    pubsubTopicLength = sqlite3_column_bytes(s, pubsubTopicCol)
    pubsubTopic =
      string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength - 1)))

  return pubsubTopic
proc createTable*(db: SqliteDatabase): DatabaseResult[void] =
  ## Creates the Message table if it does not exist yet.
  let query = createTableQuery(DbTable)
  # DDL returns no rows, so the row callback deliberately does nothing;
  # `?` propagates any query error to the caller.
  discard
    ?db.query(
      query,
      proc(s: ptr sqlite3_stmt) =
        discard,
    )
  return ok()
let query = countMessagesQuery(DbTable) + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err("failed to count number of messages in the database") + + return ok(count) + +## Get oldest message receiver timestamp + +proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr = + return "SELECT MIN(storedAt) FROM " & table + +proc selectOldestReceiverTimestamp*( + db: SqliteDatabase +): DatabaseResult[Timestamp] {.inline.} = + var timestamp: Timestamp + proc queryRowCallback(s: ptr sqlite3_stmt) = + timestamp = queryRowReceiverTimestampCallback(s, 0) + + let query = selectOldestMessageTimestampQuery(DbTable) + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err("failed to get the oldest receiver timestamp from the database") + + return ok(timestamp) + +## Get newest message receiver timestamp + +proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr = + return "SELECT MAX(storedAt) FROM " & table + +proc selectNewestReceiverTimestamp*( + db: SqliteDatabase +): DatabaseResult[Timestamp] {.inline.} = + var timestamp: Timestamp + proc queryRowCallback(s: ptr sqlite3_stmt) = + timestamp = queryRowReceiverTimestampCallback(s, 0) + + let query = selectNewestMessageTimestampQuery(DbTable) + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err("failed to get the newest receiver timestamp from the database") + + return ok(timestamp) + +## Delete messages older than timestamp + +proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr = + return "DELETE FROM " & table & " WHERE storedAt < " & $ts + +proc deleteMessagesOlderThanTimestamp*( + db: SqliteDatabase, ts: int64 +): DatabaseResult[void] = + let query = deleteMessagesOlderThanTimestampQuery(DbTable, ts) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard, + ) + return ok() + +## Delete oldest messages not within limit + +proc deleteOldestMessagesNotWithinLimitQuery(table: string, 
limit: int): SqlQueryStr = + return + "DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" & + " SELECT storedAt, id, pubsubTopic FROM " & table & + " ORDER BY storedAt DESC, id DESC" & " LIMIT " & $limit & ");" + +proc deleteOldestMessagesNotWithinLimit*( + db: SqliteDatabase, limit: int +): DatabaseResult[void] = + # NOTE: The word `limit` here refers the store capacity/maximum number-of-messages allowed limit + let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit = limit) + discard + ?db.query( + query, + proc(s: ptr sqlite3_stmt) = + discard, + ) + return ok() + +## Select all messages + +proc selectAllMessagesQuery(table: string): SqlQueryStr = + return + "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" & + " FROM " & table & " ORDER BY storedAt ASC" + +proc selectAllMessages*( + db: SqliteDatabase +): DatabaseResult[ + seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] +] {.gcsafe.} = + ## Retrieve all messages from the store. 
+ var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] + proc queryRowCallback(s: ptr sqlite3_stmt) = + let + pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3) + wakuMessage = queryRowWakuMessageCallback( + s, + contentTopicCol = 1, + payloadCol = 2, + versionCol = 4, + senderTimestampCol = 5, + metaCol = 8, + ) + digest = queryRowDigestCallback(s, digestCol = 6) + storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0) + hash = queryRowWakuMessageHashCallback(s, hashCol = 7) + + rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash)) + + let query = selectAllMessagesQuery(DbTable) + let res = db.query(query, queryRowCallback) + if res.isErr(): + return err(res.error()) + + return ok(rows) + +## Select messages by history query with limit + +proc combineClauses(clauses: varargs[Option[string]]): Option[string] = + let whereSeq = @clauses.filterIt(it.isSome()).mapIt(it.get()) + if whereSeq.len <= 0: + return none(string) + + var where: string = whereSeq[0] + for clause in whereSeq[1 ..^ 1]: + where &= " AND " & clause + return some(where) + +proc whereClausev2( + cursor: bool, + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + ascending: bool, +): Option[string] {.deprecated.} = + let cursorClause = + if cursor: + let comp = if ascending: ">" else: "<" + + some("(storedAt, id) " & comp & " (?, ?)") + else: + none(string) + + let pubsubTopicClause = + if pubsubTopic.isNone(): + none(string) + else: + some("pubsubTopic = (?)") + + let contentTopicClause = + if contentTopic.len <= 0: + none(string) + else: + var where = "contentTopic IN (" + where &= "?" + for _ in 1 ..< contentTopic.len: + where &= ", ?" 
+ where &= ")" + some(where) + + let startTimeClause = + if startTime.isNone(): + none(string) + else: + some("storedAt >= (?)") + + let endTimeClause = + if endTime.isNone(): + none(string) + else: + some("storedAt <= (?)") + + return combineClauses( + cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause + ) + +proc selectMessagesWithLimitQueryv2( + table: string, where: Option[string], limit: uint, ascending = true, v3 = false +): SqlQueryStr {.deprecated.} = + let order = if ascending: "ASC" else: "DESC" + + var query: string + + query = + "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" + query &= " FROM " & table + + if where.isSome(): + query &= " WHERE " & where.get() + + query &= " ORDER BY storedAt " & order & ", id " & order + + query &= " LIMIT " & $limit & ";" + + return query + +proc prepareStmt( + db: SqliteDatabase, stmt: string +): DatabaseResult[SqliteStmt[void, void]] = + var s: RawStmtPtr + checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil) + return ok(SqliteStmt[void, void](s)) + +proc execSelectMessagesV2WithLimitStmt( + s: SqliteStmt, + cursor: Option[DbCursor], + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + onRowCallback: DataProc, +): DatabaseResult[void] {.deprecated.} = + let s = RawStmtPtr(s) + + # Bind params + var paramIndex = 1 + + if cursor.isSome(): + let (storedAt, id, _) = cursor.get() + checkErr bindParam(s, paramIndex, storedAt) + paramIndex += 1 + checkErr bindParam(s, paramIndex, id) + paramIndex += 1 + + if pubsubTopic.isSome(): + let pubsubTopic = toBytes(pubsubTopic.get()) + checkErr bindParam(s, paramIndex, pubsubTopic) + paramIndex += 1 + + for topic in contentTopic: + checkErr bindParam(s, paramIndex, topic.toBytes()) + paramIndex += 1 + + if startTime.isSome(): + let time = startTime.get() + checkErr bindParam(s, paramIndex, time) + 
paramIndex += 1 + + if endTime.isSome(): + let time = endTime.get() + checkErr bindParam(s, paramIndex, time) + paramIndex += 1 + + try: + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onRowCallback(s) + of SQLITE_DONE: + return ok() + else: + return err($sqlite3_errstr(v)) + except Exception, CatchableError: + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + +proc selectMessagesByHistoryQueryWithLimit*( + db: SqliteDatabase, + contentTopic: seq[ContentTopic], + pubsubTopic: Option[PubsubTopic], + cursor: Option[DbCursor], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + limit: uint, + ascending: bool, +): DatabaseResult[ + seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] +] {.deprecated.} = + var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] = + @[] + + proc queryRowCallback(s: ptr sqlite3_stmt) = + let + pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3) + message = queryRowWakuMessageCallback( + s, + contentTopicCol = 1, + payloadCol = 2, + versionCol = 4, + senderTimestampCol = 5, + metaCol = 8, + ) + digest = queryRowDigestCallback(s, digestCol = 6) + storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0) + hash = queryRowWakuMessageHashCallback(s, hashCol = 7) + + messages.add((pubsubTopic, message, digest, storedAt, hash)) + + let query = block: + let where = whereClausev2( + cursor.isSome(), pubsubTopic, contentTopic, startTime, endTime, ascending + ) + + selectMessagesWithLimitQueryv2(DbTable, where, limit, ascending) + + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessagesV2WithLimitStmt( + cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback + ) + dbStmt.dispose() + + return ok(messages) + +### Store v3 ### + +proc execSelectMessageByHash( + s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc +): 
DatabaseResult[void] = + let s = RawStmtPtr(s) + + checkErr bindParam(s, 1, toSeq(hash)) + + try: + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onRowCallback(s) + of SQLITE_DONE: + return ok() + else: + return err($sqlite3_errstr(v)) + except Exception, CatchableError: + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + +proc selectMessageByHashQuery(): SqlQueryStr = + var query: string + + query = "SELECT contentTopic, payload, version, timestamp, meta, messageHash" + query &= " FROM " & DbTable + query &= " WHERE messageHash = (?)" + + return query + +proc whereClause( + cursor: bool, + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + ascending: bool, +): Option[string] = + let cursorClause = + if cursor: + let comp = if ascending: ">" else: "<" + + some("(timestamp, messageHash) " & comp & " (?, ?)") + else: + none(string) + + let pubsubTopicClause = + if pubsubTopic.isNone(): + none(string) + else: + some("pubsubTopic = (?)") + + let contentTopicClause = + if contentTopic.len <= 0: + none(string) + else: + var where = "contentTopic IN (" + where &= "?" + for _ in 1 ..< contentTopic.len: + where &= ", ?" + where &= ")" + some(where) + + let startTimeClause = + if startTime.isNone(): + none(string) + else: + some("storedAt >= (?)") + + let endTimeClause = + if endTime.isNone(): + none(string) + else: + some("storedAt <= (?)") + + let hashesClause = + if hashes.len <= 0: + none(string) + else: + var where = "messageHash IN (" + where &= "?" + for _ in 1 ..< hashes.len: + where &= ", ?" 
+ where &= ")" + some(where) + + return combineClauses( + cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause, + hashesClause, + ) + +proc execSelectMessagesWithLimitStmt( + s: SqliteStmt, + cursor: Option[(Timestamp, WakuMessageHash)], + pubsubTopic: Option[PubsubTopic], + contentTopic: seq[ContentTopic], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + onRowCallback: DataProc, +): DatabaseResult[void] = + let s = RawStmtPtr(s) + + # Bind params + var paramIndex = 1 + + if cursor.isSome(): + let (time, hash) = cursor.get() + checkErr bindParam(s, paramIndex, time) + paramIndex += 1 + checkErr bindParam(s, paramIndex, toSeq(hash)) + paramIndex += 1 + + if pubsubTopic.isSome(): + let pubsubTopic = toBytes(pubsubTopic.get()) + checkErr bindParam(s, paramIndex, pubsubTopic) + paramIndex += 1 + + for topic in contentTopic: + checkErr bindParam(s, paramIndex, topic.toBytes()) + paramIndex += 1 + + for hash in hashes: + checkErr bindParam(s, paramIndex, toSeq(hash)) + paramIndex += 1 + + if startTime.isSome(): + let time = startTime.get() + checkErr bindParam(s, paramIndex, time) + paramIndex += 1 + + if endTime.isSome(): + let time = endTime.get() + checkErr bindParam(s, paramIndex, time) + paramIndex += 1 + + try: + while true: + let v = sqlite3_step(s) + case v + of SQLITE_ROW: + onRowCallback(s) + of SQLITE_DONE: + return ok() + else: + return err($sqlite3_errstr(v)) + except Exception, CatchableError: + # release implicit transaction + discard sqlite3_reset(s) # same return information as step + discard sqlite3_clear_bindings(s) # no errors possible + +proc selectMessagesWithLimitQuery( + table: string, where: Option[string], limit: uint, ascending = true, v3 = false +): SqlQueryStr = + let order = if ascending: "ASC" else: "DESC" + + var query: string + + query = + "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" + query &= " FROM " & table 
+ + if where.isSome(): + query &= " WHERE " & where.get() + + query &= " ORDER BY storedAt " & order & ", messageHash " & order + + query &= " LIMIT " & $limit & ";" + + return query + +proc selectMessagesByStoreQueryWithLimit*( + db: SqliteDatabase, + contentTopic: seq[ContentTopic], + pubsubTopic: Option[PubsubTopic], + cursor: Option[WakuMessageHash], + startTime: Option[Timestamp], + endTime: Option[Timestamp], + hashes: seq[WakuMessageHash], + limit: uint, + ascending: bool, +): DatabaseResult[ + seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] +] = + # Must first get the message timestamp before paginating by time + let newCursor = + if cursor.isSome() and cursor.get() != EmptyWakuMessageHash: + let hash: WakuMessageHash = cursor.get() + + var wakuMessage: Option[WakuMessage] + + proc queryRowCallback(s: ptr sqlite3_stmt) = + wakuMessage = some( + queryRowWakuMessageCallback( + s, + contentTopicCol = 0, + payloadCol = 1, + versionCol = 2, + senderTimestampCol = 3, + metaCol = 4, + ) + ) + + let query = selectMessageByHashQuery() + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessageByHash(hash, queryRowCallback) + dbStmt.dispose() + + if wakuMessage.isSome(): + let time = wakuMessage.get().timestamp + + some((time, hash)) + else: + return err("cursor not found") + else: + none((Timestamp, WakuMessageHash)) + + var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] = + @[] + + proc queryRowCallback(s: ptr sqlite3_stmt) = + let + pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3) + message = queryRowWakuMessageCallback( + s, + contentTopicCol = 1, + payloadCol = 2, + versionCol = 4, + senderTimestampCol = 5, + metaCol = 8, + ) + digest = queryRowDigestCallback(s, digestCol = 6) + storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0) + hash = queryRowWakuMessageHashCallback(s, hashCol = 7) + + messages.add((pubsubTopic, message, digest, storedAt, hash)) + + let query = block: 
+ let where = whereClause( + newCursor.isSome(), + pubsubTopic, + contentTopic, + startTime, + endTime, + hashes, + ascending, + ) + + selectMessagesWithLimitQuery(DbTable, where, limit, ascending, true) + + let dbStmt = ?db.prepareStmt(query) + ?dbStmt.execSelectMessagesWithLimitStmt( + newCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback + ) + dbStmt.dispose() + + return ok(messages) diff --git a/third-party/nwaku/waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim b/third-party/nwaku/waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim new file mode 100644 index 0000000..5a6c12b --- /dev/null +++ b/third-party/nwaku/waku/waku_archive_legacy/driver/sqlite_driver/sqlite_driver.nim @@ -0,0 +1,225 @@ +# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth. +# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim +when (NimMajor, NimMinor) < (1, 4): + {.push raises: [Defect].} +else: + {.push raises: [].} + +import std/options, stew/byteutils, chronicles, chronos, results +import + ../../../common/databases/db_sqlite, + ../../../waku_core, + ../../../waku_core/message/digest, + ../../common, + ../../driver, + ./cursor, + ./queries + +logScope: + topics = "waku archive sqlite" + +proc init(db: SqliteDatabase): ArchiveDriverResult[void] = + ## Misconfiguration can lead to nil DB + if db.isNil(): + return err("db not initialized") + + # Create table, if doesn't exist + let resCreate = createTable(db) + if resCreate.isErr(): + return err("failed to create table: " & resCreate.error()) + + # Create indices, if don't exist + let resRtIndex = createOldestMessageTimestampIndex(db) + if resRtIndex.isErr(): + return err("failed to create i_rt index: " & resRtIndex.error()) + + let resMsgIndex = createHistoryQueryIndex(db) + if resMsgIndex.isErr(): + return err("failed to create i_query index: " & resMsgIndex.error()) + + return ok() + +type SqliteDriver* = ref object of 
ArchiveDriver + db: SqliteDatabase + insertStmt: SqliteStmt[InsertMessageParams, void] + +proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] = + # Database initialization + let resInit = init(db) + if resInit.isErr(): + return err(resInit.error()) + + # General initialization + let insertStmt = db.prepareInsertMessageStmt() + return ok(SqliteDriver(db: db, insertStmt: insertStmt)) + +method put*( + s: SqliteDriver, + pubsubTopic: PubsubTopic, + message: WakuMessage, + digest: MessageDigest, + messageHash: WakuMessageHash, + receivedTime: Timestamp, +): Future[ArchiveDriverResult[void]] {.async.} = + ## Inserts a message into the store + let res = s.insertStmt.exec( + ( + @(digest.data), # id + @(messageHash), # messageHash + receivedTime, # storedAt + toBytes(message.contentTopic), # contentTopic + message.payload, # payload + toBytes(pubsubTopic), # pubsubTopic + int64(message.version), # version + message.timestamp, # senderTimestamp + message.meta, # meta + ) + ) + + return res + +method getAllMessages*( + s: SqliteDriver +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + ## Retrieve all messages from the store. 
+ return s.db.selectAllMessages() + +method getMessagesV2*( + s: SqliteDriver, + contentTopic = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId: string, +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} = + let cursor = cursor.map(toDbCursor) + + let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit( + contentTopic, + pubsubTopic, + cursor, + startTime, + endTime, + limit = maxPageSize, + ascending = ascendingOrder, + ) + + return rowsRes + +method getMessages*( + s: SqliteDriver, + includeData = true, + contentTopic = newSeq[ContentTopic](0), + pubsubTopic = none(PubsubTopic), + cursor = none(ArchiveCursor), + startTime = none(Timestamp), + endTime = none(Timestamp), + hashes = newSeq[WakuMessageHash](0), + maxPageSize = DefaultPageSize, + ascendingOrder = true, + requestId = "", +): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} = + let cursor = + if cursor.isSome(): + some(cursor.get().hash) + else: + none(WakuMessageHash) + + let rowsRes = s.db.selectMessagesByStoreQueryWithLimit( + contentTopic, + pubsubTopic, + cursor, + startTime, + endTime, + hashes, + limit = maxPageSize, + ascending = ascendingOrder, + ) + + return rowsRes + +method getMessagesCount*( + s: SqliteDriver +): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getMessageCount() + +method getPagesCount*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getPageCount() + +method getPagesSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getPageSize() + +method getDatabaseSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} = + return s.db.getDatabaseSize() + +method performVacuum*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} = + return s.db.performSqliteVacuum() + +method 
getOldestMessageTimestamp*( + s: SqliteDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return s.db.selectOldestReceiverTimestamp() + +method getNewestMessageTimestamp*( + s: SqliteDriver +): Future[ArchiveDriverResult[Timestamp]] {.async.} = + return s.db.selectnewestReceiverTimestamp() + +method deleteMessagesOlderThanTimestamp*( + s: SqliteDriver, ts: Timestamp +): Future[ArchiveDriverResult[void]] {.async.} = + return s.db.deleteMessagesOlderThanTimestamp(ts) + +method deleteOldestMessagesNotWithinLimit*( + s: SqliteDriver, limit: int +): Future[ArchiveDriverResult[void]] {.async.} = + return s.db.deleteOldestMessagesNotWithinLimit(limit) + +method decreaseDatabaseSize*( + driver: SqliteDriver, targetSizeInBytes: int64, forceRemoval: bool = false +): Future[ArchiveDriverResult[void]] {.async.} = + ## To remove 20% of the outdated data from database + const DeleteLimit = 0.80 + + ## when db size overshoots the database limit, shread 20% of outdated messages + ## get size of database + let dbSize = (await driver.getDatabaseSize()).valueOr: + return err("failed to get database size: " & $error) + + ## database size in bytes + let totalSizeOfDB: int64 = int64(dbSize) + + if totalSizeOfDB < targetSizeInBytes: + return ok() + + ## to shread/delete messsges, get the total row/message count + let numMessages = (await driver.getMessagesCount()).valueOr: + return err("failed to get messages count: " & error) + + ## NOTE: Using SQLite vacuuming is done manually, we delete a percentage of rows + ## if vacumming is done automatically then we aim to check DB size periodially for efficient + ## retention policy implementation. 
+ + ## 80% of the total messages are to be kept, delete others + let pageDeleteWindow = int(float(numMessages) * DeleteLimit) + + (await driver.deleteOldestMessagesNotWithinLimit(limit = pageDeleteWindow)).isOkOr: + return err("deleting oldest messages failed: " & error) + + return ok() + +method close*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} = + ## Close the database connection + # Dispose statements + s.insertStmt.dispose() + # Close connection + s.db.close() + return ok() + +method existsTable*( + s: SqliteDriver, tableName: string +): Future[ArchiveDriverResult[bool]] {.async.} = + return err("existsTable method not implemented in sqlite_driver") diff --git a/third-party/nwaku/waku/waku_core.nim b/third-party/nwaku/waku/waku_core.nim new file mode 100644 index 0000000..44dcce3 --- /dev/null +++ b/third-party/nwaku/waku/waku_core.nim @@ -0,0 +1,10 @@ +import + ./waku_core/topics, + ./waku_core/time, + ./waku_core/message, + ./waku_core/peers, + ./waku_core/subscription, + ./waku_core/multiaddrstr, + ./waku_core/codecs + +export topics, time, message, peers, subscription, multiaddrstr, codecs diff --git a/third-party/nwaku/waku/waku_core/codecs.nim b/third-party/nwaku/waku/waku_core/codecs.nim new file mode 100644 index 0000000..6dcdfe2 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/codecs.nim @@ -0,0 +1,12 @@ +const + WakuRelayCodec* = "/vac/waku/relay/2.0.0" + WakuStoreCodec* = "/vac/waku/store-query/3.0.0" + WakuFilterSubscribeCodec* = "/vac/waku/filter-subscribe/2.0.0-beta1" + WakuFilterPushCodec* = "/vac/waku/filter-push/2.0.0-beta1" + WakuLightPushCodec* = "/vac/waku/lightpush/3.0.0" + WakuLegacyLightPushCodec* = "/vac/waku/lightpush/2.0.0-beta1" + WakuReconciliationCodec* = "/vac/waku/reconciliation/1.0.0" + WakuTransferCodec* = "/vac/waku/transfer/1.0.0" + WakuMetadataCodec* = "/vac/waku/metadata/1.0.0" + WakuPeerExchangeCodec* = "/vac/waku/peer-exchange/2.0.0-alpha1" + WakuLegacyStoreCodec* = "/vac/waku/store/2.0.0-beta4" diff 
--git a/third-party/nwaku/waku/waku_core/message.nim b/third-party/nwaku/waku/waku_core/message.nim new file mode 100644 index 0000000..6b698fb --- /dev/null +++ b/third-party/nwaku/waku/waku_core/message.nim @@ -0,0 +1,3 @@ +import ./message/message, ./message/default_values, ./message/codec, ./message/digest + +export message, default_values, codec, digest diff --git a/third-party/nwaku/waku/waku_core/message/codec.nim b/third-party/nwaku/waku/waku_core/message/codec.nim new file mode 100644 index 0000000..9b01cf6 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/message/codec.nim @@ -0,0 +1,73 @@ +## Waku Message module: encoding and decoding +# See: +# - RFC 14: https://rfc.vac.dev/spec/14/ +# - Proto definition: https://github.com/vacp2p/waku/blob/main/waku/message/v1/message.proto +{.push raises: [].} + +import ../../common/protobuf, ../topics, ../time, ./message + +proc encode*(message: WakuMessage): ProtoBuffer = + var buf = initProtoBuffer() + + buf.write3(1, message.payload) + buf.write3(2, message.contentTopic) + buf.write3(3, message.version) + buf.write3(10, zint64(message.timestamp)) + buf.write3(11, message.meta) + buf.write3(21, message.proof) + buf.write3(31, uint32(message.ephemeral)) + buf.finish3() + + buf + +proc decode*(T: type WakuMessage, buffer: seq[byte]): ProtobufResult[T] = + var msg = WakuMessage() + let pb = initProtoBuffer(buffer) + + var payload: seq[byte] + if not ?pb.getField(1, payload): + return err(ProtobufError.missingRequiredField("payload")) + else: + msg.payload = payload + + var topic: ContentTopic + if not ?pb.getField(2, topic): + return err(ProtobufError.missingRequiredField("content_topic")) + else: + msg.contentTopic = topic + + var version: uint32 + if not ?pb.getField(3, version): + msg.version = 0 + else: + msg.version = version + + var timestamp: zint64 + if not ?pb.getField(10, timestamp): + msg.timestamp = Timestamp(0) + else: + msg.timestamp = Timestamp(timestamp) + + var meta: seq[byte] + if not 
?pb.getField(11, meta): + msg.meta = @[] + else: + if meta.len > MaxMetaAttrLength: + return err(ProtobufError.invalidLengthField("meta")) + + msg.meta = meta + + # this is part of https://rfc.vac.dev/spec/17/ spec + var proof: seq[byte] + if not ?pb.getField(21, proof): + msg.proof = @[] + else: + msg.proof = proof + + var ephemeral: uint32 + if not ?pb.getField(31, ephemeral): + msg.ephemeral = false + else: + msg.ephemeral = bool(ephemeral) + + ok(msg) diff --git a/third-party/nwaku/waku/waku_core/message/default_values.nim b/third-party/nwaku/waku/waku_core/message/default_values.nim new file mode 100644 index 0000000..101e50c --- /dev/null +++ b/third-party/nwaku/waku/waku_core/message/default_values.nim @@ -0,0 +1,8 @@ +import ../../common/utils/parse_size_units + +const + ## https://rfc.vac.dev/spec/64/#message-size + DefaultMaxWakuMessageSizeStr* = "150KiB" # Remember that 1 MiB is the PubSub default + DefaultMaxWakuMessageSize* = parseCorrectMsgSize(DefaultMaxWakuMessageSizeStr) + + DefaultSafetyBufferProtocolOverhead* = 64 * 1024 # overhead measured in bytes diff --git a/third-party/nwaku/waku/waku_core/message/digest.nim b/third-party/nwaku/waku/waku_core/message/digest.nim new file mode 100644 index 0000000..8b99abd --- /dev/null +++ b/third-party/nwaku/waku/waku_core/message/digest.nim @@ -0,0 +1,68 @@ +{.push raises: [].} + +import std/sequtils, stew/[byteutils, endians2, arrayops], nimcrypto/sha2, results +import ../topics, ./message + +## 14/WAKU2-MESSAGE: Deterministic message hashing +## https://rfc.vac.dev/spec/14/#deterministic-message-hashing + +type WakuMessageHash* = array[32, byte] + +func shortLog*(hash: WakuMessageHash): string = + ## Returns compact string representation of ``WakuMessageHash``. + var hexhash = newStringOfCap(13) + hexhash &= hash.toOpenArray(0, 1).to0xHex() + hexhash &= "..." 
+ hexhash &= hash.toOpenArray(hash.len - 2, hash.high).toHex() + hexhash + +func `$`*(hash: WakuMessageHash): string = + shortLog(hash) + +const EmptyWakuMessageHash*: WakuMessageHash = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, +] + +converter fromBytes*(array: openArray[byte]): WakuMessageHash = + var hash: WakuMessageHash + discard copyFrom(hash, array) + hash + +converter toBytesArray*(digest: MDigest[256]): WakuMessageHash = + digest.data + +converter toBytes*(digest: MDigest[256]): seq[byte] = + toSeq(digest.data) + +proc hexToHash*(hexString: string): Result[WakuMessageHash, string] = + var hash: WakuMessageHash + + try: + hash = hexString.hexToSeqByte().fromBytes() + except ValueError as e: + return err("Exception converting hex string to hash: " & e.msg) + + return ok(hash) + +proc computeMessageHash*(pubsubTopic: PubsubTopic, msg: WakuMessage): WakuMessageHash = + var ctx: sha256 + ctx.init() + defer: + ctx.clear() + + ctx.update(pubsubTopic.toBytes()) + ctx.update(msg.payload) + ctx.update(msg.contentTopic.toBytes()) + ctx.update(msg.meta) + ctx.update(toBytesBE(uint64(msg.timestamp))) + + return ctx.finish() # Computes the hash + +proc cmp*(x, y: WakuMessageHash): int = + if x < y: + return -1 + elif x == y: + return 0 + + return 1 diff --git a/third-party/nwaku/waku/waku_core/message/message.nim b/third-party/nwaku/waku/waku_core/message/message.nim new file mode 100644 index 0000000..acd7055 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/message/message.nim @@ -0,0 +1,29 @@ +## Waku Message module. +## +## See https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-message.md +## for spec. + +{.push raises: [].} + +import ../topics, ../time + +const MaxMetaAttrLength* = 64 # 64 bytes + +type WakuMessage* = object # Data payload transmitted. + payload*: seq[byte] + # String identifier that can be used for content-based filtering. 
+ contentTopic*: ContentTopic + # Application specific metadata. + meta*: seq[byte] + # Number to discriminate different types of payload encryption. + # Compatibility with Whisper/WakuV1. + version*: uint32 + # Sender generated timestamp. + timestamp*: Timestamp + # The ephemeral attribute indicates signifies the transient nature of the + # message (if the message should be stored). + ephemeral*: bool + # Part of RFC 17: https://rfc.vac.dev/spec/17/ + # The proof attribute indicates that the message is not spam. This + # attribute will be used in the rln-relay protocol. + proof*: seq[byte] diff --git a/third-party/nwaku/waku/waku_core/multiaddrstr.nim b/third-party/nwaku/waku/waku_core/multiaddrstr.nim new file mode 100644 index 0000000..cd0caf2 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/multiaddrstr.nim @@ -0,0 +1,21 @@ +{.push raises: [].} + +import libp2p/[peerinfo, switch] + +import ./peers + +proc constructMultiaddrStr*(wireaddr: MultiAddress, peerId: PeerId): string = + # Constructs a multiaddress with both wire address and p2p identity + return $wireaddr & "/p2p/" & $peerId + +proc constructMultiaddrStr*(peerInfo: PeerInfo): string = + # Constructs a multiaddress with both location (wire) address and p2p identity + if peerInfo.listenAddrs.len == 0: + return "" + return constructMultiaddrStr(peerInfo.listenAddrs[0], peerInfo.peerId) + +proc constructMultiaddrStr*(remotePeerInfo: RemotePeerInfo): string = + # Constructs a multiaddress with both location (wire) address and p2p identity + if remotePeerInfo.addrs.len == 0: + return "" + return constructMultiaddrStr(remotePeerInfo.addrs[0], remotePeerInfo.peerId) diff --git a/third-party/nwaku/waku/waku_core/peers.nim b/third-party/nwaku/waku/waku_core/peers.nim new file mode 100644 index 0000000..883f266 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/peers.nim @@ -0,0 +1,378 @@ +{.push raises: [].} + +import + std/[options, sequtils, strutils, uri, net], + results, + chronos, + chronicles, + 
eth/keys, + eth/p2p/discoveryv5/enr, + eth/net/utils, + libp2p/crypto/crypto, + libp2p/crypto/secp, + libp2p/errors, + libp2p/multiaddress, + libp2p/multicodec, + libp2p/peerid, + libp2p/peerinfo, + libp2p/routing_record, + regex, + json_serialization +import ../waku_enr + +type + Connectedness* = enum + # NotConnected: default state for a new peer. No connection and no further information on connectedness. + NotConnected + # CannotConnect: attempted to connect to peer, but failed. + CannotConnect + # CanConnect: was recently connected to peer and disconnected gracefully. + CanConnect + # Connected: actively connected to peer. + Connected + + PeerOrigin* = enum + UnknownOrigin + Discv5 + Static + PeerExchange + Dns + + PeerDirection* = enum + UnknownDirection + Inbound + Outbound + +type RemotePeerInfo* = ref object + peerId*: PeerID + addrs*: seq[MultiAddress] + enr*: Option[enr.Record] + protocols*: seq[string] + + agent*: string + protoVersion*: string + publicKey*: crypto.PublicKey + connectedness*: Connectedness + disconnectTime*: int64 + origin*: PeerOrigin + direction*: PeerDirection + lastFailedConn*: Moment + numberFailedConn*: int + +func `$`*(remotePeerInfo: RemotePeerInfo): string = + $remotePeerInfo.peerId + +proc writeValue*( + w: var JsonWriter, value: RemotePeerInfo +) {.inline, raises: [IOError].} = + w.writeValue $value + +proc init*( + T: typedesc[RemotePeerInfo], + peerId: PeerID, + addrs: seq[MultiAddress] = @[], + enr: Option[enr.Record] = none(enr.Record), + protocols: seq[string] = @[], + publicKey: crypto.PublicKey = crypto.PublicKey(), + agent: string = "", + protoVersion: string = "", + connectedness: Connectedness = NotConnected, + disconnectTime: int64 = 0, + origin: PeerOrigin = UnknownOrigin, + direction: PeerDirection = UnknownDirection, + lastFailedConn: Moment = Moment.init(0, Second), + numberFailedConn: int = 0, +): T = + RemotePeerInfo( + peerId: peerId, + addrs: addrs, + enr: enr, + protocols: protocols, + publicKey: publicKey, 
+ agent: agent, + protoVersion: protoVersion, + connectedness: connectedness, + disconnectTime: disconnectTime, + origin: origin, + direction: direction, + lastFailedConn: lastFailedConn, + numberFailedConn: numberFailedConn, + ) + +proc init*( + T: typedesc[RemotePeerInfo], + peerId: string, + addrs: seq[MultiAddress] = @[], + enr: Option[enr.Record] = none(enr.Record), + protocols: seq[string] = @[], +): T {.raises: [Defect, ResultError[cstring], LPError].} = + let peerId = PeerID.init(peerId).tryGet() + RemotePeerInfo(peerId: peerId, addrs: addrs, enr: enr, protocols: protocols) + +## Parse + +proc validWireAddr(ma: MultiAddress): bool = + ## Check if wire Address is supported + const ValidTransports = mapOr(TCP, WebSockets) + return ValidTransports.match(ma) + +proc parsePeerInfo*(peer: RemotePeerInfo): Result[RemotePeerInfo, string] = + ## Parses a fully qualified peer multiaddr, in the + ## format `(ip4|ip6)/tcp/p2p`, into dialable PeerInfo + ok(peer) + +proc parsePeerInfoFromCircuitRelayAddr( + address: string +): Result[RemotePeerInfo, string] = + var match: RegexMatch2 + # Parse like: /ip4/162.19.247.156/tcp/60010/p2p/16Uiu2HAmCzWcYBCw3xKW8De16X9wtcbQrqD8x7CRRv4xpsFJ4oN8/p2p-circuit/p2p/16Uiu2HAm2eqzqp6xn32fzgGi8K4BuF88W4Xy6yxsmDcW8h1gj6ie + let maPattern = + re2"\/(ip4|ip6|dns|dnsaddr|dns4|dns6)\/[0-9a-fA-F:.]+\/(tcp|ws|wss)\/\d+\/p2p\/(.+)\/p2p-circuit\/p2p\/(.+)" + if not regex.match(address, maPattern, match): + return err("failed to parse ma: " & address) + + if match.captures.len != 4: + return err( + "failed parsing p2p-circuit addr, expected 4 regex capture groups: " & address & + " found: " & $(match.namedGroups.len) + ) + + let relayPeerId = address[match.group(2)] + let targetPeerIdStr = address[match.group(3)] + + discard PeerID.init(relayPeerId).valueOr: + return err("invalid relay peer id from p2p-circuit address: " & address) + let targetPeerId = PeerID.init(targetPeerIdStr).valueOr: + return err("invalid targetPeerId peer id from 
p2p-circuit address: " & address) + + let pattern = "/p2p-circuit" + let idx = address.find(pattern) + let wireAddr: MultiAddress = + if idx != -1: + # Extract everything from the start up to and including "/p2p-circuit" + let adr = address[0 .. (idx + pattern.len - 1)] + MultiAddress.init(adr).valueOr: + return err("could not create multiaddress from: " & adr) + else: + return err("could not find /p2p-circuit pattern in: " & address) + + return ok(RemotePeerInfo.init(targetPeerId, @[wireAddr])) + +proc parsePeerInfoFromRegularAddr(peer: MultiAddress): Result[RemotePeerInfo, string] = + var p2pPart: MultiAddress + var wireAddr = MultiAddress() + for addrPart in peer.items(): + case addrPart[].protoName()[] + # All protocols listed here: https://github.com/multiformats/multiaddr/blob/b746a7d014e825221cc3aea6e57a92d78419990f/protocols.csv + of "p2p": + p2pPart = + ?addrPart.mapErr( + proc(err: string): string = + "Error getting p2pPart [" & err & "]" + ) + of "ip4", "ip6", "dns", "dnsaddr", "dns4", "dns6", "tcp", "ws", "wss": + let val = + ?addrPart.mapErr( + proc(err: string): string = + "Error getting addrPart [" & err & "]" + ) + ?wireAddr.append(val).mapErr( + proc(err: string): string = + "Error appending addrPart [" & err & "]" + ) + + let p2pPartStr = p2pPart.toString().get() + if not p2pPartStr.contains("/"): + let msg = + "Error in parsePeerInfo: p2p part should contain / [p2pPartStr:" & p2pPartStr & + "] [peer:" & $peer & "]" + return err(msg) + + let peerId = + ?PeerID.init(p2pPartStr.split("/")[^1]).mapErr( + proc(e: cstring): string = + $e + ) + + if not wireAddr.validWireAddr(): + return err("invalid multiaddress: no supported transport found") + + return ok(RemotePeerInfo.init(peerId, @[wireAddr])) + +proc parsePeerInfo*(maddrs: varargs[MultiAddress]): Result[RemotePeerInfo, string] = + ## Parses a fully qualified peer multiaddr into dialable RemotePeerInfo + var peerID: PeerID + var addrs = newSeq[MultiAddress]() + for i in 0 ..< maddrs.len: + let 
peerAddrStr = $maddrs[i] + let peerInfo = + if "p2p-circuit" in peerAddrStr: + ?parsePeerInfoFromCircuitRelayAddr(peerAddrStr) + else: + ?parsePeerInfoFromRegularAddr(maddrs[i]) + if i == 0: + peerID = peerInfo.peerID + elif peerID.cmp(peerInfo.peerID) != 0: + return err("Error in parsePeerInfo: multiple peerIds received") + addrs.add(peerInfo.addrs[0]) + return ok(RemotePeerInfo.init(peerID, addrs)) + +proc parsePeerInfo*(maddrs: varargs[string]): Result[RemotePeerInfo, string] = + ## Parses a fully qualified peer multiaddr, in the + ## format `(ip4|ip6)/tcp/p2p`, into dialable PeerInfo + var multiAddresses = newSeq[MultiAddress]() + for maddr in maddrs: + let multiAddr = + ?MultiAddress.init(maddr).mapErr( + proc(err: string): string = + "MultiAddress.init [" & err & "]" + ) + multiAddresses.add(multiAddr) + + parsePeerInfo(multiAddresses) + +func getTransportProtocol(typedR: enr.TypedRecord): Option[IpTransportProtocol] = + if typedR.tcp6.isSome() or typedR.tcp.isSome(): + return some(IpTransportProtocol.tcpProtocol) + + if typedR.udp6.isSome() or typedR.udp.isSome(): + return some(IpTransportProtocol.udpProtocol) + + return none(IpTransportProtocol) + +proc parseUrlPeerAddr*( + peerAddr: Option[string] +): Result[Option[RemotePeerInfo], string] = + # Checks whether the peerAddr parameter represents a valid p2p multiaddress. 
+ # The param must be in the format `(ip4|ip6)/tcp/p2p/$peerId` but URL-encoded + if not peerAddr.isSome() or peerAddr.get() == "": + return ok(none(RemotePeerInfo)) + + let parsedAddr = decodeUrl(peerAddr.get()) + let parsedPeerInfo = parsePeerInfo(parsedAddr) + if parsedPeerInfo.isErr(): + return err("Failed parsing remote peer info [" & parsedPeerInfo.error & "]") + + return ok(some(parsedPeerInfo.value)) + +proc toRemotePeerInfo*(enrRec: enr.Record): Result[RemotePeerInfo, cstring] = + ## Converts an ENR to dialable RemotePeerInfo + let typedR = enr.TypedRecord.fromRecord(enrRec) + if not typedR.secp256k1.isSome(): + return err("enr: no secp256k1 key in record") + + let + pubKey = ?keys.PublicKey.fromRaw(typedR.secp256k1.get()) + peerId = + ?PeerID.init(crypto.PublicKey(scheme: Secp256k1, skkey: secp.SkPublicKey(pubKey))) + + let transportProto = getTransportProtocol(typedR) + if transportProto.isNone(): + return err("enr: could not determine transport protocol") + + var addrs = newSeq[MultiAddress]() + case transportProto.get() + of tcpProtocol: + if typedR.ip.isSome() and typedR.tcp.isSome(): + let ip = ipv4(typedR.ip.get()) + addrs.add MultiAddress.init(ip, tcpProtocol, Port(typedR.tcp.get())) + + if typedR.ip6.isSome(): + let ip = ipv6(typedR.ip6.get()) + if typedR.tcp6.isSome(): + addrs.add MultiAddress.init(ip, tcpProtocol, Port(typedR.tcp6.get())) + elif typedR.tcp.isSome(): + addrs.add MultiAddress.init(ip, tcpProtocol, Port(typedR.tcp.get())) + else: + discard + of udpProtocol: + if typedR.ip.isSome() and typedR.udp.isSome(): + let ip = ipv4(typedR.ip.get()) + addrs.add MultiAddress.init(ip, udpProtocol, Port(typedR.udp.get())) + + if typedR.ip6.isSome(): + let ip = ipv6(typedR.ip6.get()) + if typedR.udp6.isSome(): + addrs.add MultiAddress.init(ip, udpProtocol, Port(typedR.udp6.get())) + elif typedR.udp.isSome(): + addrs.add MultiAddress.init(ip, udpProtocol, Port(typedR.udp.get())) + else: + discard + + if addrs.len == 0: + return err("enr: no 
addresses in record") + + let protocolsRes = catch: + enrRec.getCapabilitiesCodecs() + + var protocols: seq[string] + if not protocolsRes.isErr(): + protocols = protocolsRes.get() + else: + error "Could not retrieve supported protocols from enr", + peerId = peerId, msg = protocolsRes.error.msg + + return ok(RemotePeerInfo.init(peerId, addrs, some(enrRec), protocols)) + +converter toRemotePeerInfo*(peerRecord: PeerRecord): RemotePeerInfo = + ## Converts peer records to dialable RemotePeerInfo + ## Useful if signed peer records have been received in an exchange + RemotePeerInfo.init(peerRecord.peerId, peerRecord.addresses.mapIt(it.address)) + +converter toRemotePeerInfo*(peerInfo: PeerInfo): RemotePeerInfo = + ## Converts the local peerInfo to dialable RemotePeerInfo + ## Useful for testing or internal connections + RemotePeerInfo( + peerId: peerInfo.peerId, + addrs: peerInfo.listenAddrs, + enr: none(enr.Record), + protocols: peerInfo.protocols, + agent: peerInfo.agentVersion, + protoVersion: peerInfo.protoVersion, + publicKey: peerInfo.publicKey, + ) + +proc hasProtocol*(ma: MultiAddress, proto: string): bool = + ## Checks if a multiaddress contains a given protocol + ## Useful for filtering multiaddresses based on their protocols + ## + ## Returns ``true`` if ``ma`` contains protocol ``proto``. 
+ let proto = MultiCodec.codec(proto) + + let protos = ma.protocols() + if protos.isErr(): + return false + + return protos.get().anyIt(it == proto) + +func hasUdpPort*(peer: RemotePeerInfo): bool = + if peer.enr.isNone(): + return false + + let + enrRec = peer.enr.get() + typedEnr = enr.TypedRecord.fromRecord(enrRec) + + typedEnr.udp.isSome() or typedEnr.udp6.isSome() + +proc getAgent*(peer: RemotePeerInfo): string = + ## Returns the agent version of a peer + if peer.agent.isEmptyOrWhitespace(): + return "unknown" + + return peer.agent + +proc getShards*(peer: RemotePeerInfo): seq[uint16] = + if peer.enr.isNone(): + return @[] + + let enrRec = peer.enr.get() + let typedRecord = enrRec.toTyped().valueOr: + trace "invalid ENR record", error = error + return @[] + + let shards = typedRecord.relaySharding() + if shards.isSome(): + return shards.get().shardIds + + return @[] diff --git a/third-party/nwaku/waku/waku_core/subscription.nim b/third-party/nwaku/waku/waku_core/subscription.nim new file mode 100644 index 0000000..19f3386 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/subscription.nim @@ -0,0 +1,3 @@ +import ./subscription/subscription_manager, ./subscription/push_handler + +export subscription_manager, push_handler diff --git a/third-party/nwaku/waku/waku_core/subscription/push_handler.nim b/third-party/nwaku/waku/waku_core/subscription/push_handler.nim new file mode 100644 index 0000000..139f134 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/subscription/push_handler.nim @@ -0,0 +1,8 @@ +{.push raises: [].} + +import chronos + +import ../topics, ../message + +type FilterPushHandler* = + proc(pubsubTopic: PubsubTopic, message: WakuMessage) {.async, gcsafe, closure.} diff --git a/third-party/nwaku/waku/waku_core/subscription/subscription_manager.nim b/third-party/nwaku/waku/waku_core/subscription/subscription_manager.nim new file mode 100644 index 0000000..1b950b3 --- /dev/null +++ 
b/third-party/nwaku/waku/waku_core/subscription/subscription_manager.nim @@ -0,0 +1,52 @@ +{.push raises: [].} + +import std/tables, results, chronicles, chronos + +import ./push_handler, ../topics, ../message + +## Subscription manager +type SubscriptionManager* = object + subscriptions: TableRef[(string, ContentTopic), FilterPushHandler] + +proc init*(T: type SubscriptionManager): T = + SubscriptionManager( + subscriptions: newTable[(string, ContentTopic), FilterPushHandler]() + ) + +proc clear*(m: var SubscriptionManager) = + m.subscriptions.clear() + +proc registerSubscription*( + m: SubscriptionManager, + pubsubTopic: PubsubTopic, + contentTopic: ContentTopic, + handler: FilterPushHandler, +) = + try: + # TODO: Handle over subscription surprises + m.subscriptions[(pubsubTopic, contentTopic)] = handler + except CatchableError: + error "failed to register filter subscription", error = getCurrentExceptionMsg() + +proc removeSubscription*( + m: SubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic +) = + m.subscriptions.del((pubsubTopic, contentTopic)) + +proc notifySubscriptionHandler*( + m: SubscriptionManager, + pubsubTopic: PubsubTopic, + contentTopic: ContentTopic, + message: WakuMessage, +) = + if not m.subscriptions.hasKey((pubsubTopic, contentTopic)): + return + + try: + let handler = m.subscriptions[(pubsubTopic, contentTopic)] + asyncSpawn handler(pubsubTopic, message) + except CatchableError: + discard + +proc getSubscriptionsCount*(m: SubscriptionManager): int = + m.subscriptions.len() diff --git a/third-party/nwaku/waku/waku_core/time.nim b/third-party/nwaku/waku/waku_core/time.nim new file mode 100644 index 0000000..cd46b33 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/time.nim @@ -0,0 +1,37 @@ +{.push raises: [].} + +import std/times, metrics + +type Timestamp* = int64 # A nanosecond precision timestamp + +proc getNanosecondTime*(timeInSeconds: int64): Timestamp = + let ns = Timestamp(timeInSeconds * int64(1_000_000_000)) 
+ return ns + +proc getNanosecondTime*(timeInSeconds: float64): Timestamp = + let ns = Timestamp(timeInSeconds * float64(1_000_000_000)) + return ns + +proc nowInUnixFloat(): float = + return getTime().toUnixFloat() + +proc getNowInNanosecondTime*(): Timestamp = + return getNanosecondTime(nowInUnixFloat()) + +template nanosecondTime*( + collector: Summary | Histogram | typedesc[IgnoredCollector], body: untyped +) = + when defined(metrics): + let start = nowInUnixFloat() + body + collector.observe(nowInUnixFloat() - start) + else: + body + +template nanosecondTime*(collector: Gauge, body: untyped) = + when defined(metrics): + let start = nowInUnixFloat() + body + metrics.set(collector, nowInUnixFloat() - start) + else: + body diff --git a/third-party/nwaku/waku/waku_core/topics.nim b/third-party/nwaku/waku/waku_core/topics.nim new file mode 100644 index 0000000..62b9f60 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/topics.nim @@ -0,0 +1,12 @@ +import ./topics/content_topic, ./topics/pubsub_topic, ./topics/sharding + +export content_topic, pubsub_topic, sharding + +type + SubscriptionKind* = enum + ContentSub + ContentUnsub + PubsubSub + PubsubUnsub + + SubscriptionEvent* = tuple[kind: SubscriptionKind, topic: string] diff --git a/third-party/nwaku/waku/waku_core/topics/content_topic.nim b/third-party/nwaku/waku/waku_core/topics/content_topic.nim new file mode 100644 index 0000000..5984a76 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/topics/content_topic.nim @@ -0,0 +1,140 @@ +## Waku content topics definition and namespacing utils +## +## See 23/WAKU2-TOPICS RFC: https://rfc.vac.dev/spec/23/ + +{.push raises: [].} + +import std/options, std/strutils, results +import ./parsing + +export parsing + +## Content topic + +type ContentTopic* = string + +const DefaultContentTopic* = ContentTopic("/waku/2/default-content/proto") + +## Namespaced content topic + +type NsContentTopic* = object + generation*: Option[int] + application*: string + version*: string 
+ name*: string + encoding*: string + +proc init*( + T: type NsContentTopic, + generation: Option[int], + application: string, + version: string, + name: string, + encoding: string, +): T = + NsContentTopic( + generation: generation, + application: application, + version: version, + name: name, + encoding: encoding, + ) + +# Serialization + +proc `$`*(topic: NsContentTopic): string = + ## Returns a string representation of a namespaced topic + ## in the format `////` + ## Autosharding adds 1 optional prefix `/ + + var formatted = "" + + if topic.generation.isSome(): + formatted = formatted & "/" & $topic.generation.get() + + formatted & "/" & topic.application & "/" & topic.version & "/" & topic.name & "/" & + topic.encoding + +# Deserialization + +proc parse*( + T: type NsContentTopic, topic: ContentTopic | string +): ParsingResult[NsContentTopic] = + ## Splits a namespaced topic string into its constituent parts. + ## The topic string has to be in the format `////` + ## Autosharding adds 1 optional prefix `/ + + if not topic.startsWith("/"): + return err( + ParsingError.invalidFormat("content-topic '" & topic & "' must start with slash") + ) + + let parts = topic[1 ..< topic.len].split("/") + + case parts.len + of 4: + let app = parts[0] + if app.len == 0: + return err(ParsingError.missingPart("application")) + + let ver = parts[1] + if ver.len == 0: + return err(ParsingError.missingPart("version")) + + let name = parts[2] + if name.len == 0: + return err(ParsingError.missingPart("topic-name")) + + let enc = parts[3] + if enc.len == 0: + return err(ParsingError.missingPart("encoding")) + + return ok(NsContentTopic.init(none(int), app, ver, name, enc)) + of 5: + if parts[0].len == 0: + return err(ParsingError.missingPart("generation")) + + let gen = + try: + parseInt(parts[0]) + except ValueError: + return err(ParsingError.invalidFormat("generation should be a numeric value")) + + let app = parts[1] + if app.len == 0: + return 
err(ParsingError.missingPart("application")) + + let ver = parts[2] + if ver.len == 0: + return err(ParsingError.missingPart("version")) + + let name = parts[3] + if name.len == 0: + return err(ParsingError.missingPart("topic-name")) + + let enc = parts[4] + if enc.len == 0: + return err(ParsingError.missingPart("encoding")) + + return ok(NsContentTopic.init(some(gen), app, ver, name, enc)) + else: + let errMsg = + "Invalid content topic structure. Expected either //// or /////" + return err(ParsingError.invalidFormat(errMsg)) + +proc parse*( + T: type NsContentTopic, topics: seq[ContentTopic] +): ParsingResult[seq[NsContentTopic]] = + var res: seq[NsContentTopic] = @[] + for contentTopic in topics: + let parseRes = NsContentTopic.parse(contentTopic) + if parseRes.isErr(): + let error: ParsingError = parseRes.error + return ParsingResult[seq[NsContentTopic]].err(error) + res.add(parseRes.value) + return ParsingResult[seq[NsContentTopic]].ok(res) + +# Content topic compatibility + +converter toContentTopic*(topic: NsContentTopic): ContentTopic = + $topic diff --git a/third-party/nwaku/waku/waku_core/topics/parsing.nim b/third-party/nwaku/waku/waku_core/topics/parsing.nim new file mode 100644 index 0000000..b36b5eb --- /dev/null +++ b/third-party/nwaku/waku/waku_core/topics/parsing.nim @@ -0,0 +1,30 @@ +{.push raises: [].} + +import results + +type + ParsingErrorKind* {.pure.} = enum + InvalidFormat + MissingPart + + ParsingError* = object + case kind*: ParsingErrorKind + of InvalidFormat: + cause*: string + of MissingPart: + part*: string + +type ParsingResult*[T] = Result[T, ParsingError] + +proc invalidFormat*(T: type ParsingError, cause = "invalid format"): T = + ParsingError(kind: ParsingErrorKind.InvalidFormat, cause: cause) + +proc missingPart*(T: type ParsingError, part = "unknown"): T = + ParsingError(kind: ParsingErrorKind.MissingPart, part: part) + +proc `$`*(err: ParsingError): string = + case err.kind + of ParsingErrorKind.InvalidFormat: + return 
"invalid format: " & err.cause + of ParsingErrorKind.MissingPart: + return "missing part: " & err.part diff --git a/third-party/nwaku/waku/waku_core/topics/pubsub_topic.nim b/third-party/nwaku/waku/waku_core/topics/pubsub_topic.nim new file mode 100644 index 0000000..27ea271 --- /dev/null +++ b/third-party/nwaku/waku/waku_core/topics/pubsub_topic.nim @@ -0,0 +1,91 @@ +## Waku pub-sub topics definition and namespacing utils +## +## See 23/WAKU2-TOPICS RFC: https://rfc.vac.dev/spec/23/ + +{.push raises: [].} + +import std/strutils, stew/base10, results +import ./parsing + +export parsing + +## Pub-sub topic + +type PubsubTopic* = string + +## Relay Shard + +type RelayShard* = object + clusterId*: uint16 + shardId*: uint16 + +const DefaultShardId* = uint16(0) +const DefaultClusterId* = uint16(0) +const DefaultRelayShard* = + RelayShard(clusterId: DefaultClusterId, shardId: DefaultShardId) + +# Serialization + +proc `$`*(topic: RelayShard): string = + ## Returns a string representation of a namespaced topic + ## in the format `/waku/2/rs// + return "/waku/2/rs/" & $topic.clusterId & "/" & $topic.shardId + +const DefaultPubsubTopic* = $DefaultRelayShard + +# Deserialization + +const + Waku2PubsubTopicPrefix = "/waku/2" + StaticShardingPubsubTopicPrefix = Waku2PubsubTopicPrefix & "/rs" + +proc parseStaticSharding*( + T: type RelayShard, topic: PubsubTopic +): ParsingResult[RelayShard] = + if not topic.startsWith(StaticShardingPubsubTopicPrefix): + return err( + ParsingError.invalidFormat("must start with " & StaticShardingPubsubTopicPrefix) + ) + + let parts = topic[11 ..< topic.len].split("/") + if parts.len != 2: + return err(ParsingError.invalidFormat("invalid topic structure")) + + let clusterPart = parts[0] + if clusterPart.len == 0: + return err(ParsingError.missingPart("cluster_id")) + let clusterId = + ?Base10.decode(uint16, clusterPart).mapErr( + proc(err: auto): auto = + ParsingError.invalidFormat($err) + ) + + let shardPart = parts[1] + if shardPart.len == 0: 
+ return err(ParsingError.missingPart("shard_number")) + let shardId = + ?Base10.decode(uint16, shardPart).mapErr( + proc(err: auto): auto = + ParsingError.invalidFormat($err) + ) + + ok(RelayShard(clusterId: clusterId, shardId: shardId)) + +proc parse*(T: type RelayShard, topic: PubsubTopic): ParsingResult[RelayShard] = + ## Splits a namespaced topic string into its constituent parts. + ## The topic string has to be in the format `////` + RelayShard.parseStaticSharding(topic) + +# Pubsub topic compatibility + +converter toPubsubTopic*(topic: RelayShard): PubsubTopic = + $topic + +proc `==`*[T: RelayShard](x, y: T): bool = + if x.clusterId != y.clusterId: + return false + + if x.shardId != y.shardId: + return false + + return true diff --git a/third-party/nwaku/waku/waku_core/topics/sharding.nim b/third-party/nwaku/waku/waku_core/topics/sharding.nim new file mode 100644 index 0000000..006850a --- /dev/null +++ b/third-party/nwaku/waku/waku_core/topics/sharding.nim @@ -0,0 +1,141 @@ +## Waku autosharding utils +## +## See 51/WAKU2-RELAY-SHARDING RFC: https://rfc.vac.dev/spec/51/#automatic-sharding + +{.push raises: [].} + +import nimcrypto, std/options, std/tables, stew/endians2, results, stew/byteutils + +import ./content_topic, ./pubsub_topic + +# TODO: this is autosharding, not just "sharding" +type Sharding* = object + clusterId*: uint16 + # TODO: generations could be stored in a table here + shardCountGenZero*: uint32 + +proc new*(T: type Sharding, clusterId: uint16, shardCount: uint32): T = + return Sharding(clusterId: clusterId, shardCountGenZero: shardCount) + +proc getGenZeroShard*(s: Sharding, topic: NsContentTopic, count: int): RelayShard = + let bytes = toBytes(topic.application) & toBytes(topic.version) + + let hash = sha256.digest(bytes) + + # We only use the last 64 bits of the hash as having more shards is unlikely. + let hashValue = uint64.fromBytesBE(hash.data[24 .. 
31]) + + let shard = hashValue mod uint64(count) + + RelayShard(clusterId: s.clusterId, shardId: uint16(shard)) + +proc getShard*(s: Sharding, topic: NsContentTopic): Result[RelayShard, string] = + ## Compute the (pubsub topic) shard to use for this content topic. + + if topic.generation.isNone(): + ## Implicit generation # is 0 for all content topic + return ok(s.getGenZeroShard(topic, int(s.shardCountGenZero))) + + case topic.generation.get() + of 0: + return ok(s.getGenZeroShard(topic, int(s.shardCountGenZero))) + else: + return err("Generation > 0 are not supported yet") + +proc getShard*(s: Sharding, topic: ContentTopic): Result[RelayShard, string] = + let parsedTopic = NsContentTopic.parse(topic).valueOr: + return err($error) + + let shard = ?s.getShard(parsedTopic) + + ok(shard) + +proc getShardsFromContentTopics*( + s: Sharding, contentTopics: ContentTopic | seq[ContentTopic] +): Result[Table[RelayShard, seq[NsContentTopic]], string] = + let topics = + when contentTopics is seq[ContentTopic]: + contentTopics + else: + @[contentTopics] + + let parseRes = NsContentTopic.parse(topics) + let nsContentTopics = + if parseRes.isErr(): + return err("Cannot parse content topic: " & $parseRes.error) + else: + parseRes.get() + + var topicMap = initTable[RelayShard, seq[NsContentTopic]]() + for content in nsContentTopics: + let shard = s.getShard(content).valueOr: + return err("Cannot deduce shard from content topic: " & $error) + + if not topicMap.hasKey(shard): + topicMap[shard] = @[] + + try: + topicMap[shard].add(content) + except CatchableError: + return err(getCurrentExceptionMsg()) + + ok(topicMap) + +#type ShardsPriority = seq[tuple[topic: RelayShard, value: float64]] + +#[ proc shardCount*(topic: NsContentTopic): Result[int, string] = + ## Returns the total shard count from the content topic. 
+ let shardCount = + if topic.generation.isNone(): + ## Implicit generation # is 0 for all content topic + GenerationZeroShardsCount + else: + case topic.generation.get(): + of 0: + GenerationZeroShardsCount + else: + return err("Generation > 0 are not supported yet") + + ok((shardCount)) ]# + +#[ proc applyWeight(hashValue: uint64, weight: float64): float64 = + (-weight) / math.ln(float64(hashValue) / float64(high(uint64))) ]# + +#[ proc hashOrder*(x, y: (RelayShard, float64)): int = + cmp(x[1], y[1]) ]# + +#[ proc weightedShardList*(topic: NsContentTopic, shardCount: int, weightList: seq[float64]): Result[ShardsPriority, string] = + ## Returns the ordered list of shards and their priority values. + if weightList.len < shardCount: + return err("Must provide weights for every shards") + + let shardsNWeights = zip(toSeq(0..shardCount), weightList) + + var list = newSeq[(RelayShard, float64)](shardCount) + + for (shard, weight) in shardsNWeights: + let pubsub = RelayShard(clusterId: ClusterId, shardId: uint16(shard)) + + let clusterBytes = toBytesBE(uint16(ClusterId)) + let shardBytes = toBytesBE(uint16(shard)) + let bytes = toBytes(topic.application) & toBytes(topic.version) & @clusterBytes & @shardBytes + let hash = sha256.digest(bytes) + let hashValue = uint64.fromBytesBE(hash.data) + let value = applyWeight(hashValue, weight) + + list[shard] = (pubsub, value) + + list.sort(hashOrder) + + ok(list) ]# + +#[ proc singleHighestWeigthShard*(topic: NsContentTopic): Result[RelayShard, string] = + let count = ? shardCount(topic) + + let weights = repeat(1.0, count) + + let list = ? 
weightedShardList(topic, count, weights) + + let (pubsub, _) = list[list.len - 1] + + ok(pubsub) ]# diff --git a/third-party/nwaku/waku/waku_enr.nim b/third-party/nwaku/waku/waku_enr.nim new file mode 100644 index 0000000..e3fcde9 --- /dev/null +++ b/third-party/nwaku/waku/waku_enr.nim @@ -0,0 +1,8 @@ +import + ./common/enr, + ./waku_enr/capabilities, + ./waku_enr/multiaddr, + ./waku_enr/sharding, + ./waku_enr/mix + +export enr, capabilities, multiaddr, sharding, mix diff --git a/third-party/nwaku/waku/waku_enr/capabilities.nim b/third-party/nwaku/waku/waku_enr/capabilities.nim new file mode 100644 index 0000000..f74b8b1 --- /dev/null +++ b/third-party/nwaku/waku/waku_enr/capabilities.nim @@ -0,0 +1,122 @@ +{.push raises: [].} + +import + std/[options, bitops, sequtils, net, tables], results, eth/keys, libp2p/crypto/crypto +import ../common/enr, ../waku_core/codecs +import mix/mix_protocol + +const CapabilitiesEnrField* = "waku2" + +type + ## 8-bit flag field to indicate Waku node capabilities. + ## Only the 4 LSBs are currently defined according + ## to RFC31 (https://rfc.vac.dev/spec/31/). 
+ CapabilitiesBitfield* = distinct uint8 + + ## See: https://rfc.vac.dev/spec/31/#waku2-enr-key + ## each enum numbers maps to a bit (where 0 is the LSB) + Capabilities* {.pure.} = enum + Relay = 0 + Store = 1 + Filter = 2 + Lightpush = 3 + Sync = 4 + Mix = 5 + +const capabilityToCodec = { + Capabilities.Relay: WakuRelayCodec, + Capabilities.Store: WakuStoreCodec, + Capabilities.Filter: WakuFilterSubscribeCodec, + Capabilities.Lightpush: WakuLightPushCodec, + Capabilities.Sync: WakuReconciliationCodec, + Capabilities.Mix: MixProtocolID, +}.toTable + +func init*( + T: type CapabilitiesBitfield, + lightpush, filter, store, relay, sync, mix: bool = false, +): T = + ## Creates an waku2 ENR flag bit field according to RFC 31 (https://rfc.vac.dev/spec/31/) + var bitfield: uint8 + if relay: + bitfield.setBit(0) + if store: + bitfield.setBit(1) + if filter: + bitfield.setBit(2) + if lightpush: + bitfield.setBit(3) + if sync: + bitfield.setBit(4) + if mix: + bitfield.setBit(5) + CapabilitiesBitfield(bitfield) + +func init*(T: type CapabilitiesBitfield, caps: varargs[Capabilities]): T = + ## Creates an waku2 ENR flag bit field according to RFC 31 (https://rfc.vac.dev/spec/31/) + var bitfield: uint8 + for cap in caps: + bitfield.setBit(ord(cap)) + CapabilitiesBitfield(bitfield) + +converter toCapabilitiesBitfield*(field: uint8): CapabilitiesBitfield = + CapabilitiesBitfield(field) + +proc supportsCapability*(bitfield: CapabilitiesBitfield, cap: Capabilities): bool = + testBit(bitfield.uint8, ord(cap)) + +func toCapabilities*(bitfield: CapabilitiesBitfield): seq[Capabilities] = + toSeq(Capabilities.low .. 
Capabilities.high).filterIt( + supportsCapability(bitfield, it) + ) + +# ENR builder extension + +proc withWakuCapabilities*(builder: var EnrBuilder, caps: CapabilitiesBitfield) = + builder.addFieldPair(CapabilitiesEnrField, @[caps.uint8]) + +proc withWakuCapabilities*(builder: var EnrBuilder, caps: varargs[Capabilities]) = + withWakuCapabilities(builder, CapabilitiesBitfield.init(caps)) + +proc withWakuCapabilities*(builder: var EnrBuilder, caps: openArray[Capabilities]) = + withWakuCapabilities(builder, CapabilitiesBitfield.init(@caps)) + +# ENR record accessors (e.g., Record, TypedRecord, etc.) + +func waku2*(record: TypedRecord): Option[CapabilitiesBitfield] = + let field = record.tryGet(CapabilitiesEnrField, seq[uint8]) + if field.isNone(): + return none(CapabilitiesBitfield) + + if field.get().len != 1: + return none(CapabilitiesBitfield) + + some(CapabilitiesBitfield(field.get()[0])) + +proc supportsCapability*(r: Record, cap: Capabilities): bool = + let recordRes = r.toTyped() + if recordRes.isErr(): + return false + + let bitfieldOpt = recordRes.value.waku2 + if bitfieldOpt.isNone(): + return false + + let bitfield = bitfieldOpt.get() + bitfield.supportsCapability(cap) + +proc getCapabilities*(r: Record): seq[Capabilities] = + let recordRes = r.toTyped() + if recordRes.isErr(): + return @[] + + let bitfieldOpt = recordRes.value.waku2 + if bitfieldOpt.isNone(): + return @[] + + let bitfield = bitfieldOpt.get() + bitfield.toCapabilities() + +proc getCapabilitiesCodecs*(r: Record): seq[string] {.raises: [ValueError].} = + let capabilities = r.getCapabilities() + return capabilities.mapIt(capabilityToCodec[it]) diff --git a/third-party/nwaku/waku/waku_enr/mix.nim b/third-party/nwaku/waku/waku_enr/mix.nim new file mode 100644 index 0000000..50468df --- /dev/null +++ b/third-party/nwaku/waku/waku_enr/mix.nim @@ -0,0 +1,20 @@ +{.push raises: [].} + +import std/[options], results, libp2p/crypto/curve25519, nimcrypto/utils as ncrutils + +import ../common/enr + 
+const MixKeyEnrField* = "mix-key" + +func withMixKey*(builder: var EnrBuilder, mixPubKey: Curve25519Key) = + builder.addFieldPair(MixKeyEnrField, getBytes(mixPubKey)) + +func mixKey*(record: Record): Option[seq[byte]] = + let recordRes = record.toTyped() + if recordRes.isErr(): + return none(seq[byte]) + + let field = recordRes.value.tryGet(MixKeyEnrField, seq[byte]) + if field.isNone(): + return none(seq[byte]) + return field diff --git a/third-party/nwaku/waku/waku_enr/multiaddr.nim b/third-party/nwaku/waku/waku_enr/multiaddr.nim new file mode 100644 index 0000000..83e3d19 --- /dev/null +++ b/third-party/nwaku/waku/waku_enr/multiaddr.nim @@ -0,0 +1,95 @@ +{.push raises: [].} + +import + std/[options, sequtils, net], + stew/endians2, + results, + eth/keys, + libp2p/[multiaddress, multicodec], + libp2p/crypto/crypto +import ../common/enr + +const MultiaddrEnrField* = "multiaddrs" + +func encodeMultiaddrs*(multiaddrs: seq[MultiAddress]): seq[byte] = + var buffer = newSeq[byte]() + for multiaddr in multiaddrs: + let + raw = multiaddr.data.buffer # binary encoded multiaddr + size = raw.len.uint16.toBytes(Endianness.bigEndian) + # size as Big Endian unsigned 16-bit integer + + buffer.add(concat(@size, raw)) + + buffer + +func readBytes( + rawBytes: seq[byte], numBytes: int, pos: var int = 0 +): Result[seq[byte], cstring] = + ## Attempts to read `numBytes` from a sequence, from + ## position `pos`. Returns the requested slice or + ## an error if `rawBytes` boundary is exceeded. 
+ ## + ## If successful, `pos` is advanced by `numBytes` + if rawBytes[pos ..^ 1].len() < numBytes: + return err("insufficient bytes") + + let slicedSeq = rawBytes[pos ..< pos + numBytes] + pos += numBytes + + return ok(slicedSeq) + +func decodeMultiaddrs(buffer: seq[byte]): EnrResult[seq[MultiAddress]] = + ## Parses a `multiaddrs` ENR field according to + ## https://rfc.vac.dev/spec/31/ + var multiaddrs: seq[MultiAddress] + + var pos = 0 + while pos < buffer.len(): + let addrLenRaw = ?readBytes(buffer, 2, pos) + let addrLen = uint16.fromBytesBE(addrLenRaw) + if addrLen == 0: + # Ensure pos always advances and we don't get stuck in infinite loop + return err("malformed multiaddr field: invalid length") + + let addrRaw = ?readBytes(buffer, addrLen.int, pos) + let address = MultiAddress.init(addrRaw).valueOr: + continue # Not a valid multiaddress + + multiaddrs.add(address) + + return ok(multiaddrs) + +# ENR builder extension +func stripPeerId(multiaddr: MultiAddress): MultiAddress = + if not multiaddr.contains(multiCodec("p2p")).get(): + return multiaddr + + var cleanAddr = MultiAddress.init() + for item in multiaddr.items: + if item.value.protoName().get() != "p2p": + # Add all parts except p2p peerId + discard cleanAddr.append(item.value) + + return cleanAddr + +func withMultiaddrs*(builder: var EnrBuilder, multiaddrs: seq[MultiAddress]) = + let multiaddrs = multiaddrs.map(stripPeerId) + let value = encodeMultiaddrs(multiaddrs) + builder.addFieldPair(MultiaddrEnrField, value) + +func withMultiaddrs*(builder: var EnrBuilder, multiaddrs: varargs[MultiAddress]) = + withMultiaddrs(builder, @multiaddrs) + +# ENR record accessors (e.g., Record, TypedRecord, etc.) 
+ +func multiaddrs*(record: TypedRecord): Option[seq[MultiAddress]] = + let field = record.tryGet(MultiaddrEnrField, seq[byte]) + if field.isNone(): + return none(seq[MultiAddress]) + + let decodeRes = decodeMultiaddrs(field.get()) + if decodeRes.isErr(): + return none(seq[MultiAddress]) + + some(decodeRes.value) diff --git a/third-party/nwaku/waku/waku_enr/sharding.nim b/third-party/nwaku/waku/waku_enr/sharding.nim new file mode 100644 index 0000000..4ee77bf --- /dev/null +++ b/third-party/nwaku/waku/waku_enr/sharding.nim @@ -0,0 +1,255 @@ +{.push raises: [].} + +import + std/[options, bitops, sequtils, net], + stew/endians2, + results, + chronicles, + eth/keys, + libp2p/[multiaddress, multicodec], + libp2p/crypto/crypto +import ../common/enr, ../waku_core/topics/pubsub_topic + +logScope: + topics = "waku enr sharding" + +const MaxShardIndex*: uint16 = 1023 + +const + ShardingIndicesListEnrField* = "rs" + ShardingIndicesListMaxLength* = 64 + ShardingBitVectorEnrField* = "rsv" + +type RelayShards* = object + clusterId*: uint16 + shardIds*: seq[uint16] + +func topics*(rs: RelayShards): seq[RelayShard] = + rs.shardIds.mapIt(RelayShard(clusterId: rs.clusterId, shardId: it)) + +func init*(T: type RelayShards, clusterId, shardId: uint16): Result[T, string] = + if shardId > MaxShardIndex: + return err("invalid shard Id") + + ok(RelayShards(clusterId: clusterId, shardIds: @[shardId])) + +func init*( + T: type RelayShards, clusterId: uint16, shardIds: varargs[uint16] +): Result[T, string] = + if toSeq(shardIds).anyIt(it > MaxShardIndex): + return err("invalid shard") + + let indicesSeq = deduplicate(@shardIds) + if shardIds.len < 1: + return err("invalid shard count") + + ok(RelayShards(clusterId: clusterId, shardIds: indicesSeq)) + +func init*( + T: type RelayShards, clusterId: uint16, shardIds: seq[uint16] +): Result[T, string] = + if shardIds.anyIt(it > MaxShardIndex): + return err("invalid shard") + + let indicesSeq = deduplicate(shardIds) + if shardIds.len < 1: + 
return err("invalid shard count") + + ok(RelayShards(clusterId: clusterId, shardIds: indicesSeq)) + +func topicsToRelayShards*(topics: seq[string]): Result[Option[RelayShards], string] = + if topics.len < 1: + return ok(none(RelayShards)) + + let parsedTopicsRes = topics.mapIt(RelayShard.parse(it)) + + for res in parsedTopicsRes: + if res.isErr(): + return err("failed to parse topic: " & $res.error) + + if parsedTopicsRes.anyIt(it.get().clusterId != parsedTopicsRes[0].get().clusterId): + return err("use shards with the same cluster Id.") + + let relayShard = + ?RelayShards.init( + parsedTopicsRes[0].get().clusterId, parsedTopicsRes.mapIt(it.get().shardId) + ) + + return ok(some(relayShard)) + +func contains*(rs: RelayShards, clusterId, shardId: uint16): bool = + return rs.clusterId == clusterId and rs.shardIds.contains(shardId) + +func contains*(rs: RelayShards, shard: RelayShard): bool = + return rs.contains(shard.clusterId, shard.shardId) + +func contains*(rs: RelayShards, topic: PubsubTopic): bool = + let parseRes = RelayShard.parse(topic) + if parseRes.isErr(): + return false + + rs.contains(parseRes.value) + +# ENR builder extension + +func toIndicesList*(rs: RelayShards): EnrResult[seq[byte]] = + if rs.shardIds.len > high(uint8).int: + return err("shards list too long") + + var res: seq[byte] + res.add(rs.clusterId.toBytesBE()) + + res.add(rs.shardIds.len.uint8) + for shardId in rs.shardIds: + res.add(shardId.toBytesBE()) + + ok(res) + +func fromIndicesList*(buf: seq[byte]): Result[RelayShards, string] = + if buf.len < 3: + return + err("insufficient data: expected at least 3 bytes, got " & $buf.len & " bytes") + + let clusterId = uint16.fromBytesBE(buf[0 .. 
1]) + let length = int(buf[2]) + + if buf.len != 3 + 2 * length: + return err( + "invalid data: `length` field is " & $length & " but " & $buf.len & + " bytes were provided" + ) + + var shardIds: seq[uint16] + for i in 0 ..< length: + shardIds.add(uint16.fromBytesBE(buf[3 + 2 * i ..< 5 + 2 * i])) + + ok(RelayShards(clusterId: clusterId, shardIds: shardIds)) + +func toBitVector*(rs: RelayShards): seq[byte] = + ## The value is comprised of a two-byte cluster id in network byte + ## order concatenated with a 128-byte wide bit vector. The bit vector + ## indicates which shard ids of the respective cluster id the node is part + ## of. The right-most bit in the bit vector represents shard id 0, the left-most + ## bit represents shard id 1023. + var res: seq[byte] + res.add(rs.clusterId.toBytesBE()) + + var vec = newSeq[byte](128) + for shardId in rs.shardIds: + vec[shardId div 8].setBit(shardId mod 8) + + res.add(vec) + + res + +func fromBitVector(buf: seq[byte]): EnrResult[RelayShards] = + if buf.len != 130: + return err("invalid data: expected 130 bytes") + + let clusterId = uint16.fromBytesBE(buf[0 .. 
1]) + var shardIds: seq[uint16] + + for i in 0u16 ..< 128u16: + for j in 0u16 ..< 8u16: + if not buf[2 + i].testBit(j): + continue + + shardIds.add(j + 8 * i) + + ok(RelayShards(clusterId: clusterId, shardIds: shardIds)) + +func withWakuRelayShardingIndicesList*( + builder: var EnrBuilder, rs: RelayShards +): EnrResult[void] = + let value = ?rs.toIndicesList() + builder.addFieldPair(ShardingIndicesListEnrField, value) + ok() + +func withWakuRelayShardingBitVector*( + builder: var EnrBuilder, rs: RelayShards +): EnrResult[void] = + let value = rs.toBitVector() + builder.addFieldPair(ShardingBitVectorEnrField, value) + ok() + +func withWakuRelaySharding*(builder: var EnrBuilder, rs: RelayShards): EnrResult[void] = + if rs.shardIds.len >= ShardingIndicesListMaxLength: + builder.withWakuRelayShardingBitVector(rs) + else: + builder.withWakuRelayShardingIndicesList(rs) + +func withShardedTopics*( + builder: var EnrBuilder, topics: seq[string] +): Result[void, string] = + let relayShardOp = topicsToRelayShards(topics).valueOr: + return err("building ENR with relay sharding failed: " & $error) + + let relayShard = relayShardOp.valueOr: + return ok() + + builder.withWakuRelaySharding(relayShard).isOkOr: + return err($error) + + return ok() + +# ENR record accessors (e.g., Record, TypedRecord, etc.) 
+ +proc relayShardingIndicesList*(record: TypedRecord): Option[RelayShards] = + let field = record.tryGet(ShardingIndicesListEnrField, seq[byte]).valueOr: + return none(RelayShards) + + let indexList = fromIndicesList(field).valueOr: + debug "invalid shards list", error = error + return none(RelayShards) + + some(indexList) + +proc relayShardingBitVector*(record: TypedRecord): Option[RelayShards] = + let field = record.tryGet(ShardingBitVectorEnrField, seq[byte]).valueOr: + return none(RelayShards) + + let bitVector = fromBitVector(field).valueOr: + debug "invalid shards bit vector", error = error + return none(RelayShards) + + some(bitVector) + +proc relaySharding*(record: TypedRecord): Option[RelayShards] = + let indexList = record.relayShardingIndicesList().valueOr: + return record.relayShardingBitVector() + + return some(indexList) + +## Utils + +proc containsShard*(r: Record, clusterId, shardId: uint16): bool = + if shardId > MaxShardIndex: + return false + + let record = r.toTyped().valueOr: + trace "invalid ENR record", error = error + return false + + let rs = record.relaySharding().valueOr: + return false + + rs.contains(clusterId, shardId) + +proc containsShard*(r: Record, shard: RelayShard): bool = + return containsShard(r, shard.clusterId, shard.shardId) + +proc containsShard*(r: Record, topic: PubsubTopic): bool = + let parseRes = RelayShard.parse(topic) + if parseRes.isErr(): + debug "invalid static sharding topic", topic = topic, error = parseRes.error + return false + + containsShard(r, parseRes.value) + +proc isClusterMismatched*(record: Record, clusterId: uint16): bool = + ## Check the ENR sharding info for matching cluster id + if (let typedRecord = record.toTyped(); typedRecord.isOk()): + if (let relayShard = typedRecord.get().relaySharding(); relayShard.isSome()): + return relayShard.get().clusterId != clusterId + + return false diff --git a/third-party/nwaku/waku/waku_filter_v2.nim b/third-party/nwaku/waku/waku_filter_v2.nim new file mode 
100644 index 0000000..3f1d010 --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2.nim @@ -0,0 +1,4 @@ +import + ./waku_filter_v2/common, ./waku_filter_v2/protocol, ./waku_filter_v2/subscriptions + +export common, protocol, subscriptions diff --git a/third-party/nwaku/waku/waku_filter_v2/client.nim b/third-party/nwaku/waku/waku_filter_v2/client.nim new file mode 100644 index 0000000..2ad275a --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2/client.nim @@ -0,0 +1,215 @@ +## Waku Filter client for subscribing and receiving filtered messages + +{.push raises: [].} + +import + std/options, + chronicles, + chronos, + libp2p/protocols/protocol, + bearssl/rand, + stew/byteutils +import + ../node/peer_manager, + ../node/delivery_monitor/subscriptions_observer, + ../waku_core, + ./common, + ./protocol_metrics, + ./rpc_codec, + ./rpc + +logScope: + topics = "waku filter client" + +type WakuFilterClient* = ref object of LPProtocol + rng: ref HmacDrbgContext + peerManager: PeerManager + pushHandlers: seq[FilterPushHandler] + subscrObservers: seq[SubscriptionObserver] + +func generateRequestId(rng: ref HmacDrbgContext): string = + var bytes: array[10, byte] + hmacDrbgGenerate(rng[], bytes) + return toHex(bytes) + +proc addSubscrObserver*(wfc: WakuFilterClient, obs: SubscriptionObserver) = + wfc.subscrObservers.add(obs) + +proc sendSubscribeRequest( + wfc: WakuFilterClient, + servicePeer: RemotePeerInfo, + filterSubscribeRequest: FilterSubscribeRequest, +): Future[FilterSubscribeResult] {.async: (raises: []).} = + trace "Sending filter subscribe request", + peerId = servicePeer.peerId, filterSubscribeRequest + + var connOpt: Option[Connection] + try: + connOpt = await wfc.peerManager.dialPeer(servicePeer, WakuFilterSubscribeCodec) + if connOpt.isNone(): + trace "Failed to dial filter service peer", servicePeer + waku_filter_errors.inc(labelValues = [dialFailure]) + return err(FilterSubscribeError.peerDialFailure($servicePeer)) + except CatchableError: + let errMsg = 
"failed to dialPeer: " & getCurrentExceptionMsg() + trace "failed to dialPeer", error = getCurrentExceptionMsg() + waku_filter_errors.inc(labelValues = [errMsg]) + return err(FilterSubscribeError.badResponse(errMsg)) + + let connection = connOpt.get() + + try: + await connection.writeLP(filterSubscribeRequest.encode().buffer) + except CatchableError: + let errMsg = + "exception in waku_filter_v2 client writeLP: " & getCurrentExceptionMsg() + trace "exception in waku_filter_v2 client writeLP", error = getCurrentExceptionMsg() + waku_filter_errors.inc(labelValues = [errMsg]) + return err(FilterSubscribeError.badResponse(errMsg)) + + var respBuf: seq[byte] + try: + respBuf = await connection.readLp(DefaultMaxSubscribeResponseSize) + except CatchableError: + let errMsg = + "exception in waku_filter_v2 client readLp: " & getCurrentExceptionMsg() + trace "exception in waku_filter_v2 client readLp", error = getCurrentExceptionMsg() + waku_filter_errors.inc(labelValues = [errMsg]) + return err(FilterSubscribeError.badResponse(errMsg)) + + let respDecodeRes = FilterSubscribeResponse.decode(respBuf) + if respDecodeRes.isErr(): + trace "Failed to decode filter subscribe response", servicePeer + waku_filter_errors.inc(labelValues = [decodeRpcFailure]) + return err(FilterSubscribeError.badResponse(decodeRpcFailure)) + + let response = respDecodeRes.get() + + # DOS protection rate limit checks does not know about request id + if response.statusCode != FilterSubscribeErrorKind.TOO_MANY_REQUESTS.uint32 and + response.requestId != filterSubscribeRequest.requestId: + trace "Filter subscribe response requestId mismatch", servicePeer, response + waku_filter_errors.inc(labelValues = [requestIdMismatch]) + return err(FilterSubscribeError.badResponse(requestIdMismatch)) + + if response.statusCode != 200: + trace "Filter subscribe error response", servicePeer, response + waku_filter_errors.inc(labelValues = [errorResponse]) + let cause = + if response.statusDesc.isSome(): + 
response.statusDesc.get() + else: + "filter subscribe error" + return err(FilterSubscribeError.parse(response.statusCode, cause = cause)) + + return ok() + +proc ping*( + wfc: WakuFilterClient, servicePeer: RemotePeerInfo +): Future[FilterSubscribeResult] {.async.} = + debug "sending ping", servicePeer = shortLog($servicePeer) + let requestId = generateRequestId(wfc.rng) + let filterSubscribeRequest = FilterSubscribeRequest.ping(requestId) + + return await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) + +proc subscribe*( + wfc: WakuFilterClient, + servicePeer: RemotePeerInfo, + pubsubTopic: PubsubTopic, + contentTopics: ContentTopic | seq[ContentTopic], +): Future[FilterSubscribeResult] {.async: (raises: []).} = + var contentTopicSeq: seq[ContentTopic] + when contentTopics is seq[ContentTopic]: + contentTopicSeq = contentTopics + else: + contentTopicSeq = @[contentTopics] + + let requestId = generateRequestId(wfc.rng) + let filterSubscribeRequest = FilterSubscribeRequest.subscribe( + requestId = requestId, pubsubTopic = pubsubTopic, contentTopics = contentTopicSeq + ) + + ?await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) + + for obs in wfc.subscrObservers: + obs.onSubscribe(pubSubTopic, contentTopicSeq) + + return ok() + +proc unsubscribe*( + wfc: WakuFilterClient, + servicePeer: RemotePeerInfo, + pubsubTopic: PubsubTopic, + contentTopics: ContentTopic | seq[ContentTopic], +): Future[FilterSubscribeResult] {.async: (raises: []).} = + var contentTopicSeq: seq[ContentTopic] + when contentTopics is seq[ContentTopic]: + contentTopicSeq = contentTopics + else: + contentTopicSeq = @[contentTopics] + + let requestId = generateRequestId(wfc.rng) + let filterSubscribeRequest = FilterSubscribeRequest.unsubscribe( + requestId = requestId, pubsubTopic = pubsubTopic, contentTopics = contentTopicSeq + ) + + ?await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) + + for obs in wfc.subscrObservers: + obs.onUnsubscribe(pubSubTopic, 
contentTopicSeq) + + return ok() + +proc unsubscribeAll*( + wfc: WakuFilterClient, servicePeer: RemotePeerInfo +): Future[FilterSubscribeResult] {.async: (raises: []).} = + let requestId = generateRequestId(wfc.rng) + let filterSubscribeRequest = + FilterSubscribeRequest.unsubscribeAll(requestId = requestId) + + return await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest) + +proc registerPushHandler*(wfc: WakuFilterClient, handler: FilterPushHandler) = + wfc.pushHandlers.add(handler) + +proc initProtocolHandler(wfc: WakuFilterClient) = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + ## Notice that the client component is acting as a server of WakuFilterPushCodec messages + while not conn.atEof(): + var buf: seq[byte] + try: + buf = await conn.readLp(int(DefaultMaxPushSize)) + except CancelledError, LPStreamError: + error "error while reading conn", error = getCurrentExceptionMsg() + + let msgPush = MessagePush.decode(buf).valueOr: + error "Failed to decode message push", peerId = conn.peerId, error = $error + waku_filter_errors.inc(labelValues = [decodeRpcFailure]) + return + + let msg_hash = + computeMessageHash(msgPush.pubsubTopic, msgPush.wakuMessage).to0xHex() + + debug "Received message push", + peerId = conn.peerId, + msg_hash, + payload = shortLog(msgPush.wakuMessage.payload), + pubsubTopic = msgPush.pubsubTopic, + content_topic = msgPush.wakuMessage.contentTopic, + conn + + for handler in wfc.pushHandlers: + asyncSpawn handler(msgPush.pubsubTopic, msgPush.wakuMessage) + + # Protocol specifies no response for now + + wfc.handler = handler + wfc.codec = WakuFilterPushCodec + +proc new*( + T: type WakuFilterClient, peerManager: PeerManager, rng: ref HmacDrbgContext +): T = + let wfc = WakuFilterClient(rng: rng, peerManager: peerManager, pushHandlers: @[]) + wfc.initProtocolHandler() + wfc diff --git a/third-party/nwaku/waku/waku_filter_v2/common.nim b/third-party/nwaku/waku/waku_filter_v2/common.nim new 
file mode 100644 index 0000000..1f203c2 --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2/common.nim @@ -0,0 +1,94 @@ +{.push raises: [].} + +import results + +from ../waku_core/codecs import WakuFilterSubscribeCodec, WakuFilterPushCodec +export WakuFilterSubscribeCodec, WakuFilterPushCodec + +type + FilterSubscribeErrorKind* {.pure.} = enum + UNKNOWN = uint32(000) + BAD_RESPONSE = uint32(300) + BAD_REQUEST = uint32(400) + NOT_FOUND = uint32(404) + TOO_MANY_REQUESTS = uint32(429) + SERVICE_UNAVAILABLE = uint32(503) + PEER_DIAL_FAILURE = uint32(504) + + FilterSubscribeError* = object + case kind*: FilterSubscribeErrorKind + of PEER_DIAL_FAILURE: + address*: string + of BAD_RESPONSE, BAD_REQUEST, NOT_FOUND, TOO_MANY_REQUESTS, SERVICE_UNAVAILABLE: + cause*: string + else: + discard + + FilterSubscribeResult* = Result[void, FilterSubscribeError] + +# Convenience functions +proc peerDialFailure*( + T: type FilterSubscribeError, address: string +): FilterSubscribeError = + FilterSubscribeError( + kind: FilterSubscribeErrorKind.PEER_DIAL_FAILURE, address: address + ) + +proc badResponse*( + T: type FilterSubscribeError, cause = "bad response" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.BAD_RESPONSE, cause: cause) + +proc badRequest*( + T: type FilterSubscribeError, cause = "bad request" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.BAD_REQUEST, cause: cause) + +proc notFound*( + T: type FilterSubscribeError, cause = "peer has no subscriptions" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.NOT_FOUND, cause: cause) + +proc tooManyRequests*( + T: type FilterSubscribeError, cause = "too many requests" +): FilterSubscribeError = + FilterSubscribeError(kind: FilterSubscribeErrorKind.TOO_MANY_REQUESTS, cause: cause) + +proc serviceUnavailable*( + T: type FilterSubscribeError, cause = "service unavailable" +): FilterSubscribeError = + FilterSubscribeError(kind: 
FilterSubscribeErrorKind.SERVICE_UNAVAILABLE, cause: cause) + +proc parse*(T: type FilterSubscribeErrorKind, kind: uint32): T = + case kind + of 000, 200, 300, 400, 404, 429, 503: + cast[FilterSubscribeErrorKind](kind) + else: + FilterSubscribeErrorKind.UNKNOWN + +proc parse*(T: type FilterSubscribeError, kind: uint32, cause = "", address = ""): T = + let kind = FilterSubscribeErrorKind.parse(kind) + case kind + of PEER_DIAL_FAILURE: + FilterSubscribeError(kind: kind, address: address) + of BAD_RESPONSE, BAD_REQUEST, NOT_FOUND, TOO_MANY_REQUESTS, SERVICE_UNAVAILABLE: + FilterSubscribeError(kind: kind, cause: cause) + else: + FilterSubscribeError(kind: kind) + +proc `$`*(err: FilterSubscribeError): string = + case err.kind + of FilterSubscribeErrorKind.PEER_DIAL_FAILURE: + "PEER_DIAL_FAILURE: " & err.address + of FilterSubscribeErrorKind.BAD_RESPONSE: + "BAD_RESPONSE: " & err.cause + of FilterSubscribeErrorKind.BAD_REQUEST: + "BAD_REQUEST: " & err.cause + of FilterSubscribeErrorKind.NOT_FOUND: + "NOT_FOUND: " & err.cause + of FilterSubscribeErrorKind.TOO_MANY_REQUESTS: + "TOO_MANY_REQUESTS: " & err.cause + of FilterSubscribeErrorKind.SERVICE_UNAVAILABLE: + "SERVICE_UNAVAILABLE: " & err.cause + of FilterSubscribeErrorKind.UNKNOWN: + "UNKNOWN" diff --git a/third-party/nwaku/waku/waku_filter_v2/protocol.nim b/third-party/nwaku/waku/waku_filter_v2/protocol.nim new file mode 100644 index 0000000..8b419c3 --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2/protocol.nim @@ -0,0 +1,405 @@ +## Waku Filter protocol for subscribing and filtering messages + +{.push raises: [].} + +import + std/[options, sequtils, sets, tables], + stew/byteutils, + chronicles, + chronos, + libp2p/peerid, + libp2p/protocols/protocol, + libp2p/protocols/pubsub/timedcache +import + ../node/peer_manager, + ../waku_core, + ../common/rate_limit/per_peer_limiter, + ./[common, protocol_metrics, rpc_codec, rpc, subscriptions] + +logScope: + topics = "waku filter" + +const 
MaxContentTopicsPerRequest* = 100 + +type WakuFilter* = ref object of LPProtocol + subscriptions*: FilterSubscriptions + # a mapping of peer ids to a sequence of filter criteria + peerManager: PeerManager + messageCache: TimedCache[string] + peerRequestRateLimiter*: PerPeerRateLimiter + subscriptionsManagerFut: Future[void] + peerConnections: Table[PeerId, Connection] + +proc pingSubscriber(wf: WakuFilter, peerId: PeerID): FilterSubscribeResult = + debug "pinging subscriber", peerId = peerId + + if not wf.subscriptions.isSubscribed(peerId): + error "pinging peer has no subscriptions", peerId = peerId + return err(FilterSubscribeError.notFound()) + + wf.subscriptions.refreshSubscription(peerId) + + ok() + +proc setSubscriptionTimeout*(wf: WakuFilter, newTimeout: Duration) = + wf.subscriptions.setSubscriptionTimeout(newTimeout) + +proc subscribe( + wf: WakuFilter, + peerId: PeerID, + pubsubTopic: Option[PubsubTopic], + contentTopics: seq[ContentTopic], +): Future[FilterSubscribeResult] {.async.} = + # TODO: check if this condition is valid??? 
+ if pubsubTopic.isNone() or contentTopics.len == 0: + error "pubsubTopic and contentTopics must be specified", peerId = peerId + return err( + FilterSubscribeError.badRequest("pubsubTopic and contentTopics must be specified") + ) + + if contentTopics.len > MaxContentTopicsPerRequest: + error "exceeds maximum content topics", peerId = peerId + return err( + FilterSubscribeError.badRequest( + "exceeds maximum content topics: " & $MaxContentTopicsPerRequest + ) + ) + + let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it))) + + debug "subscribing peer to filter criteria", + peerId = peerId, filterCriteria = filterCriteria + + (await wf.subscriptions.addSubscription(peerId, filterCriteria)).isOkOr: + return err(FilterSubscribeError.serviceUnavailable(error)) + + debug "correct subscription", peerId = peerId + + ok() + +proc unsubscribe( + wf: WakuFilter, + peerId: PeerID, + pubsubTopic: Option[PubsubTopic], + contentTopics: seq[ContentTopic], +): FilterSubscribeResult = + if pubsubTopic.isNone() or contentTopics.len == 0: + error "pubsubTopic and contentTopics must be specified", peerId = peerId + return err( + FilterSubscribeError.badRequest("pubsubTopic and contentTopics must be specified") + ) + + if contentTopics.len > MaxContentTopicsPerRequest: + error "exceeds maximum content topics", peerId = peerId + return err( + FilterSubscribeError.badRequest( + "exceeds maximum content topics: " & $MaxContentTopicsPerRequest + ) + ) + + let filterCriteria = toHashSet(contentTopics.mapIt((pubsubTopic.get(), it))) + + debug "unsubscribing peer from filter criteria", + peerId = peerId, filterCriteria = filterCriteria + + wf.subscriptions.removeSubscription(peerId, filterCriteria).isOkOr: + error "failed to remove subscription", error = $error + return err(FilterSubscribeError.notFound()) + + ## Note: do not remove from peerRequestRateLimiter to prevent trick with subscribe/unsubscribe loop + ## We remove only if peerManager removes the peer + debug 
"correct unsubscription", peerId = peerId + + ok() + +proc unsubscribeAll( + wf: WakuFilter, peerId: PeerID +): Future[FilterSubscribeResult] {.async.} = + if not wf.subscriptions.isSubscribed(peerId): + debug "unsubscribing peer has no subscriptions", peerId = peerId + return err(FilterSubscribeError.notFound()) + + debug "removing peer subscription", peerId = peerId + await wf.subscriptions.removePeer(peerId) + wf.subscriptions.cleanUp() + + ok() + +proc handleSubscribeRequest*( + wf: WakuFilter, peerId: PeerId, request: FilterSubscribeRequest +): Future[FilterSubscribeResponse] {.async.} = + info "received filter subscribe request", peerId = peerId, request = request + waku_filter_requests.inc(labelValues = [$request.filterSubscribeType]) + + var subscribeResult: FilterSubscribeResult + + let requestStartTime = Moment.now() + + block: + ## Handle subscribe request + case request.filterSubscribeType + of FilterSubscribeType.SUBSCRIBER_PING: + subscribeResult = wf.pingSubscriber(peerId) + of FilterSubscribeType.SUBSCRIBE: + subscribeResult = + await wf.subscribe(peerId, request.pubsubTopic, request.contentTopics) + of FilterSubscribeType.UNSUBSCRIBE: + subscribeResult = + wf.unsubscribe(peerId, request.pubsubTopic, request.contentTopics) + of FilterSubscribeType.UNSUBSCRIBE_ALL: + subscribeResult = await wf.unsubscribeAll(peerId) + + let + requestDuration = Moment.now() - requestStartTime + requestDurationSec = requestDuration.milliseconds.float / 1000 + # Duration in seconds with millisecond precision floating point + waku_filter_request_duration_seconds.observe( + requestDurationSec, labelValues = [$request.filterSubscribeType] + ) + + if subscribeResult.isErr(): + error "subscription request error", peerId = shortLog(peerId), request = request + return FilterSubscribeResponse( + requestId: request.requestId, + statusCode: subscribeResult.error.kind.uint32, + statusDesc: some($subscribeResult.error), + ) + else: + return 
FilterSubscribeResponse.ok(request.requestId) + +proc pushToPeer( + wf: WakuFilter, peerId: PeerId, buffer: seq[byte] +): Future[Result[void, string]] {.async.} = + debug "pushing message to subscribed peer", peerId = shortLog(peerId) + + let stream = ( + await wf.peerManager.getStreamByPeerIdAndProtocol(peerId, WakuFilterPushCodec) + ).valueOr: + error "pushToPeer failed", error + return err("pushToPeer failed: " & $error) + + await stream.writeLp(buffer) + + debug "published successful", peerId = shortLog(peerId), stream + waku_service_network_bytes.inc( + amount = buffer.len().int64, labelValues = [WakuFilterPushCodec, "out"] + ) + + return ok() + +proc pushToPeers( + wf: WakuFilter, peers: seq[PeerId], messagePush: MessagePush +) {.async.} = + let targetPeerIds = peers.mapIt(shortLog(it)) + let msgHash = + messagePush.pubsubTopic.computeMessageHash(messagePush.wakuMessage).to0xHex() + + ## it's also refresh expire of msghash, that's why update cache every time, even if it has a value. 
+ if wf.messageCache.put(msgHash, Moment.now()): + error "duplicate message found, not-pushing message to subscribed peers", + pubsubTopic = messagePush.pubsubTopic, + contentTopic = messagePush.wakuMessage.contentTopic, + payload = shortLog(messagePush.wakuMessage.payload), + target_peer_ids = targetPeerIds, + msg_hash = msgHash + else: + notice "pushing message to subscribed peers", + pubsubTopic = messagePush.pubsubTopic, + contentTopic = messagePush.wakuMessage.contentTopic, + payload = shortLog(messagePush.wakuMessage.payload), + target_peer_ids = targetPeerIds, + msg_hash = msgHash + + let bufferToPublish = messagePush.encode().buffer + var pushFuts: seq[Future[Result[void, string]]] + + for peerId in peers: + let pushFut = wf.pushToPeer(peerId, bufferToPublish) + pushFuts.add(pushFut) + + await allFutures(pushFuts) + +proc maintainSubscriptions*(wf: WakuFilter) {.async.} = + debug "maintaining subscriptions" + + ## Remove subscriptions for peers that have been removed from peer store + var peersToRemove: seq[PeerId] + for peerId in wf.subscriptions.peersSubscribed.keys: + if not wf.peerManager.switch.peerStore.hasPeer(peerId, WakuFilterPushCodec): + debug "peer has been removed from peer store, we will remove subscription", + peerId = peerId + peersToRemove.add(peerId) + + if peersToRemove.len > 0: + await wf.subscriptions.removePeers(peersToRemove) + wf.peerRequestRateLimiter.unregister(peersToRemove) + + wf.subscriptions.cleanUp() + + ## Periodic report of number of subscriptions + waku_filter_subscriptions.set(wf.subscriptions.peersSubscribed.len.float64) + +const MessagePushTimeout = 20.seconds +proc handleMessage*( + wf: WakuFilter, pubsubTopic: PubsubTopic, message: WakuMessage +) {.async.} = + let msgHash = computeMessageHash(pubsubTopic, message).to0xHex() + + debug "handling message", pubsubTopic = pubsubTopic, msg_hash = msgHash + + let handleMessageStartTime = Moment.now() + + block: + ## Find subscribers and push message to them + let 
subscribedPeers = + wf.subscriptions.findSubscribedPeers(pubsubTopic, message.contentTopic) + if subscribedPeers.len == 0: + error "no subscribed peers found", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + msg_hash = msgHash + return + + let messagePush = MessagePush(pubsubTopic: pubsubTopic, wakuMessage: message) + + if not await wf.pushToPeers(subscribedPeers, messagePush).withTimeout( + MessagePushTimeout + ): + error "timed out pushing message to peers", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + msg_hash = msgHash, + numPeers = subscribedPeers.len, + target_peer_ids = subscribedPeers.mapIt(shortLog(it)) + waku_filter_errors.inc(labelValues = [pushTimeoutFailure]) + else: + notice "pushed message succesfully to all subscribers", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + msg_hash = msgHash, + numPeers = subscribedPeers.len, + target_peer_ids = subscribedPeers.mapIt(shortLog(it)) + + let + handleMessageDuration = Moment.now() - handleMessageStartTime + handleMessageDurationSec = handleMessageDuration.milliseconds.float / 1000 + # Duration in seconds with millisecond precision floating point + waku_filter_handle_message_duration_seconds.observe(handleMessageDurationSec) + +proc initProtocolHandler(wf: WakuFilter) = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + debug "filter subscribe request handler triggered", + peerId = shortLog(conn.peerId), conn + + var response: FilterSubscribeResponse + + wf.peerRequestRateLimiter.checkUsageLimit(WakuFilterSubscribeCodec, conn): + var buf: seq[byte] + try: + buf = await conn.readLp(int(DefaultMaxSubscribeSize)) + except LPStreamError: + error "failed to read stream in readLp", + remote_peer_id = conn.peerId, error = getCurrentExceptionMsg() + return + + waku_service_network_bytes.inc( + amount = buf.len().int64, labelValues = [WakuFilterSubscribeCodec, "in"] + ) + + let decodeRes = 
FilterSubscribeRequest.decode(buf) + if decodeRes.isErr(): + error "failed to decode filter subscribe request", + peer_id = conn.peerId, err = decodeRes.error + waku_filter_errors.inc(labelValues = [decodeRpcFailure]) + return + + let request = decodeRes.value #TODO: toAPI() split here + + try: + response = await wf.handleSubscribeRequest(conn.peerId, request) + except CatchableError: + error "handleSubscribeRequest failed", + remote_peer_id = conn.peerId, err = getCurrentExceptionMsg() + return + + debug "sending filter subscribe response", + peer_id = shortLog(conn.peerId), response = response + do: + debug "filter request rejected due rate limit exceeded", + peerId = shortLog(conn.peerId), limit = $wf.peerRequestRateLimiter.setting + response = FilterSubscribeResponse( + requestId: "N/A", + statusCode: FilterSubscribeErrorKind.TOO_MANY_REQUESTS.uint32, + statusDesc: some("filter request rejected due rate limit exceeded"), + ) + + try: + await conn.writeLp(response.encode().buffer) #TODO: toRPC() separation here + except LPStreamError: + error "failed to write stream in writeLp", + remote_peer_id = conn.peerId, error = getCurrentExceptionMsg() + return + + wf.handler = handler + wf.codec = WakuFilterSubscribeCodec + +proc onPeerEventHandler(wf: WakuFilter, peerId: PeerId, event: PeerEvent) {.async.} = + ## These events are dispatched nim-libp2p, triggerPeerEvents proc + case event.kind + of Left: + ## Drop the previous known connection reference + wf.peerConnections.del(peerId) + else: + discard + +proc new*( + T: type WakuFilter, + peerManager: PeerManager, + subscriptionTimeout: Duration = DefaultSubscriptionTimeToLiveSec, + maxFilterPeers: uint32 = MaxFilterPeers, + maxFilterCriteriaPerPeer: uint32 = MaxFilterCriteriaPerPeer, + messageCacheTTL: Duration = MessageCacheTTL, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): T = + let wf = WakuFilter( + subscriptions: FilterSubscriptions.new( + subscriptionTimeout, maxFilterPeers, 
maxFilterCriteriaPerPeer + ), + peerManager: peerManager, + messageCache: init(TimedCache[string], messageCacheTTL), + peerRequestRateLimiter: PerPeerRateLimiter(setting: rateLimitSetting), + ) + + proc peerEventHandler( + peerId: PeerId, event: PeerEvent + ): Future[void] {.gcsafe, async: (raises: [CancelledError]).} = + try: + await wf.onPeerEventHandler(peerId, event) + except CatchableError: + error "onPeerEventHandler failed", + remote_peer_id = shortLog(peerId), + event = event, + error = getCurrentExceptionMsg() + + peerManager.addExtPeerEventHandler(peerEventHandler, PeerEventKind.Left) + + wf.initProtocolHandler() + setServiceLimitMetric(WakuFilterSubscribeCodec, rateLimitSetting) + return wf + +proc periodicSubscriptionsMaintenance(wf: WakuFilter) {.async.} = + const MaintainSubscriptionsInterval = 1.minutes + debug "starting to maintain subscriptions" + while true: + await wf.maintainSubscriptions() + await sleepAsync(MaintainSubscriptionsInterval) + +proc start*(wf: WakuFilter) {.async.} = + debug "starting filter protocol" + await procCall LPProtocol(wf).start() + wf.subscriptionsManagerFut = wf.periodicSubscriptionsMaintenance() + +proc stop*(wf: WakuFilter) {.async.} = + debug "stopping filter protocol" + await wf.subscriptionsManagerFut.cancelAndWait() + await procCall LPProtocol(wf).stop() diff --git a/third-party/nwaku/waku/waku_filter_v2/protocol_metrics.nim b/third-party/nwaku/waku/waku_filter_v2/protocol_metrics.nim new file mode 100644 index 0000000..9045629 --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2/protocol_metrics.nim @@ -0,0 +1,26 @@ +{.push raises: [].} + +import metrics + +export metrics + +declarePublicCounter waku_filter_errors, "number of filter protocol errors", ["type"] +declarePublicCounter waku_filter_requests, + "number of filter subscribe requests received", ["type"] +declarePublicGauge waku_filter_subscriptions, "number of subscribed filter clients" +declarePublicHistogram waku_filter_request_duration_seconds, + 
"duration of filter subscribe requests", ["type"] +declarePublicHistogram waku_filter_handle_message_duration_seconds, + "duration to push message to filter subscribers", + buckets = [ + 0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0, + 15.0, 20.0, 30.0, Inf, + ] + +# Error types (metric label values) +const + dialFailure* = "dial_failure" + decodeRpcFailure* = "decode_rpc_failure" + requestIdMismatch* = "request_id_mismatch" + errorResponse* = "error_response" + pushTimeoutFailure* = "push_timeout_failure" diff --git a/third-party/nwaku/waku/waku_filter_v2/rpc.nim b/third-party/nwaku/waku/waku_filter_v2/rpc.nim new file mode 100644 index 0000000..a81a7bd --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2/rpc.nim @@ -0,0 +1,96 @@ +{.push raises: [].} + +import json_serialization, std/options +import ../waku_core + +type + FilterSubscribeType* {.pure.} = enum + # Indicates the type of request from client to service node + SUBSCRIBER_PING = uint32(0) + SUBSCRIBE = uint32(1) + UNSUBSCRIBE = uint32(2) + UNSUBSCRIBE_ALL = uint32(3) + + FilterSubscribeRequest* = object # Request from client to service node + requestId*: string + filterSubscribeType*: FilterSubscribeType + pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + + FilterSubscribeResponse* = object # Response from service node to client + requestId*: string + statusCode*: uint32 + statusDesc*: Option[string] + + MessagePush* = object # Message pushed from service node to client + wakuMessage*: WakuMessage + pubsubTopic*: string + +# Convenience functions + +proc ping*(T: type FilterSubscribeRequest, requestId: string): T = + FilterSubscribeRequest(requestId: requestId, filterSubscribeType: SUBSCRIBER_PING) + +proc subscribe*( + T: type FilterSubscribeRequest, + requestId: string, + pubsubTopic: PubsubTopic, + contentTopics: seq[ContentTopic], +): T = + FilterSubscribeRequest( + requestId: requestId, + filterSubscribeType: SUBSCRIBE, + pubsubTopic: 
some(pubsubTopic), + contentTopics: contentTopics, + ) + +proc unsubscribe*( + T: type FilterSubscribeRequest, + requestId: string, + pubsubTopic: PubsubTopic, + contentTopics: seq[ContentTopic], +): T = + FilterSubscribeRequest( + requestId: requestId, + filterSubscribeType: UNSUBSCRIBE, + pubsubTopic: some(pubsubTopic), + contentTopics: contentTopics, + ) + +proc unsubscribeAll*(T: type FilterSubscribeRequest, requestId: string): T = + FilterSubscribeRequest(requestId: requestId, filterSubscribeType: UNSUBSCRIBE_ALL) + +proc ok*(T: type FilterSubscribeResponse, requestId: string, desc = "OK"): T = + FilterSubscribeResponse(requestId: requestId, statusCode: 200, statusDesc: some(desc)) + +proc `$`*(err: FilterSubscribeResponse): string = + "FilterSubscribeResponse of req:" & err.requestId & " [" & $err.statusCode & "] " & + $err.statusDesc + +proc `$`*(req: FilterSubscribeRequest): string = + "FilterSubscribeRequest of req:" & req.requestId & " [" & $req.filterSubscribeType & + "] pubsubTopic:" & $req.pubsubTopic & " contentTopics:" & $req.contentTopics + +proc `$`*(t: FilterSubscribeType): string = + result = + case t + of SUBSCRIBER_PING: "SUBSCRIBER_PING" + of SUBSCRIBE: "SUBSCRIBE" + of UNSUBSCRIBE: "UNSUBSCRIBE" + of UNSUBSCRIBE_ALL: "UNSUBSCRIBE_ALL" + +proc writeValue*( + writer: var JsonWriter, value: FilterSubscribeRequest +) {.inline, raises: [IOError].} = + writer.beginRecord() + writer.writeField("requestId", value.requestId) + writer.writeField("type", value.filterSubscribeType) + if value.pubsubTopic.isSome: + writer.writeField("pubsubTopic", value.pubsubTopic) + if value.contentTopics.len > 0: + writer.writeField("contentTopics", value.contentTopics) + writer.endRecord() + +proc `$`*(self: MessagePush): string = + let msg_hash = computeMessageHash(self.pubsubTopic, self.wakuMessage) + return "msg_hash: " & shortLog(msg_hash) & " pubsubTopic: " & self.pubsubTopic diff --git a/third-party/nwaku/waku/waku_filter_v2/rpc_codec.nim 
b/third-party/nwaku/waku/waku_filter_v2/rpc_codec.nim new file mode 100644 index 0000000..94bdb36 --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2/rpc_codec.nim @@ -0,0 +1,98 @@ +{.push raises: [].} + +import std/options +import ../common/protobuf, ../waku_core, ./rpc + +const + DefaultMaxSubscribeSize* = 10 * DefaultMaxWakuMessageSize + 64 * 1024 + # We add a 64kB safety buffer for protocol overhead + DefaultMaxSubscribeResponseSize* = 64 * 1024 # Responses are small. 64kB safety buffer. + DefaultMaxPushSize* = 10 * DefaultMaxWakuMessageSize + 64 * 1024 + # We add a 64kB safety buffer for protocol overhead + +proc encode*(rpc: FilterSubscribeRequest): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(2, uint32(ord(rpc.filterSubscribeType))) + + pb.write3(10, rpc.pubsubTopic) + + for contentTopic in rpc.contentTopics: + pb.write3(11, contentTopic) + + pb + +proc decode*(T: type FilterSubscribeRequest, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = FilterSubscribeRequest() + + if not ?pb.getField(1, rpc.requestId): + return err(ProtobufError.missingRequiredField("request_id")) + + var filterSubscribeType: uint32 + if not ?pb.getField(2, filterSubscribeType): + # Revert to ping by default if not set + rpc.filterSubscribeType = FilterSubscribeType.SUBSCRIBER_PING + else: + rpc.filterSubscribeType = FilterSubscribeType(filterSubscribeType) + + var pubsubTopic: PubsubTopic + if not ?pb.getField(10, pubsubTopic): + rpc.pubsubTopic = none(PubsubTopic) + else: + rpc.pubsubTopic = some(pubsubTopic) + + discard ?pb.getRepeatedField(11, rpc.contentTopics) + + ok(rpc) + +proc encode*(rpc: FilterSubscribeResponse): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(10, rpc.statusCode) + pb.write3(11, rpc.statusDesc) + + pb + +proc decode*(T: type FilterSubscribeResponse, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = 
FilterSubscribeResponse() + + if not ?pb.getField(1, rpc.requestId): + return err(ProtobufError.missingRequiredField("request_id")) + + if not ?pb.getField(10, rpc.statusCode): + return err(ProtobufError.missingRequiredField("status_code")) + + var statusDesc: string + if not ?pb.getField(11, statusDesc): + rpc.statusDesc = none(string) + else: + rpc.statusDesc = some(statusDesc) + + ok(rpc) + +proc encode*(rpc: MessagePush): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.wakuMessage.encode()) + pb.write3(2, rpc.pubsubTopic) + + pb + +proc decode*(T: type MessagePush, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = MessagePush() + + var message: seq[byte] + if not ?pb.getField(1, message): + return err(ProtobufError.missingRequiredField("message")) + else: + rpc.wakuMessage = ?WakuMessage.decode(message) + + if not ?pb.getField(2, rpc.pubsubTopic): + return err(ProtobufError.missingRequiredField("pubsub_topic")) + + ok(rpc) diff --git a/third-party/nwaku/waku/waku_filter_v2/subscriptions.nim b/third-party/nwaku/waku/waku_filter_v2/subscriptions.nim new file mode 100644 index 0000000..8d3b808 --- /dev/null +++ b/third-party/nwaku/waku/waku_filter_v2/subscriptions.nim @@ -0,0 +1,195 @@ +{.push raises: [].} + +import + std/[options, sets, tables, sequtils], + chronicles, + chronos, + libp2p/peerid, + libp2p/stream/connection, + stew/shims/sets +import ../waku_core, ../utils/tableutils, ../node/peer_manager + +logScope: + topics = "waku filter subscriptions" + +const + MaxFilterPeers* = 100 + MaxFilterCriteriaPerPeer* = 1000 + DefaultSubscriptionTimeToLiveSec* = 5.minutes + MessageCacheTTL* = 2.minutes + +type + # a single filter criterion is fully defined by a pubsub topic and content topic + FilterCriterion* = tuple[pubsubTopic: PubsubTopic, contentTopic: ContentTopic] + + FilterCriteria* = HashSet[FilterCriterion] # a sequence of filter criteria + + SubscribedPeers* = HashSet[PeerID] # a sequence of peer ids + 
+ PeerData* = tuple[lastSeen: Moment, criteriaCount: uint] + + FilterSubscriptions* = ref object + peersSubscribed*: Table[PeerID, PeerData] + subscriptions*: Table[FilterCriterion, SubscribedPeers] + subscriptionTimeout: Duration + maxPeers: uint + maxCriteriaPerPeer: uint + +proc new*( + T: type FilterSubscriptions, + subscriptionTimeout: Duration = DefaultSubscriptionTimeToLiveSec, + maxFilterPeers: uint32 = MaxFilterPeers, + maxFilterCriteriaPerPeer: uint32 = MaxFilterCriteriaPerPeer, +): FilterSubscriptions = + return FilterSubscriptions( + peersSubscribed: initTable[PeerID, PeerData](), + subscriptions: initTable[FilterCriterion, SubscribedPeers](), + subscriptionTimeout: subscriptionTimeout, + maxPeers: maxFilterPeers, + maxCriteriaPerPeer: maxFilterCriteriaPerPeer, + ) + +proc isSubscribed*(s: FilterSubscriptions, peerId: PeerID): bool = + s.peersSubscribed.withValue(peerId, data): + return Moment.now() - data.lastSeen <= s.subscriptionTimeout + + return false + +proc subscribedPeerCount*(s: FilterSubscriptions): uint = + return cast[uint](s.peersSubscribed.len) + +proc getPeerSubscriptions*( + s: FilterSubscriptions, peerId: PeerID +): seq[FilterCriterion] = + ## Get all pubsub-content topics a peer is subscribed to + var subscribedContentTopics: seq[FilterCriterion] = @[] + s.peersSubscribed.withValue(peerId, data): + if data.criteriaCount == 0: + return subscribedContentTopics + + for filterCriterion, subscribedPeers in s.subscriptions.mpairs: + if peerId in subscribedPeers: + subscribedContentTopics.add(filterCriterion) + + return subscribedContentTopics + +proc findSubscribedPeers*( + s: FilterSubscriptions, pubsubTopic: PubsubTopic, contentTopic: ContentTopic +): seq[PeerID] = + let filterCriterion: FilterCriterion = (pubsubTopic, contentTopic) + + var foundPeers: seq[PeerID] = @[] + # only peers subscribed to criteria and with legit subscription is counted + s.subscriptions.withValue(filterCriterion, peers): + for peer in peers[]: + if 
s.isSubscribed(peer): + foundPeers.add(peer) + + debug "findSubscribedPeers result", + filter_criterion = filterCriterion, + subscr_set = s.subscriptions, + found_peers = foundPeers + + return foundPeers + +proc removePeer*(s: FilterSubscriptions, peerId: PeerID) {.async.} = + ## Remove all subscriptions for a given peer + debug "removePeer", + currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), peerId = peerId + + s.peersSubscribed.del(peerId) + + debug "removePeer after deletion", + currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), peerId = peerId + +proc removePeers*(s: FilterSubscriptions, peerIds: seq[PeerID]) {.async.} = + ## Remove all subscriptions for a given list of peers + debug "removePeers", + currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), + peerIds = peerIds.mapIt(shortLog(it)) + + for peer in peerIds: + await s.removePeer(peer) + + debug "removePeers after deletion", + currentPeerIds = toSeq(s.peersSubscribed.keys).mapIt(shortLog(it)), + peerIds = peerIds.mapIt(shortLog(it)) + +proc cleanUp*(fs: FilterSubscriptions) = + debug "cleanUp", currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it)) + + ## Remove all subscriptions for peers that have not been seen for a while + let now = Moment.now() + fs.peersSubscribed.keepItIf(now - val.lastSeen <= fs.subscriptionTimeout) + + var filtersToRemove: seq[FilterCriterion] = @[] + for filterCriterion, subscribedPeers in fs.subscriptions.mpairs: + subscribedPeers.keepItIf(fs.isSubscribed(it) == true) + + fs.subscriptions.keepItIf(val.len > 0) + + debug "after cleanUp", + currentPeerIds = toSeq(fs.peersSubscribed.keys).mapIt(shortLog(it)) + +proc refreshSubscription*(s: var FilterSubscriptions, peerId: PeerID) = + s.peersSubscribed.withValue(peerId, data): + data.lastSeen = Moment.now() + +proc addSubscription*( + s: FilterSubscriptions, peerId: PeerID, filterCriteria: FilterCriteria +): Future[Result[void, string]] {.async.} = + ## Add a 
subscription for a given peer + + var peerData: ptr PeerData + + s.peersSubscribed.withValue(peerId, data): + if data.criteriaCount + cast[uint](filterCriteria.len) > s.maxCriteriaPerPeer: + return err("peer has reached maximum number of filter criteria") + + data.lastSeen = Moment.now() + peerData = data + do: + ## not yet subscribed + if cast[uint](s.peersSubscribed.len) >= s.maxPeers: + return err("node has reached maximum number of subscriptions: " & $(s.maxPeers)) + + let newPeerData: PeerData = (lastSeen: Moment.now(), criteriaCount: 0) + peerData = addr(s.peersSubscribed.mgetOrPut(peerId, newPeerData)) + + for filterCriterion in filterCriteria: + var peersOfSub = addr(s.subscriptions.mgetOrPut(filterCriterion, SubscribedPeers())) + if peerId notin peersOfSub[]: + peersOfSub[].incl(peerId) + peerData.criteriaCount += 1 + + debug "subscription added correctly", + new_peer = shortLog(peerId), subscr_set = s.subscriptions + + return ok() + +proc removeSubscription*( + s: FilterSubscriptions, peerId: PeerID, filterCriteria: FilterCriteria +): Result[void, string] = + ## Remove a subscription for a given peer + + s.peersSubscribed.withValue(peerId, peerData): + peerData.lastSeen = Moment.now() + for filterCriterion in filterCriteria: + s.subscriptions.withValue(filterCriterion, peers): + if peers[].missingOrexcl(peerId) == false: + peerData.criteriaCount -= 1 + + if peers[].len == 0: + s.subscriptions.del(filterCriterion) + if peerData.criteriaCount == 0: + s.peersSubscribed.del(peerId) + do: + ## Maybe let just run through and log it as a warning + return err("Peer was not subscribed to criterion") + + return ok() + do: + return err("Peer has no subscriptions") + +proc setSubscriptionTimeout*(s: FilterSubscriptions, newTimeout: Duration) = + s.subscriptionTimeout = newTimeout diff --git a/third-party/nwaku/waku/waku_keystore.nim b/third-party/nwaku/waku/waku_keystore.nim new file mode 100644 index 0000000..4793b38 --- /dev/null +++ 
b/third-party/nwaku/waku/waku_keystore.nim @@ -0,0 +1,13 @@ +# The keyfile submodule (implementation adapted from nim-eth keyfile module https://github.com/status-im/nim-eth/blob/master/eth/keyfile) +import ./waku_keystore/keyfile + +export keyfile + +# The Waku Keystore implementation +import + ./waku_keystore/keystore, + ./waku_keystore/conversion_utils, + ./waku_keystore/protocol_types, + ./waku_keystore/utils + +export keystore, conversion_utils, protocol_types, utils diff --git a/third-party/nwaku/waku/waku_keystore/conversion_utils.nim b/third-party/nwaku/waku/waku_keystore/conversion_utils.nim new file mode 100644 index 0000000..4a76811 --- /dev/null +++ b/third-party/nwaku/waku/waku_keystore/conversion_utils.nim @@ -0,0 +1,33 @@ +{.push raises: [].} + +import json, results, stew/byteutils, ./protocol_types + +# Encodes a KeystoreMembership credential to a byte sequence +proc encode*(credential: KeystoreMembership): seq[byte] = + # TODO: use custom encoding, avoid wordy json + var stringCredential: string + # NOTE: toUgly appends to the string, doesn't replace its contents + stringCredential.toUgly(%credential) + return toBytes(stringCredential) + +# Decodes a byte sequence to a KeystoreMembership credential +proc decode*(encodedCredential: seq[byte]): KeystoreResult[KeystoreMembership] = + # TODO: use custom decoding, avoid wordy json + try: + # we parse the json decrypted keystoreCredential + let jsonObject = parseJson(string.fromBytes(encodedCredential)) + return ok(to(jsonObject, KeystoreMembership)) + except JsonParsingError: + return err( + AppKeystoreError( + kind: KeystoreJsonError, + msg: "error during decoding credentials: " & getCurrentExceptionMsg(), + ) + ) + except Exception: #parseJson raises Exception + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "error in conversion_utils decode: " & getCurrentExceptionMsg(), + ) + ) diff --git a/third-party/nwaku/waku/waku_keystore/keyfile.nim 
b/third-party/nwaku/waku/waku_keystore/keyfile.nim new file mode 100644 index 0000000..488e241 --- /dev/null +++ b/third-party/nwaku/waku/waku_keystore/keyfile.nim @@ -0,0 +1,602 @@ +# This implementation is originally taken from nim-eth keyfile module https://github.com/status-im/nim-eth/blob/master/eth/keyfile and adapted to +# - create keyfiles for arbitrary-long input byte data (rather than fixed-size private keys) +# - allow storage of multiple keyfiles (encrypted with different passwords) in same file and iteration among successful decryptions +# - enable/disable at compilation time the keyfile id and version fields + +{.push raises: [].} + +import + std/[os, strutils, json, sequtils], + nimcrypto/[bcmode, hmac, rijndael, pbkdf2, sha2, sysrand, utils, keccak, scrypt], + results, + eth/keys, + eth/keyfile/uuid + +export results + +const + # Version 3 constants + SaltSize = 16 + DKLen = 32 + MaxDKLen = 128 + ScryptR = 1 + ScryptP = 8 + Pbkdf2WorkFactor = 1_000_000 + ScryptWorkFactor = 262_144 + +type + KeyFileError* = enum + KeyfileRandomError = "keyfile error: Random generator error" + KeyfileUuidError = "keyfile error: UUID generator error" + KeyfileBufferOverrun = "keyfile error: Supplied buffer is too small" + KeyfileIncorrectDKLen = + "keyfile error: `dklen` parameter is 0 or more then MaxDKLen" + KeyfileMalformedError = "keyfile error: JSON has incorrect structure" + KeyfileNotImplemented = "keyfile error: Feature is not implemented" + KeyfileNotSupported = "keyfile error: Feature is not supported" + KeyfileEmptyMac = + "keyfile error: `mac` parameter is zero length or not in hexadecimal form" + KeyfileEmptyCiphertext = + "keyfile error: `ciphertext` parameter is zero length or not in hexadecimal format" + KeyfileEmptySalt = + "keyfile error: `salt` parameter is zero length or not in hexadecimal format" + KeyfileEmptyIV = + "keyfile error: `cipherparams.iv` parameter is zero length or not in hexadecimal format" + KeyfileIncorrectIV = + "keyfile error: 
Size of IV vector is not equal to cipher block size" + KeyfilePrfNotSupported = "keyfile error: PRF algorithm for PBKDF2 is not supported" + KeyfileKdfNotSupported = "keyfile error: KDF algorithm is not supported" + KeyfileCipherNotSupported = "keyfile error: `cipher` parameter is not supported" + KeyfileIncorrectMac = "keyfile error: `mac` verification failed" + KeyfileScryptBadParam = "keyfile error: bad scrypt's parameters" + KeyfileOsError = "keyfile error: OS specific error" + KeyfileIoError = "keyfile error: IO specific error" + KeyfileJsonError = "keyfile error: JSON encoder/decoder error" + KeyfileDoesNotExist = "keyfile error: file does not exist" + + KdfKind* = enum + PBKDF2 ## PBKDF2 + SCRYPT ## SCRYPT + + HashKind* = enum + HashNoSupport + HashSHA2_224 + HashSHA2_256 + HashSHA2_384 + HashSHA2_512 + HashKECCAK224 + HashKECCAK256 + HashKECCAK384 + HashKECCAK512 + HashSHA3_224 + HashSHA3_256 + HashSHA3_384 + HashSHA3_512 + + CryptKind* = enum + CipherNoSupport ## Cipher not supported + AES128CTR ## AES-128-CTR + + CipherParams = object + iv: seq[byte] + + Cipher = object + kind: CryptKind + params: CipherParams + text: seq[byte] + + Crypto = object + kind: KdfKind + cipher: Cipher + kdfParams: JsonNode + mac: seq[byte] + + ScryptParams* = object + dklen: int + n, p, r: int + salt: string + + Pbkdf2Params* = object + dklen: int + c: int + prf: HashKind + salt: string + + DKey = array[DKLen, byte] + KfResult*[T] = Result[T, KeyFileError] + + # basic types for building Keystore JSON + CypherParams = object + iv: string + + CryptoNew = object + cipher: string + cipherparams: CypherParams + ciphertext: string + kdf: string + kdfparams: JsonNode + mac: string + + KeystoreEntry = object + crypto: CryptoNew + id: string + version: string + +const + SupportedHashes = [ + "sha224", "sha256", "sha384", "sha512", "keccak224", "keccak256", "keccak384", + "keccak512", "sha3_224", "sha3_256", "sha3_384", "sha3_512", + ] + + SupportedHashesKinds = [ + HashSHA2_224, 
HashSHA2_256, HashSHA2_384, HashSHA2_512, HashKECCAK224, + HashKECCAK256, HashKECCAK384, HashKECCAK512, HashSHA3_224, HashSHA3_256, + HashSHA3_384, HashSHA3_512, + ] + + # When true, the keyfile json will contain "version" and "id" fields, respectively. Default to false. + VersionInKeyfile: bool = false + IdInKeyfile: bool = false + +proc mapErrTo[T, E](r: Result[T, E], v: static KeyFileError): KfResult[T] = + r.mapErr( + proc(e: E): KeyFileError = + v + ) + +proc `$`(k: KdfKind): string = + case k + of SCRYPT: + return "scrypt" + else: + return "pbkdf2" + +proc `$`(k: CryptKind): string = + case k + of AES128CTR: + return "aes-128-ctr" + else: + return "aes-128-ctr" + +# Parses the prf name to HashKind +proc getPrfHash(prf: string): HashKind = + let p = prf.toLowerAscii() + if p.startsWith("hmac-"): + var hash = p[5 ..^ 1] + var res = SupportedHashes.find(hash) + if res >= 0: + return SupportedHashesKinds[res] + return HashNoSupport + +# Parses the cipher name to CryptoKind +proc getCipher(c: string): CryptKind = + var cl = c.toLowerAscii() + if cl == "aes-128-ctr": + return AES128CTR + else: + return CipherNoSupport + +# Key derivation routine for PBKDF2 +proc deriveKey( + password: string, + salt: string, + kdfkind: KdfKind, + hashkind: HashKind, + workfactor: int, +): KfResult[DKey] = + if kdfkind == PBKDF2: + var output: DKey + var c = if workfactor == 0: Pbkdf2WorkFactor else: workfactor + case hashkind + of HashSHA2_224: + var ctx: HMAC[sha224] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashSHA2_256: + var ctx: HMAC[sha256] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashSHA2_384: + var ctx: HMAC[sha384] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashSHA2_512: + var ctx: HMAC[sha512] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashKECCAK224: + var ctx: HMAC[keccak224] + discard ctx.pbkdf2(password, salt, c, output) 
+ ctx.clear() + ok(output) + of HashKECCAK256: + var ctx: HMAC[keccak256] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashKECCAK384: + var ctx: HMAC[keccak384] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashKECCAK512: + var ctx: HMAC[keccak512] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashSHA3_224: + var ctx: HMAC[sha3_224] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashSHA3_256: + var ctx: HMAC[sha3_256] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashSHA3_384: + var ctx: HMAC[sha3_384] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + of HashSHA3_512: + var ctx: HMAC[sha3_512] + discard ctx.pbkdf2(password, salt, c, output) + ctx.clear() + ok(output) + else: + err(KeyfilePrfNotSupported) + else: + err(KeyfileNotImplemented) + +# Scrypt wrapper +func scrypt[T, M]( + password: openArray[T], + salt: openArray[M], + N, r, p: int, + output: var openArray[byte], +): int = + let (xyvLen, bLen) = scryptCalc(N, r, p) + var xyv = newSeq[uint32](xyvLen) + var b = newSeq[byte](bLen) + scrypt(password, salt, N, r, p, xyv, b, output) + +# Key derivation routine for Scrypt +proc deriveKey(password: string, salt: string, workFactor, r, p: int): KfResult[DKey] = + let wf = if workFactor == 0: ScryptWorkFactor else: workFactor + var output: DKey + if scrypt(password, salt, wf, r, p, output) == 0: + return err(KeyfileScryptBadParam) + + return ok(output) + +# Encryption routine +proc encryptData( + plaintext: openArray[byte], + cryptkind: CryptKind, + key: openArray[byte], + iv: openArray[byte], +): KfResult[seq[byte]] = + if cryptkind == AES128CTR: + var ciphertext = newSeqWith(plaintext.len, 0.byte) + var ctx: CTR[aes128] + ctx.init(toOpenArray(key, 0, 15), iv) + ctx.encrypt(plaintext, ciphertext) + ctx.clear() + ok(ciphertext) + else: + err(KeyfileNotImplemented) + +# 
Decryption routine +proc decryptData( + ciphertext: openArray[byte], + cryptkind: CryptKind, + key: openArray[byte], + iv: openArray[byte], +): KfResult[seq[byte]] = + if cryptkind == AES128CTR: + if len(iv) != aes128.sizeBlock: + return err(KeyfileIncorrectIV) + var plaintext = newSeqWith(ciphertext.len, 0.byte) + var ctx: CTR[aes128] + ctx.init(toOpenArray(key, 0, 15), iv) + ctx.decrypt(ciphertext, plaintext) + ctx.clear() + ok(plaintext) + else: + err(KeyfileNotImplemented) + +# Encodes KDF parameters in JSON +proc kdfParams(kdfkind: KdfKind, salt: string, workfactor: int): KfResult[JsonNode] = + if kdfkind == SCRYPT: + let wf = if workfactor == 0: ScryptWorkFactor else: workfactor + ok(%*{"dklen": DKLen, "n": wf, "r": ScryptR, "p": ScryptP, "salt": salt}) + elif kdfkind == PBKDF2: + let wf = if workfactor == 0: Pbkdf2WorkFactor else: workfactor + ok(%*{"dklen": DKLen, "c": wf, "prf": "hmac-sha256", "salt": salt}) + else: + err(KeyfileNotImplemented) + +# Decodes hex strings to byte sequences +proc decodeHex*(m: string): seq[byte] = + if len(m) > 0: + try: + return utils.fromHex(m) + except CatchableError: + return newSeq[byte]() + else: + return newSeq[byte]() + +# Parses the salt from hex string to byte string +proc decodeSalt(m: string): string = + var sarr: seq[byte] + if len(m) > 0: + try: + sarr = utils.fromHex(m) + var output = newString(len(sarr)) + copyMem(addr output[0], addr sarr[0], len(sarr)) + return output + except CatchableError: + return "" + else: + return "" + +# Compares the message authentication code +proc compareMac(m1: openArray[byte], m2: openArray[byte]): bool = + if len(m1) == len(m2) and len(m1) > 0: + return equalMem(unsafeAddr m1[0], unsafeAddr m2[0], len(m1)) + else: + return false + +# Creates a keyfile for secret encrypted with password according to the other parameters +# Returns keyfile in JSON according to Web3 Secure storage format (here, differently than standard, version and id are optional) +proc createKeyFileJson*( + 
secret: openArray[byte], + password: string, + version: int = 3, + cryptkind: CryptKind = AES128CTR, + kdfkind: KdfKind = PBKDF2, + workfactor: int = 0, +): KfResult[JsonNode] = + ## Create JSON object with keyfile structure. + ## + ## ``secret`` - secret data, which will be stored + ## ``password`` - encryption password + ## ``outjson`` - result JSON object + ## ``version`` - version of keyfile format (default is 3) + ## ``cryptkind`` - algorithm for encryption + ## (default is AES128-CTR) + ## ``kdfkind`` - algorithm for key deriviation function (default is PBKDF2) + ## ``workfactor`` - Key deriviation function work factor, 0 is to use + ## default workfactor. + var iv: array[aes128.sizeBlock, byte] + var salt: array[SaltSize, byte] + var saltstr = newString(SaltSize) + if randomBytes(iv) != aes128.sizeBlock: + return err(KeyfileRandomError) + if randomBytes(salt) != SaltSize: + return err(KeyfileRandomError) + copyMem(addr saltstr[0], addr salt[0], SaltSize) + + let u = ?uuidGenerate().mapErrTo(KeyfileUuidError) + + let + dkey = + case kdfkind + of PBKDF2: + ?deriveKey(password, saltstr, kdfkind, HashSHA2_256, workfactor) + of SCRYPT: + ?deriveKey(password, saltstr, workfactor, ScryptR, ScryptP) + + ciphertext = ?encryptData(secret, cryptkind, dkey, iv) + + var ctx: keccak256 + ctx.init() + ctx.update(toOpenArray(dkey, 16, 31)) + ctx.update(ciphertext) + var mac = ctx.finish() + ctx.clear() + + let params = ?kdfParams(kdfkind, toHex(salt, true), workfactor) + + var obj = KeystoreEntry( + crypto: CryptoNew( + cipher: $cryptkind, + cipherparams: CypherParams(iv: toHex(iv, true)), + ciphertext: toHex(ciphertext, true), + kdf: $kdfkind, + kdfparams: params, + mac: toHex(mac.data, true), + ) + ) + + let json = %*obj + if IdInKeyfile: + json.add("id", %($u)) + if VersionInKeyfile: + json.add("version", %version) + + ok(json) + +# Parses Cipher JSON information +proc decodeCrypto(n: JsonNode): KfResult[Crypto] = + var crypto = n.getOrDefault("crypto") + if 
isNil(crypto): + return err(KeyfileMalformedError) + + var kdf = crypto.getOrDefault("kdf") + if isNil(kdf): + return err(KeyfileMalformedError) + + var c: Crypto + case kdf.getStr() + of "pbkdf2": + c.kind = PBKDF2 + of "scrypt": + c.kind = SCRYPT + else: + return err(KeyfileKdfNotSupported) + + var cipherparams = crypto.getOrDefault("cipherparams") + if isNil(cipherparams): + return err(KeyfileMalformedError) + + c.cipher.kind = getCipher(crypto.getOrDefault("cipher").getStr()) + c.cipher.params.iv = decodeHex(cipherparams.getOrDefault("iv").getStr()) + c.cipher.text = decodeHex(crypto.getOrDefault("ciphertext").getStr()) + c.mac = decodeHex(crypto.getOrDefault("mac").getStr()) + c.kdfParams = crypto.getOrDefault("kdfparams") + + if c.cipher.kind == CipherNoSupport: + return err(KeyfileCipherNotSupported) + if len(c.cipher.text) == 0: + return err(KeyfileEmptyCiphertext) + if len(c.mac) == 0: + return err(KeyfileEmptyMac) + if isNil(c.kdfParams): + return err(KeyfileMalformedError) + + return ok(c) + +# Parses PNKDF2 JSON parameters +proc decodePbkdf2Params(params: JsonNode): KfResult[Pbkdf2Params] = + var p: Pbkdf2Params + p.salt = decodeSalt(params.getOrDefault("salt").getStr()) + if len(p.salt) == 0: + return err(KeyfileEmptySalt) + + p.dklen = params.getOrDefault("dklen").getInt() + p.c = params.getOrDefault("c").getInt() + p.prf = getPrfHash(params.getOrDefault("prf").getStr()) + + if p.prf == HashNoSupport: + return err(KeyfilePrfNotSupported) + if p.dklen == 0 or p.dklen > MaxDKLen: + return err(KeyfileIncorrectDKLen) + + return ok(p) + +# Parses JSON Scrypt parameters +proc decodeScryptParams(params: JsonNode): KfResult[ScryptParams] = + var p: ScryptParams + p.salt = decodeSalt(params.getOrDefault("salt").getStr()) + if len(p.salt) == 0: + return err(KeyfileEmptySalt) + + p.dklen = params.getOrDefault("dklen").getInt() + p.n = params.getOrDefault("n").getInt() + p.p = params.getOrDefault("p").getInt() + p.r = params.getOrDefault("r").getInt() + + if 
p.dklen == 0 or p.dklen > MaxDKLen: + return err(KeyfileIncorrectDKLen) + + return ok(p) + +# Decrypts data +func decryptSecret(crypto: Crypto, dkey: DKey): KfResult[seq[byte]] = + var ctx: keccak256 + ctx.init() + ctx.update(toOpenArray(dkey, 16, 31)) + ctx.update(crypto.cipher.text) + var mac = ctx.finish() + ctx.clear() + if not compareMac(mac.data, crypto.mac): + return err(KeyfileIncorrectMac) + + let plaintext = + ?decryptData(crypto.cipher.text, crypto.cipher.kind, dkey, crypto.cipher.params.iv) + + ok(plaintext) + +# Parse JSON keyfile and decrypts its content using password +proc decodeKeyFileJson*(j: JsonNode, password: string): KfResult[seq[byte]] = + ## Decode secret from keyfile json object ``j`` using + ## password string ``password``. + let res = decodeCrypto(j) + if res.isErr: + return err(res.error) + let crypto = res.get() + + case crypto.kind + of PBKDF2: + let res = decodePbkdf2Params(crypto.kdfParams) + if res.isErr: + return err(res.error) + + let params = res.get() + let dkey = ?deriveKey(password, params.salt, PBKDF2, params.prf, params.c) + return decryptSecret(crypto, dkey) + of SCRYPT: + let res = decodeScryptParams(crypto.kdfParams) + if res.isErr: + return err(res.error) + + let params = res.get() + let dkey = ?deriveKey(password, params.salt, params.n, params.r, params.p) + return decryptSecret(crypto, dkey) + +# Loads the file at pathname, decrypts and returns all keyfiles encrypted under password +proc loadKeyFiles*( + pathname: string, password: string +): KfResult[seq[KfResult[seq[byte]]]] = + ## Load and decode data from file with pathname + ## ``pathname``, using password string ``password``. 
+ ## The index successful decryptions is returned + var data: JsonNode + var decodedKeyfile: KfResult[seq[byte]] + var successfullyDecodedKeyfiles: seq[KfResult[seq[byte]]] + + if fileExists(pathname) == false: + return err(KeyfileDoesNotExist) + + # Note that lines strips the ending newline, if present + try: + for keyfile in lines(pathname): + # We skip empty lines + if keyfile.len == 0: + continue + # We skip all lines that doesn't seem to define a json + if keyfile[0] != '{' or keyfile[^1] != '}': + continue + + try: + data = json.parseJson(keyfile) + except JsonParsingError: + return err(KeyfileJsonError) + except ValueError: + return err(KeyfileJsonError) + except OSError: + return err(KeyfileOsError) + except Exception: #parseJson raises Exception + return err(KeyfileOsError) + + decodedKeyfile = decodeKeyFileJson(data, password) + if decodedKeyfile.isOk(): + successfullyDecodedKeyfiles.add decodedKeyfile + except IOError: + return err(KeyfileIoError) + + return ok(successfullyDecodedKeyfiles) + +# Note that the keyfile is open in Append mode so that multiple credentials can be stored in same file +proc saveKeyFile*(pathname: string, jobject: JsonNode): KfResult[void] = + ## Save JSON object ``jobject`` to file with pathname ``pathname``. 
+ var f: File + if not f.open(pathname, fmAppend): + return err(KeyfileOsError) + try: + # To avoid other users/attackers to be able to read keyfiles, we make the file readable/writable only by the running user + setFilePermissions(pathname, {fpUserWrite, fpUserRead}) + f.write($jobject) + # We store a keyfile per line + f.write("\n") + ok() + except CatchableError: + err(KeyfileOsError) + finally: + f.close() diff --git a/third-party/nwaku/waku/waku_keystore/keystore.nim b/third-party/nwaku/waku/waku_keystore/keystore.nim new file mode 100644 index 0000000..6cc4ef7 --- /dev/null +++ b/third-party/nwaku/waku/waku_keystore/keystore.nim @@ -0,0 +1,269 @@ +{.push raises: [].} + +import options, json, strutils, sequtils, std/[tables, os] + +import ./keyfile, ./conversion_utils, ./protocol_types, ./utils + +# This proc creates an empty keystore (i.e. with no credentials) +proc createAppKeystore*( + path: string, appInfo: AppInfo, separator: string = "\n" +): KeystoreResult[void] = + let keystore = AppKeystore( + application: appInfo.application, + appIdentifier: appInfo.appIdentifier, + version: appInfo.version, + credentials: initTable[string, KeystoreMembership](), + ) + + var jsonKeystore: string + jsonKeystore.toUgly(%keystore) + + var f: File + if not f.open(path, fmWrite): + return + err(AppKeystoreError(kind: KeystoreOsError, msg: "Cannot open file for writing")) + + try: + # To avoid other users/attackers to be able to read keyfiles, we make the file readable/writable only by the running user + setFilePermissions(path, {fpUserWrite, fpUserRead}) + f.write(jsonKeystore) + # We separate keystores with separator + f.write(separator) + ok() + except CatchableError: + err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "error while writing keystore: " & getCurrentExceptionMsg(), + ) + ) + finally: + f.close() + +# This proc load a keystore based on the application, appIdentifier and version filters. 
+# If none is found, it automatically creates an empty keystore for the passed parameters +proc loadAppKeystore*( + path: string, appInfo: AppInfo, separator: string = "\n" +): KeystoreResult[JsonNode] = + ## Load and decode JSON keystore from pathname + var data: JsonNode + + # If no keystore exists at path we create a new empty one with passed keystore parameters + if fileExists(path) == false: + let newKeystoreRes = createAppKeystore(path, appInfo, separator) + if newKeystoreRes.isErr(): + return err(newKeystoreRes.error) + + try: + # We read all the file contents + var f: File + if not f.open(path, fmRead): + return err( + AppKeystoreError(kind: KeystoreOsError, msg: "Cannot open file for reading") + ) + + ## the next blocks expect the whole keystore.json content to be compacted in one single line + let fileContents = readAll(f).replace(" ", "").replace("\n", "") + + # We iterate over each substring split by separator (which we expect to correspond to a single keystore json) + for keystore in fileContents.split(separator): + # We skip if read line is empty + if keystore.len == 0: + continue + # We skip all lines that don't seem to define a json + if not keystore.startsWith("{") or not keystore.endsWith("}"): + continue + + try: + # We parse the json + data = json.parseJson(keystore) + + # We check if parsed json contains the relevant keystore credentials fields and if these are set to the passed parameters + # (note that "if" is lazy, so if one of the .contains() fails, the json fields contents will not be checked and no ResultDefect will be raised due to accessing unavailable fields) + if not data.hasKeys(["application", "appIdentifier", "credentials", "version"]): + return err( + AppKeystoreError( + kind: KeystoreJsonKeyError, msg: "Missing required fields in keystore" + ) + ) + + if data["application"].getStr() != appInfo.application: + return err( + AppKeystoreError( + kind: KeystoreJsonValueMismatchError, + msg: + "Application does not match. 
Expected '" & appInfo.application & + "' but got '" & data["application"].getStr() & "'", + ) + ) + + if data["appIdentifier"].getStr() != appInfo.appIdentifier: + return err( + AppKeystoreError( + kind: KeystoreJsonValueMismatchError, + msg: + "AppIdentifier does not match. Expected '" & appInfo.appIdentifier & + "' but got '" & data["appIdentifier"].getStr() & "'", + ) + ) + + if data["version"].getStr() != appInfo.version: + return err( + AppKeystoreError( + kind: KeystoreJsonValueMismatchError, + msg: + "Version does not match. Expected '" & appInfo.version & "' but got '" & + data["version"].getStr() & "'", + ) + ) + # We return the first json keystore that matches the passed app parameters + # We assume a unique kesytore with such parameters is present in the file + return ok(data) + # TODO: we might continue rather than return for some of these errors + except JsonParsingError: + return err( + AppKeystoreError( + kind: KeystoreJsonError, + msg: + "error during loading keystore, JsonParsingError: " & + getCurrentExceptionMsg(), + ) + ) + except ValueError: + return err( + AppKeystoreError( + kind: KeystoreJsonError, + msg: + "error during loading keystore, ValueError: " & getCurrentExceptionMsg(), + ) + ) + except OSError: + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "error during loading keystore, OSError: " & getCurrentExceptionMsg(), + ) + ) + except Exception: #parseJson raises Exception + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "error during loading keystore, Exception: " & getCurrentExceptionMsg(), + ) + ) + except IOError: + return err( + AppKeystoreError( + kind: KeystoreIoError, + msg: "error during loading keystore, IOError: " & getCurrentExceptionMsg(), + ) + ) + + return err( + AppKeystoreError( + kind: KeystoreKeystoreDoesNotExist, msg: "The keystore file could not be parsed" + ) + ) + +# Adds a membership credential to the keystore matching the application, appIdentifier and version filters. 
+proc addMembershipCredentials*( + path: string, + membership: KeystoreMembership, + password: string, + appInfo: AppInfo, + separator: string = "\n", +): KeystoreResult[void] = + # We load the keystore corresponding to the desired parameters + # This call ensures that JSON has all required fields + let jsonKeystoreRes = loadAppKeystore(path, appInfo, separator) + + if jsonKeystoreRes.isErr(): + return err(jsonKeystoreRes.error) + + # We load the JSON node corresponding to the app keystore + var jsonKeystore = jsonKeystoreRes.get() + + try: + if jsonKeystore.hasKey("credentials"): + # We get all credentials in keystore + let keystoreCredentials = jsonKeystore["credentials"] + let key = membership.hash() + if keystoreCredentials.hasKey(key): + # noop + return ok() + + let encodedMembershipCredential = membership.encode() + let keyfileRes = createKeyFileJson(encodedMembershipCredential, password) + if keyfileRes.isErr(): + return err( + AppKeystoreError(kind: KeystoreCreateKeyfileError, msg: $keyfileRes.error) + ) + + # We add it to the credentials field of the keystore + jsonKeystore["credentials"][key] = keyfileRes.get() + except CatchableError: + return err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg())) + + # We save to disk the (updated) keystore. 
+ let saveRes = save(jsonKeystore, path, separator) + if saveRes.isErr(): + return err(saveRes.error) + + return ok() + +# Returns the membership credentials in the keystore matching the application, appIdentifier and version filters, further filtered by the input +# identity credentials and membership contracts +proc getMembershipCredentials*( + path: string, password: string, query: KeystoreMembership, appInfo: AppInfo +): KeystoreResult[KeystoreMembership] = + # We load the keystore corresponding to the desired parameters + # This call ensures that JSON has all required fields + let jsonKeystoreRes = loadAppKeystore(path, appInfo) + + if jsonKeystoreRes.isErr(): + return err(jsonKeystoreRes.error) + + # We load the JSON node corresponding to the app keystore + var jsonKeystore = jsonKeystoreRes.get() + + try: + if jsonKeystore.hasKey("credentials"): + # We get all credentials in keystore + var keystoreCredentials = jsonKeystore["credentials"] + if keystoreCredentials.len == 0: + # error + return err( + AppKeystoreError( + kind: KeystoreCredentialNotFoundError, + msg: "No credentials found in keystore", + ) + ) + var keystoreCredential: JsonNode + if keystoreCredentials.len == 1: + keystoreCredential = keystoreCredentials.getFields().values().toSeq()[0] + else: + let key = query.hash() + if not keystoreCredentials.hasKey(key): + # error + return err( + AppKeystoreError( + kind: KeystoreCredentialNotFoundError, + msg: + "Credential not found in keystore. 
There are multiple credentials in this keystore, please check if you have used the correct parameters.", + ) + ) + keystoreCredential = keystoreCredentials[key] + + let decodedKeyfileRes = decodeKeyFileJson(keystoreCredential, password) + if decodedKeyfileRes.isErr(): + return err( + AppKeystoreError( + kind: KeystoreReadKeyfileError, msg: $decodedKeyfileRes.error + ) + ) + # we parse the json decrypted keystoreCredential + let decodedCredentialRes = decode(decodedKeyfileRes.get()) + let keyfileMembershipCredential = decodedCredentialRes.get() + return ok(keyfileMembershipCredential) + except CatchableError: + return err(AppKeystoreError(kind: KeystoreJsonError, msg: getCurrentExceptionMsg())) diff --git a/third-party/nwaku/waku/waku_keystore/protocol_types.nim b/third-party/nwaku/waku/waku_keystore/protocol_types.nim new file mode 100644 index 0000000..6cfc2f1 --- /dev/null +++ b/third-party/nwaku/waku/waku_keystore/protocol_types.nim @@ -0,0 +1,165 @@ +{.push raises: [].} + +import std/[sequtils, tables], results, stew/endians2, nimcrypto, stint + +# NOTE: 256-bytes long credentials are due to the use of BN254 in RLN. 
Other implementations/curves might have a different byte size +const CredentialByteSize* = 256 + +type + IdentityTrapdoor* = seq[byte] #array[32, byte] + IdentityNullifier* = seq[byte] #array[32, byte] + # identity key as defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + IdentitySecretHash* = seq[byte] #array[32, byte] + # hash of identity key as defined ed in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + IDCommitment* = seq[byte] #array[32, byte] + UserMessageLimit* = uint64 + +type IdentityCredential* = object + idTrapdoor*: IdentityTrapdoor + idNullifier*: IdentityNullifier + ## user's identity key (a secret key) which is selected randomly + ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + idSecretHash*: IdentitySecretHash + # hash of user's identity key generated by + # Poseidon hash function implemented in rln lib + # more details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Membership + idCommitment*: IDCommitment + +proc toUInt256*(idCommitment: IDCommitment): UInt256 = + let pk = UInt256.fromBytesLE(idCommitment) + return pk + +proc toIDCommitment*(idCommitmentUint: UInt256): IDCommitment = + let pk = IDCommitment(@(idCommitmentUint.toBytesLE())) + return pk + +type MembershipIndex* = uint + +proc toMembershipIndex*(v: UInt256): MembershipIndex = + return cast[MembershipIndex](v) + +# Converts a sequence of tuples containing 4 string (i.e. 
identity trapdoor, nullifier, secret hash, commitment) to an IdentityCredential +type RawMembershipCredentials* = (string, string, string, string) +proc toIdentityCredentials*( + groupKeys: seq[RawMembershipCredentials] +): Result[seq[IdentityCredential], string] = + ## groupKeys is sequence of membership key tuples in the form of (identity key, identity commitment) all in the hexadecimal format + ## the toIdentityCredentials proc populates a sequence of IdentityCredentials using the supplied groupKeys + ## Returns an error if the conversion fails + + var groupIdCredentials = newSeq[IdentityCredential]() + + for i in 0 .. groupKeys.len - 1: + try: + let + idTrapdoor = IdentityTrapdoor( + @(hexToUint[CredentialByteSize](groupKeys[i][0]).toBytesLE()) + ) + idNullifier = IdentityNullifier( + @(hexToUint[CredentialByteSize](groupKeys[i][1]).toBytesLE()) + ) + idSecretHash = IdentitySecretHash( + @(hexToUint[CredentialByteSize](groupKeys[i][2]).toBytesLE()) + ) + idCommitment = + IDCommitment(@(hexToUint[CredentialByteSize](groupKeys[i][3]).toBytesLE())) + groupIdCredentials.add( + IdentityCredential( + idTrapdoor: idTrapdoor, + idNullifier: idNullifier, + idSecretHash: idSecretHash, + idCommitment: idCommitment, + ) + ) + except ValueError as err: + return err("could not convert the group key to bytes: " & err.msg) + return ok(groupIdCredentials) + +proc serialize*(idComms: seq[IDCommitment]): seq[byte] = + ## serializes a seq of IDCommitments to a byte seq + ## the serialization is based on https://github.com/status-im/nwaku/blob/37bd29fbc37ce5cf636734e7dd410b1ed27b88c8/waku/v2/protocol/waku_rln_relay/rln.nim#L142 + ## the order of serialization is |id_commitment_len<8>|id_commitment| + var idCommsBytes = newSeq[byte]() + + # serialize the idComms, with its length prefixed + let len = toBytes(uint64(idComms.len), Endianness.littleEndian) + idCommsBytes.add(len) + + for idComm in idComms: + idCommsBytes = concat(idCommsBytes, @idComm) + + return idCommsBytes + +type 
MembershipContract* = object + chainId*: string + address*: string + +type KeystoreMembership* = ref object of RootObj + membershipContract*: MembershipContract + treeIndex*: MembershipIndex + identityCredential*: IdentityCredential + userMessageLimit*: UserMessageLimit + +proc `$`*(m: KeystoreMembership): string = + return + "KeystoreMembership(chainId: " & m.membershipContract.chainId & ", contractAddress: " & + m.membershipContract.address & ", treeIndex: " & $m.treeIndex & + ", userMessageLimit: " & $m.userMessageLimit & ", identityCredential: " & + $m.identityCredential & ")" + +proc `==`*(x, y: KeystoreMembership): bool = + return + x.membershipContract.chainId == y.membershipContract.chainId and + x.membershipContract.address == y.membershipContract.address and + x.treeIndex == y.treeIndex and x.userMessageLimit == y.userMessageLimit and + x.identityCredential.idTrapdoor == y.identityCredential.idTrapdoor and + x.identityCredential.idNullifier == y.identityCredential.idNullifier and + x.identityCredential.idSecretHash == y.identityCredential.idSecretHash and + x.identityCredential.idCommitment == y.identityCredential.idCommitment + +proc hash*(m: KeystoreMembership): string = + # hash together the chainId, address and treeIndex + return + $sha256.digest( + m.membershipContract.chainId & m.membershipContract.address & $m.treeIndex + ) + +type MembershipTable* = Table[string, KeystoreMembership] + +type AppInfo* = object + application*: string + appIdentifier*: string + version*: string + +type AppKeystore* = object + application*: string + appIdentifier*: string + credentials*: MembershipTable + version*: string + +type + AppKeystoreErrorKind* = enum + KeystoreOsError = "keystore error: OS specific error" + KeystoreIoError = "keystore error: IO specific error" + KeystoreJsonKeyError = "keystore error: fields not present in JSON" + KeystoreJsonValueMismatchError = "keystore error: JSON value mismatch" + KeystoreJsonError = "keystore error: JSON encoder/decoder 
error" + KeystoreKeystoreDoesNotExist = "keystore error: file does not exist" + KeystoreCreateKeystoreError = "Error while creating application keystore" + KeystoreLoadKeystoreError = "Error while loading application keystore" + KeystoreCreateKeyfileError = "Error while creating keyfile for credentials" + KeystoreSaveKeyfileError = "Error while saving keyfile for credentials" + KeystoreReadKeyfileError = "Error while reading keyfile for credentials" + KeystoreCredentialAlreadyPresentError = + "Error while adding credentials to keystore: credential already present" + KeystoreCredentialNotFoundError = + "Error while searching credentials in keystore: credential not found" + + AppKeystoreError* = object + kind*: AppKeystoreErrorKind + msg*: string + +proc `$`*(e: AppKeystoreError): string = + return $e.kind & ": " & e.msg + +type KeystoreResult*[T] = Result[T, AppKeystoreError] diff --git a/third-party/nwaku/waku/waku_keystore/utils.nim b/third-party/nwaku/waku/waku_keystore/utils.nim new file mode 100644 index 0000000..bbfbf31 --- /dev/null +++ b/third-party/nwaku/waku/waku_keystore/utils.nim @@ -0,0 +1,81 @@ +{.push raises: [].} + +import json, std/[os, sequtils] + +import ./keyfile, ./protocol_types + +# Checks if a JsonNode has all keys contained in "keys" +proc hasKeys*(data: JsonNode, keys: openArray[string]): bool = + return all( + keys, + proc(key: string): bool = + return data.hasKey(key), + ) + +# Safely saves a Keystore's JsonNode to disk. 
+# If exists, the destination file is renamed with extension .bkp; the file is written at its destination and the .bkp file is removed if write is successful, otherwise is restored +proc save*(json: JsonNode, path: string, separator: string): KeystoreResult[void] = + # We first backup the current keystore + if fileExists(path): + try: + moveFile(path, path & ".bkp") + except: # TODO: Fix "BareExcept" warning + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not backup keystore: " & getCurrentExceptionMsg(), + ) + ) + + # We save the updated json + var f: File + if not f.open(path, fmAppend): + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "error in waku_keystore save: " & getCurrentExceptionMsg(), + ) + ) + try: + # To avoid other users/attackers to be able to read keyfiles, we make the file readable/writable only by the running user + setFilePermissions(path, {fpUserWrite, fpUserRead}) + f.write($json) + # We store a keyfile per line + f.write(separator) + except CatchableError: + # We got some KeystoreOsError writing to disk. 
We attempt to restore the previous keystore backup + if fileExists(path & ".bkp"): + try: + f.close() + removeFile(path) + moveFile(path & ".bkp", path) + except: # TODO: Fix "BareExcept" warning + # Unlucky, we just fail + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not restore keystore backup: " & getCurrentExceptionMsg(), + ) + ) + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not write keystore: " & getCurrentExceptionMsg(), + ) + ) + finally: + f.close() + + # The write went fine, so we can remove the backup keystore + if fileExists(path & ".bkp"): + try: + removeFile(path & ".bkp") + except CatchableError: + return err( + AppKeystoreError( + kind: KeystoreOsError, + msg: "could not remove keystore backup: " & getCurrentExceptionMsg(), + ) + ) + + return ok() diff --git a/third-party/nwaku/waku/waku_lightpush.nim b/third-party/nwaku/waku/waku_lightpush.nim new file mode 100644 index 0000000..a905570 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush.nim @@ -0,0 +1,3 @@ +import ./waku_lightpush/[protocol, common, rpc, rpc_codec, callbacks, self_req_handler] + +export protocol, common, rpc, rpc_codec, callbacks, self_req_handler diff --git a/third-party/nwaku/waku/waku_lightpush/callbacks.nim b/third-party/nwaku/waku/waku_lightpush/callbacks.nim new file mode 100644 index 0000000..4b362e6 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/callbacks.nim @@ -0,0 +1,60 @@ +{.push raises: [].} + +import results + +import + ../waku_core, + ../waku_relay, + ./common, + ../waku_rln_relay, + ../waku_rln_relay/protocol_types + +import std/times, libp2p/peerid, stew/byteutils + +proc checkAndGenerateRLNProof*( + rlnPeer: Option[WakuRLNRelay], message: WakuMessage +): Result[WakuMessage, string] = + # check if the message already has RLN proof + if message.proof.len > 0: + return ok(message) + + if rlnPeer.isNone(): + notice "Publishing message without RLN proof" + return ok(message) + # generate and append 
RLN proof + let + time = getTime().toUnix() + senderEpochTime = float64(time) + var msgWithProof = message + rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime).isOkOr: + return err(error) + return ok(msgWithProof) + +proc getNilPushHandler*(): PushMessageHandler = + return proc( + peer: PeerId, pubsubTopic: string, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + return lightpushResultInternalError("no waku relay found") + +proc getRelayPushHandler*( + wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]() +): PushMessageHandler = + return proc( + peer: PeerId, pubsubTopic: string, message: WakuMessage + ): Future[WakuLightPushResult] {.async.} = + # append RLN proof + let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message).valueOr: + return lighpushErrorResult(LightPushErrorCode.OUT_OF_RLN_PROOF, error) + + (await wakuRelay.validateMessage(pubSubTopic, msgWithProof)).isOkOr: + return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, $error) + + let publishedResult = await wakuRelay.publish(pubsubTopic, msgWithProof) + + if publishedResult.isErr(): + let msgHash = computeMessageHash(pubsubTopic, message).to0xHex() + notice "Lightpush request has not been published to any peers", + msg_hash = msgHash, reason = $publishedResult.error + return mapPubishingErrorToPushResult(publishedResult.error) + + return lightpushSuccessResult(publishedResult.get().uint32) diff --git a/third-party/nwaku/waku/waku_lightpush/client.nim b/third-party/nwaku/waku/waku_lightpush/client.nim new file mode 100644 index 0000000..2d5ba48 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/client.nim @@ -0,0 +1,158 @@ +{.push raises: [].} + +import std/options, results, chronicles, chronos, metrics, bearssl/rand, stew/byteutils +import libp2p/peerid, libp2p/stream/connection +import + ../waku_core/peers, + ../node/peer_manager, + ../node/delivery_monitor/publish_observer, + ../utils/requests, + ../waku_core, + ./common, + 
./protocol_metrics, + ./rpc, + ./rpc_codec + +logScope: + topics = "waku lightpush client" + +type WakuLightPushClient* = ref object + peerManager*: PeerManager + rng*: ref rand.HmacDrbgContext + publishObservers: seq[PublishObserver] + +proc new*( + T: type WakuLightPushClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext +): T = + WakuLightPushClient(peerManager: peerManager, rng: rng) + +proc addPublishObserver*(wl: WakuLightPushClient, obs: PublishObserver) = + wl.publishObservers.add(obs) + +proc sendPushRequest( + wl: WakuLightPushClient, + req: LightPushRequest, + peer: PeerId | RemotePeerInfo, + conn: Option[Connection] = none(Connection), +): Future[WakuLightPushResult] {.async.} = + let connection = conn.valueOr: + (await wl.peerManager.dialPeer(peer, WakuLightPushCodec)).valueOr: + waku_lightpush_v3_errors.inc(labelValues = [dialFailure]) + return lighpushErrorResult( + LightPushErrorCode.NO_PEERS_TO_RELAY, + dialFailure & ": " & $peer & " is not accessible", + ) + + await connection.writeLP(req.encode().buffer) + + var buffer: seq[byte] + try: + buffer = await connection.readLp(DefaultMaxRpcSize.int) + except LPStreamRemoteClosedError: + error "Failed to read response from peer", error = getCurrentExceptionMsg() + return lightpushResultInternalError( + "Failed to read response from peer: " & getCurrentExceptionMsg() + ) + + let response = LightpushResponse.decode(buffer).valueOr: + error "failed to decode response" + waku_lightpush_v3_errors.inc(labelValues = [decodeRpcFailure]) + return lightpushResultInternalError(decodeRpcFailure) + + if response.requestId != req.requestId and + response.statusCode != LightPushErrorCode.TOO_MANY_REQUESTS: + error "response failure, requestId mismatch", + requestId = req.requestId, responseRequestId = response.requestId + return lightpushResultInternalError("response failure, requestId mismatch") + + return toPushResult(response) + +proc publish*( + wl: WakuLightPushClient, + pubSubTopic: 
Option[PubsubTopic] = none(PubsubTopic), + wakuMessage: WakuMessage, + peer: PeerId | RemotePeerInfo, +): Future[WakuLightPushResult] {.async, gcsafe.} = + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + + when peer is PeerId: + info "publish", + peerId = shortLog(peer), + msg_hash = computeMessageHash(pubsubTopic.get(""), message).to0xHex + else: + info "publish", + peerId = shortLog(peer.peerId), + msg_hash = computeMessageHash(pubsubTopic.get(""), message).to0xHex + + let pushRequest = LightpushRequest( + requestId: generateRequestId(wl.rng), pubSubTopic: pubSubTopic, message: message + ) + let publishedCount = ?await wl.sendPushRequest(pushRequest, peer) + + for obs in wl.publishObservers: + obs.onMessagePublished(pubSubTopic.get(""), message) + + return lightpushSuccessResult(publishedCount) + +proc publishToAny*( + wl: WakuLightPushClient, pubSubTopic: PubsubTopic, wakuMessage: WakuMessage +): Future[WakuLightPushResult] {.async, gcsafe.} = + ## This proc is similar to the publish one but in this case + ## we don't specify a particular peer and instead we get it from peer manager + + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + + let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr: + # TODO: check if it is matches the situation - shall we distinguish client side missing peers from server side? 
+ return lighpushErrorResult( + LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers" + ) + + info "publishToAny", + my_peer_id = wl.peerManager.switch.peerInfo.peerId, + peer_id = peer.peerId, + msg_hash = computeMessageHash(pubsubTopic, message).to0xHex, + sentTime = getNowInNanosecondTime() + + let pushRequest = LightpushRequest( + requestId: generateRequestId(wl.rng), + pubSubTopic: some(pubSubTopic), + message: message, + ) + let publishedCount = ?await wl.sendPushRequest(pushRequest, peer) + + for obs in wl.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + return lightpushSuccessResult(publishedCount) + +proc publishWithConn*( + wl: WakuLightPushClient, + pubSubTopic: PubsubTopic, + message: WakuMessage, + conn: Connection, + destPeer: PeerId, +): Future[WakuLightPushResult] {.async, gcsafe.} = + info "publishWithConn", + my_peer_id = wl.peerManager.switch.peerInfo.peerId, + peer_id = destPeer, + msg_hash = computeMessageHash(pubsubTopic, message).to0xHex, + sentTime = getNowInNanosecondTime() + + let pushRequest = LightpushRequest( + requestId: generateRequestId(wl.rng), + pubSubTopic: some(pubSubTopic), + message: message, + ) + #TODO: figure out how to not pass destPeer as this is just a hack + let publishedCount = + ?await wl.sendPushRequest(pushRequest, destPeer, conn = some(conn)) + + for obs in wl.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + return lightpushSuccessResult(publishedCount) diff --git a/third-party/nwaku/waku/waku_lightpush/common.nim b/third-party/nwaku/waku/waku_lightpush/common.nim new file mode 100644 index 0000000..f268783 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/common.nim @@ -0,0 +1,87 @@ +{.push raises: [].} + +import std/options, results, chronos, libp2p/peerid +import ../waku_core, ./rpc, ../waku_relay/protocol + +from ../waku_core/codecs import WakuLightPushCodec +export WakuLightPushCodec +export LightPushStatusCode + +const LightPushSuccessCode* = (SUCCESS: 
LightPushStatusCode(200)) + +const LightPushErrorCode* = ( + BAD_REQUEST: LightPushStatusCode(400), + PAYLOAD_TOO_LARGE: LightPushStatusCode(413), + INVALID_MESSAGE: LightPushStatusCode(420), + UNSUPPORTED_PUBSUB_TOPIC: LightPushStatusCode(421), + TOO_MANY_REQUESTS: LightPushStatusCode(429), + INTERNAL_SERVER_ERROR: LightPushStatusCode(500), + SERVICE_NOT_AVAILABLE: LightPushStatusCode(503), + OUT_OF_RLN_PROOF: LightPushStatusCode(504), + NO_PEERS_TO_RELAY: LightPushStatusCode(505), +) + +type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]] +type WakuLightPushResult* = Result[uint32, ErrorStatus] + +type PushMessageHandler* = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult] {.async.} + +const TooManyRequestsMessage* = "Request rejected due to too many requests" + +func isSuccess*(response: LightPushResponse): bool = + return response.statusCode == LightPushSuccessCode.SUCCESS + +func toPushResult*(response: LightPushResponse): WakuLightPushResult = + if isSuccess(response): + return ok(response.relayPeerCount.get(0)) + else: + return err((response.statusCode, response.statusDesc)) + +func lightpushSuccessResult*(relayPeerCount: uint32): WakuLightPushResult = + return ok(relayPeerCount) + +func lightpushResultInternalError*(msg: string): WakuLightPushResult = + return err((LightPushErrorCode.INTERNAL_SERVER_ERROR, some(msg))) + +func lightpushResultBadRequest*(msg: string): WakuLightPushResult = + return err((LightPushErrorCode.BAD_REQUEST, some(msg))) + +func lightpushResultServiceUnavailable*(msg: string): WakuLightPushResult = + return err((LightPushErrorCode.SERVICE_NOT_AVAILABLE, some(msg))) + +func lighpushErrorResult*( + statusCode: LightpushStatusCode, desc: Option[string] +): WakuLightPushResult = + return err((statusCode, desc)) + +func lighpushErrorResult*( + statusCode: LightpushStatusCode, desc: string +): WakuLightPushResult = + return err((statusCode, some(desc))) + +func 
mapPubishingErrorToPushResult*( + publishOutcome: PublishOutcome +): WakuLightPushResult = + case publishOutcome + of NoTopicSpecified: + return + err((LightPushErrorCode.INVALID_MESSAGE, some("Empty topic, skipping publish"))) + of DuplicateMessage: + return + err((LightPushErrorCode.INVALID_MESSAGE, some("Dropping already-seen message"))) + of NoPeersToPublish: + return err( + ( + LightPushErrorCode.NO_PEERS_TO_RELAY, + some("No peers for topic, skipping publish"), + ) + ) + of CannotGenerateMessageId: + return err( + ( + LightPushErrorCode.INTERNAL_SERVER_ERROR, + some("Error generating message id, skipping publish"), + ) + ) diff --git a/third-party/nwaku/waku/waku_lightpush/protocol.nim b/third-party/nwaku/waku/waku_lightpush/protocol.nim new file mode 100644 index 0000000..955b1ad --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/protocol.nim @@ -0,0 +1,169 @@ +{.push raises: [].} + +import + std/[options, strutils], + results, + stew/byteutils, + chronicles, + chronos, + metrics, + bearssl/rand +import + ../node/peer_manager/peer_manager, + ../waku_core, + ../waku_core/topics/sharding, + ./common, + ./rpc, + ./rpc_codec, + ./protocol_metrics, + ../common/rate_limit/request_limiter + +logScope: + topics = "waku lightpush" + +type WakuLightPush* = ref object of LPProtocol + rng*: ref rand.HmacDrbgContext + peerManager*: PeerManager + pushHandler*: PushMessageHandler + requestRateLimiter*: RequestRateLimiter + autoSharding: Option[Sharding] + +proc handleRequest( + wl: WakuLightPush, peerId: PeerId, pushRequest: LightpushRequest +): Future[WakuLightPushResult] {.async.} = + let pubsubTopic = pushRequest.pubSubTopic.valueOr: + if wl.autoSharding.isNone(): + let msg = "Pubsub topic must be specified when static sharding is enabled" + error "lightpush request handling error", error = msg + return WakuLightPushResult.err( + (code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg)) + ) + + let parsedTopic = 
NsContentTopic.parse(pushRequest.message.contentTopic).valueOr: + let msg = "Invalid content-topic:" & $error + error "lightpush request handling error", error = msg + return WakuLightPushResult.err( + (code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg)) + ) + + wl.autoSharding.get().getShard(parsedTopic).valueOr: + let msg = "Auto-sharding error: " & error + error "lightpush request handling error", error = msg + return WakuLightPushResult.err( + (code: LightPushErrorCode.INTERNAL_SERVER_ERROR, desc: some(msg)) + ) + + # ensure checking topic will not cause error at gossipsub level + if pubsubTopic.isEmptyOrWhitespace(): + let msg = "topic must not be empty" + error "lightpush request handling error", error = msg + return + WakuLightPushResult.err((code: LightPushErrorCode.BAD_REQUEST, desc: some(msg))) + + waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"]) + + let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex() + notice "handling lightpush request", + my_peer_id = wl.peerManager.switch.peerInfo.peerId, + peer_id = peerId, + requestId = pushRequest.requestId, + pubsubTopic = pushRequest.pubsubTopic, + msg_hash = msg_hash, + receivedTime = getNowInNanosecondTime() + + let res = (await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)).valueOr: + return err((code: error.code, desc: error.desc)) + return ok(res) + +proc handleRequest*( + wl: WakuLightPush, peerId: PeerId, buffer: seq[byte] +): Future[LightPushResponse] {.async.} = + let pushRequest = LightPushRequest.decode(buffer).valueOr: + let desc = decodeRpcFailure & ": " & $error + error "failed to push message", error = desc + let errorCode = LightPushErrorCode.BAD_REQUEST + waku_lightpush_v3_errors.inc(labelValues = [$errorCode]) + return LightPushResponse( + requestId: "N/A", # due to decode failure we don't know requestId + statusCode: errorCode, + statusDesc: some(desc), + ) + + let relayPeerCount = (await handleRequest(wl, peerId, pushRequest)).valueOr: + 
let desc = error.desc + waku_lightpush_v3_errors.inc(labelValues = [$error.code]) + error "failed to push message", error = desc + return LightPushResponse( + requestId: pushRequest.requestId, statusCode: error.code, statusDesc: desc + ) + + return LightPushResponse( + requestId: pushRequest.requestId, + statusCode: LightPushSuccessCode.SUCCESS, + statusDesc: none[string](), + relayPeerCount: some(relayPeerCount), + ) + +proc initProtocolHandler(wl: WakuLightPush) = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + var rpc: LightPushResponse + wl.requestRateLimiter.checkUsageLimit(WakuLightPushCodec, conn): + var buffer: seq[byte] + try: + buffer = await conn.readLp(DefaultMaxRpcSize) + except LPStreamError: + error "lightpush read stream failed", error = getCurrentExceptionMsg() + return + + waku_service_network_bytes.inc( + amount = buffer.len().int64, labelValues = [WakuLightPushCodec, "in"] + ) + + try: + rpc = await handleRequest(wl, conn.peerId, buffer) + except CatchableError: + error "lightpush failed handleRequest", error = getCurrentExceptionMsg() + do: + debug "lightpush request rejected due rate limit exceeded", + peerId = conn.peerId, limit = $wl.requestRateLimiter.setting + + rpc = static( + LightPushResponse( + ## We will not copy and decode RPC buffer from stream only for requestId + ## in reject case as it is comparably too expensive and opens possible + ## attack surface + requestId: "N/A", + statusCode: LightPushErrorCode.TOO_MANY_REQUESTS, + statusDesc: some(TooManyRequestsMessage), + ) + ) + + try: + await conn.writeLp(rpc.encode().buffer) + except LPStreamError: + error "lightpush write stream failed", error = getCurrentExceptionMsg() + + ## For lightpush might not worth to measure outgoing traffic as it is only + ## small response about success/failure + + wl.handler = handler + wl.codec = WakuLightPushCodec + +proc new*( + T: type WakuLightPush, + peerManager: PeerManager, + rng: ref 
rand.HmacDrbgContext, + pushHandler: PushMessageHandler, + autoSharding: Option[Sharding], + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): T = + let wl = WakuLightPush( + rng: rng, + peerManager: peerManager, + pushHandler: pushHandler, + requestRateLimiter: newRequestRateLimiter(rateLimitSetting), + autoSharding: autoSharding, + ) + wl.initProtocolHandler() + setServiceLimitMetric(WakuLightpushCodec, rateLimitSetting) + return wl diff --git a/third-party/nwaku/waku/waku_lightpush/protocol_metrics.nim b/third-party/nwaku/waku/waku_lightpush/protocol_metrics.nim new file mode 100644 index 0000000..c662f5b --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/protocol_metrics.nim @@ -0,0 +1,19 @@ +{.push raises: [].} + +import metrics + +declarePublicCounter waku_lightpush_v3_errors, + "number of lightpush protocol errors", ["type"] +declarePublicCounter waku_lightpush_v3_messages, + "number of lightpush messages received", ["type"] + +# Error types (metric label values) +const + dialFailure* = "dial_failure" + decodeRpcFailure* = "decode_rpc_failure" + peerNotFoundFailure* = "peer_not_found_failure" + emptyRequestBodyFailure* = "empty_request_body_failure" + emptyResponseBodyFailure* = "empty_response_body_failure" + messagePushFailure* = "message_push_failure" + requestLimitReachedFailure* = "request_limit_reached_failure" + notPublishedAnyPeer* = "not_published_to_any_peer" diff --git a/third-party/nwaku/waku/waku_lightpush/rpc.nim b/third-party/nwaku/waku/waku_lightpush/rpc.nim new file mode 100644 index 0000000..f19563b --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/rpc.nim @@ -0,0 +1,20 @@ +{.push raises: [].} + +import std/options +import ../waku_core + +type LightPushStatusCode* = distinct uint32 +proc `==`*(a, b: LightPushStatusCode): bool {.borrow.} +proc `$`*(code: LightPushStatusCode): string {.borrow.} + +type + LightpushRequest* = object + requestId*: string + pubSubTopic*: Option[PubsubTopic] + message*: 
WakuMessage + + LightPushResponse* = object + requestId*: string + statusCode*: LightPushStatusCode + statusDesc*: Option[string] + relayPeerCount*: Option[uint32] diff --git a/third-party/nwaku/waku/waku_lightpush/rpc_codec.nim b/third-party/nwaku/waku/waku_lightpush/rpc_codec.nim new file mode 100644 index 0000000..0a4f934 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/rpc_codec.nim @@ -0,0 +1,81 @@ +{.push raises: [].} + +import std/options +import ../common/protobuf, ../waku_core, ./rpc + +const DefaultMaxRpcSize* = -1 + +proc encode*(rpc: LightpushRequest): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(20, rpc.pubSubTopic) + pb.write3(21, rpc.message.encode()) + pb.finish3() + + return pb + +proc decode*(T: type LightpushRequest, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = LightpushRequest() + + var requestId: string + if not ?pb.getField(1, requestId): + return err(ProtobufError.missingRequiredField("request_id")) + else: + rpc.requestId = requestId + + var pubSubTopic: PubsubTopic + if not ?pb.getField(20, pubSubTopic): + rpc.pubSubTopic = none(PubsubTopic) + else: + rpc.pubSubTopic = some(pubSubTopic) + + var messageBuf: seq[byte] + if not ?pb.getField(21, messageBuf): + return err(ProtobufError.missingRequiredField("message")) + else: + rpc.message = ?WakuMessage.decode(messageBuf) + + return ok(rpc) + +proc encode*(rpc: LightPushResponse): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(10, rpc.statusCode.uint32) + pb.write3(11, rpc.statusDesc) + pb.write3(12, rpc.relayPeerCount) + pb.finish3() + + return pb + +proc decode*(T: type LightPushResponse, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = LightPushResponse() + + var requestId: string + if not ?pb.getField(1, requestId): + return err(ProtobufError.missingRequiredField("request_id")) + else: + rpc.requestId = requestId + + var 
statusCode: uint32 + if not ?pb.getField(10, statusCode): + return err(ProtobufError.missingRequiredField("status_code")) + else: + rpc.statusCode = statusCode.LightPushStatusCode + + var statusDesc: string + if not ?pb.getField(11, statusDesc): + rpc.statusDesc = none(string) + else: + rpc.statusDesc = some(statusDesc) + + var relayPeerCount: uint32 + if not ?pb.getField(12, relayPeerCount): + rpc.relayPeerCount = none(uint32) + else: + rpc.relayPeerCount = some(relayPeerCount) + + return ok(rpc) diff --git a/third-party/nwaku/waku/waku_lightpush/self_req_handler.nim b/third-party/nwaku/waku/waku_lightpush/self_req_handler.nim new file mode 100644 index 0000000..06a0d37 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush/self_req_handler.nim @@ -0,0 +1,36 @@ +{.push raises: [].} + +## Notice that the REST /lightpush requests normally assume that the node +## is acting as a lightpush-client that will trigger the service provider node +## to relay the message. +## In this module, we allow that a lightpush service node (full node) can be +## triggered directly through the REST /lightpush endpoint. +## The typical use case for that is when using `nwaku-compose`, +## which spawn a full service Waku node +## that could be used also as a lightpush client, helping testing and development. + +import results, chronos, std/options, metrics +import ../waku_core, ./protocol, ./common, ./rpc, ./rpc_codec, ../utils/requests + +proc handleSelfLightPushRequest*( + self: WakuLightPush, pubSubTopic: Option[PubsubTopic], message: WakuMessage +): Future[WakuLightPushResult] {.async.} = + ## Handles the lightpush requests made by the node to itself. + ## Normally used in REST-lightpush requests + ## On success, returns the msg_hash of the published message. + + try: + # provide self peerId as now this node is used directly, thus there is no light client sender peer. 
+ let selfPeerId = self.peerManager.switch.peerInfo.peerId + + let req = LightpushRequest( + requestId: generateRequestId(self.rng), pubSubTopic: pubSubTopic, message: message + ) + + let response = await self.handleRequest(selfPeerId, req.encode().buffer) + + return response.toPushResult() + except Exception: + return lightPushResultInternalError( + "exception in handleSelfLightPushRequest: " & getCurrentExceptionMsg() + ) diff --git a/third-party/nwaku/waku/waku_lightpush_legacy.nim b/third-party/nwaku/waku/waku_lightpush_legacy.nim new file mode 100644 index 0000000..f1b25cb --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy.nim @@ -0,0 +1,5 @@ +import + ./waku_lightpush_legacy/ + [protocol, common, rpc, rpc_codec, callbacks, self_req_handler] + +export protocol, common, rpc, rpc_codec, callbacks, self_req_handler diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/README.md b/third-party/nwaku/waku/waku_lightpush_legacy/README.md new file mode 100644 index 0000000..d885173 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/README.md @@ -0,0 +1 @@ +# Waku Light Push diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/callbacks.nim b/third-party/nwaku/waku/waku_lightpush_legacy/callbacks.nim new file mode 100644 index 0000000..f5a79ea --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/callbacks.nim @@ -0,0 +1,62 @@ +{.push raises: [].} + +import + ../waku_core, + ../waku_relay, + ./common, + ./protocol_metrics, + ../waku_rln_relay, + ../waku_rln_relay/protocol_types + +import std/times, libp2p/peerid, stew/byteutils + +proc checkAndGenerateRLNProof*( + rlnPeer: Option[WakuRLNRelay], message: WakuMessage +): Result[WakuMessage, string] = + # check if the message already has RLN proof + if message.proof.len > 0: + return ok(message) + + if rlnPeer.isNone(): + notice "Publishing message without RLN proof" + return ok(message) + # generate and append RLN proof + let + time = getTime().toUnix() + senderEpochTime = 
float64(time) + var msgWithProof = message + rlnPeer.get().appendRLNProof(msgWithProof, senderEpochTime).isOkOr: + return err(error) + return ok(msgWithProof) + +proc getNilPushHandler*(): PushMessageHandler = + return proc( + peer: PeerId, pubsubTopic: string, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + return err("no waku relay found") + +proc getRelayPushHandler*( + wakuRelay: WakuRelay, rlnPeer: Option[WakuRLNRelay] = none[WakuRLNRelay]() +): PushMessageHandler = + return proc( + peer: PeerId, pubsubTopic: string, message: WakuMessage + ): Future[WakuLightPushResult[void]] {.async.} = + # append RLN proof + let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message) + if msgWithProof.isErr(): + return err(msgWithProof.error) + + (await wakuRelay.validateMessage(pubSubTopic, msgWithProof.value)).isOkOr: + return err(error) + + let publishResult = await wakuRelay.publish(pubsubTopic, msgWithProof.value) + if publishResult.isErr(): + ## Agreed change expected to the lightpush protocol to better handle such case. https://github.com/waku-org/pm/issues/93 + let msgHash = computeMessageHash(pubsubTopic, message).to0xHex() + notice "Lightpush request has not been published to any peers", + msg_hash = msgHash, reason = $publishResult.error + # for legacy lightpush we do not detail the reason towards clients. All error during publish result in not-published-to-any-peer + # this let client of the legacy protocol to react as they did so far. 
+ return err(protocol_metrics.notPublishedAnyPeer) + + return ok() diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/client.nim b/third-party/nwaku/waku/waku_lightpush_legacy/client.nim new file mode 100644 index 0000000..503cbe1 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/client.nim @@ -0,0 +1,116 @@ +{.push raises: [].} + +import std/options, results, chronicles, chronos, metrics, bearssl/rand, stew/byteutils +import libp2p/peerid +import + ../waku_core/peers, + ../node/peer_manager, + ../node/delivery_monitor/publish_observer, + ../utils/requests, + ../waku_core, + ./common, + ./protocol_metrics, + ./rpc, + ./rpc_codec + +logScope: + topics = "waku lightpush legacy client" + +type WakuLegacyLightPushClient* = ref object + peerManager*: PeerManager + rng*: ref rand.HmacDrbgContext + publishObservers: seq[PublishObserver] + +proc new*( + T: type WakuLegacyLightPushClient, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, +): T = + WakuLegacyLightPushClient(peerManager: peerManager, rng: rng) + +proc addPublishObserver*(wl: WakuLegacyLightPushClient, obs: PublishObserver) = + wl.publishObservers.add(obs) + +proc sendPushRequest( + wl: WakuLegacyLightPushClient, req: PushRequest, peer: PeerId | RemotePeerInfo +): Future[WakuLightPushResult[void]] {.async, gcsafe.} = + let connOpt = await wl.peerManager.dialPeer(peer, WakuLegacyLightPushCodec) + if connOpt.isNone(): + waku_lightpush_errors.inc(labelValues = [dialFailure]) + return err(dialFailure) + let connection = connOpt.get() + + let rpc = PushRPC(requestId: generateRequestId(wl.rng), request: some(req)) + await connection.writeLP(rpc.encode().buffer) + + var buffer: seq[byte] + try: + buffer = await connection.readLp(DefaultMaxRpcSize.int) + except LPStreamRemoteClosedError: + return err("Exception reading: " & getCurrentExceptionMsg()) + + let decodeRespRes = PushRPC.decode(buffer) + if decodeRespRes.isErr(): + error "failed to decode response" + 
waku_lightpush_errors.inc(labelValues = [decodeRpcFailure]) + return err(decodeRpcFailure) + + let pushResponseRes = decodeRespRes.get() + if pushResponseRes.response.isNone(): + waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure]) + return err(emptyResponseBodyFailure) + + let response = pushResponseRes.response.get() + if not response.isSuccess: + if response.info.isSome(): + return err(response.info.get()) + else: + return err("unknown failure") + + return ok() + +proc publish*( + wl: WakuLegacyLightPushClient, + pubSubTopic: PubsubTopic, + wakuMessage: WakuMessage, + peer: RemotePeerInfo, +): Future[WakuLightPushResult[string]] {.async, gcsafe.} = + ## On success, returns the msg_hash of the published message + + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + + let msg_hash_hex_str = computeMessageHash(pubsubTopic, message).to0xHex() + let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) + ?await wl.sendPushRequest(pushRequest, peer) + + for obs in wl.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + notice "publishing message with lightpush", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + target_peer_id = peer.peerId, + msg_hash = msg_hash_hex_str + + return ok(msg_hash_hex_str) + +proc publishToAny*( + wl: WakuLegacyLightPushClient, pubSubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult[void]] {.async, gcsafe.} = + ## This proc is similar to the publish one but in this case + ## we don't specify a particular peer and instead we get it from peer manager + + info "publishToAny", msg_hash = computeMessageHash(pubsubTopic, message).to0xHex + + let peer = wl.peerManager.selectPeer(WakuLegacyLightPushCodec).valueOr: + return err("could not retrieve a peer supporting WakuLegacyLightPushCodec") + + let pushRequest = PushRequest(pubSubTopic: pubSubTopic, message: message) + ?await wl.sendPushRequest(pushRequest, 
peer) + + for obs in wl.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + return ok() diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/common.nim b/third-party/nwaku/waku/waku_lightpush_legacy/common.nim new file mode 100644 index 0000000..fcdf181 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/common.nim @@ -0,0 +1,15 @@ +{.push raises: [].} + +import results, chronos, libp2p/peerid +import ../waku_core + +from ../waku_core/codecs import WakuLegacyLightPushCodec +export WakuLegacyLightPushCodec + +type WakuLightPushResult*[T] = Result[T, string] + +type PushMessageHandler* = proc( + peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult[void]] {.async.} + +const TooManyRequestsMessage* = "TOO_MANY_REQUESTS" diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/protocol.nim b/third-party/nwaku/waku/waku_lightpush_legacy/protocol.nim new file mode 100644 index 0000000..75beded --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/protocol.nim @@ -0,0 +1,127 @@ +{.push raises: [].} + +import std/options, results, stew/byteutils, chronicles, chronos, metrics, bearssl/rand +import + ../node/peer_manager/peer_manager, + ../waku_core, + ./common, + ./rpc, + ./rpc_codec, + ./protocol_metrics, + ../common/rate_limit/request_limiter + +logScope: + topics = "waku lightpush legacy" + +type WakuLegacyLightPush* = ref object of LPProtocol + rng*: ref rand.HmacDrbgContext + peerManager*: PeerManager + pushHandler*: PushMessageHandler + requestRateLimiter*: RequestRateLimiter + +proc handleRequest*( + wl: WakuLegacyLightPush, peerId: PeerId, buffer: seq[byte] +): Future[PushRPC] {.async.} = + let reqDecodeRes = PushRPC.decode(buffer) + var + isSuccess = false + pushResponseInfo = "" + requestId = "" + + if reqDecodeRes.isErr(): + pushResponseInfo = decodeRpcFailure & ": " & $reqDecodeRes.error + elif reqDecodeRes.get().request.isNone(): + pushResponseInfo = emptyRequestBodyFailure + 
else: + let pushRpcRequest = reqDecodeRes.get() + + requestId = pushRpcRequest.requestId + + let + request = pushRpcRequest.request + + pubSubTopic = request.get().pubSubTopic + message = request.get().message + let msg_hash = pubsubTopic.computeMessageHash(message).to0xHex() + waku_lightpush_messages.inc(labelValues = ["PushRequest"]) + + notice "handling legacy lightpush request", + my_peer_id = wl.peerManager.switch.peerInfo.peerId, + peer_id = peerId, + requestId = requestId, + pubsubTopic = pubsubTopic, + msg_hash = msg_hash, + receivedTime = getNowInNanosecondTime() + + let handleRes = await wl.pushHandler(peerId, pubsubTopic, message) + isSuccess = handleRes.isOk() + pushResponseInfo = (if isSuccess: "OK" else: handleRes.error) + + if not isSuccess: + waku_lightpush_errors.inc(labelValues = [pushResponseInfo]) + error "failed to push message", error = pushResponseInfo + let response = PushResponse(isSuccess: isSuccess, info: some(pushResponseInfo)) + let rpc = PushRPC(requestId: requestId, response: some(response)) + return rpc + +proc initProtocolHandler(wl: WakuLegacyLightPush) = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + var rpc: PushRPC + wl.requestRateLimiter.checkUsageLimit(WakuLegacyLightPushCodec, conn): + var buffer: seq[byte] + try: + buffer = await conn.readLp(DefaultMaxRpcSize) + except LPStreamError: + error "lightpush legacy read stream failed", error = getCurrentExceptionMsg() + return + + waku_service_network_bytes.inc( + amount = buffer.len().int64, labelValues = [WakuLegacyLightPushCodec, "in"] + ) + + try: + rpc = await handleRequest(wl, conn.peerId, buffer) + except CatchableError: + error "lightpush legacy handleRequest failed", error = getCurrentExceptionMsg() + do: + debug "lightpush request rejected due rate limit exceeded", + peerId = conn.peerId, limit = $wl.requestRateLimiter.setting + + rpc = static( + PushRPC( + ## We will not copy and decode RPC buffer from stream only for requestId 
+ ## in reject case as it is comparably too expensive and opens possible + ## attack surface + requestId: "N/A", + response: + some(PushResponse(isSuccess: false, info: some(TooManyRequestsMessage))), + ) + ) + + try: + await conn.writeLp(rpc.encode().buffer) + except LPStreamError: + error "lightpush legacy write stream failed", error = getCurrentExceptionMsg() + + ## For lightpush might not worth to measure outgoing trafic as it is only + ## small respones about success/failure + + wl.handler = handler + wl.codec = WakuLegacyLightPushCodec + +proc new*( + T: type WakuLegacyLightPush, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + pushHandler: PushMessageHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): T = + let wl = WakuLegacyLightPush( + rng: rng, + peerManager: peerManager, + pushHandler: pushHandler, + requestRateLimiter: newRequestRateLimiter(rateLimitSetting), + ) + wl.initProtocolHandler() + setServiceLimitMetric(WakuLegacyLightPushCodec, rateLimitSetting) + return wl diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/protocol_metrics.nim b/third-party/nwaku/waku/waku_lightpush_legacy/protocol_metrics.nim new file mode 100644 index 0000000..61b47c1 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/protocol_metrics.nim @@ -0,0 +1,19 @@ +{.push raises: [].} + +import metrics + +declarePublicCounter waku_lightpush_errors, + "number of lightpush protocol errors", ["type"] +declarePublicCounter waku_lightpush_messages, + "number of lightpush messages received", ["type"] + +# Error types (metric label values) +const + dialFailure* = "dial_failure" + decodeRpcFailure* = "decode_rpc_failure" + peerNotFoundFailure* = "peer_not_found_failure" + emptyRequestBodyFailure* = "empty_request_body_failure" + emptyResponseBodyFailure* = "empty_response_body_failure" + messagePushFailure* = "message_push_failure" + requestLimitReachedFailure* = "request_limit_reached_failure" + notPublishedAnyPeer* = 
"not_published_to_any_peer" diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/rpc.nim b/third-party/nwaku/waku/waku_lightpush_legacy/rpc.nim new file mode 100644 index 0000000..33ba3f5 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/rpc.nim @@ -0,0 +1,18 @@ +{.push raises: [].} + +import std/options +import ../waku_core + +type + PushRequest* = object + pubSubTopic*: string + message*: WakuMessage + + PushResponse* = object + isSuccess*: bool + info*: Option[string] + + PushRPC* = object + requestId*: string + request*: Option[PushRequest] + response*: Option[PushResponse] diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/rpc_codec.nim b/third-party/nwaku/waku/waku_lightpush_legacy/rpc_codec.nim new file mode 100644 index 0000000..25d2bd2 --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/rpc_codec.nim @@ -0,0 +1,96 @@ +{.push raises: [].} + +import std/options +import ../common/protobuf, ../waku_core, ./rpc + +const DefaultMaxRpcSize* = -1 + +proc encode*(rpc: PushRequest): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.pubSubTopic) + pb.write3(2, rpc.message.encode()) + pb.finish3() + + pb + +proc decode*(T: type PushRequest, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = PushRequest() + + var pubSubTopic: PubsubTopic + if not ?pb.getField(1, pubSubTopic): + return err(ProtobufError.missingRequiredField("pubsub_topic")) + else: + rpc.pubSubTopic = pubSubTopic + + var messageBuf: seq[byte] + if not ?pb.getField(2, messageBuf): + return err(ProtobufError.missingRequiredField("message")) + else: + rpc.message = ?WakuMessage.decode(messageBuf) + + ok(rpc) + +proc encode*(rpc: PushResponse): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, uint64(rpc.isSuccess)) + pb.write3(2, rpc.info) + pb.finish3() + + pb + +proc decode*(T: type PushResponse, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = PushResponse() + + var 
isSuccess: uint64 + if not ?pb.getField(1, isSuccess): + return err(ProtobufError.missingRequiredField("is_success")) + else: + rpc.isSuccess = bool(isSuccess) + + var info: string + if not ?pb.getField(2, info): + rpc.info = none(string) + else: + rpc.info = some(info) + + ok(rpc) + +proc encode*(rpc: PushRPC): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(2, rpc.request.map(encode)) + pb.write3(3, rpc.response.map(encode)) + pb.finish3() + + pb + +proc decode*(T: type PushRPC, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = PushRPC() + + var requestId: string + if not ?pb.getField(1, requestId): + return err(ProtobufError.missingRequiredField("request_id")) + else: + rpc.requestId = requestId + + var requestBuffer: seq[byte] + if not ?pb.getField(2, requestBuffer): + rpc.request = none(PushRequest) + else: + let request = ?PushRequest.decode(requestBuffer) + rpc.request = some(request) + + var responseBuffer: seq[byte] + if not ?pb.getField(3, responseBuffer): + rpc.response = none(PushResponse) + else: + let response = ?PushResponse.decode(responseBuffer) + rpc.response = some(response) + + ok(rpc) diff --git a/third-party/nwaku/waku/waku_lightpush_legacy/self_req_handler.nim b/third-party/nwaku/waku/waku_lightpush_legacy/self_req_handler.nim new file mode 100644 index 0000000..3c5d09a --- /dev/null +++ b/third-party/nwaku/waku/waku_lightpush_legacy/self_req_handler.nim @@ -0,0 +1,59 @@ +{.push raises: [].} + +## Notice that the REST /lightpush requests normally assume that the node +## is acting as a lightpush-client that will trigger the service provider node +## to relay the message. +## In this module, we allow that a lightpush service node (full node) can be +## triggered directly through the REST /lightpush endpoint. 
+## The typical use case for that is when using `nwaku-compose`, +## which spawn a full service Waku node +## that could be used also as a lightpush client, helping testing and development. + +import results, chronos, chronicles, std/options, metrics, stew/byteutils +import + ../waku_core, + ./protocol, + ./common, + ./rpc, + ./rpc_codec, + ./protocol_metrics, + ../utils/requests + +proc handleSelfLightPushRequest*( + self: WakuLegacyLightPush, pubSubTopic: PubsubTopic, message: WakuMessage +): Future[WakuLightPushResult[string]] {.async.} = + ## Handles the lightpush requests made by the node to itself. + ## Normally used in REST-lightpush requests + ## On success, returns the msg_hash of the published message. + + try: + # provide self peerId as now this node is used directly, thus there is no light client sender peer. + let selfPeerId = self.peerManager.switch.peerInfo.peerId + + let req = PushRequest(pubSubTopic: pubSubTopic, message: message) + let rpc = PushRPC(requestId: generateRequestId(self.rng), request: some(req)) + + let respRpc = await self.handleRequest(selfPeerId, rpc.encode().buffer) + + if respRpc.response.isNone(): + waku_lightpush_errors.inc(labelValues = [emptyResponseBodyFailure]) + return err(emptyResponseBodyFailure) + + let response = respRpc.response.get() + if not response.isSuccess: + if response.info.isSome(): + return err(response.info.get()) + else: + return err("unknown failure") + + let msg_hash_hex_str = computeMessageHash(pubSubTopic, message).to0xHex() + + notice "publishing message with self hosted lightpush", + pubsubTopic = pubsubTopic, + contentTopic = message.contentTopic, + self_peer_id = selfPeerId, + msg_hash = msg_hash_hex_str + + return ok(msg_hash_hex_str) + except Exception: + return err("exception in handleSelfLightPushRequest: " & getCurrentExceptionMsg()) diff --git a/third-party/nwaku/waku/waku_metadata.nim b/third-party/nwaku/waku/waku_metadata.nim new file mode 100644 index 0000000..96380ba --- /dev/null +++ 
b/third-party/nwaku/waku/waku_metadata.nim @@ -0,0 +1,5 @@ +{.push raises: [].} + +import ./waku_metadata/protocol + +export protocol diff --git a/third-party/nwaku/waku/waku_metadata/protocol.nim b/third-party/nwaku/waku/waku_metadata/protocol.nim new file mode 100644 index 0000000..01aaf02 --- /dev/null +++ b/third-party/nwaku/waku/waku_metadata/protocol.nim @@ -0,0 +1,119 @@ +{.push raises: [].} + +import + std/[options, sequtils, sets], + results, + chronicles, + chronos, + metrics, + libp2p/protocols/protocol, + libp2p/stream/connection, + libp2p/crypto/crypto, + eth/p2p/discoveryv5/enr +import ../common/nimchronos, ../waku_core, ./rpc, ../common/callbacks + +from ../waku_core/codecs import WakuMetadataCodec +export WakuMetadataCodec + +logScope: + topics = "waku metadata" + +const RpcResponseMaxBytes* = 1024 + +type WakuMetadata* = ref object of LPProtocol + clusterId*: uint32 + getShards: GetShards + +proc respond( + m: WakuMetadata, conn: Connection +): Future[Result[void, string]] {.async, gcsafe.} = + let response = WakuMetadataResponse( + clusterId: some(m.clusterId.uint32), shards: m.getShards().mapIt(it.uint32) + ) + + let res = catch: + await conn.writeLP(response.encode().buffer) + if res.isErr(): + return err(res.error.msg) + + return ok() + +proc request*( + m: WakuMetadata, conn: Connection +): Future[Result[WakuMetadataResponse, string]] {.async, gcsafe.} = + let request = WakuMetadataRequest( + clusterId: some(m.clusterId), shards: m.getShards().mapIt(it.uint32) + ) + + let writeRes = catch: + await conn.writeLP(request.encode().buffer) + let readRes = catch: + await conn.readLp(RpcResponseMaxBytes) + + # close no matter what + let closeRes = catch: + await conn.closeWithEof() + if closeRes.isErr(): + return err("close failed: " & closeRes.error.msg) + + if writeRes.isErr(): + return err("write failed: " & writeRes.error.msg) + + let buffer = + if readRes.isErr(): + return err("read failed: " & readRes.error.msg) + else: + readRes.get() + + let 
response = WakuMetadataResponse.decode(buffer).valueOr: + return err("decode failed: " & $error) + + return ok(response) + +proc initProtocolHandler(m: WakuMetadata) = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + defer: + # close, no data is expected + await conn.closeWithEof() + + let res = catch: + await conn.readLp(RpcResponseMaxBytes) + let buffer = res.valueOr: + error "Connection reading error", error = error.msg + return + + let response = WakuMetadataResponse.decode(buffer).valueOr: + error "Response decoding error", error = error + return + + debug "Received WakuMetadata request", + remoteClusterId = response.clusterId, + remoteShards = response.shards, + localClusterId = m.clusterId, + localShards = m.getShards(), + peer = conn.peerId + + try: + discard await m.respond(conn) + except CatchableError: + error "Failed to respond to WakuMetadata request", + error = getCurrentExceptionMsg() + + m.handler = handler + m.codec = WakuMetadataCodec + +proc new*(T: type WakuMetadata, clusterId: uint32, getShards: GetShards): T = + let wm = WakuMetadata(clusterId: clusterId, getShards: getShards) + + wm.initProtocolHandler() + + info "Created WakuMetadata protocol", + clusterId = wm.clusterId, shards = wm.getShards() + + return wm + +proc start*(wm: WakuMetadata) = + wm.started = true + +proc stop*(wm: WakuMetadata) = + wm.started = false diff --git a/third-party/nwaku/waku/waku_metadata/rpc.nim b/third-party/nwaku/waku/waku_metadata/rpc.nim new file mode 100644 index 0000000..fcb11e5 --- /dev/null +++ b/third-party/nwaku/waku/waku_metadata/rpc.nim @@ -0,0 +1,82 @@ +{.push raises: [].} + +import std/options + +import ../common/protobuf + +type WakuMetadataRequest* = object + clusterId*: Option[uint32] + shards*: seq[uint32] + +type WakuMetadataResponse* = object + clusterId*: Option[uint32] + shards*: seq[uint32] + +proc encode*(rpc: WakuMetadataRequest): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, 
rpc.clusterId) + for shard in rpc.shards: + pb.write3(2, shard) # deprecated + pb.writePacked(3, rpc.shards) + pb.finish3() + + pb + +proc decode*(T: type WakuMetadataRequest, buffer: seq[byte]): ProtoResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = WakuMetadataRequest() + + var clusterId: uint64 + if not ?pb.getField(1, clusterId): + rpc.clusterId = none(uint32) + else: + rpc.clusterId = some(clusterId.uint32) + + var shards: seq[uint64] + if ?pb.getPackedRepeatedField(3, shards): + for shard in shards: + rpc.shards.add(shard.uint32) + elif ?pb.getPackedRepeatedField(2, shards): + for shard in shards: + rpc.shards.add(shard.uint32) + elif ?pb.getRepeatedField(2, shards): + for shard in shards: + rpc.shards.add(shard.uint32) + + ok(rpc) + +proc encode*(rpc: WakuMetadataResponse): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.clusterId) + for shard in rpc.shards: + pb.write3(2, shard) # deprecated + pb.writePacked(3, rpc.shards) + pb.finish3() + + pb + +proc decode*(T: type WakuMetadataResponse, buffer: seq[byte]): ProtoResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = WakuMetadataResponse() + + var clusterId: uint64 + if not ?pb.getField(1, clusterId): + rpc.clusterId = none(uint32) + else: + rpc.clusterId = some(clusterId.uint32) + + var shards: seq[uint64] + + if ?pb.getPackedRepeatedField(3, shards): + for shard in shards: + rpc.shards.add(shard.uint32) + elif ?pb.getPackedRepeatedField(2, shards): + for shard in shards: + rpc.shards.add(shard.uint32) + elif ?pb.getRepeatedField(2, shards): + for shard in shards: + rpc.shards.add(shard.uint32) + + ok(rpc) diff --git a/third-party/nwaku/waku/waku_mix.nim b/third-party/nwaku/waku/waku_mix.nim new file mode 100644 index 0000000..6883059 --- /dev/null +++ b/third-party/nwaku/waku/waku_mix.nim @@ -0,0 +1,3 @@ +import ./waku_mix/protocol + +export protocol diff --git a/third-party/nwaku/waku/waku_mix/protocol.nim b/third-party/nwaku/waku/waku_mix/protocol.nim new file mode 100644 
index 0000000..d318b77 --- /dev/null +++ b/third-party/nwaku/waku/waku_mix/protocol.nim @@ -0,0 +1,177 @@ +{.push raises: [].} + +import chronicles, std/[options, tables, sequtils], chronos, results, metrics + +import + libp2p/crypto/curve25519, + mix/mix_protocol, + mix/mix_node, + mix/mix_metrics, + mix/tag_manager, + libp2p/[multiaddress, multicodec, peerid], + eth/common/keys + +import + ../node/peer_manager, + ../waku_core, + ../waku_enr/mix, + ../waku_enr, + ../node/peer_manager/waku_peer_store, + ../common/nimchronos + +logScope: + topics = "waku mix" + +type + WakuMix* = ref object of MixProtocol + peerManager*: PeerManager + clusterId: uint16 + nodePoolLoopHandle: Future[void] + + WakuMixResult*[T] = Result[T, string] + +proc mixPoolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool = + # Note that origin based(discv5) filtering is not done intentionally + # so that more mix nodes can be discovered. + if peer.enr.isNone(): + trace "peer has no ENR", peer = $peer + return false + + if cluster.isSome() and peer.enr.get().isClusterMismatched(cluster.get()): + trace "peer has mismatching cluster", peer = $peer + return false + + # Filter if mix is enabled + if not peer.enr.get().supportsCapability(Capabilities.Mix): + trace "peer doesn't support mix", peer = $peer + return false + + return true + +proc appendPeerIdToMultiaddr*(multiaddr: MultiAddress, peerId: PeerId): MultiAddress = + if multiaddr.contains(multiCodec("p2p")).get(): + return multiaddr + + var maddrStr = multiaddr.toString().valueOr: + error "Failed to convert multiaddress to string.", err = error + return multiaddr + maddrStr.add("/p2p/" & $peerId) + var cleanAddr = MultiAddress.init(maddrStr).valueOr: + error "Failed to convert string to multiaddress.", err = error + return multiaddr + return cleanAddr + +func getIPv4Multiaddr*(maddrs: seq[MultiAddress]): Option[MultiAddress] = + for multiaddr in maddrs: + trace "checking multiaddr", addr = $multiaddr + if 
multiaddr.contains(multiCodec("ip4")).get(): + trace "found ipv4 multiaddr", addr = $multiaddr + return some(multiaddr) + trace "no ipv4 multiaddr found" + return none(MultiAddress) + +proc populateMixNodePool*(mix: WakuMix) = + # populate only peers that i) are reachable ii) share cluster iii) support mix + let remotePeers = mix.peerManager.switch.peerStore.peers().filterIt( + mixPoolFilter(some(mix.clusterId), it) + ) + var mixNodes = initTable[PeerId, MixPubInfo]() + + for i in 0 ..< min(remotePeers.len, 100): + let remotePeerENR = remotePeers[i].enr.get() + let ipv4addr = getIPv4Multiaddr(remotePeers[i].addrs).valueOr: + trace "peer has no ipv4 address", peer = $remotePeers[i] + continue + let maddrWithPeerId = + toString(appendPeerIdToMultiaddr(ipv4addr, remotePeers[i].peerId)) + trace "remote peer ENR", + peerId = remotePeers[i].peerId, enr = remotePeerENR, maddr = maddrWithPeerId + + let peerMixPubKey = mixKey(remotePeerENR).get() + let mixNodePubInfo = + createMixPubInfo(maddrWithPeerId.value, intoCurve25519Key(peerMixPubKey)) + mixNodes[remotePeers[i].peerId] = mixNodePubInfo + + mix_pool_size.set(len(mixNodes)) + # set the mix node pool + mix.setNodePool(mixNodes) + trace "mix node pool updated", poolSize = mix.getNodePoolSize() + +proc startMixNodePoolMgr*(mix: WakuMix) {.async.} = + info "starting mix node pool manager" + # try more aggressively to populate the pool at startup + var attempts = 50 + # TODO: make initial pool size configurable + while mix.getNodePoolSize() < 100 and attempts > 0: + attempts -= 1 + mix.populateMixNodePool() + await sleepAsync(1.seconds) + + # TODO: make interval configurable + heartbeat "Updating mix node pool", 5.seconds: + mix.populateMixNodePool() + +#[ proc getBootStrapMixNodes*(node: WakuNode): Table[PeerId, MixPubInfo] = + var mixNodes = initTable[PeerId, MixPubInfo]() + # MixNode Multiaddrs and PublicKeys: + let bootNodesMultiaddrs = 
["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", + "/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF", + "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA", + "/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f", + "/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu", + ] + let bootNodesMixPubKeys = ["9d09ce624f76e8f606265edb9cca2b7de9b41772a6d784bddaf92ffa8fba7d2c", + "9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a", + "275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c", + "e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18", + "8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f" + ] + for index, mixNodeMultiaddr in bootNodesMultiaddrs: + let peerIdRes = getPeerIdFromMultiAddr(mixNodeMultiaddr) + if peerIdRes.isErr: + error "Failed to get peer id from multiaddress: " , error = peerIdRes.error + let peerId = peerIdRes.get() + #if (not peerID == nil) and peerID == exceptPeerID: + # continue + let mixNodePubInfo = createMixPubInfo(mixNodeMultiaddr, intoCurve25519Key(ncrutils.fromHex(bootNodesMixPubKeys[index]))) + + mixNodes[peerId] = mixNodePubInfo + info "using mix bootstrap nodes ", bootNodes = mixNodes + return mixNodes + ]# + +proc new*( + T: type WakuMix, + nodeAddr: string, + peermgr: PeerManager, + clusterId: uint16, + mixPrivKey: Curve25519Key, +): WakuMixResult[T] = + let mixPubKey = public(mixPrivKey) + info "mixPrivKey", mixPrivKey = mixPrivKey, mixPubKey = mixPubKey + + let localMixNodeInfo = initMixNodeInfo( + nodeAddr, mixPubKey, mixPrivKey, peermgr.switch.peerInfo.publicKey.skkey, + peermgr.switch.peerInfo.privateKey.skkey, + ) + + # TODO : ideally mix should not be marked ready until certain min pool of mixNodes are discovered + var m = WakuMix(peerManager: peermgr, clusterId: clusterId) + procCall MixProtocol(m).init( + 
localMixNodeInfo, initTable[PeerId, MixPubInfo](), peermgr.switch + ) + return ok(m) + +method start*(mix: WakuMix) = + mix.nodePoolLoopHandle = mix.startMixNodePoolMgr() + +method stop*(mix: WakuMix) {.async.} = + if mix.nodePoolLoopHandle.isNil(): + return + await mix.nodePoolLoopHandle.cancelAndWait() + mix.nodePoolLoopHandle = nil + +#[ proc setMixBootStrapNodes*(node: WakuNode,){.async}= + node.mix.setNodePool(node.getBootStrapMixNodes()) + ]# +# Mix Protocol diff --git a/third-party/nwaku/waku/waku_node.nim b/third-party/nwaku/waku/waku_node.nim new file mode 100644 index 0000000..74415e9 --- /dev/null +++ b/third-party/nwaku/waku/waku_node.nim @@ -0,0 +1,7 @@ +import + ./node/net_config, + ./node/waku_switch as switch, + ./node/waku_node as node, + ./node/health_monitor as health_monitor + +export net_config, switch, node, health_monitor diff --git a/third-party/nwaku/waku/waku_noise/noise.nim b/third-party/nwaku/waku/waku_noise/noise.nim new file mode 100644 index 0000000..b464b0e --- /dev/null +++ b/third-party/nwaku/waku/waku_noise/noise.nim @@ -0,0 +1,364 @@ +# Waku Noise Protocols for Waku Payload Encryption +# Noise module implementing the Noise State Objects and ChaChaPoly encryption/decryption primitives +## See spec for more details: +## https://github.com/vacp2p/rfc/tree/master/content/docs/rfcs/35 +## +## Implementation partially inspired by noise-libp2p: +## https://github.com/status-im/nim-libp2p/blob/master/libp2p/protocols/secure/noise.nim + +{.push raises: [].} + +import std/[options, strutils] +import stew/byteutils +import chronos +import chronicles +import bearssl/rand +import stew/endians2 +import nimcrypto/[sha2, hmac] + +import libp2p/utility +import libp2p/crypto/[crypto, chacha20poly1305, hkdf] +import libp2p/protocols/secure/secure + +import ./noise_types + +logScope: + topics = "waku noise" + +################################################################# + +# Noise state machine primitives + +# Overview : +# - Alice and Bob 
process (i.e. read and write, based on their role) each token appearing in a handshake pattern, consisting of pre-message and message patterns; +# - Both users initialize and update according to processed tokens a Handshake State, a Symmetric State and a Cipher State; +# - A preshared key psk is processed by calling MixKeyAndHash(psk); +# - When an ephemeral public key e is read or written, the handshake hash value h is updated by calling mixHash(e); If the handshake expects a psk, MixKey(e) is further called +# - When an encrypted static public key s or a payload message m is read, it is decrypted with decryptAndHash; +# - When a static public key s or a payload message is written, it is encrypted with encryptAndHash; +# - When any Diffie-Hellman token ee, es, se, ss is read or written, the chaining key ck is updated by calling MixKey on the computed secret; +# - If all tokens are processed, users compute two new Cipher States by calling Split; +# - The two Cipher States obtained from Split are used to encrypt/decrypt outbound/inbound messages. 
+ +################################# +# Cipher State Primitives +################################# + +# Checks if a Cipher State has an encryption key set +proc hasKey*(cs: CipherState): bool = + return (cs.k != EmptyKey) + +# Encrypts a plaintext using key material in a Noise Cipher State +# The CipherState is updated increasing the nonce (used as a counter in Noise) by one +proc encryptWithAd*( + state: var CipherState, ad, plaintext: openArray[byte] +): seq[byte] {.raises: [Defect, NoiseNonceMaxError].} = + # We raise an error if encryption is called using a Cipher State with nonce greater than MaxNonce + if state.n > NonceMax: + raise newException(NoiseNonceMaxError, "Noise max nonce value reached") + + var ciphertext: seq[byte] + + # If an encryption key is set in the Cipher state, we proceed with encryption + if state.hasKey: + # The output is the concatenation of the ciphertext and authorization tag + # We define its length accordingly + ciphertext = newSeqOfCap[byte](plaintext.len + sizeof(ChaChaPolyTag)) + + # Since ChaChaPoly encryption primitive overwrites the input with the output, + # we copy the plaintext in the output ciphertext variable and we pass it to encryption + ciphertext.add(plaintext) + + # The nonce is read from the input CipherState + # By Noise specification the nonce is 8 bytes long out of the 12 bytes supported by ChaChaPoly + var nonce: ChaChaPolyNonce + nonce[4 ..< 12] = toBytesLE(state.n) + + # We perform encryption and we store the authorization tag + var authorizationTag: ChaChaPolyTag + ChaChaPoly.encrypt(state.k, nonce, authorizationTag, ciphertext, ad) + + # We append the authorization tag to ciphertext + ciphertext.add(authorizationTag) + + # We increase the Cipher state nonce + inc state.n + # If the nonce is greater than the maximum allowed nonce, we raise an exception + if state.n > NonceMax: + raise newException(NoiseNonceMaxError, "Noise max nonce value reached") + + trace "encryptWithAd", + authorizationTag = 
byteutils.toHex(authorizationTag), + ciphertext = ciphertext, + nonce = state.n - 1 + + # Otherwise we return the input plaintext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object + else: + ciphertext = @plaintext + debug "encryptWithAd called with no encryption key set. Returning plaintext." + + return ciphertext + +# Decrypts a ciphertext using key material in a Noise Cipher State +# The CipherState is updated increasing the nonce (used as a counter in Noise) by one +proc decryptWithAd*( + state: var CipherState, ad, ciphertext: openArray[byte] +): seq[byte] {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = + # We raise an error if decryption is called using a Cipher State with nonce greater than MaxNonce + if state.n > NonceMax: + raise newException(NoiseNonceMaxError, "Noise max nonce value reached") + + var plaintext: seq[byte] + + # If an encryption key is set in the Cipher state, we proceed with decryption + if state.hasKey: + # We read the authorization tag appended at the end of a ciphertext + let inputAuthorizationTag = ciphertext.toOpenArray( + ciphertext.len - ChaChaPolyTag.len, ciphertext.high + ).intoChaChaPolyTag + + var + authorizationTag: ChaChaPolyTag + nonce: ChaChaPolyNonce + + # The nonce is read from the input CipherState + # By Noise specification the nonce is 8 bytes long out of the 12 bytes supported by ChaChaPoly + nonce[4 ..< 12] = toBytesLE(state.n) + + # Since ChaChaPoly decryption primitive overwrites the input with the output, + # we copy the ciphertext (authorization tag excluded) in the output plaintext variable and we pass it to decryption + plaintext = ciphertext[0 .. 
(ciphertext.high - ChaChaPolyTag.len)] + + ChaChaPoly.decrypt(state.k, nonce, authorizationTag, plaintext, ad) + + # We check if the input authorization tag matches the decryption authorization tag + if inputAuthorizationTag != authorizationTag: + debug "decryptWithAd failed", + plaintext = plaintext, + ciphertext = ciphertext, + inputAuthorizationTag = inputAuthorizationTag, + authorizationTag = authorizationTag + raise + newException(NoiseDecryptTagError, "decryptWithAd failed tag authentication.") + + # We increase the Cipher state nonce + inc state.n + # If the nonce is greater than the maximum allowed nonce, we raise an exception + if state.n > NonceMax: + raise newException(NoiseNonceMaxError, "Noise max nonce value reached") + + trace "decryptWithAd", + inputAuthorizationTag = inputAuthorizationTag, + authorizationTag = authorizationTag, + nonce = state.n + + # Otherwise we return the input ciphertext according to specification http://www.noiseprotocol.org/noise.html#the-cipherstate-object + else: + plaintext = @ciphertext + debug "decryptWithAd called with no encryption key set. Returning ciphertext." 
+ + return plaintext + +# Sets the nonce of a Cipher State +proc setNonce*(cs: var CipherState, nonce: uint64) = + cs.n = nonce + +# Sets the key of a Cipher State +proc setCipherStateKey*(cs: var CipherState, key: ChaChaPolyKey) = + cs.k = key + +# Generates a random Symmetric Cipher State for test purposes +proc randomCipherState*(rng: var HmacDrbgContext, nonce: uint64 = 0): CipherState = + var randomCipherState: CipherState + hmacDrbgGenerate(rng, randomCipherState.k) + setNonce(randomCipherState, nonce) + return randomCipherState + +# Gets the key of a Cipher State +proc getKey*(cs: CipherState): ChaChaPolyKey = + return cs.k + +# Gets the nonce of a Cipher State +proc getNonce*(cs: CipherState): uint64 = + return cs.n + +################################# +# Symmetric State primitives +################################# + +# Initializes a Symmetric State +proc init*(_: type[SymmetricState], hsPattern: HandshakePattern): SymmetricState = + var ss: SymmetricState + # We compute the hash of the protocol name + ss.h = hsPattern.name.hashProtocol + # We initialize the chaining key ck + ss.ck = ss.h.data.intoChaChaPolyKey + # We initialize the Cipher state + ss.cs = CipherState(k: EmptyKey) + return ss + +# MixKey as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object +# Updates a Symmetric state chaining key and symmetric state +proc mixKey*(ss: var SymmetricState, inputKeyMaterial: openArray[byte]) = + # We derive two keys using HKDF + var tempKeys: array[2, ChaChaPolyKey] + sha256.hkdf(ss.ck, inputKeyMaterial, [], tempKeys) + # We update ck and the Cipher state's key k using the output of HDKF + ss.ck = tempKeys[0] + ss.cs = CipherState(k: tempKeys[1]) + trace "mixKey", ck = ss.ck, k = ss.cs.k + +# MixHash as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object +# Hashes data into a Symmetric State's handshake hash value h +proc mixHash*(ss: var SymmetricState, data: openArray[byte]) = + # 
We prepare the hash context + var ctx: sha256 + ctx.init() + # We add the previous handshake hash + ctx.update(ss.h.data) + # We append the input data + ctx.update(data) + # We hash and store the result in the Symmetric State's handshake hash value + ss.h = ctx.finish() + trace "mixHash", hash = ss.h.data + +# mixKeyAndHash as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object +# Combines MixKey and MixHash +proc mixKeyAndHash*( + ss: var SymmetricState, inputKeyMaterial: openArray[byte] +) {.used.} = + var tempKeys: array[3, ChaChaPolyKey] + # Derives 3 keys using HKDF, the chaining key and the input key material + sha256.hkdf(ss.ck, inputKeyMaterial, [], tempKeys) + # Sets the chaining key + ss.ck = tempKeys[0] + # Updates the handshake hash value + ss.mixHash(tempKeys[1]) + # Updates the Cipher state's key + # Note for later support of 512 bits hash functions: "If HASHLEN is 64, then truncates tempKeys[2] to 32 bytes." + ss.cs = CipherState(k: tempKeys[2]) + +# EncryptAndHash as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object +# Combines encryptWithAd and mixHash +# Note that by setting extraAd, it is possible to pass extra additional data that will be concatenated to the ad specified by Noise (can be used to authenticate messageNametag) +proc encryptAndHash*( + ss: var SymmetricState, plaintext: openArray[byte], extraAd: openArray[byte] = [] +): seq[byte] {.raises: [Defect, NoiseNonceMaxError].} = + # The output ciphertext + var ciphertext: seq[byte] + # The additional data + let ad = @(ss.h.data) & @(extraAd) + # Note that if an encryption key is not set yet in the Cipher state, ciphertext will be equal to plaintex + ciphertext = ss.cs.encryptWithAd(ad, plaintext) + # We call mixHash over the result + ss.mixHash(ciphertext) + return ciphertext + +# DecryptAndHash as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object +# Combines decryptWithAd 
and mixHash +proc decryptAndHash*( + ss: var SymmetricState, ciphertext: openArray[byte], extraAd: openArray[byte] = [] +): seq[byte] {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = + # The output plaintext + var plaintext: seq[byte] + # The additional data + let ad = @(ss.h.data) & @(extraAd) + # Note that if an encryption key is not set yet in the Cipher state, plaintext will be equal to ciphertext + plaintext = ss.cs.decryptWithAd(ad, ciphertext) + # According to specification, the ciphertext enters mixHash (and not the plaintext) + ss.mixHash(ciphertext) + return plaintext + +# Split as per Noise specification http://www.noiseprotocol.org/noise.html#the-symmetricstate-object +# Once a handshake is complete, returns two Cipher States to encrypt/decrypt outbound/inbound messages +proc split*(ss: var SymmetricState): tuple[cs1, cs2: CipherState] = + # Derives 2 keys using HKDF and the chaining key + var tempKeys: array[2, ChaChaPolyKey] + sha256.hkdf(ss.ck, [], [], tempKeys) + # Returns a tuple of two Cipher States initialized with the derived keys + return (CipherState(k: tempKeys[0]), CipherState(k: tempKeys[1])) + +# Gets the chaining key field of a Symmetric State +proc getChainingKey*(ss: SymmetricState): ChaChaPolyKey = + return ss.ck + +# Gets the handshake hash field of a Symmetric State +proc getHandshakeHash*(ss: SymmetricState): MDigest[256] = + return ss.h + +# Gets the Cipher State field of a Symmetric State +proc getCipherState*(ss: SymmetricState): CipherState = + return ss.cs + +################################# +# Handshake State primitives +################################# + +# Initializes a Handshake State +proc init*( + _: type[HandshakeState], hsPattern: HandshakePattern, psk: seq[byte] = @[] +): HandshakeState = + # The output Handshake State + var hs: HandshakeState + # By default the Handshake State initiator flag is set to false + # Will be set to true when the user associated to the handshake state starts an handshake + 
hs.initiator = false + # We copy the information on the handshake pattern for which the state is initialized (protocol name, handshake pattern, psk) + hs.handshakePattern = hsPattern + hs.psk = psk + # We initialize the Symmetric State + hs.ss = SymmetricState.init(hsPattern) + return hs + +################################################################# + +################################# +# ChaChaPoly Symmetric Cipher +################################# + +# ChaChaPoly encryption +# It takes a Cipher State (with key, nonce, and associated data) and encrypts a plaintext +# The cipher state is not changed +proc encrypt*( + state: ChaChaPolyCipherState, plaintext: openArray[byte] +): ChaChaPolyCiphertext {.noinit, raises: [Defect, NoiseEmptyChaChaPolyInput].} = + # If plaintext is empty, we raise an error + if plaintext == @[]: + raise newException(NoiseEmptyChaChaPolyInput, "Tried to encrypt empty plaintext") + var ciphertext: ChaChaPolyCiphertext + # Since ChaChaPoly's library "encrypt" primitive directly changes the input plaintext to the ciphertext, + # we copy the plaintext into the ciphertext variable and we pass the latter to encrypt + ciphertext.data.add plaintext + # TODO: add padding + # ChaChaPoly.encrypt takes as input: the key (k), the nonce (nonce), a data structure for storing the computed authorization tag (tag), + # the plaintext (overwritten to ciphertext) (data), the associated data (ad) + ChaChaPoly.encrypt(state.k, state.nonce, ciphertext.tag, ciphertext.data, state.ad) + return ciphertext + +# ChaChaPoly decryption +# It takes a Cipher State (with key, nonce, and associated data) and decrypts a ciphertext +# The cipher state is not changed +proc decrypt*( + state: ChaChaPolyCipherState, ciphertext: ChaChaPolyCiphertext +): seq[byte] {.raises: [Defect, NoiseEmptyChaChaPolyInput, NoiseDecryptTagError].} = + # If ciphertext is empty, we raise an error + if ciphertext.data == @[]: + raise newException(NoiseEmptyChaChaPolyInput, "Tried to decrypt 
empty ciphertext") + var + # The input authorization tag + tagIn = ciphertext.tag + # The authorization tag computed during decryption + tagOut: ChaChaPolyTag + # Since ChaChaPoly's library "decrypt" primitive directly changes the input ciphertext to the plaintext, + # we copy the ciphertext into the plaintext variable and we pass the latter to decrypt + var plaintext = ciphertext.data + # ChaChaPoly.decrypt takes as input: the key (k), the nonce (nonce), a data structure for storing the computed authorization tag (tag), + # the ciphertext (overwritten to plaintext) (data), the associated data (ad) + ChaChaPoly.decrypt(state.k, state.nonce, tagOut, plaintext, state.ad) + # TODO: add unpadding + trace "decrypt", tagIn = tagIn, tagOut = tagOut, nonce = state.nonce + # We check if the authorization tag computed while decrypting is the same as the input tag + if tagIn != tagOut: + debug "decrypt failed", plaintext = shortLog(plaintext) + raise newException(NoiseDecryptTagError, "decrypt tag authentication failed.") + return plaintext diff --git a/third-party/nwaku/waku/waku_noise/noise_handshake_processing.nim b/third-party/nwaku/waku/waku_noise/noise_handshake_processing.nim new file mode 100644 index 0000000..3ee518a --- /dev/null +++ b/third-party/nwaku/waku/waku_noise/noise_handshake_processing.nim @@ -0,0 +1,669 @@ +# Waku Noise Protocols for Waku Payload Encryption +## See spec for more details: +## https://github.com/vacp2p/rfc/tree/master/content/docs/rfcs/35 + +{.push raises: [].} + +import std/[options, strutils, tables] +import chronos +import chronicles +import bearssl/rand +import results + +import libp2p/crypto/[chacha20poly1305, curve25519] + +import ./noise_types +import ./noise +import ./noise_utils + +logScope: + topics = "waku noise" + +################################################################# + +# Handshake Processing + +################################# +## Utilities +################################# + +# Based on the message handshake 
direction and if the user is or not the initiator, returns a boolean tuple telling if the user +# has to read or write the next handshake message +proc getReadingWritingState( + hs: HandshakeState, direction: MessageDirection +): (bool, bool) = + var reading, writing: bool + + if hs.initiator and direction == D_r: + # I'm Alice and direction is -> + reading = false + writing = true + elif hs.initiator and direction == D_l: + # I'm Alice and direction is <- + reading = true + writing = false + elif not hs.initiator and direction == D_r: + # I'm Bob and direction is -> + reading = true + writing = false + elif not hs.initiator and direction == D_l: + # I'm Bob and direction is <- + reading = false + writing = true + + return (reading, writing) + +# Checks if a pre-message is valid according to Noise specifications +# http://www.noiseprotocol.org/noise.html#handshake-patterns +proc isValid(msg: seq[PreMessagePattern]): bool = + var isValid: bool = true + + # Non-empty pre-messages can only have patterns "e", "s", "e,s" in each direction + let allowedPatterns: seq[PreMessagePattern] = + @[ + PreMessagePattern(direction: D_r, tokens: @[T_s]), + PreMessagePattern(direction: D_r, tokens: @[T_e]), + PreMessagePattern(direction: D_r, tokens: @[T_e, T_s]), + PreMessagePattern(direction: D_l, tokens: @[T_s]), + PreMessagePattern(direction: D_l, tokens: @[T_e]), + PreMessagePattern(direction: D_l, tokens: @[T_e, T_s]), + ] + + # We check if pre message patterns are allowed + for pattern in msg: + if not (pattern in allowedPatterns): + isValid = false + break + + return isValid + +################################# +# Handshake messages processing procedures +################################# + +# Processes pre-message patterns +proc processPreMessagePatternTokens( + hs: var HandshakeState, inPreMessagePKs: seq[NoisePublicKey] = @[] +) {. 
+ raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError] +.} = + var + # I make a copy of the input pre-message public keys, so that I can easily delete processed ones without using iterators/counters + preMessagePKs = inPreMessagePKs + # Here we store currently processed pre message public key + currPK: NoisePublicKey + + # We retrieve the pre-message patterns to process, if any + # If none, there's nothing to do + if hs.handshakePattern.preMessagePatterns == EmptyPreMessage: + return + + # If not empty, we check that pre-message is valid according to Noise specifications + if isValid(hs.handshakePattern.preMessagePatterns) == false: + raise newException(NoiseMalformedHandshake, "Invalid pre-message in handshake") + + # We iterate over each pattern contained in the pre-message + for messagePattern in hs.handshakePattern.preMessagePatterns: + let + direction = messagePattern.direction + tokens = messagePattern.tokens + + # We get if the user is reading or writing the current pre-message pattern + var (reading, writing) = getReadingWritingState(hs, direction) + + # We process each message pattern token + for token in tokens: + # We process the pattern token + case token + of T_e: + # We expect an ephemeral key, so we attempt to read it (next PK to process will always be at index 0 of preMessagePKs) + if preMessagePKs.len > 0: + currPK = preMessagePKs[0] + else: + raise newException( + NoiseHandshakeError, "Noise pre-message read e, expected a public key" + ) + + # If user is reading the "e" token + if reading: + trace "noise pre-message read e" + + # We check if current key is encrypted or not. We assume pre-message public keys are all unencrypted on users' end + if currPK.flag == 0.uint8: + # Sets re and calls MixHash(re.public_key). 
+ hs.re = intoCurve25519Key(currPK.pk) + hs.ss.mixHash(hs.re) + else: + raise newException( + NoisePublicKeyError, + "Noise read e, incorrect encryption flag for pre-message public key", + ) + + # If user is writing the "e" token + elif writing: + trace "noise pre-message write e" + + # When writing, the user is sending a public key, + # We check that the public part corresponds to the set local key and we call MixHash(e.public_key). + if hs.e.publicKey == intoCurve25519Key(currPK.pk): + hs.ss.mixHash(hs.e.publicKey) + else: + raise newException( + NoisePublicKeyError, + "Noise pre-message e key doesn't correspond to locally set e key pair", + ) + + # Noise specification: section 9.2 + # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results + # in a call to MixHash(e.public_key). + # In a PSK handshake, all of these calls are followed by MixKey(e.public_key). + if "psk" in hs.handshakePattern.name: + hs.ss.mixKey(currPK.pk) + + # We delete processed public key + preMessagePKs.delete(0) + of T_s: + # We expect a static key, so we attempt to read it (next PK to process will always be at index of preMessagePKs) + if preMessagePKs.len > 0: + currPK = preMessagePKs[0] + else: + raise newException( + NoiseHandshakeError, "Noise pre-message read s, expected a public key" + ) + + # If user is reading the "s" token + if reading: + trace "noise pre-message read s" + + # We check if current key is encrypted or not. We assume pre-message public keys are all unencrypted on users' end + if currPK.flag == 0.uint8: + # Sets re and calls MixHash(re.public_key). 
+ hs.rs = intoCurve25519Key(currPK.pk) + hs.ss.mixHash(hs.rs) + else: + raise newException( + NoisePublicKeyError, + "Noise read s, incorrect encryption flag for pre-message public key", + ) + + # If user is writing the "s" token + elif writing: + trace "noise pre-message write s" + + # If writing, it means that the user is sending a public key, + # We check that the public part corresponds to the set local key and we call MixHash(s.public_key). + if hs.s.publicKey == intoCurve25519Key(currPK.pk): + hs.ss.mixHash(hs.s.publicKey) + else: + raise newException( + NoisePublicKeyError, + "Noise pre-message s key doesn't correspond to locally set s key pair", + ) + + # Noise specification: section 9.2 + # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results + # in a call to MixHash(e.public_key). + # In a PSK handshake, all of these calls are followed by MixKey(e.public_key). + if "psk" in hs.handshakePattern.name: + hs.ss.mixKey(currPK.pk) + + # We delete processed public key + preMessagePKs.delete(0) + else: + raise + newException(NoiseMalformedHandshake, "Invalid Token for pre-message pattern") + +# This procedure encrypts/decrypts the implicit payload attached at the end of every message pattern +# An optional extraAd to pass extra additional data in encryption/decryption can be set (useful to authenticate messageNametag) +proc processMessagePatternPayload( + hs: var HandshakeState, transportMessage: seq[byte], extraAd: openArray[byte] = [] +): seq[byte] {.raises: [Defect, NoiseDecryptTagError, NoiseNonceMaxError].} = + var payload: seq[byte] + + # We retrieve current message pattern (direction + tokens) to process + let direction = hs.handshakePattern.messagePatterns[hs.msgPatternIdx].direction + + # We get if the user is reading or writing the input handshake message + var (reading, writing) = getReadingWritingState(hs, direction) + + # We decrypt the transportMessage, if any + if reading: + payload = 
hs.ss.decryptAndHash(transportMessage, extraAd) + payload = pkcs7_unpad(payload, NoisePaddingBlockSize) + elif writing: + payload = pkcs7_pad(transportMessage, NoisePaddingBlockSize) + payload = hs.ss.encryptAndHash(payload, extraAd) + + return payload + +# We process an input handshake message according to current handshake state and we return the next handshake step's handshake message +proc processMessagePatternTokens( + rng: var rand.HmacDrbgContext, + hs: var HandshakeState, + inputHandshakeMessage: seq[NoisePublicKey] = @[], +): Result[seq[NoisePublicKey], cstring] {. + raises: [ + Defect, NoiseHandshakeError, NoiseMalformedHandshake, NoisePublicKeyError, + NoiseDecryptTagError, NoiseNonceMaxError, + ] +.} = + # We retrieve current message pattern (direction + tokens) to process + let + messagePattern = hs.handshakePattern.messagePatterns[hs.msgPatternIdx] + direction = messagePattern.direction + tokens = messagePattern.tokens + + # We get if the user is reading or writing the input handshake message + var (reading, writing) = getReadingWritingState(hs, direction) + + # I make a copy of the handshake message so that I can easily delete processed PKs without using iterators/counters + # (Possibly) non-empty if reading + var inHandshakeMessage = inputHandshakeMessage + + # The party's output public keys + # (Possibly) non-empty if writing + var outHandshakeMessage: seq[NoisePublicKey] = @[] + + # In currPK we store the currently processed public key from the handshake message + var currPK: NoisePublicKey + + # We process each message pattern token + for token in tokens: + case token + of T_e: + # If user is reading the "e" token + if reading: + trace "noise read e" + + # We expect an ephemeral key, so we attempt to read it (next PK to process will always be at index 0 of preMessagePKs) + if inHandshakeMessage.len > 0: + currPK = inHandshakeMessage[0] + else: + raise newException(NoiseHandshakeError, "Noise read e, expected a public key") + + # We check if 
current key is encrypted or not + # Note: by specification, ephemeral keys should always be unencrypted. But we support encrypted ones. + if currPK.flag == 0.uint8: + # Unencrypted Public Key + # Sets re and calls MixHash(re.public_key). + hs.re = intoCurve25519Key(currPK.pk) + hs.ss.mixHash(hs.re) + + # The following is out of specification: we call decryptAndHash for encrypted ephemeral keys, similarly as happens for (encrypted) static keys + elif currPK.flag == 1.uint8: + # Encrypted public key + # Decrypts re, sets re and calls MixHash(re.public_key). + hs.re = intoCurve25519Key(hs.ss.decryptAndHash(currPK.pk)) + else: + raise newException( + NoisePublicKeyError, + "Noise read e, incorrect encryption flag for public key", + ) + + # Noise specification: section 9.2 + # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results + # in a call to MixHash(e.public_key). + # In a PSK handshake, all of these calls are followed by MixKey(e.public_key). + if "psk" in hs.handshakePattern.name: + hs.ss.mixKey(hs.re) + + # We delete processed public key + inHandshakeMessage.delete(0) + + # If user is writing the "e" token + elif writing: + trace "noise write e" + + # We generate a new ephemeral keypair + hs.e = genKeyPair(rng) + + # We update the state + hs.ss.mixHash(hs.e.publicKey) + + # Noise specification: section 9.2 + # In non-PSK handshakes, the "e" token in a pre-message pattern or message pattern always results + # in a call to MixHash(e.public_key). + # In a PSK handshake, all of these calls are followed by MixKey(e.public_key). 
+ if "psk" in hs.handshakePattern.name: + hs.ss.mixKey(hs.e.publicKey) + + # We add the ephemeral public key to the Waku payload + outHandshakeMessage.add toNoisePublicKey(getPublicKey(hs.e)) + of T_s: + # If user is reading the "s" token + if reading: + trace "noise read s" + + # We expect a static key, so we attempt to read it (next PK to process will always be at index 0 of preMessagePKs) + if inHandshakeMessage.len > 0: + currPK = inHandshakeMessage[0] + else: + raise newException(NoiseHandshakeError, "Noise read s, expected a public key") + + # We check if current key is encrypted or not + if currPK.flag == 0.uint8: + # Unencrypted Public Key + # Sets re and calls MixHash(re.public_key). + hs.rs = intoCurve25519Key(currPK.pk) + hs.ss.mixHash(hs.rs) + elif currPK.flag == 1.uint8: + # Encrypted public key + # Decrypts rs, sets rs and calls MixHash(rs.public_key). + hs.rs = intoCurve25519Key(hs.ss.decryptAndHash(currPK.pk)) + else: + raise newException( + NoisePublicKeyError, + "Noise read s, incorrect encryption flag for public key", + ) + + # We delete processed public key + inHandshakeMessage.delete(0) + + # If user is writing the "s" token + elif writing: + trace "noise write s" + + # If the local static key is not set (the handshake state was not properly initialized), we raise an error + if isDefault(hs.s): + raise newException(NoisePublicKeyError, "Static key not set") + + # We encrypt the public part of the static key in case a key is set in the Cipher State + # That is, encS may either be an encrypted or unencrypted static key. + let encS = hs.ss.encryptAndHash(hs.s.publicKey) + + # We add the (encrypted) static public key to the Waku payload + # Note that encS = (Enc(s) || tag) if encryption key is set, otherwise encS = s. 
+ # We distinguish these two cases by checking length of encryption and we set the proper encryption flag + if encS.len > Curve25519Key.len: + outHandshakeMessage.add NoisePublicKey(flag: 1, pk: encS) + else: + outHandshakeMessage.add NoisePublicKey(flag: 0, pk: encS) + of T_psk: + # If user is reading the "psk" token + + trace "noise psk" + + # Calls MixKeyAndHash(psk) + hs.ss.mixKeyAndHash(hs.psk) + of T_ee: + # If user is reading the "ee" token + + trace "noise dh ee" + + # If local and/or remote ephemeral keys are not set, we raise an error + if isDefault(hs.e) or isDefault(hs.re): + raise newException(NoisePublicKeyError, "Local or remote ephemeral key not set") + + # Calls MixKey(DH(e, re)). + hs.ss.mixKey(dh(hs.e.privateKey, hs.re)) + of T_es: + # If user is reading the "es" token + + trace "noise dh es" + + # We check if keys are correctly set. + # If both present, we call MixKey(DH(e, rs)) if initiator, MixKey(DH(s, re)) if responder. + if hs.initiator: + if isDefault(hs.e) or isDefault(hs.rs): + raise newException( + NoisePublicKeyError, "Local or remote ephemeral/static key not set" + ) + hs.ss.mixKey(dh(hs.e.privateKey, hs.rs)) + else: + if isDefault(hs.re) or isDefault(hs.s): + raise newException( + NoisePublicKeyError, "Local or remote ephemeral/static key not set" + ) + hs.ss.mixKey(dh(hs.s.privateKey, hs.re)) + of T_se: + # If user is reading the "se" token + + trace "noise dh se" + + # We check if keys are correctly set. + # If both present, call MixKey(DH(s, re)) if initiator, MixKey(DH(e, rs)) if responder. 
+ if hs.initiator: + if isDefault(hs.s) or isDefault(hs.re): + raise newException( + NoiseMalformedHandshake, "Local or remote ephemeral/static key not set" + ) + hs.ss.mixKey(dh(hs.s.privateKey, hs.re)) + else: + if isDefault(hs.rs) or isDefault(hs.e): + raise newException( + NoiseMalformedHandshake, "Local or remote ephemeral/static key not set" + ) + hs.ss.mixKey(dh(hs.e.privateKey, hs.rs)) + of T_ss: + # If user is reading the "ss" token + + trace "noise dh ss" + + # If local and/or remote static keys are not set, we raise an error + if isDefault(hs.s) or isDefault(hs.rs): + raise + newException(NoiseMalformedHandshake, "Local or remote static key not set") + + # Calls MixKey(DH(s, rs)). + hs.ss.mixKey(dh(hs.s.privateKey, hs.rs)) + + return ok(outHandshakeMessage) + +################################# +## Procedures to progress handshakes between users +################################# + +# Initializes a Handshake State +proc initialize*( + hsPattern: HandshakePattern, + ephemeralKey: KeyPair = default(KeyPair), + staticKey: KeyPair = default(KeyPair), + prologue: seq[byte] = @[], + psk: seq[byte] = @[], + preMessagePKs: seq[NoisePublicKey] = @[], + initiator: bool = false, +): HandshakeState {. + raises: [Defect, NoiseMalformedHandshake, NoiseHandshakeError, NoisePublicKeyError] +.} = + var hs = HandshakeState.init(hsPattern) + hs.ss.mixHash(prologue) + hs.e = ephemeralKey + hs.s = staticKey + hs.psk = psk + hs.msgPatternIdx = 0 + hs.initiator = initiator + # We process any eventual handshake pre-message pattern by processing pre-message public keys + processPreMessagePatternTokens(hs, preMessagePKs) + return hs + +# Advances 1 step in handshake +# Each user in a handshake alternates writing and reading of handshake messages. 
+# If the user is writing the handshake message, the transport message (if not empty) and eventually a non-empty message nametag has to be passed to transportMessage and messageNametag and readPayloadV2 can be left to its default value +# It the user is reading the handshake message, the read payload v2 has to be passed to readPayloadV2 and the transportMessage can be left to its default values. Decryption is skipped if the payloadv2 read doesn't have a message nametag equal to messageNametag (empty input nametags are converted to all-0 MessageNametagLength bytes arrays) +proc stepHandshake*( + rng: var rand.HmacDrbgContext, + hs: var HandshakeState, + readPayloadV2: PayloadV2 = default(PayloadV2), + transportMessage: seq[byte] = @[], + messageNametag: openArray[byte] = [], +): Result[HandshakeStepResult, cstring] {. + raises: [ + Defect, NoiseHandshakeError, NoiseMessageNametagError, NoiseMalformedHandshake, + NoisePublicKeyError, NoiseDecryptTagError, NoiseNonceMaxError, + ] +.} = + var hsStepResult: HandshakeStepResult + + # If there are no more message patterns left for processing + # we return an empty HandshakeStepResult + if hs.msgPatternIdx > uint8(hs.handshakePattern.messagePatterns.len - 1): + debug "stepHandshake called more times than the number of message patterns present in handshake" + return ok(hsStepResult) + + # We process the next handshake message pattern + + # We get if the user is reading or writing the input handshake message + let direction = hs.handshakePattern.messagePatterns[hs.msgPatternIdx].direction + var (reading, writing) = getReadingWritingState(hs, direction) + + # If we write an answer at this handshake step + if writing: + # We initialize a payload v2 and we set proper protocol ID (if supported) + try: + hsStepResult.payload2.protocolId = PayloadV2ProtocolIDs[hs.handshakePattern.name] + except CatchableError: + raise newException(NoiseMalformedHandshake, "Handshake Pattern not supported") + + # We set the messageNametag and the 
handshake and transport messages + hsStepResult.payload2.messageNametag = toMessageNametag(messageNametag) + hsStepResult.payload2.handshakeMessage = processMessagePatternTokens(rng, hs).get() + # We write the payload by passing the messageNametag as extra additional data + hsStepResult.payload2.transportMessage = processMessagePatternPayload( + hs, transportMessage, extraAd = hsStepResult.payload2.messageNametag + ) + + # If we read an answer during this handshake step + elif reading: + # If the read message nametag doesn't match the expected input one we raise an error + if readPayloadV2.messageNametag != toMessageNametag(messageNametag): + raise newException( + NoiseMessageNametagError, + "The message nametag of the read message doesn't match the expected one", + ) + + # We process the read public keys and (eventually decrypt) the read transport message + let + readHandshakeMessage = readPayloadV2.handshakeMessage + readTransportMessage = readPayloadV2.transportMessage + + # Since we only read, nothing meanigful (i.e. 
public keys) is returned + discard processMessagePatternTokens(rng, hs, readHandshakeMessage) + # We retrieve and store the (decrypted) received transport message by passing the messageNametag as extra additional data + hsStepResult.transportMessage = processMessagePatternPayload( + hs, readTransportMessage, extraAd = readPayloadV2.messageNametag + ) + else: + raise newException( + NoiseHandshakeError, "Handshake Error: neither writing or reading user" + ) + + # We increase the handshake state message pattern index to progress to next step + hs.msgPatternIdx += 1 + + return ok(hsStepResult) + +# Finalizes the handshake by calling Split and assigning the proper Cipher States to users +proc finalizeHandshake*(hs: var HandshakeState): HandshakeResult = + var hsResult: HandshakeResult + + ## Noise specification, Section 5: + ## Processing the final handshake message returns two CipherState objects, + ## the first for encrypting transport messages from initiator to responder, + ## and the second for messages in the other direction. 
+ + # We call Split() + let (cs1, cs2) = hs.ss.split() + + # Optional: We derive a secret for the nametag derivation + let (nms1, nms2) = genMessageNametagSecrets(hs) + + # We assign the proper Cipher States + if hs.initiator: + hsResult.csOutbound = cs1 + hsResult.csInbound = cs2 + # and nametags secrets + hsResult.nametagsInbound.secret = some(nms1) + hsResult.nametagsOutbound.secret = some(nms2) + else: + hsResult.csOutbound = cs2 + hsResult.csInbound = cs1 + # and nametags secrets + hsResult.nametagsInbound.secret = some(nms2) + hsResult.nametagsOutbound.secret = some(nms1) + + # We initialize the message nametags inbound/outbound buffers + hsResult.nametagsInbound.initNametagsBuffer + hsResult.nametagsOutbound.initNametagsBuffer + + # We store the optional fields rs and h + hsResult.rs = hs.rs + hsResult.h = hs.ss.h + + return hsResult + +################################# +# After-handshake procedures +################################# + +## Noise specification, Section 5: +## Transport messages are then encrypted and decrypted by calling EncryptWithAd() +## and DecryptWithAd() on the relevant CipherState with zero-length associated data. +## If DecryptWithAd() signals an error due to DECRYPT() failure, then the input message is discarded. +## The application may choose to delete the CipherState and terminate the session on such an error, +## or may continue to attempt communications. If EncryptWithAd() or DecryptWithAd() signal an error +## due to nonce exhaustion, then the application must delete the CipherState and terminate the session. 
+ +# Writes an encrypted message using the proper Cipher State +proc writeMessage*( + hsr: var HandshakeResult, + transportMessage: seq[byte], + outboundMessageNametagBuffer: var MessageNametagBuffer, +): PayloadV2 {.raises: [Defect, NoiseNonceMaxError].} = + var payload2: PayloadV2 + + # We set the message nametag using the input buffer + payload2.messageNametag = pop(outboundMessageNametagBuffer) + + # According to 35/WAKU2-NOISE RFC, no Handshake protocol information is sent when exchanging messages + # This correspond to setting protocol-id to 0 + payload2.protocolId = 0.uint8 + # We pad the transport message + let paddedTransportMessage = pkcs7_pad(transportMessage, NoisePaddingBlockSize) + # Encryption is done with zero-length associated data as per specification + payload2.transportMessage = encryptWithAd( + hsr.csOutbound, ad = @(payload2.messageNametag), plaintext = paddedTransportMessage + ) + + return payload2 + +# Reads an encrypted message using the proper Cipher State +# Decryption is attempted only if the input PayloadV2 has a messageNametag equal to the one expected +proc readMessage*( + hsr: var HandshakeResult, + readPayload2: PayloadV2, + inboundMessageNametagBuffer: var MessageNametagBuffer, +): Result[seq[byte], cstring] {. + raises: [ + Defect, NoiseDecryptTagError, NoiseMessageNametagError, NoiseNonceMaxError, + NoiseSomeMessagesWereLost, + ] +.} = + # The output decrypted message + var message: seq[byte] + + # If the message nametag does not correspond to the nametag expected in the inbound message nametag buffer + # an error is raised (to be handled externally, i.e. re-request lost messages, discard, etc.) + let nametagIsOk = + checkNametag(readPayload2.messageNametag, inboundMessageNametagBuffer).isOk + assert(nametagIsOk) + + # At this point the messageNametag matches the expected nametag. 
+ # According to 35/WAKU2-NOISE RFC, no Handshake protocol information is sent when exchanging messages + if readPayload2.protocolId == 0.uint8: + # On application level we decide to discard messages which fail decryption, without raising an error + try: + # Decryption is done with messageNametag as associated data + let paddedMessage = decryptWithAd( + hsr.csInbound, + ad = @(readPayload2.messageNametag), + ciphertext = readPayload2.transportMessage, + ) + # We unpdad the decrypted message + message = pkcs7_unpad(paddedMessage, NoisePaddingBlockSize) + # The message successfully decrypted, we can delete the first element of the inbound Message Nametag Buffer + delete(inboundMessageNametagBuffer, 1) + except NoiseDecryptTagError: + debug "A read message failed decryption. Returning empty message as plaintext." + message = @[] + + return ok(message) diff --git a/third-party/nwaku/waku/waku_noise/noise_types.nim b/third-party/nwaku/waku/waku_noise/noise_types.nim new file mode 100644 index 0000000..3b88c43 --- /dev/null +++ b/third-party/nwaku/waku/waku_noise/noise_types.nim @@ -0,0 +1,290 @@ +# Waku Noise Protocols for Waku Payload Encryption +## See spec for more details: +## https://github.com/vacp2p/rfc/tree/master/content/docs/rfcs/35 +## +## Implementation partially inspired by noise-libp2p: +## https://github.com/status-im/nim-libp2p/blob/master/libp2p/protocols/secure/noise.nim + +{.push raises: [].} + +import std/[options, tables] +import chronos +import chronicles +import bearssl +import nimcrypto/sha2 + +import libp2p/errors +import libp2p/crypto/[crypto, chacha20poly1305, curve25519] + +logScope: + topics = "waku noise" + +################################################################# + +# Constants and data structures + +const + # EmptyKey represents a non-initialized ChaChaPolyKey + EmptyKey* = default(ChaChaPolyKey) + # The maximum ChaChaPoly allowed nonce in Noise Handshakes + NonceMax* = uint64.high - 1 + # The padding blocksize of a transport 
message + NoisePaddingBlockSize* = 248 + # The default length of a message nametag + MessageNametagLength* = 16 + # The default length of the secret to generate Inbound/Outbound nametags buffer + MessageNametagSecretLength* = 32 + # The default size of an Inbound/outbound MessageNametagBuffer + MessageNametagBufferSize* = 50 + +type + + ################################# + # Elliptic Curve arithemtic + ################################# + + # Default underlying elliptic curve arithmetic (useful for switching to multiple ECs) + # Current default is Curve25519 + EllipticCurve* = Curve25519 + EllipticCurveKey* = Curve25519Key + + # An EllipticCurveKey (public, private) key pair + KeyPair* = object + privateKey*: EllipticCurveKey + publicKey*: EllipticCurveKey + + ################################# + # Noise Public Keys + ################################# + + # A Noise public key is a public key exchanged during Noise handshakes (no private part) + # This follows https://rfc.vac.dev/spec/35/#public-keys-serialization + # pk contains the X coordinate of the public key, if unencrypted (this implies flag = 0) + # or the encryption of the X coordinate concatenated with the authorization tag, if encrypted (this implies flag = 1) + # Note: besides encryption, flag can be used to distinguish among multiple supported Elliptic Curves + NoisePublicKey* = object + flag*: uint8 + pk*: seq[byte] + + ################################# + # ChaChaPoly Encryption + ################################# + + # A ChaChaPoly ciphertext (data) + authorization tag (tag) + ChaChaPolyCiphertext* = object + data*: seq[byte] + tag*: ChaChaPolyTag + + # A ChaChaPoly Cipher State containing key (k), nonce (nonce) and associated data (ad) + ChaChaPolyCipherState* = object + k*: ChaChaPolyKey + nonce*: ChaChaPolyNonce + ad*: seq[byte] + + ################################# + # Noise handshake patterns + ################################# + + # The Noise tokens appearing in Noise (pre)message patterns + # as 
in http://www.noiseprotocol.org/noise.html#handshake-pattern-basics + NoiseTokens* = enum + T_e = "e" + T_s = "s" + T_es = "es" + T_ee = "ee" + T_se = "se" + T_ss = "ss" + T_psk = "psk" + + # The direction of a (pre)message pattern in canonical form (i.e. Alice-initiated form) + # as in http://www.noiseprotocol.org/noise.html#alice-and-bob + MessageDirection* = enum + D_r = "->" + D_l = "<-" + + # The pre message pattern consisting of a message direction and some Noise tokens, if any. + # (if non empty, only tokens e and s are allowed: http://www.noiseprotocol.org/noise.html#handshake-pattern-basics) + PreMessagePattern* = object + direction*: MessageDirection + tokens*: seq[NoiseTokens] + + # The message pattern consisting of a message direction and some Noise tokens + # All Noise tokens are allowed + MessagePattern* = object + direction*: MessageDirection + tokens*: seq[NoiseTokens] + + # The handshake pattern object. It stores the handshake protocol name, the handshake pre message patterns and the handshake message patterns + HandshakePattern* = object + name*: string + preMessagePatterns*: seq[PreMessagePattern] + messagePatterns*: seq[MessagePattern] + + ################################# + # Noise state machine + ################################# + + # The Cipher State as in https://noiseprotocol.org/noise.html#the-cipherstate-object + # Contains an encryption key k and a nonce n (used in Noise as a counter) + CipherState* = object + k*: ChaChaPolyKey + n*: uint64 + + # The Symmetric State as in https://noiseprotocol.org/noise.html#the-symmetricstate-object + # Contains a Cipher State cs, the chaining key ck and the handshake hash value h + SymmetricState* = object + cs*: CipherState + ck*: ChaChaPolyKey + h*: MDigest[256] + + # The Handshake State as in https://noiseprotocol.org/noise.html#the-handshakestate-object + # Contains + # - the local and remote ephemeral/static keys e,s,re,rs (if any) + # - the initiator flag (true if the user creating the state is 
the handshake initiator, false otherwise) + # - the handshakePattern (containing the handshake protocol name, and (pre)message patterns) + # This object is futher extended from specifications by storing: + # - a message pattern index msgPatternIdx indicating the next handshake message pattern to process + # - the user's preshared psk, if any + HandshakeState* = object + s*: KeyPair + e*: KeyPair + rs*: EllipticCurveKey + re*: EllipticCurveKey + ss*: SymmetricState + initiator*: bool + handshakePattern*: HandshakePattern + msgPatternIdx*: uint8 + psk*: seq[byte] + + # While processing messages patterns, users either: + # - read (decrypt) the other party's (encrypted) transport message + # - write (encrypt) a message, sent through a PayloadV2 + # These two intermediate results are stored in the HandshakeStepResult data structure + HandshakeStepResult* = object + payload2*: PayloadV2 + transportMessage*: seq[byte] + + # When a handshake is complete, the HandhshakeResult will contain the two + # Cipher States used to encrypt/decrypt outbound/inbound messages + # The recipient static key rs and handshake hash values h are stored to address some possible future applications (channel-binding, session management, etc.). 
+ # However, are not required by Noise specifications and are thus optional + HandshakeResult* = object + csOutbound*: CipherState + csInbound*: CipherState + # Optional fields: + nametagsInbound*: MessageNametagBuffer + nametagsOutbound*: MessageNametagBuffer + rs*: EllipticCurveKey + h*: MDigest[256] + + ################################# + # Waku Payload V2 + ################################# + + # PayloadV2 defines an object for Waku payloads with version 2 as in + # https://rfc.vac.dev/spec/35/#public-keys-serialization + # It contains a message nametag, protocol ID field, the handshake message (for Noise handshakes) and + # a transport message (for Noise handshakes and ChaChaPoly encryptions) + MessageNametag* = array[MessageNametagLength, byte] + + MessageNametagBuffer* = object + buffer*: array[MessageNametagBufferSize, MessageNametag] + counter*: uint64 + secret*: Option[array[MessageNametagSecretLength, byte]] + + PayloadV2* = object + messageNametag*: MessageNametag + protocolId*: uint8 + handshakeMessage*: seq[NoisePublicKey] + transportMessage*: seq[byte] + + ################################# + # Some useful error types + ################################# + NoiseError* = object of LPError + NoiseHandshakeError* = object of NoiseError + NoiseEmptyChaChaPolyInput* = object of NoiseError + NoiseDecryptTagError* = object of NoiseError + NoiseNonceMaxError* = object of NoiseError + NoisePublicKeyError* = object of NoiseError + NoiseMalformedHandshake* = object of NoiseError + NoiseMessageNametagError* = object of NoiseError + NoiseSomeMessagesWereLost* = object of NoiseError + +################################# +# Constants (supported protocols) +################################# +const + + # The empty pre message patterns + EmptyPreMessage*: seq[PreMessagePattern] = @[] + + # Supported Noise handshake patterns as defined in https://rfc.vac.dev/spec/35/#specification + NoiseHandshakePatterns* = { + "K1K1": HandshakePattern( + name: 
"Noise_K1K1_25519_ChaChaPoly_SHA256", + preMessagePatterns: + @[ + PreMessagePattern(direction: D_r, tokens: @[T_s]), + PreMessagePattern(direction: D_l, tokens: @[T_s]), + ], + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_es]), + MessagePattern(direction: D_r, tokens: @[T_se]), + ], + ), + "XK1": HandshakePattern( + name: "Noise_XK1_25519_ChaChaPoly_SHA256", + preMessagePatterns: @[PreMessagePattern(direction: D_l, tokens: @[T_s])], + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se]), + ], + ), + "XX": HandshakePattern( + name: "Noise_XX_25519_ChaChaPoly_SHA256", + preMessagePatterns: EmptyPreMessage, + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_s, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se]), + ], + ), + "XXpsk0": HandshakePattern( + name: "Noise_XXpsk0_25519_ChaChaPoly_SHA256", + preMessagePatterns: EmptyPreMessage, + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_psk, T_e]), + MessagePattern(direction: D_l, tokens: @[T_e, T_ee, T_s, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se]), + ], + ), + "WakuPairing": HandshakePattern( + name: "Noise_WakuPairing_25519_ChaChaPoly_SHA256", + preMessagePatterns: @[PreMessagePattern(direction: D_l, tokens: @[T_e])], + messagePatterns: + @[ + MessagePattern(direction: D_r, tokens: @[T_e, T_ee]), + MessagePattern(direction: D_l, tokens: @[T_s, T_es]), + MessagePattern(direction: D_r, tokens: @[T_s, T_se, T_ss]), + ], + ), + }.toTable() + + # Supported Protocol ID for PayloadV2 objects + # Protocol IDs are defined according to https://rfc.vac.dev/spec/35/#specification + PayloadV2ProtocolIDs* = { + "": 0.uint8, + "Noise_K1K1_25519_ChaChaPoly_SHA256": 10.uint8, + 
"Noise_XK1_25519_ChaChaPoly_SHA256": 11.uint8, + "Noise_XX_25519_ChaChaPoly_SHA256": 12.uint8, + "Noise_XXpsk0_25519_ChaChaPoly_SHA256": 13.uint8, + "Noise_WakuPairing_25519_ChaChaPoly_SHA256": 14.uint8, + "ChaChaPoly": 30.uint8, + }.toTable() diff --git a/third-party/nwaku/waku/waku_noise/noise_utils.nim b/third-party/nwaku/waku/waku_noise/noise_utils.nim new file mode 100644 index 0000000..babab1c --- /dev/null +++ b/third-party/nwaku/waku/waku_noise/noise_utils.nim @@ -0,0 +1,588 @@ +# Waku Noise Protocols for Waku Payload Encryption +# Noise utilities module +## See spec for more details: +## https://github.com/vacp2p/rfc/tree/master/content/docs/rfcs/35 + +{.push raises: [].} + +import std/[algorithm, base64, oids, options, strutils, tables, sequtils] +import chronos +import chronicles +import bearssl/rand +import results +import stew/[endians2, byteutils] +import nimcrypto/sha2 + +import libp2p/crypto/[chacha20poly1305, curve25519, hkdf] + +import ./noise_types +import ./noise + +logScope: + topics = "waku noise" + +################################################################# + +################################# +# Generic Utilities +################################# + +# Generates random byte sequences of given size +proc randomSeqByte*(rng: var HmacDrbgContext, size: int): seq[byte] = + var output = newSeq[byte](size.uint32) + hmacDrbgGenerate(rng, output) + return output + +# Pads a payload according to PKCS#7 as per RFC 5652 https://datatracker.ietf.org/doc/html/rfc5652#section-6.3 +proc pkcs7_pad*(payload: seq[byte], paddingSize: int): seq[byte] = + assert(paddingSize < 256) + + let k = paddingSize - (payload.len mod paddingSize) + + var padding: seq[byte] + + if k != 0: + padding = newSeqWith(k, k.byte) + else: + padding = newSeqWith(paddingSize, paddingSize.byte) + + let padded = concat(payload, padding) + + return padded + +# Unpads a payload according to PKCS#7 as per RFC 5652 https://datatracker.ietf.org/doc/html/rfc5652#section-6.3 +proc 
pkcs7_unpad*(payload: seq[byte], paddingSize: int): seq[byte] = + let k = payload[payload.high] + let unpadded = payload[0 .. payload.high - k.int] + return unpadded + +proc seqToDigest256*(sequence: seq[byte]): MDigest[256] = + var digest: MDigest[256] + for i in 0 ..< digest.data.len: + digest.data[i] = sequence[i] + return digest + +proc digestToSeq*[T](digest: MDigest[T]): seq[byte] = + var sequence: seq[byte] + for i in 0 ..< digest.data.len: + sequence.add digest.data[i] + return sequence + +# Serializes input parameters to a base64 string for exposure through QR code (used by WakuPairing) +proc toQr*( + applicationName: string, + applicationVersion: string, + shardId: string, + ephemeralKey: EllipticCurveKey, + committedStaticKey: MDigest[256], +): string = + var qr: string + qr.add encode(applicationName, safe = true) & ":" + qr.add encode(applicationVersion, safe = true) & ":" + qr.add encode(shardId, safe = true) & ":" + qr.add encode(ephemeralKey, safe = true) & ":" + qr.add encode(committedStaticKey.data, safe = true) + + return qr + +# Deserializes input string in base64 to the corresponding (applicationName, applicationVersion, shardId, ephemeralKey, committedStaticKey) +proc fromQr*( + qr: string +): (string, string, string, EllipticCurveKey, MDigest[256]) {. 
+ raises: [Defect, ValueError] +.} = + let values = qr.split(":") + + assert(values.len == 5) + + let applicationName: string = decode(values[0]) + let applicationVersion: string = decode(values[1]) + let shardId: string = decode(values[2]) + + let decodedEphemeralKey = decode(values[3]).toBytes + var ephemeralKey: EllipticCurveKey + for i in 0 ..< ephemeralKey.len: + ephemeralKey[i] = decodedEphemeralKey[i] + + let committedStaticKey = seqToDigest256(decode(values[4]).toBytes) + + return + (applicationName, applicationVersion, shardId, ephemeralKey, committedStaticKey) + +# Converts a sequence or array (arbitrary size) to a MessageNametag +proc toMessageNametag*(input: openArray[byte]): MessageNametag = + var byte_seq: seq[byte] = @input + + # We set its length to the default message nametag length (will be truncated or 0-padded) + byte_seq.setLen(MessageNametagLength) + + # We copy it to a MessageNametag + var messageNametag: MessageNametag + for i in 0 ..< MessageNametagLength: + messageNametag[i] = byte_seq[i] + + return messageNametag + +# Uses the cryptographic information stored in the input handshake state to generate a random message nametag +# In current implementation the messageNametag = HKDF(handshake hash value), but other derivation mechanisms can be implemented +proc toMessageNametag*(hs: HandshakeState): MessageNametag = + var output: array[1, array[MessageNametagLength, byte]] + sha256.hkdf(hs.ss.h.data, [], [], output) + return output[0] + +proc genMessageNametagSecrets*( + hs: HandshakeState +): (array[MessageNametagSecretLength, byte], array[MessageNametagSecretLength, byte]) = + var output: array[2, array[MessageNametagSecretLength, byte]] + sha256.hkdf(hs.ss.h.data, [], [], output) + return (output[0], output[1]) + +# Simple utility that checks if the given variable is "default", +# Therefore, it has not been initialized +proc isDefault*[T](value: T): bool = + value == static(default(T)) + 
+################################################################# + +################################# +# Noise Handhshake Utilities +################################# + +# Generate random (public, private) Elliptic Curve key pairs +proc genKeyPair*(rng: var HmacDrbgContext): KeyPair = + var keyPair: KeyPair + keyPair.privateKey = EllipticCurveKey.random(rng) + keyPair.publicKey = keyPair.privateKey.public() + return keyPair + +# Gets private key from a key pair +proc getPrivateKey*(keypair: KeyPair): EllipticCurveKey = + return keypair.privateKey + +# Gets public key from a key pair +proc getPublicKey*(keypair: KeyPair): EllipticCurveKey = + return keypair.publicKey + +# Prints Handshake Patterns using Noise pattern layout +proc print*(self: HandshakePattern) {.raises: [IOError, NoiseMalformedHandshake].} = + try: + if self.name != "": + stdout.write self.name, ":\n" + stdout.flushFile() + # We iterate over pre message patterns, if any + if self.preMessagePatterns != EmptyPreMessage: + for pattern in self.preMessagePatterns: + stdout.write " ", pattern.direction + var first = true + for token in pattern.tokens: + if first: + stdout.write " ", token + first = false + else: + stdout.write ", ", token + stdout.write "\n" + stdout.flushFile() + stdout.write " ...\n" + stdout.flushFile() + # We iterate over message patterns + for pattern in self.messagePatterns: + stdout.write " ", pattern.direction + var first = true + for token in pattern.tokens: + if first: + stdout.write " ", token + first = false + else: + stdout.write ", ", token + stdout.write "\n" + stdout.flushFile() + except CatchableError: + raise newException(NoiseMalformedHandshake, "HandshakePattern malformed") + +# Hashes a Noise protocol name using SHA256 +proc hashProtocol*(protocolName: string): MDigest[256] = + # The output hash value + var hash: MDigest[256] + + # From Noise specification: Section 5.2 + # http://www.noiseprotocol.org/noise.html#the-symmetricstate-object + # If protocol_name is less 
# Generates an 8 decimal digits authorization code using HKDF and the handshake state
proc genAuthcode*(hs: HandshakeState): string =
  ## Derives a short numeric authorization code from the handshake hash:
  ## HKDF(h) is truncated to 8 bytes, interpreted as a little-endian uint64
  ## and reduced modulo 1e8 (at most 8 decimal digits; note the result is
  ## not zero-padded).
  var output: array[1, array[8, byte]]
  sha256.hkdf(hs.ss.h.data, [], [], output)
  # Interpret the 8 HKDF bytes explicitly as little-endian instead of
  # `cast[uint64]`, which depends on host endianness and would yield a
  # different code on big-endian machines (identical on little-endian hosts).
  let code = uint64.fromBytesLE(output[0]) mod 100_000_000
  return $code
# Pops the first (oldest/next-expected) message nametag from the buffer;
# delete(mntb, 1) then rotates the window and derives one fresh nametag.
# (The previous header comment described `delete`, not `pop`.)
proc pop*(mntb: var MessageNametagBuffer): MessageNametag =
  # Note that if the input MessageNametagBuffer is set to default, an all 0 messageNametag is returned
  let messageNametag = mntb.buffer[0]
  delete(mntb, 1)
  return messageNametag
# Converts a public Elliptic Curve key to an unencrypted Noise public key
proc toNoisePublicKey*(publicKey: EllipticCurveKey): NoisePublicKey =
  ## The result carries flag = 0 (unencrypted) together with the key's
  ## X-coordinate bytes.
  NoisePublicKey(flag: 0, pk: getBytes(publicKey))
# Converts a serialized Noise public key to a NoisePublicKey object as in
# https://rfc.vac.dev/spec/35/#public-keys-serialization
proc intoNoisePublicKey*(
    serializedNoisePublicKey: seq[byte]
): NoisePublicKey {.raises: [Defect, NoisePublicKeyError].} =
  ## Deserializes (flag || pk) into a NoisePublicKey. Raises
  ## NoisePublicKeyError on an empty input or an invalid flag byte.
  # Guard against an empty serialization: indexing [0] below would otherwise
  # raise an IndexDefect instead of the documented NoisePublicKeyError.
  if serializedNoisePublicKey.len == 0:
    raise newException(NoisePublicKeyError, "Empty serialized public key")
  var noisePublicKey: NoisePublicKey
  # We retrieve the encryption flag
  noisePublicKey.flag = serializedNoisePublicKey[0]
  # If not 0 or 1 we raise a new exception
  if not (noisePublicKey.flag == 0 or noisePublicKey.flag == 1):
    raise newException(NoisePublicKeyError, "Invalid flag in serialized public key")
  # We set the remaining sequence to the pk value (this may be an encrypted or not encrypted X coordinate)
  noisePublicKey.pk = serializedNoisePublicKey[1 ..< serializedNoisePublicKey.len]
  return noisePublicKey
# Checks equality between two PayloadsV2 objects
proc `==`*(p1, p2: PayloadV2): bool =
  ## Field-wise equality over all four PayloadV2 fields, short-circuiting
  ## on the first mismatch.
  if p1.messageNametag != p2.messageNametag:
    return false
  if p1.protocolId != p2.protocolId:
    return false
  if p1.handshakeMessage != p2.handshakeMessage:
    return false
  p1.transportMessage == p2.transportMessage
# Serializes a PayloadV2 object to a byte sequence according to https://rfc.vac.dev/spec/35/.
# The output serialized payload concatenates the input PayloadV2 object fields as
# payload = ( messageNametag || protocolId || serializedHandshakeMessageLen || serializedHandshakeMessage || transportMessageLen || transportMessage)
# The output can be then passed to the payload field of a WakuMessage https://rfc.vac.dev/spec/14/
proc serializePayloadV2*(self: PayloadV2): Result[seq[byte], cstring] =
  # We collect public keys contained in the handshake message
  var
    # According to https://rfc.vac.dev/spec/35/, the maximum size for the handshake message is 256 bytes, that is
    # the handshake message length can be represented with 1 byte only
    # However, to ease public keys length addition operation, we declare it as int and later cast to uint8
    serializedHandshakeMessageLen: int = 0
    # These variables will store the concatenation of the serializations of all public keys in the handshake message
    serializedHandshakeMessage = newSeqOfCap[byte](256)
    # A variable to store the currently processed public key serialization
    serializedPk: seq[byte]
  # For each public key in the handshake message
  for pk in self.handshakeMessage:
    # We serialize the public key
    serializedPk = serializeNoisePublicKey(pk)
    # We sum its serialized length to the total
    serializedHandshakeMessageLen += serializedPk.len
    # We add its serialization to the concatenation of all serialized public keys in the handshake message
    serializedHandshakeMessage.add serializedPk
    # If the total exceeds 255 bytes (the length must fit one byte), we return an error
    if serializedHandshakeMessageLen > uint8.high.int:
      debug "PayloadV2 malformed: too many public keys contained in the handshake message"
      return err("Too many public keys in handshake message")

  # We get the transport message byte length
  let transportMessageLen = self.transportMessage.len

  # The output payload as in https://rfc.vac.dev/spec/35/. We concatenate all the PayloadV2 fields as
  # payload = ( messageNametag || protocolId || serializedHandshakeMessageLen || serializedHandshakeMessage || transportMessageLen || transportMessage)
  # We declare it as a byte sequence of length accordingly to the PayloadV2 information read
  var payload = newSeqOfCap[byte](
    MessageNametagLength + #MessageNametagLength bytes for messageNametag
    1 + # 1 byte for protocol ID
    1 + # 1 byte for length of serializedHandshakeMessage field
    serializedHandshakeMessageLen +
      # serializedHandshakeMessageLen bytes for serializedHandshakeMessage
    8 + # 8 bytes for transportMessageLen
    transportMessageLen # transportMessageLen bytes for transportMessage
  )

  # We concatenate all the data
  # The protocol ID (1 byte) and handshake message length (1 byte) can be directly casted to byte to allow direct copy to the payload byte sequence
  payload.add @(self.messageNametag)
  payload.add self.protocolId.byte
  payload.add serializedHandshakeMessageLen.byte
  payload.add serializedHandshakeMessage
  # The transport message length is converted from uint64 to bytes in Little-Endian
  payload.add toBytesLE(transportMessageLen.uint64)
  payload.add self.transportMessage

  return ok(payload)
# Deserializes a byte sequence to a PayloadV2 object according to https://rfc.vac.dev/spec/35/.
# The input serialized payload concatenates the output PayloadV2 object fields as
# payload = ( messageNametag || protocolId || serializedHandshakeMessageLen || serializedHandshakeMessage || transportMessageLen || transportMessage)
proc deserializePayloadV2*(
    payload: seq[byte]
): Result[PayloadV2, cstring] {.raises: [Defect, NoisePublicKeyError].} =
  ## Parses a serialized PayloadV2. Every field read is bounds-checked first
  ## so that truncated or malformed inputs yield an error result instead of
  ## an out-of-bounds access (IndexDefect).
  # The output PayloadV2
  var payload2: PayloadV2

  # i is the read input buffer position index
  var i: uint64 = 0

  let payloadLen = payload.len.uint64

  # The payload must at least hold the messageNametag, the protocol ID
  # (1 byte) and the handshake message length field (1 byte)
  if payloadLen < MessageNametagLength.uint64 + 2:
    return err("Payload too short")

  # We start by reading the messageNametag
  for j in 0 ..< MessageNametagLength:
    payload2.messageNametag[j] = payload[i + j.uint64]
  i += MessageNametagLength

  # We read the Protocol ID
  # TODO: when the list of supported protocol ID is defined, check if read protocol ID is supported
  payload2.protocolId = payload[i].uint8
  i += 1

  # We read the Handshake Message length (1 byte)
  var handshakeMessageLen = payload[i].uint64
  if handshakeMessageLen > uint8.high.uint64:
    debug "Payload malformed: too many public keys contained in the handshake message"
    return err("Too many public keys in handshake message")

  i += 1

  # We now read handshakeMessageLen bytes from the buffer and deserialize
  # each (encrypted/unencrypted) public key read
  var
    # In handshakeMessage we accumulate the read deserialized Noise Public keys
    handshakeMessage: seq[NoisePublicKey]
    flag: byte
    pkLen: uint64
    written: uint64 = 0

  # We read the buffer until handshakeMessageLen bytes are consumed
  while written < handshakeMessageLen:
    # The flag byte itself must be within bounds
    if i >= payloadLen:
      return err("Handshake message overruns payload")
    # We obtain the current Noise Public key encryption flag
    flag = payload[i]
    if flag == 0:
      # Unencrypted: flag byte plus the X coordinate of the EC public key
      pkLen = 1 + EllipticCurveKey.len
    elif flag == 1:
      # Encrypted: flag byte, encrypted X coordinate and authorization tag
      pkLen = 1 + EllipticCurveKey.len + ChaChaPolyTag.len
    else:
      return err("Invalid flag for Noise public key")
    # The whole serialized key must be within bounds before slicing
    if i + pkLen > payloadLen:
      return err("Truncated Noise public key in payload")
    handshakeMessage.add intoNoisePublicKey(payload[i ..< i + pkLen])
    i += pkLen
    written += pkLen

  # We save in the output PayloadV2 the read handshake message
  payload2.handshakeMessage = handshakeMessage

  # We read the transport message length (8 bytes) and we convert to uint64 in Little Endian
  if i + 8 > payloadLen:
    return err("Payload too short for transport message length")
  let transportMessageLen = fromBytesLE(uint64, payload[i .. (i + 8 - 1)])
  i += 8

  # We read the transport message (transportMessageLen bytes).
  # Compare against the remaining length to avoid uint64 overflow in `i + transportMessageLen`.
  if transportMessageLen > payloadLen - i:
    return err("Transport message overruns payload")
  payload2.transportMessage = payload[i .. i + transportMessageLen - 1]
  i += transportMessageLen

  return ok(payload2)
diff --git a/third-party/nwaku/waku/waku_peer_exchange/client.nim b/third-party/nwaku/waku/waku_peer_exchange/client.nim new file mode 100644 index 0000000..15426c4 --- /dev/null +++ b/third-party/nwaku/waku/waku_peer_exchange/client.nim @@ -0,0 +1,102 @@ +import std/options, results, chronicles, chronos, metrics + +import ./common, ./rpc, ./rpc_codec, ../node/peer_manager + +from ../waku_core/codecs import WakuPeerExchangeCodec + +declarePublicGauge waku_px_peers_received_total, + "number of ENRs received via peer exchange" +declarePublicCounter waku_px_client_errors, "number of peer exchange errors", ["type"] + +logScope: + topics = "waku peer_exchange client" + +type WakuPeerExchangeClient* = ref object + peerManager*: PeerManager + pxLoopHandle*: Future[void] + +proc new*(T: type WakuPeerExchangeClient, peerManager: PeerManager): T = + WakuPeerExchangeClient(peerManager: peerManager) + +proc request*( + wpx: WakuPeerExchangeClient, numPeers = DefaultPXNumPeersReq, conn: Connection +): Future[WakuPeerExchangeResult[PeerExchangeResponse]] {.async: (raises: []).} = + let rpc = PeerExchangeRpc.makeRequest(numPeers) + + var buffer: seq[byte] + var callResult = + (status_code: PeerExchangeResponseStatusCode.SUCCESS, status_desc: none(string)) + try: + await conn.writeLP(rpc.encode().buffer) + buffer = await conn.readLp(DefaultMaxRpcSize.int) + except CatchableError as exc: + error "exception when handling peer exchange request", error = exc.msg + waku_px_client_errors.inc(labelValues = ["error_sending_or_receiving_px_req"]) + callResult = ( + status_code: PeerExchangeResponseStatusCode.SERVICE_UNAVAILABLE, + status_desc: some($exc.msg), + ) + finally: + # close, no more data is expected + await conn.closeWithEof() + + if callResult.status_code != PeerExchangeResponseStatusCode.SUCCESS: + error "peer exchange request failed", status_code = callResult.status_code + return err(callResult) + + let decoded = PeerExchangeRpc.decode(buffer).valueOr: + error "peer exchange 
proc request*(
    wpx: WakuPeerExchangeClient, numPeers = DefaultPXNumPeersReq
): Future[WakuPeerExchangeResult[PeerExchangeResponse]] {.async: (raises: []).} =
  ## Convenience overload: selects any known peer that supports
  ## WakuPeerExchangeCodec and forwards the request to it.
  ## Returns SERVICE_UNAVAILABLE when no such peer is available.
  let peerOpt = wpx.peerManager.selectPeer(WakuPeerExchangeCodec)
  if peerOpt.isNone():
    waku_px_client_errors.inc(labelValues = [peerNotFoundFailure])
    info "peer exchange request could not be made as no peer exchange peers found"
    return err(
      (
        status_code: PeerExchangeResponseStatusCode.SERVICE_UNAVAILABLE,
        status_desc: some(peerNotFoundFailure),
      )
    )
  return await wpx.request(numPeers, peerOpt.get())
/dev/null +++ b/third-party/nwaku/waku/waku_peer_exchange/common.nim @@ -0,0 +1,21 @@ +import results, metrics, chronos +import ./rpc, ../waku_core + +const + # We add a 64kB safety buffer for protocol overhead. + # 10x-multiplier also for safety + DefaultMaxRpcSize* = 10 * DefaultMaxWakuMessageSize + 64 * 1024 + # TODO what is the expected size of a PX message? As currently specified, it can contain an arbitrary number of ENRs... + MaxPeersCacheSize* = 60 + CacheRefreshInterval* = 10.minutes + DefaultPXNumPeersReq* = 5.uint64() + +# Error types (metric label values) +const + dialFailure* = "dial_failure" + peerNotFoundFailure* = "peer_not_found_failure" + decodeRpcFailure* = "decode_rpc_failure" + retrievePeersDiscv5Error* = "retrieve_peers_discv5_failure" + pxFailure* = "px_failure" + +type WakuPeerExchangeResult*[T] = Result[T, PeerExchangeResponseStatus] diff --git a/third-party/nwaku/waku/waku_peer_exchange/protocol.nim b/third-party/nwaku/waku/waku_peer_exchange/protocol.nim new file mode 100644 index 0000000..f3f8629 --- /dev/null +++ b/third-party/nwaku/waku/waku_peer_exchange/protocol.nim @@ -0,0 +1,219 @@ +import + std/[options, sequtils, random], + results, + chronicles, + chronos, + metrics, + libp2p/protocols/protocol, + libp2p/crypto/crypto, + eth/p2p/discoveryv5/enr +import + ../common/nimchronos, + ../node/peer_manager, + ../waku_core, + ../discovery/waku_discv5, + ./rpc, + ./rpc_codec, + ../common/rate_limit/request_limiter, + ./common + +from ../waku_core/codecs import WakuPeerExchangeCodec +export WakuPeerExchangeCodec + +declarePublicGauge waku_px_peers_received_unknown, + "number of previously unknown ENRs received via peer exchange" +declarePublicGauge waku_px_peers_cached, "number of peer exchange peer ENRs cached" +declarePublicCounter waku_px_errors, "number of peer exchange errors", ["type"] +declarePublicCounter waku_px_peers_sent, + "number of ENRs sent to peer exchange requesters" + +logScope: + topics = "waku peer_exchange" + +type 
proc getEnrsFromCache(
    wpx: WakuPeerExchange, numPeers: uint64
): seq[enr.Record] {.gcsafe.} =
  ## Returns up to `numPeers` ENRs picked at random from the cache
  ## (fewer if the cache holds fewer entries; empty seq when it is empty).
  if wpx.enrCache.len() == 0:
    debug "peer exchange ENR cache is empty"
    return @[]

  # copy and shuffle
  # NOTE(review): randomize() reseeds the global std/random RNG from the
  # current time on every call; consider seeding once at startup instead.
  randomize()
  var shuffledCache = wpx.enrCache
  shuffledCache.shuffle()

  # return numPeers or less if cache is smaller
  return shuffledCache[0 ..< min(shuffledCache.len.int, numPeers.int)]
proc populateEnrCache(wpx: WakuPeerExchange) =
  ## Rebuilds the ENR cache from currently reachable peers that (i) come
  ## from discv5, (ii) carry an ENR and (iii) match the configured cluster
  ## (see poolFilter), keeping at most MaxPeersCacheSize entries.
  let candidates = wpx.peerManager.switch.peerStore.getReachablePeers().filterIt(
    poolFilter(wpx.cluster, it)
  )

  # Take at most MaxPeersCacheSize candidates and extract their ENRs.
  var refreshed: seq[enr.Record] = @[]
  for idx in 0 ..< min(candidates.len, MaxPeersCacheSize):
    refreshed.add(candidates[idx].enr.get())

  # Swap in the freshly built cache.
  wpx.enrCache = refreshed
  trace "ENR cache populated"
proc new*(
    T: type WakuPeerExchange,
    peerManager: PeerManager,
    cluster: Option[uint16] = none(uint16),
    rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](),
): T =
  ## Instantiates the peer exchange protocol: mounts the LibP2P handler,
  ## registers the rate-limit metric and starts the background ENR cache
  ## refresh loop.
  let wpx = WakuPeerExchange(
    peerManager: peerManager,
    cluster: cluster,
    requestRateLimiter: newRequestRateLimiter(rateLimitSetting),
  )
  wpx.initProtocolHandler()
  setServiceLimitMetric(WakuPeerExchangeCodec, rateLimitSetting)
  # NOTE(review): the refresh loop is spawned unsupervised and pxLoopHandle
  # is not assigned here, so callers cannot await or cancel it — confirm intended.
  asyncSpawn wpx.updatePxEnrCache()
  return wpx
proc makeErrorResponse*(
    T: type PeerExchangeRpc,
    status_code: PeerExchangeResponseStatusCode,
    status_desc: Option[string] = none(string),
): T =
  ## Builds an RPC whose response carries only an error status
  ## (no peer records).
  let errorResponse =
    PeerExchangeResponse(status_code: status_code, status_desc: status_desc)
  T(response: errorResponse)
proc parse*(T: type PeerExchangeResponseStatusCode, status: uint32): T =
  ## Maps a wire status code to the enum, falling back to UNKNOWN for
  ## unrecognized values (forward compatibility with newer peers).
  ## Explicit per-value mapping replaces the previous `cast`, which was
  ## unchecked, and also accepts 401/599, which the enum defines but the
  ## old accepted set (200, 400, 429, 503) silently mapped to UNKNOWN.
  case status
  of 200: PeerExchangeResponseStatusCode.SUCCESS
  of 400: PeerExchangeResponseStatusCode.BAD_REQUEST
  of 401: PeerExchangeResponseStatusCode.BAD_RESPONSE
  of 429: PeerExchangeResponseStatusCode.TOO_MANY_REQUESTS
  of 503: PeerExchangeResponseStatusCode.SERVICE_UNAVAILABLE
  of 599: PeerExchangeResponseStatusCode.DIAL_FAILURE
  else: PeerExchangeResponseStatusCode.UNKNOWN
in peerInfoBuffers: + rpc.peerInfos.add(?PeerExchangePeerInfo.decode(pib)) + + var status_code: uint32 + if ?pb.getField(10, status_code): + rpc.status_code = PeerExchangeResponseStatusCode.parse(status_code) + else: + # older peers may not support status_code field yet + if rpc.peerInfos.len() > 0: + rpc.status_code = PeerExchangeResponseStatusCode.SUCCESS + else: + rpc.status_code = PeerExchangeResponseStatusCode.SERVICE_UNAVAILABLE + + var status_desc: string + if ?pb.getField(11, status_desc): + rpc.status_desc = some(status_desc) + else: + rpc.status_desc = none(string) + + ok(rpc) + +proc encode*(rpc: PeerExchangeRpc): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.request.encode()) + pb.write3(2, rpc.response.encode()) + + pb.finish3() + + pb + +proc decode*(T: type PeerExchangeRpc, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + var rpc = PeerExchangeRpc() + + var requestBuffer: seq[byte] + if not ?pb.getField(1, requestBuffer): + return err(ProtobufError.missingRequiredField("request")) + + rpc.request = ?PeerExchangeRequest.decode(requestBuffer) + + var responseBuffer: seq[byte] + if not ?pb.getField(2, responseBuffer): + rpc.response = + PeerExchangeResponse(status_code: PeerExchangeResponseStatusCode.UNKNOWN) + else: + rpc.response = ?PeerExchangeResponse.decode(responseBuffer) + + ok(rpc) diff --git a/third-party/nwaku/waku/waku_relay.nim b/third-party/nwaku/waku/waku_relay.nim new file mode 100644 index 0000000..96328d9 --- /dev/null +++ b/third-party/nwaku/waku/waku_relay.nim @@ -0,0 +1,3 @@ +import ./waku_relay/[protocol, topic_health] + +export protocol, topic_health diff --git a/third-party/nwaku/waku/waku_relay/message_id.nim b/third-party/nwaku/waku/waku_relay/message_id.nim new file mode 100644 index 0000000..655f50c --- /dev/null +++ b/third-party/nwaku/waku/waku_relay/message_id.nim @@ -0,0 +1,30 @@ +{.push raises: [].} + +import + results, nimcrypto/sha2, libp2p/protocols/pubsub, 
libp2p/protocols/pubsub/rpc/messages + +## Message ID provider + +type MessageIdProvider* = pubsub.MsgIdProvider + +## Default message ID provider +# Performs a sha256 digest on the Waku Relay message payload. As Protocol Buffers v3 +# deterministic serialization is not canonical between the different languages and +# implementations. +# +# See: https://gist.github.com/kchristidis/39c8b310fd9da43d515c4394c3cd9510 +# +# This lack of deterministic serializaion could lead to a situation where two +# messages with the same attributes and serialized by different implementations +# have a different message ID (hash). This can impact the performance of the +# Waku Relay (Gossipsub) protocol's message cache and the gossiping process, and +# as a consequence the network. + +proc defaultMessageIdProvider*( + message: messages.Message +): Result[MessageID, ValidationResult] = + let hash = sha256.digest(message.data) + ok(@(hash.data)) + +## Waku message Unique ID provider +# TODO: Add here the MUID provider once `meta` field RFC PR is merged diff --git a/third-party/nwaku/waku/waku_relay/protocol.nim b/third-party/nwaku/waku/waku_relay/protocol.nim new file mode 100644 index 0000000..6a57e04 --- /dev/null +++ b/third-party/nwaku/waku/waku_relay/protocol.nim @@ -0,0 +1,695 @@ +## Waku Relay module. Thin layer on top of GossipSub. +## +## See https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-relay.md +## for spec. 
+{.push raises: [].} + +import + std/[strformat, strutils], + stew/byteutils, + results, + sequtils, + chronos, + chronicles, + metrics, + libp2p/multihash, + libp2p/protocols/pubsub/gossipsub, + libp2p/protocols/pubsub/rpc/messages, + libp2p/stream/connection, + libp2p/switch +import + ../waku_core, ./message_id, ./topic_health, ../node/delivery_monitor/publish_observer + +from ../waku_core/codecs import WakuRelayCodec +export WakuRelayCodec + +type ShardMetrics = object + count: float64 + sizeSum: float64 + avgSize: float64 + maxSize: float64 + +logScope: + topics = "waku relay" + +declareCounter waku_relay_network_bytes, + "total traffic per topic, distinct gross/net and direction", + labels = ["topic", "type", "direction"] + +declarePublicGauge( + waku_relay_total_msg_bytes_per_shard, + "total length of messages seen per shard", + labels = ["shard"], +) + +declarePublicGauge( + waku_relay_max_msg_bytes_per_shard, + "Maximum length of messages seen per shard", + labels = ["shard"], +) + +declarePublicGauge( + waku_relay_avg_msg_bytes_per_shard, + "Average length of messages seen per shard", + labels = ["shard"], +) + +# see: https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#overview-of-new-parameters +const TopicParameters = TopicParams( + topicWeight: 1, + + # p1: favours peers already in the mesh + timeInMeshWeight: 0.01, + timeInMeshQuantum: 1.seconds, + timeInMeshCap: 10.0, + + # p2: rewards fast peers + firstMessageDeliveriesWeight: 1.0, + firstMessageDeliveriesDecay: 0.5, + firstMessageDeliveriesCap: 10.0, + + # p3: penalizes lazy peers. safe low value + meshMessageDeliveriesWeight: 0.0, + meshMessageDeliveriesDecay: 0.0, + meshMessageDeliveriesCap: 0, + meshMessageDeliveriesThreshold: 0, + meshMessageDeliveriesWindow: 0.milliseconds, + meshMessageDeliveriesActivation: 0.seconds, + + # p3b: tracks history of prunes + meshFailurePenaltyWeight: 0.0, + meshFailurePenaltyDecay: 0.0, + + # p4: penalizes invalid messages. 
highly penalize + # peers sending wrong messages + invalidMessageDeliveriesWeight: -100.0, + invalidMessageDeliveriesDecay: 0.5, +) + +# see: https://rfc.vac.dev/spec/29/#gossipsub-v10-parameters +const GossipsubParameters = GossipSubParams.init( + pruneBackoff = chronos.minutes(1), + unsubscribeBackoff = chronos.seconds(5), + floodPublish = true, + gossipFactor = 0.25, + d = 6, + dLow = 4, + dHigh = 8, + dScore = 6, + dOut = 3, + dLazy = 6, + heartbeatInterval = chronos.seconds(1), + historyLength = 6, + historyGossip = 3, + fanoutTTL = chronos.minutes(1), + seenTTL = chronos.minutes(2), + + # no gossip is sent to peers below this score + gossipThreshold = -100, + + # no self-published msgs are sent to peers below this score + publishThreshold = -1000, + + # used to trigger disconnections + ignore peer if below this score + graylistThreshold = -10000, + + # grafts better peers if the mesh median score drops below this. unset. + opportunisticGraftThreshold = 0, + + # how often peer scoring is updated + decayInterval = chronos.seconds(12), + + # below this we consider the parameter to be zero + decayToZero = 0.01, + + # remember peer score during x after it disconnects + retainScore = chronos.minutes(10), + + # p5: application specific, unset + appSpecificWeight = 0.0, + + # p6: penalizes peers sharing more than threshold ips + ipColocationFactorWeight = -50.0, + ipColocationFactorThreshold = 5.0, + + # p7: penalizes bad behaviour (weight and decay) + behaviourPenaltyWeight = -10.0, + behaviourPenaltyDecay = 0.986, + + # triggers disconnections of bad peers aka score shardMetrics.maxSize: + shardMetrics.maxSize = payloadSize + shardMetrics.avgSize = shardMetrics.sizeSum / shardMetrics.count + w.msgMetricsPerShard[topic] = shardMetrics + + waku_relay_max_msg_bytes_per_shard.set(shardMetrics.maxSize, labelValues = [topic]) + + waku_relay_avg_msg_bytes_per_shard.set(shardMetrics.avgSize, labelValues = [topic]) + + 
waku_relay_total_msg_bytes_per_shard.set(shardMetrics.sizeSum, labelValues = [topic]) + +proc initRelayObservers(w: WakuRelay) = + proc decodeRpcMessageInfo( + peer: PubSubPeer, msg: Message + ): Result[ + tuple[msgId: string, topic: string, wakuMessage: WakuMessage, msgSize: int], void + ] = + let msg_id = w.msgIdProvider(msg).valueOr: + warn "Error generating message id", + my_peer_id = w.switch.peerInfo.peerId, + from_peer_id = peer.peerId, + pubsub_topic = msg.topic, + error = $error + return err() + + let msg_id_short = shortLog(msg_id) + + let wakuMessage = WakuMessage.decode(msg.data).valueOr: + warn "Error decoding to Waku Message", + my_peer_id = w.switch.peerInfo.peerId, + msg_id = msg_id_short, + from_peer_id = peer.peerId, + pubsub_topic = msg.topic, + error = $error + return err() + + let msgSize = msg.data.len + msg.topic.len + return ok((msg_id_short, msg.topic, wakuMessage, msgSize)) + + proc updateMetrics( + peer: PubSubPeer, + pubsub_topic: string, + msg: WakuMessage, + msgSize: int, + onRecv: bool, + ) = + if onRecv: + waku_relay_network_bytes.inc( + msgSize.int64, labelValues = [pubsub_topic, "gross", "in"] + ) + else: + # sent traffic can only be "net" + # TODO: If we can measure unsuccessful sends would mean a possible distinction between gross/net + waku_relay_network_bytes.inc( + msgSize.int64, labelValues = [pubsub_topic, "net", "out"] + ) + + proc onRecv(peer: PubSubPeer, msgs: var RPCMsg) = + for msg in msgs.messages: + let (msg_id_short, topic, wakuMessage, msgSize) = decodeRpcMessageInfo(peer, msg).valueOr: + continue + # message receive log happens in onValidated observer as onRecv is called before checks + updateMetrics(peer, topic, wakuMessage, msgSize, onRecv = true) + discard + + proc onValidated(peer: PubSubPeer, msg: Message, msgId: MessageId) = + let msg_id_short = shortLog(msgId) + let wakuMessage = WakuMessage.decode(msg.data).valueOr: + warn "onValidated: failed decoding to Waku Message", + my_peer_id = 
w.switch.peerInfo.peerId, + msg_id = msg_id_short, + from_peer_id = peer.peerId, + pubsub_topic = msg.topic, + error = $error + return + + logMessageInfo( + w, shortLog(peer.peerId), msg.topic, msg_id_short, wakuMessage, onRecv = true + ) + + proc onSend(peer: PubSubPeer, msgs: var RPCMsg) = + for msg in msgs.messages: + let (msg_id_short, topic, wakuMessage, msgSize) = decodeRpcMessageInfo(peer, msg).valueOr: + warn "onSend: failed decoding RPC info", + my_peer_id = w.switch.peerInfo.peerId, to_peer_id = peer.peerId + continue + logMessageInfo( + w, shortLog(peer.peerId), topic, msg_id_short, wakuMessage, onRecv = false + ) + updateMetrics(peer, topic, wakuMessage, msgSize, onRecv = false) + + let administrativeObserver = + PubSubObserver(onRecv: onRecv, onSend: onSend, onValidated: onValidated) + + w.addObserver(administrativeObserver) + +proc new*( + T: type WakuRelay, switch: Switch, maxMessageSize = int(DefaultMaxWakuMessageSize) +): WakuRelayResult[T] = + ## maxMessageSize: max num bytes that are allowed for the WakuMessage + + var w: WakuRelay + try: + w = WakuRelay.init( + switch = switch, + anonymize = true, + verifySignature = false, + sign = false, + triggerSelf = true, + msgIdProvider = defaultMessageIdProvider, + maxMessageSize = maxMessageSize, + parameters = GossipsubParameters, + ) + + procCall GossipSub(w).initPubSub() + w.initProtocolHandler() + w.initRelayObservers() + w.topicsHealth = initTable[string, TopicHealth]() + except InitializationError: + return err("initialization error: " & getCurrentExceptionMsg()) + + return ok(w) + +proc addValidator*( + w: WakuRelay, handler: WakuValidatorHandler, errorMessage: string = "" +) {.gcsafe.} = + w.wakuValidators.add((handler, errorMessage)) + +proc addPublishObserver*(w: WakuRelay, obs: PublishObserver) = + ## Observer when the api client performed a publish operation. 
This + ## is initially aimed for bringing an additional layer of delivery reliability thanks + ## to store + w.publishObservers.add(obs) + +proc addObserver*(w: WakuRelay, observer: PubSubObserver) {.gcsafe.} = + ## Observes when a message is sent/received from the GossipSub PoV + procCall GossipSub(w).addObserver(observer) + +proc getDHigh*(T: type WakuRelay): int = + return GossipsubParameters.dHigh + +proc getPubSubPeersInMesh*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[HashSet[PubSubPeer], string] = + ## Returns the list of PubSubPeers in a mesh defined by the passed pubsub topic. + ## The 'mesh' atribute is defined in the GossipSub ref object. + + # If pubsubTopic is empty, we return all peers in mesh for any pubsub topic + if pubsubTopic == "": + var allPeers = initHashSet[PubSubPeer]() + for topic, topicMesh in w.mesh.pairs: + allPeers = allPeers.union(topicMesh) + return ok(allPeers) + + if not w.mesh.hasKey(pubsubTopic): + debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic", + pubsubTopic = pubsubTopic + return ok(initHashSet[PubSubPeer]()) + + let peersRes = catch: + w.mesh[pubsubTopic] + + let peers: HashSet[PubSubPeer] = peersRes.valueOr: + return err( + "getPubSubPeersInMesh - exception accessing " & pubsubTopic & ": " & error.msg + ) + + return ok(peers) + +proc getPeersInMesh*( + w: WakuRelay, pubsubTopic: PubsubTopic = "" +): Result[seq[PeerId], string] = + ## Returns the list of peerIds in a mesh defined by the passed pubsub topic. + ## The 'mesh' atribute is defined in the GossipSub ref object. + let pubSubPeers = w.getPubSubPeersInMesh(pubsubTopic).valueOr: + return err(error) + let peerIds = toSeq(pubSubPeers).mapIt(it.peerId) + + return ok(peerIds) + +proc getNumPeersInMesh*(w: WakuRelay, pubsubTopic: PubsubTopic): Result[int, string] = + ## Returns the number of peers in a mesh defined by the passed pubsub topic. 
+ + let peers = w.getPubSubPeersInMesh(pubsubTopic).valueOr: + return err( + "getNumPeersInMesh - failed retrieving peers in mesh: " & pubsubTopic & ": " & + error + ) + + return ok(peers.len) + +proc calculateTopicHealth(wakuRelay: WakuRelay, topic: string): TopicHealth = + let numPeersInMesh = wakuRelay.getNumPeersInMesh(topic).valueOr: + error "Could not calculate topic health", topic = topic, error = error + return TopicHealth.UNHEALTHY + + if numPeersInMesh < 1: + return TopicHealth.UNHEALTHY + elif numPeersInMesh < wakuRelay.parameters.dLow: + return TopicHealth.MINIMALLY_HEALTHY + return TopicHealth.SUFFICIENTLY_HEALTHY + +proc updateTopicsHealth(wakuRelay: WakuRelay) {.async.} = + var futs = newSeq[Future[void]]() + for topic in toSeq(wakuRelay.topics.keys): + ## loop over all the topics I'm subscribed to + let + oldHealth = wakuRelay.topicsHealth.getOrDefault(topic) + currentHealth = wakuRelay.calculateTopicHealth(topic) + + if oldHealth == currentHealth: + continue + + wakuRelay.topicsHealth[topic] = currentHealth + if not wakuRelay.onTopicHealthChange.isNil(): + let fut = wakuRelay.onTopicHealthChange(topic, currentHealth) + if not fut.completed(): # Fast path for successful sync handlers + futs.add(fut) + + if futs.len() > 0: + # slow path - we have to wait for the handlers to complete + try: + futs = await allFinished(futs) + except CancelledError: + # check for errors in futures + for fut in futs: + if fut.failed: + let err = fut.readError() + warn "Error in health change handler", description = err.msg + +proc topicsHealthLoop(wakuRelay: WakuRelay) {.async.} = + while true: + await wakuRelay.updateTopicsHealth() + await sleepAsync(10.seconds) + +method start*(w: WakuRelay) {.async, base.} = + debug "start" + await procCall GossipSub(w).start() + w.topicHealthLoopHandle = w.topicsHealthLoop() + +method stop*(w: WakuRelay) {.async, base.} = + debug "stop" + await procCall GossipSub(w).stop() + if not w.topicHealthLoopHandle.isNil(): + await 
w.topicHealthLoopHandle.cancelAndWait() + +proc isSubscribed*(w: WakuRelay, topic: PubsubTopic): bool = + GossipSub(w).topics.hasKey(topic) + +proc subscribedTopics*(w: WakuRelay): seq[PubsubTopic] = + return toSeq(GossipSub(w).topics.keys()) + +proc generateOrderedValidator(w: WakuRelay): ValidatorHandler {.gcsafe.} = + # rejects messages that are not WakuMessage + let wrappedValidator = proc( + pubsubTopic: string, message: messages.Message + ): Future[ValidationResult] {.async.} = + # can be optimized by checking if the message is a WakuMessage without allocating memory + # see nim-libp2p protobuf library + let msg = WakuMessage.decode(message.data).valueOr: + error "protocol generateOrderedValidator reject decode error", + pubsubTopic = pubsubTopic, error = $error + return ValidationResult.Reject + + # now sequentially validate the message + for (validator, errorMessage) in w.wakuValidators: + let validatorRes = await validator(pubsubTopic, msg) + + if validatorRes != ValidationResult.Accept: + let msgHash = computeMessageHash(pubsubTopic, msg).to0xHex() + error "protocol generateOrderedValidator reject waku validator", + msg_hash = msgHash, + pubsubTopic = pubsubTopic, + contentTopic = msg.contentTopic, + validatorRes = validatorRes, + error = errorMessage + + return validatorRes + + return ValidationResult.Accept + + return wrappedValidator + +proc validateMessage*( + w: WakuRelay, pubsubTopic: string, msg: WakuMessage +): Future[Result[void, string]] {.async.} = + let messageSizeBytes = msg.encode().buffer.len + let msgHash = computeMessageHash(pubsubTopic, msg).to0xHex() + + if messageSizeBytes > w.maxMessageSize: + let message = fmt"Message size exceeded maximum of {w.maxMessageSize} bytes" + error "too large Waku message", + msg_hash = msgHash, + error = message, + messageSizeBytes = messageSizeBytes, + maxMessageSize = w.maxMessageSize + + return err(message) + + for (validator, message) in w.wakuValidators: + let validatorRes = await 
validator(pubsubTopic, msg) + if validatorRes != ValidationResult.Accept: + if message.len > 0: + error "invalid Waku message", msg_hash = msgHash, error = message + return err(message) + else: + ## This should never happen + error "uncertain invalid Waku message", msg_hash = msgHash, error = message + return err("validator failed") + + return ok() + +proc subscribe*(w: WakuRelay, pubsubTopic: PubsubTopic, handler: WakuRelayHandler) = + debug "subscribe", pubsubTopic = pubsubTopic + + # We need to wrap the handler since gossipsub doesnt understand WakuMessage + let topicHandler = proc( + pubsubTopic: string, data: seq[byte] + ): Future[void] {.gcsafe, raises: [].} = + let decMsg = WakuMessage.decode(data) + if decMsg.isErr(): + # fine if triggerSelf enabled, since validators are bypassed + error "failed to decode WakuMessage, validator passed a wrong message", + pubsubTopic = pubsubTopic, error = decMsg.error + let fut = newFuture[void]() + fut.complete() + return fut + else: + # this subscription handler is called once for every validated message + # that will be relayed, hence this is the place we can count net incoming traffic + waku_relay_network_bytes.inc( + data.len.int64 + pubsubTopic.len.int64, labelValues = [pubsubTopic, "net", "in"] + ) + + return handler(pubsubTopic, decMsg.get()) + + # Add the ordered validator to the topic + # This assumes that if `w.validatorInserted.hasKey(pubSubTopic) is true`, it contains the ordered validator. + # Otherwise this might lead to unintended behaviour. 
+ if not w.topicValidator.hasKey(pubSubTopic): + let newValidator = w.generateOrderedValidator() + procCall GossipSub(w).addValidator(pubSubTopic, w.generateOrderedValidator()) + w.topicValidator[pubSubTopic] = newValidator + + # set this topic parameters for scoring + w.topicParams[pubsubTopic] = TopicParameters + + # subscribe to the topic with our wrapped handler + procCall GossipSub(w).subscribe(pubsubTopic, topicHandler) + + w.topicHandlers[pubsubTopic] = topicHandler + +proc unsubscribeAll*(w: WakuRelay, pubsubTopic: PubsubTopic) = + ## Unsubscribe all handlers on this pubsub topic + + debug "unsubscribe all", pubsubTopic = pubsubTopic + + procCall GossipSub(w).unsubscribeAll(pubsubTopic) + w.topicValidator.del(pubsubTopic) + w.topicHandlers.del(pubsubTopic) + +proc unsubscribe*(w: WakuRelay, pubsubTopic: PubsubTopic) = + if not w.topicValidator.hasKey(pubsubTopic): + error "unsubscribe no validator for this topic", pubsubTopic + return + + if not w.topicHandlers.hasKey(pubsubTopic): + error "not subscribed to the given topic", pubsubTopic + return + + var topicHandler: TopicHandler + var topicValidator: ValidatorHandler + try: + topicHandler = w.topicHandlers[pubsubTopic] + topicValidator = w.topicValidator[pubsubTopic] + except KeyError: + error "exception in unsubscribe", pubsubTopic, error = getCurrentExceptionMsg() + return + + debug "unsubscribe", pubsubTopic + procCall GossipSub(w).unsubscribe(pubsubTopic, topicHandler) + procCall GossipSub(w).removeValidator(pubsubTopic, topicValidator) + + w.topicValidator.del(pubsubTopic) + w.topicHandlers.del(pubsubTopic) + +proc publish*( + w: WakuRelay, pubsubTopic: PubsubTopic, wakuMessage: WakuMessage +): Future[Result[int, PublishOutcome]] {.async.} = + if pubsubTopic.isEmptyOrWhitespace(): + return err(NoTopicSpecified) + + var message = wakuMessage + if message.timestamp == 0: + message.timestamp = getNowInNanosecondTime() + + let data = message.encode().buffer + + let msgHash = 
computeMessageHash(pubsubTopic, message).to0xHex() + notice "start publish Waku message", msg_hash = msgHash, pubsubTopic = pubsubTopic + + let relayedPeerCount = await procCall GossipSub(w).publish(pubsubTopic, data) + + if relayedPeerCount <= 0: + return err(NoPeersToPublish) + + for obs in w.publishObservers: + obs.onMessagePublished(pubSubTopic, message) + + return ok(relayedPeerCount) + +proc getConnectedPubSubPeers*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[HashSet[PubsubPeer], string] = + ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic. + ## The 'gossipsub' atribute is defined in the GossipSub ref object. + + if pubsubTopic == "": + ## Return all the connected peers + var peerIds = initHashSet[PubsubPeer]() + for k, v in w.gossipsub: + peerIds = peerIds + v + return ok(peerIds) + + if not w.gossipsub.hasKey(pubsubTopic): + return err( + "getConnectedPeers - there is no gossipsub peer for the given pubsub topic: " & + pubsubTopic + ) + + let peersRes = catch: + w.gossipsub[pubsubTopic] + + let peers: HashSet[PubSubPeer] = peersRes.valueOr: + return + err("getConnectedPeers - exception accessing " & pubsubTopic & ": " & error.msg) + + return ok(peers) + +proc getConnectedPeers*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[seq[PeerId], string] = + ## Returns the list of peerIds of connected peers and subscribed to the passed pubsub topic. + ## The 'gossipsub' atribute is defined in the GossipSub ref object. + + let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr: + return err(error) + + let peerIds = toSeq(peers).mapIt(it.peerId) + return ok(peerIds) + +proc getNumConnectedPeers*( + w: WakuRelay, pubsubTopic: PubsubTopic +): Result[int, string] = + ## Returns the number of connected peers and subscribed to the passed pubsub topic. 
+ + ## Return all the connected peers + let peers = w.getConnectedPubSubPeers(pubsubTopic).valueOr: + return err( + "getNumConnectedPeers - failed retrieving peers in mesh: " & pubsubTopic & ": " & + error + ) + + return ok(peers.len) + +proc getSubscribedTopics*(w: WakuRelay): seq[PubsubTopic] = + ## Returns a seq containing the current list of subscribed topics + return PubSub(w).topics.keys.toSeq().mapIt(cast[PubsubTopic](it)) diff --git a/third-party/nwaku/waku/waku_relay/topic_health.nim b/third-party/nwaku/waku/waku_relay/topic_health.nim new file mode 100644 index 0000000..774abc5 --- /dev/null +++ b/third-party/nwaku/waku/waku_relay/topic_health.nim @@ -0,0 +1,19 @@ +import chronos + +import ../waku_core + +type TopicHealth* = enum + UNHEALTHY + MINIMALLY_HEALTHY + SUFFICIENTLY_HEALTHY + +proc `$`*(t: TopicHealth): string = + result = + case t + of UNHEALTHY: "UnHealthy" + of MINIMALLY_HEALTHY: "MinimallyHealthy" + of SUFFICIENTLY_HEALTHY: "SufficientlyHealthy" + +type TopicHealthChangeHandler* = proc( + pubsubTopic: PubsubTopic, topicHealth: TopicHealth +): Future[void] {.gcsafe, raises: [Defect].} diff --git a/third-party/nwaku/waku/waku_rendezvous.nim b/third-party/nwaku/waku/waku_rendezvous.nim new file mode 100644 index 0000000..b07f1f7 --- /dev/null +++ b/third-party/nwaku/waku/waku_rendezvous.nim @@ -0,0 +1,3 @@ +import ./waku_rendezvous/protocol + +export protocol diff --git a/third-party/nwaku/waku/waku_rendezvous/common.nim b/third-party/nwaku/waku/waku_rendezvous/common.nim new file mode 100644 index 0000000..6125ac8 --- /dev/null +++ b/third-party/nwaku/waku/waku_rendezvous/common.nim @@ -0,0 +1,38 @@ +{.push raises: [].} + +import std/options, chronos + +import ../common/enr, ../waku_enr/capabilities, ../waku_enr/sharding + +const DiscoverLimit* = 1000 +const DefaultRegistrationTTL* = 60.seconds +const DefaultRegistrationInterval* = 10.seconds +const DefaultRequestsInterval* = 1.minutes +const MaxRegistrationInterval* = 5.minutes +const 
PeersRequestedCount* = 12 + +proc computeNamespace*(clusterId: uint16, shard: uint16): string = + var namespace = "rs/" + + namespace &= $clusterId + namespace &= '/' + namespace &= $shard + + return namespace + +proc computeNamespace*(clusterId: uint16, shard: uint16, cap: Capabilities): string = + var namespace = "rs/" + + namespace &= $clusterId + namespace &= '/' + namespace &= $shard + namespace &= '/' + namespace &= $cap + + return namespace + +proc getRelayShards*(enr: enr.Record): Option[RelayShards] = + let typedRecord = enr.toTyped().valueOr: + return none(RelayShards) + + return typedRecord.relaySharding() diff --git a/third-party/nwaku/waku/waku_rendezvous/protocol.nim b/third-party/nwaku/waku/waku_rendezvous/protocol.nim new file mode 100644 index 0000000..ae57322 --- /dev/null +++ b/third-party/nwaku/waku/waku_rendezvous/protocol.nim @@ -0,0 +1,311 @@ +{.push raises: [].} + +import + std/[sugar, options], + results, + chronos, + chronicles, + metrics, + libp2p/protocols/rendezvous, + libp2p/switch, + libp2p/utility + +import + ../node/peer_manager, + ../common/callbacks, + ../waku_enr/capabilities, + ../waku_core/peers, + ../waku_core/topics, + ../waku_core/topics/pubsub_topic, + ./common + +logScope: + topics = "waku rendezvous" + +declarePublicCounter rendezvousPeerFoundTotal, + "total number of peers found via rendezvous" + +type WakuRendezVous* = ref object + rendezvous: Rendezvous + peerManager: PeerManager + clusterId: uint16 + getShards: GetShards + getCapabilities: GetCapabilities + + registrationInterval: timer.Duration + periodicRegistrationFut: Future[void] + + requestInterval: timer.Duration + periodicRequestFut: Future[void] + +proc batchAdvertise*( + self: WakuRendezVous, + namespace: string, + ttl: Duration = DefaultRegistrationTTL, + peers: seq[PeerId], +): Future[Result[void, string]] {.async: (raises: []).} = + ## Register with all rendezvous peers under a namespace + + # rendezvous.advertise expects already opened connections + # 
must dial first + var futs = collect(newSeq): + for peerId in peers: + self.peerManager.dialPeer(peerId, RendezVousCodec) + + let dialCatch = catch: + await allFinished(futs) + + if dialCatch.isErr(): + return err("batchAdvertise: " & dialCatch.error.msg) + + futs = dialCatch.get() + + let conns = collect(newSeq): + for fut in futs: + let catchable = catch: + fut.read() + + if catchable.isErr(): + warn "a rendezvous dial failed", cause = catchable.error.msg + continue + + let connOpt = catchable.get() + + let conn = connOpt.valueOr: + continue + + conn + + let advertCatch = catch: + await self.rendezvous.advertise(namespace, ttl, peers) + + for conn in conns: + await conn.close() + + if advertCatch.isErr(): + return err("batchAdvertise: " & advertCatch.error.msg) + + return ok() + +proc batchRequest*( + self: WakuRendezVous, + namespace: string, + count: int = DiscoverLimit, + peers: seq[PeerId], +): Future[Result[seq[PeerRecord], string]] {.async: (raises: []).} = + ## Request all records from all rendezvous peers matching a namespace + + # rendezvous.request expects already opened connections + # must dial first + var futs = collect(newSeq): + for peerId in peers: + self.peerManager.dialPeer(peerId, RendezVousCodec) + + let dialCatch = catch: + await allFinished(futs) + + if dialCatch.isErr(): + return err("batchRequest: " & dialCatch.error.msg) + + futs = dialCatch.get() + + let conns = collect(newSeq): + for fut in futs: + let catchable = catch: + fut.read() + + if catchable.isErr(): + warn "a rendezvous dial failed", cause = catchable.error.msg + continue + + let connOpt = catchable.get() + + let conn = connOpt.valueOr: + continue + + conn + + let reqCatch = catch: + await self.rendezvous.request(Opt.some(namespace), count, peers) + + for conn in conns: + await conn.close() + + if reqCatch.isErr(): + return err("batchRequest: " & reqCatch.error.msg) + + return ok(reqCatch.get()) + +proc advertiseAll( + self: WakuRendezVous +): Future[Result[void, string]] 
{.async: (raises: []).} = + debug "waku rendezvous advertisements started" + + let shards = self.getShards() + + let futs = collect(newSeq): + for shardId in shards: + # Get a random RDV peer for that shard + + let pubsub = + toPubsubTopic(RelayShard(clusterId: self.clusterId, shardId: shardId)) + + let rpi = self.peerManager.selectPeer(RendezVousCodec, some(pubsub)).valueOr: + continue + + let namespace = computeNamespace(self.clusterId, shardId) + + # Advertise yourself on that peer + self.batchAdvertise(namespace, DefaultRegistrationTTL, @[rpi.peerId]) + + if futs.len < 1: + return err("could not get a peer supporting RendezVousCodec") + + let catchable = catch: + await allFinished(futs) + + if catchable.isErr(): + return err(catchable.error.msg) + + for fut in catchable.get(): + if fut.failed(): + warn "a rendezvous advertisement failed", cause = fut.error.msg + + debug "waku rendezvous advertisements finished" + + return ok() + +proc initialRequestAll*( + self: WakuRendezVous +): Future[Result[void, string]] {.async: (raises: []).} = + debug "waku rendezvous initial requests started" + + let shards = self.getShards() + + let futs = collect(newSeq): + for shardId in shards: + let namespace = computeNamespace(self.clusterId, shardId) + # Get a random RDV peer for that shard + let rpi = self.peerManager.selectPeer( + RendezVousCodec, + some(toPubsubTopic(RelayShard(clusterId: self.clusterId, shardId: shardId))), + ).valueOr: + continue + + # Ask for peer records for that shard + self.batchRequest(namespace, PeersRequestedCount, @[rpi.peerId]) + + if futs.len < 1: + return err("could not get a peer supporting RendezVousCodec") + + let catchable = catch: + await allFinished(futs) + + if catchable.isErr(): + return err(catchable.error.msg) + + for fut in catchable.get(): + if fut.failed(): + warn "a rendezvous request failed", cause = fut.error.msg + elif fut.finished(): + let res = fut.value() + + let records = res.valueOr: + warn "a rendezvous request failed", 
cause = $res.error + continue + + for record in records: + rendezvousPeerFoundTotal.inc() + self.peerManager.addPeer(record) + + debug "waku rendezvous initial request finished" + + return ok() + +proc periodicRegistration(self: WakuRendezVous) {.async.} = + debug "waku rendezvous periodic registration started", + interval = self.registrationInterval + + # infinite loop + while true: + await sleepAsync(self.registrationInterval) + + (await self.advertiseAll()).isOkOr: + debug "waku rendezvous advertisements failed", error = error + + if self.registrationInterval > MaxRegistrationInterval: + self.registrationInterval = MaxRegistrationInterval + else: + self.registrationInterval += self.registrationInterval + + # Back to normal interval if no errors + self.registrationInterval = DefaultRegistrationInterval + +proc periodicRequests(self: WakuRendezVous) {.async.} = + debug "waku rendezvous periodic requests started", interval = self.requestInterval + + # infinite loop + while true: + (await self.initialRequestAll()).isOkOr: + error "waku rendezvous requests failed", error = error + + await sleepAsync(self.requestInterval) + + # Exponential backoff + self.requestInterval += self.requestInterval + + if self.requestInterval >= 1.days: + break + +proc new*( + T: type WakuRendezVous, + switch: Switch, + peerManager: PeerManager, + clusterId: uint16, + getShards: GetShards, + getCapabilities: GetCapabilities, +): Result[T, string] {.raises: [].} = + let rvCatchable = catch: + RendezVous.new(switch = switch, minDuration = DefaultRegistrationTTL) + + if rvCatchable.isErr(): + return err(rvCatchable.error.msg) + + let rv = rvCatchable.get() + + let mountCatchable = catch: + switch.mount(rv) + + if mountCatchable.isErr(): + return err(mountCatchable.error.msg) + + var wrv = WakuRendezVous() + wrv.rendezvous = rv + wrv.peerManager = peerManager + wrv.clusterId = clusterId + wrv.getShards = getShards + wrv.getCapabilities = getCapabilities + wrv.registrationInterval = 
DefaultRegistrationInterval + wrv.requestInterval = DefaultRequestsInterval + + debug "waku rendezvous initialized", + clusterId = clusterId, shards = getShards(), capabilities = getCapabilities() + + return ok(wrv) + +proc start*(self: WakuRendezVous) {.async: (raises: []).} = + # start registering forever + self.periodicRegistrationFut = self.periodicRegistration() + + self.periodicRequestFut = self.periodicRequests() + + debug "waku rendezvous discovery started" + +proc stopWait*(self: WakuRendezVous) {.async: (raises: []).} = + if not self.periodicRegistrationFut.isNil(): + await self.periodicRegistrationFut.cancelAndWait() + + if not self.periodicRequestFut.isNil(): + await self.periodicRequestFut.cancelAndWait() + + debug "waku rendezvous discovery stopped" diff --git a/third-party/nwaku/waku/waku_rln_relay.nim b/third-party/nwaku/waku/waku_rln_relay.nim new file mode 100644 index 0000000..ce79f98 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay.nim @@ -0,0 +1,7 @@ +import + ./waku_rln_relay/group_manager, + ./waku_rln_relay/conversion_utils, + ./waku_rln_relay/rln_relay, + ./waku_rln_relay/contract + +export group_manager, conversion_utils, rln_relay, contract diff --git a/third-party/nwaku/waku/waku_rln_relay/constants.nim b/third-party/nwaku/waku/waku_rln_relay/constants.nim new file mode 100644 index 0000000..3e47575 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/constants.nim @@ -0,0 +1,43 @@ +import stint + +import ./protocol_types + +import ../waku_keystore + +# Acceptable roots for merkle root validation of incoming messages +const AcceptableRootWindowSize* = 50 + +# RLN membership key and index files path +const RlnCredentialsFilename* = "rlnCredentials.txt" + +# inputs of the membership contract constructor +# TODO may be able to make these constants private and put them inside the waku_rln_relay_utils +const + # in wei + MembershipFee* = 0.u256 + # the current implementation of the rln lib supports a circuit for Merkle tree with 
depth 20 + MerkleTreeDepth* = 20 + EthClient* = "http://127.0.0.1:8540" + +const + # the size of poseidon hash output in bits + HashBitSize* = 256 + # the size of poseidon hash output as the number hex digits + HashHexSize* = int(HashBitSize / 4) + +const DefaultRlnTreePath* = "rln_tree.db" + +const + # pre-processed "rln/waku-rln-relay/v2.0.0" to array[32, byte] + DefaultRlnIdentifier*: RlnIdentifier = [ + 114, 108, 110, 47, 119, 97, 107, 117, 45, 114, 108, 110, 45, 114, 101, 108, 97, 121, + 47, 118, 50, 46, 48, 46, 48, 0, 0, 0, 0, 0, 0, 0, + ] + DefaultUserMessageLimit* = UserMessageLimit(20) + +const MaxClockGapSeconds* = 20.0 # the maximum clock difference between peers in seconds + +# RLN Keystore defaults +const RLNAppInfo* = AppInfo( + application: "waku-rln-relay", appIdentifier: "01234567890abcdef", version: "0.2" +) diff --git a/third-party/nwaku/waku/waku_rln_relay/contract.nim b/third-party/nwaku/waku/waku_rln_relay/contract.nim new file mode 100644 index 0000000..10a8e54 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/contract.nim @@ -0,0 +1,14 @@ +# This contract code is used in deployment, note: this is not the deployedBytecode, it includes constructor args. 
+# Ref: https://github.com/waku-org/waku-rlnv2-contract +const PoseidonT3* = + "0x615be56200003b600b82828239805160001a60731461002e57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600436106100355760003560e01c8063561558fe1461003a575b600080fd5b61004d610048366004615b3e565b61005f565b60405190815260200160405180910390f35b60007f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f00000017f2b90bba00fca0589f617e7dcbfe82e0df706ab640ceb247b791a93b74e36736d7f101071f0032379b697315876690f053d148d4e109f5fb065c8aacc55a0f89bfa7f19a3fc0a56702bf417ba7fee3802593fa644470307043f7773279cd71d25d5e07ef1445235f2148c5986587169fc1bcd887b08d4d00868df5696fff40956e8648460805106017f08dff3487e8ac99e1f29a058d0fa80b930c728730b7ab36ce879f3890ecf73f58560a05106018582830986838883840909925086828309905086828883840909915086868309877f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e08509017f5151bcc773d05d360fc9c923795441a9618605f30e31f2b8f087d1575b9c613b01905086858309877f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe238509017f547424ff6c2e186923faa2cf5794c8cd2b5d3e8f151620ffda4a15b70cc05b3f0187858409887f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee29118609017f23dc61092dc247151d38da17703c1ccb157f035575fe333d62fda4d2a5ae1bf5018883800989848b83840909935089838409905089838b83840909925089828309905089828b838409099150898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f28813dcaebaeaa828a376df87af4a63bc8b7bf27ad49c6298ef7b387bf28526d019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f2727673b2ccbc903f181bf38e1c1d40d2033865200c352bc150928adddf9cb78019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b375332
6244ee65a1b1a7870901017f234ec45ca27727c2e74abd2b2a1494cd6efbd43e340587d6b8fb9e31e65cc63201945089818209935089818b86870909905089868709935089868b86870909955089858609935089858b868709099450898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f15b52534031ae18f7f862cb2cf7cf760ab10a8150a337b1ccd99ff6e8797d428019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f0dc8fad6d9e4b35f5ed9a3d186b79ce38e0e8a8d1b58b132d701d4eecf68d1f6019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f1bcd95ffc211fbca600f705fad3fb567ea4eb378f62e1fec97805518a47e4d9c01915089848509905089848b83840909935089838409905089838b83840909925089828309905089828b838409099150898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f10520b0ab721cadfe9eff81b016fc34dc76da36c2578937817cb978d069de559019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f1f6d48149b8e7f7d9b257d8ed5fbbaf42932498075fed0ace88a9eb81f5627f6019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f1d9655f652309014d29e00ef35a2089bfff8dc1c816f0dc9ca34bdb5460c870501945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f04df5a56ff95bcafb051f7b1cd43a99ba731ff67e47032058fe3d4185697cc7d019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480
b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f0672d995f8fff640151b3d290cedaf148690a10a8c8424a7f6ec282b6e4be828019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f099952b414884454b21200d7ffafdd5f0c9a9dcc06f2708e9fc1d8209b5c75b901915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f052cba2255dfd00c7c483143ba8d469448e43586a9b4cd9183fd0e843a6b9fa6019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f0b8badee690adb8eb0bd74712b7999af82de55707251ad7716077cb93c464ddc019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f119b1590f13307af5a1ee651020c07c749c15d60683a8050b963d0a8e4b2bdd101945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f03150b7cd6d5d17b2529d36be0f67b832c4acfc884ef4ee5ce15be0bfb4a8d09019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f2cc6182c5e14546e3cf1951f173912355374efb83d80898abe69cb317c9ea565019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017e5032551e6378c450cfe129a404b3764218cadedac14e2b92d2cd73111bf0f901915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f233237e3289baa34bb147e972ebcb9
516469c399fcc069fb88f9da2cc28276b5019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f05c8f4f4ebd4a6e3c980d31674bfbe6323037f21b34ae5a4e80c2d4c24d60280019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f0a7b1db13042d396ba05d818a319f25252bcf35ef3aeed91ee1f09b2590fc65b01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f2a73b71f9b210cf5b14296572c9d32dbf156e2b086ff47dc5df542365a404ec0019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f1ac9b0417abcc9a1935107e9ffc91dc3ec18f2c4dbe7f22976a760bb5c50c460019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f12c0339ae08374823fabb076707ef479269f3e4d6cb104349015ee046dc93fc001915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f0b7475b102a165ad7f5b18db4e1e704f52900aa3253baac68246682e56e9a28e019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f037c2849e191ca3edb1c5e49f6e8b8917c843e379366f2ea32ab3aa88d7f8448019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f05a6811f8556f014e92674661e217e9bd5206c5c93a07dc145fdb176a716346f01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc93
14dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f29a795e7d98028946e947b75d54e9f044076e87a7b2883b47b675ef5f38bd66e019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f20439a0c84b322eb45a3857afc18f5826e8c7382c8a1585c507be199981fd22f019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f2e0ba8d94d9ecf4a94ec2050c7371ff1bb50f27799a84b6d4a2a6f2a0982c88701915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f143fd115ce08fb27ca38eb7cce822b4517822cd2109048d2e6d0ddcca17d71c8019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f0c64cbecb1c734b857968dbbdcf813cdf8611659323dbcbfc84323623be9caf1019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f028a305847c683f646fca925c163ff5ae74f348d62c2b670f1426cef9403da5301945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f2e4ef510ff0b6fda5fa940ab4c4380f26a6bcb64d89427b824d6755b5db9e30c019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017e81c95bc43384e663d79270c956ce3b8925b4f6d033b078b96384f50579400e019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f2ed5f0
c91cbd9749187e2fade687e05ee2491b349c039a0bba8a9f4023a0bb3801915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f30509991f88da3504bbf374ed5aae2f03448a22c76234c8c990f01f33a735206019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f1c3f20fd55409a53221b7c4d49a356b9f0a1119fb2067b41a7529094424ec6ad019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f10b4e7f3ab5df003049514459b6e18eec46bb2213e8e131e170887b47ddcb96c01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f2a1982979c3ff7f43ddd543d891c2abddd80f804c077d775039aa3502e43adef019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f1c74ee64f15e1db6feddbead56d6d55dba431ebc396c9af95cad0f1315bd5c91019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f07533ec850ba7f98eab9303cace01b4b9e4f2e8b82708cfa9c2fe45a0ae146a001915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f21576b438e500449a151e4eeaf17b154285c68f42d42c1808a11abf3764c0750019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f2f17c0559b8fe79608ad5ca193d62f10bce8384c815f0906743d6930836d4a9e019550898783098a7f176cc02969
5ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f2d477e3862d07708a79e8aae946170bc9775a4201318474ae665b0b1b7e2730e01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f162f5243967064c390e095577984f291afba2266c38f5abcd89be0f5b2747eab019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f2b4cb233ede9ba48264ecd2c8ae50d1ad7a8596a87f29f8a7777a70092393311019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f2c8fbcb2dd8573dc1dbaf8f4622854776db2eece6d85c4cf4254e7c35e03b07a01915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f1d6f347725e4816af2ff453f0cd56b199e1b61e9f601e9ade5e88db870949da9019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f204b0c397f4ebe71ebc2d8b3df5b913df9e6ac02b68d31324cd49af5c4565529019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f0c4cb9dc3c4fd8174f1149b3c63c3c2f9ecb827cd7dc25534ff8fb75bc79c50201945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f174ad61a1448c899a25416474f4930301e5c49475279e0639a616ddc45bc7b54019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c76
4379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f1a96177bcf4d8d89f759df4ec2f3cde2eaaa28c177cc0fa13a9816d49a38d2ef019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f066d04b24331d71cd0ef8054bc60c4ff05202c126a233c1a8242ace360b8a30a01915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f2a4c4fc6ec0b0cf52195782871c6dd3b381cc65f72e02ad527037a62aa1bd804019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f13ab2d136ccf37d447e9f2e14a7cedc95e727f8446f6d9d7e55afc01219fd649019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f1121552fca26061619d24d843dc82769c1b04fcec26f55194c2e3e869acc6a9a01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017eef653322b13d6c889bc81715c37d77a6cd267d595c4a8909a5546c7c97cff1019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f0e25483e45a665208b261d8ba74051e6400c776d652595d9845aca35d8a397d3019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f29f536dcb9dd7682245264659e15d88e395ac3d4dde92d8c46448db979eeba8901915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f2a56ef9f2c53febadfda33575dbdbd885a124e
2780bbea170e456baace0fa5be019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f1c8361c78eb5cf5decfb7a2d17b5c409f2ae2999a46762e8ee416240a8cb9af1019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f151aff5f38b20a0fc0473089aaf0206b83e8e68a764507bfd3d0ab4be74319c501945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f04c6187e41ed881dc1b239c88f7f9d43a9f52fc8c8b6cdd1e76e47615b51f100019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f13b37bd80f4d27fb10d84331f6fb6d534b81c61ed15776449e801b7ddc9c2967019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f01a5c536273c2d9df578bfbd32c17b7a2ce3664c2a52032c9321ceb1c4e8a8e401915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f2ab3561834ca73835ad05f5d7acb950b4a9a2c666b9726da832239065b7c3b02019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f1d4d8ec291e720db200fe6d686c0d613acaf6af4e95d3bf69f7ed516a597b646019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f041294d2cc484d228f5784fe7919fd2bb925351240a04b711514c9c80b65af1d01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdb
deea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f154ac98e01708c611c4fa715991f004898f57939d126e392042971dd90e81fc6019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f0b339d8acca7d4f83eedd84093aef51050b3684c88f8b0b04524563bc6ea4da4019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f0955e49e6610c94254a4f84cfbab344598f0e71eaff4a7dd81ed95b50839c82e01915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f06746a6156eba54426b9e22206f15abca9a6f41e6f535c6f3525401ea0654626019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f0f18f5a0ecd1423c496f3820c549c27838e5790e2bd0a196ac917c7ff32077fb019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f04f6eeca1751f7308ac59eff5beb261e4bb563583ede7bc92a738223d6f76e1301945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f2b56973364c4c4f5c1a3ec4da3cdce038811eb116fb3e45bc1768d26fc0b3758019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f123769dd49d5b054dcd76b89804b1bcb8e1392b385716a5d83feb65d437f29ef019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f2147b424fc48
c80a88ee52b91169aacea989f6446471150994257b2fb01c63e901915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f0fdc1f58548b85701a6c5505ea332a29647e6f34ad4243c2ea54ad897cebe54d019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f12373a8251fea004df68abcf0f7786d4bceff28c5dbbe0c3944f685cc0a0b1f2019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f21e4f4ea5f35f85bad7ea52ff742c9e8a642756b6af44203dd8a1f35c1a9003501945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f16243916d69d2ca3dfb4722224d4c462b57366492f45e90d8a81934f1bc3b147019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f1efbe46dd7a578b4f66f9adbc88b4378abc21566e1a0453ca13a4159cac04ac2019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f07ea5e8537cf5dd08886020e23a7f387d468d5525be66f853b672cc96a88969a01915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f05a8c4f9968b8aa3b7b478a30f9a5b63650f19a75e7ce11ca9fe16c0b76c00bc019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f20f057712cc21654fbfe59bd345e8dac3f7818c701b9c7882d9d57b72a32e83f019550898783098a7f176cc029695ad025
82a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f04a12ededa9dfd689672f8c67fee31636dcd8e88d01d49019bd90b33eb33db6901945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f27e88d8c15f37dcee44f1e5425a51decbd136ce5091a6767e49ec9544ccd101a019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f2feed17b84285ed9b8a5c8c5e95a41f66e096619a7703223176c41ee433de4d1019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f1ed7cc76edf45c7c404241420f729cf394e5942911312a0d6972b8bd53aff2b801915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f15742e99b9bfa323157ff8c586f5660eac6783476144cdcadf2874be45466b1a019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f1aac285387f65e82c895fc6887ddf40577107454c6ec0317284f033f27d0c785019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f25851c3c845d4790f9ddadbdb6057357832e2e7a49775f71ec75a96554d67c7701945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f15a5821565cc2ec2ce78457db197edf353b7ebba2c5523370ddccc3d9f146a67019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379db
ca2cc8fdd1415c3dded62940bcde0bd771840901017f2411d57a4813b9980efa7e31a1db5966dcf64f36044277502f15485f28c71727019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017e2e6f8d6520cd4713e335b8c0b6d2e647e9a98e12f4cd2558828b5ef6cb4c9b01915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f2ff7bc8f4380cde997da00b616b0fcd1af8f0e91e2fe1ed7398834609e0315d2019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017eb9831b948525595ee02724471bcd182e9521f6b7bb68f1e93be4febb0d3cbe019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f0a2f53768b8ebf6a86913b0e57c04e011ca408648a4743a87d77adbf0c9c351201945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017e248156142fd0373a479f91ff239e960f599ff7e94be69b7f2a290305e1198d019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f171d5620b87bfb1328cf8c02ab3f0c9a397196aa6a542c2350eb512a2b2bcda9019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f170a4f55536f7dc970087c7c10d6fad760c952172dd54dd99d1045e4ec34a80801915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f29aba33f799fe66c2ef3134aea04336ecc37e38c1cd211ba
482eca17e2dbfae1019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f1e9bc179a4fdd758fdd1bb1945088d47e70d114a03f6a0e8b5ba650369e64973019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f1dd269799b660fad58f7f4892dfb0b5afeaad869a9c4b44f9c9e1c43bdaf8f0901945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f22cdbc8b70117ad1401181d02e15459e7ccd426fe869c7c95d1dd2cb0f24af38019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f0ef042e454771c533a9f57a55c503fcefd3150f52ed94a7cd5ba93b9c7dacefd019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f11609e06ad6c8fe2f287f3036037e8851318e8b08a0359a03b304ffca62e828401915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f1166d9e554616dba9e753eea427c17b7fecd58c076dfe42708b08f5b783aa9af019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f2de52989431a859593413026354413db177fbf4cd2ac0b56f855a888357ee466019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f3006eb4ffc7a85819a6da492f3a8ac1df51aee5b17b8e89d74bf01cf5f71e9ad01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c6
4543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f2af41fbb61ba8a80fdcf6fff9e3f6f422993fe8f0a4639f962344c8225145086019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f119e684de476155fe5a6b41a8ebc85db8718ab27889e85e781b214bace4827c3019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f1835b786e2e8925e188bea59ae363537b51248c23828f047cff784b97b3fd80001915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f28201a34c594dfa34d794996c6433a20d152bac2a7905c926c40e285ab32eeb6019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f083efd7a27d1751094e80fefaf78b000864c82eb571187724a761f88c22cc4e7019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f0b6f88a3577199526158e61ceea27be811c16df7774dd8519e079564f61fd13b01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f0ec868e6d15e51d9644f66e1d6471a94589511ca00d29e1014390e6ee4254f5b019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f2af33e3f866771271ac0c9b3ed2e1142ecd3e74b939cd40d00d937ab84c98591019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f0b520211f904b5e7d09b5d
961c6ace7734568c547dd6858b364ce5e47951f17801915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f0b2d722d0919a1aad8db58f10062a92ea0c56ac4270e822cca228620188a1d40019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f1f790d4d7f8cf094d980ceb37c2453e957b54a9991ca38bbe0061d1ed6e562d4019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f0171eb95dfbf7d1eaea97cd385f780150885c16235a2a6a8da92ceb01e50423301945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f0c2d0e3b5fd57549329bf6885da66b9b790b40defd2c8650762305381b168873019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f1162fb28689c27154e5a8228b4e72b377cbcafa589e283c35d3803054407a18d019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f2f1459b65dee441b64ad386a91e8310f282c5a92a89e19921623ef8249711bc001915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f1e6ff3216b688c3d996d74367d5cd4c1bc489d46754eb712c243f70d1b53cfbb019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f01ca8be73832b8d0681487d27d157802d741a6f36cdc2a0576881f9326478875019550898783098a7f176cc029695ad02582a70eff08
a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f1f7735706ffe9fc586f976d5bdf223dc680286080b10cea00b9b5de315f9650e01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f2522b60f4ea3307640a0c2dce041fba921ac10a3d5f096ef4745ca838285f019019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f23f0bee001b1029d5255075ddc957f833418cad4f52b6c3f8ce16c235572575b019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f2bc1ae8b8ddbb81fcaac2d44555ed5685d142633e9df905f66d9401093082d5901915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f0f9406b8296564a37304507b8dba3ed162371273a07b1fc98011fcd6ad72205f019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f2360a8eb0cc7defa67b72998de90714e17e75b174a52ee4acb126c8cd995f0a8019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f15871a5cddead976804c803cbaef255eb4815a5e96df8b006dcbbc2767f8894801945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f193a56766998ee9e0a8652dd2f3b1da0362f4f54f72379544f957ccdeefb420f019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1
415c3dded62940bcde0bd771840901017f2a394a43934f86982f9be56ff4fab1703b2e63c8ad334834e4309805e777ae0f019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f1859954cfeb8695f3e8b635dcb345192892cd11223443ba7b4166e8876c0d14201915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f04e1181763050e58013444dbcb99f1902b11bc25d90bbdca408d3819f4fed32b019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f0fdb253dee83869d40c335ea64de8c5bb10eb82db08b5e8b1f5e5552bfd05f23019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f058cbe8a9a5027bdaa4efb623adead6275f08686f1c08984a9d7c5bae9b4f1c001945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f1382edce9971e186497eadb1aeb1f52b23b4b83bef023ab0d15228b4cceca59a019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f03464990f045c6ee0819ca51fd11b0be7f61b8eb99f14b77e1e6634601d9e8b5019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f23f7bfc8720dc296fff33b41f98ff83c6fcab4605db2eb5aaa5bc137aeb70a5801915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f0a59a158e3eec2117e6e94e7f0e9decf18c3ffd5e1531a921963
6158bbaf62f2019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f06ec54c80381c052b58bf23b312ffd3ce2c4eba065420af8f4c23ed0075fd07b019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f118872dc832e0eb5476b56648e867ec8b09340f7a7bcb1b4962f0ff9ed1f9d0101945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f13d69fa127d834165ad5c7cba7ad59ed52e0b0f0e42d7fea95e1906b520921b1019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f169a177f63ea681270b1c6877a73d21bde143942fb71dc55fd8a49f19f10c77b019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f04ef51591c6ead97ef42f287adce40d93abeb032b922f66ffb7e9a5a7450544d01915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f256e175a1dc079390ecd7ca703fb2e3b19ec61805d4f03ced5f45ee6dd0f69ec019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f30102d28636abd5fe5f2af412ff6004f75cc360d3205dd2da002813d3e2ceeb2019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f10998e42dfcd3bbf1c0714bc73eb1bf40443a3fa99bef4a31fd31be182fcc79201945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543
dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f193edd8e9fcf3d7625fa7d24b598a1d89f3362eaf4d582efecad76f879e36860019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f18168afd34f2d915d0368ce80b7b3347d1c7a561ce611425f2664d7aa51f0b5d019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f29383c01ebd3b6ab0c017656ebe658b6a328ec77bc33626e29e2e95b33ea611101915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f10646d2f2603de39a1f4ae5e7771a64a702db6e86fb76ab600bf573f9010c711019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f0beb5e07d1b27145f575f1395a55bf132f90c25b40da7b3864d0242dcb1117fb019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f16d685252078c133dc0d3ecad62b5c8830f95bb2e54b59abdffbf018d96fa33601945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f0a6abd1d833938f33c74154e0404b4b40a555bbbec21ddfafd672dd62047f01a019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f1a679f5d36eb7b5c8ea12a4c2dedc8feb12dffeec450317270a6f19b34cf1860019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f0980fb233bd456c23974d50e0e
bfde4726a423eada4e8f6ffbc7592e3f1b93d601915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f161b42232e61b84cbf1810af93a38fc0cece3d5628c9282003ebacb5c312c72b019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f0ada10a90c7f0520950f7d47a60d5e6a493f09787f1564e5d09203db47de1a0b019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f1a730d372310ba82320345a29ac4238ed3f07a8a2b4e121bb50ddb9af407f45101945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f2c8120f268ef054f817064c369dda7ea908377feaba5c4dffbda10ef58e8c556019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f1c7c8824f758753fa57c00789c684217b930e95313bcb73e6e7b8649a4968f70019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f2cd9ed31f5f8691c8e39e4077a74faa0f400ad8b491eb3f7b47b27fa3fd1cf7701915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f23ff4f9d46813457cf60d92f57618399a5e022ac321ca550854ae23918a22eea019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f09945a5d147a4f66ceece6405dddd9d0af5a2c5103529407dff1ea58f180426d019550898783098a7f176cc029695ad02582a70eff08a6fd
99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f188d9c528025d4c2b67660c6b771b90f7c7da6eaa29d3f268a6dd223ec6fc63001945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f3050e37996596b7f81f68311431d8734dba7d926d3633595e0c0d8ddf4f0f47f019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f15af1169396830a91600ca8102c35c426ceae5461e3f95d89d829518d30afd78019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f1da6d09885432ea9a06d9f37f873d985dae933e351466b2904284da3320d8acc01915089848509905089848b838409099350898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f2796ea90d269af29f5f8acf33921124e4e4fad3dbe658945e546ee411ddaa9cb019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f202d7dd1da0f6b4b0325c8b3307742f01e15612ec8e9304a7cb0319e01d32d60019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f096d6790d05bb759156a952ba263d672a2d7f9c788f4c831a29dace4c0f8be5f01945089818209935089818b868709099050898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f054efa1f65b0fce283808965275d877b438da23ce5b13e1963798cb1447d25a4019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c
3dded62940bcde0bd771840901017f1b162f83d917e93edb3308c29802deb9d8aa690113b2e14864ccf6e18e4165f1019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f21e5241e12564dd6fd9f1cdd2a0de39eedfefc1466cc568ec5ceb745a0506edc01915089848509905089848b83840909935089838409905089838b83840909925089828309905089828b838409099150898983098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f1cfb5662e8cf5ac9226a80ee17b36abecb73ab5f87e161927b4349e10e4bdf08019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f0f21177e302a771bbae6d8d1ecb373b62c99af346220ac0129c53f666eb24100019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f1671522374606992affb0dd7f71b12bec4236aede6290546bcef7e1f515c232001945089818209935089818b86870909905089868709935089868b86870909955089858609935089858b868709099450898986098a7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e088098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b840901017f0fa3ec5b9488259c2eb4cf24501bfad9be2ec9e42c5cc8ccd419d2a692cad870019350898886098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2388098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771840901017f193c0e04e0bd298357cb266c1506080ed36edce85c648cc085e8c57b1ab54bba019250898786098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291188098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7840901017f102adf8ef74735a27e9128306dcbc3c99f6f7291cd406578ce14ea2adaba68f801915089848509905089848b83840909935089838409905089838b83840909925089828309905089828b838409099150898983098a7f16ed41e13bb9c0c66ae119
424fddbcbc9314dc9fdbdeea55d6c64543dc4903e085098b7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b870901017f0fe0af7858e49859e2a54d6f1ad945b1316aa24bfbdd23ae40a6d0cb70c3eab1019050898883098a7f2e2419f9ec02ec394c9871c832963dc1b89d743c8c7b964029b2311687b1fe2385098b7f2969f27eed31a480b9c36c764379dbca2cc8fdd1415c3dded62940bcde0bd771870901017f216f6717bbc7dedb08536a2220843f4e2da5f1daa9ebdefde8a5ea7344798d22019550898783098a7f176cc029695ad02582a70eff08a6fd99d057e12e58e7d7b6b16cdfabc8ee291185098b7f143021ec686a3f330d5f9e654638065ce6cd79e28c5b3753326244ee65a1b1a7870901017f1da55cc900f0d21f4a3e694391918a1b3c23b2ac773c6b3ef88e2e422832516101945089818209935089818b86870909905089868709935089868b86870909955089858609935089858b868709099450898a8a87098b7f16ed41e13bb9c0c66ae119424fddbcbc9314dc9fdbdeea55d6c64543dc4903e089098c7f109b7f411ba0e4c9b2b70caf5c36a7b194be7c11ad24378bfedb68592ba8118b850901010660005260206000f35b600060408284031215615b5057600080fd5b82601f830112615b5f57600080fd5b6040516040810181811067ffffffffffffffff82111715615ba9577f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b8060405250806040840185811115615bc057600080fd5b845b81811015615bda578035835260209283019201615bc2565b50919594505050505056" +const LazyIMT* = + 
"0x611e7861003a600b82828239805160001a60731461002d57634e487b7160e01b600052600060045260246000fd5b30600052607381538281f3fe73000000000000000000000000000000000000000030146080604052600436106100ad5760003560e01c80633c251db111610080578063899375d611610065578063899375d614610183578063b7ca3a2d146101c6578063ca9ecfd4146101d957600080fd5b80633c251db11461014357806361136cec1461016357600080fd5b80630224ef5e146100b257806309489a3c146100d45780630c26d683146100f457806339ebe6e314610122575b600080fd5b8180156100be57600080fd5b506100d26100cd366004611a23565b6101ec565b005b8180156100e057600080fd5b506100d26100ef366004611a5f565b6101fa565b610107610102366004611aa5565b61020a565b60405164ffffffffff90911681526020015b60405180910390f35b610135610130366004611ad8565b61021f565b604051908152602001610119565b610156610151366004611afb565b61022b565b6040516101199190611b2e565b81801561016f57600080fd5b506100d261017e366004611ad8565b610242565b81801561018f57600080fd5b506100d261019e366004611b72565b80547fffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffffff169055565b6101356101d4366004611b72565b61024c565b6101356101e7366004611b8b565b610257565b6101f68282610262565b5050565b6102058383836104d9565b505050565b60006102168383610812565b90505b92915050565b60006102168383610830565b6060610238848484610987565b90505b9392505050565b6101f68282610d23565b600061021982610dbd565b600061021982610e15565b815465010000000000900464ffffffffff167f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001821061030e5760405162461bcd60e51b815260206004820152602a60248201527f4c617a79494d543a206c656166206d757374206265203c20534e41524b5f534360448201527f414c41525f4649454c440000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b825464ffffffffff908116908216106103695760405162461bcd60e51b815260206004820152601560248201527f4c617a79494d543a20747265652069732066756c6c00000000000000000000006044820152606401610305565b610374816001611bd5565b835464ffffffffff9190911665010000000000027fffffffffffffffffffffffffffffffffffffffffffff0000000000ffffffffff90911
61783558160005b818560010160006103c48487610812565b64ffffffffff16815260208101919091526040016000205560018316156104d25760006103fb826103f6600187611bfa565b610812565b60408051808201825264ffffffffff8316600090815260018a01602090815290839020548252810186905290517f561558fe00000000000000000000000000000000000000000000000000000000815291925073__$PoseidonT3$__9163561558fe9161047691600401611c18565b602060405180830381865af4158015610493573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104b79190611c49565b647fffffffff600195861c16949093509190910190506103b3565b5050505050565b7f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f0000001821061056e5760405162461bcd60e51b815260206004820152602a60248201527f4c617a79494d543a206c656166206d757374206265203c20534e41524b5f534360448201527f414c41525f4649454c44000000000000000000000000000000000000000000006064820152608401610305565b825464ffffffffff65010000000000909104811690821681116105d35760405162461bcd60e51b815260206004820152601860248201527f4c617a79494d543a206c656166206d75737420657869737400000000000000006044820152606401610305565b8260005b818660010160006105e88488610812565b64ffffffffff1681526020019081526020016000208190555060008160016106109190611c62565b60ff168464ffffffffff16901c64ffffffffff16905060018564ffffffffff16901c64ffffffffff168111610645575061080a565b60018516600003610725576000610661836103f6886001611bd5565b60408051808201825286815264ffffffffff8316600090815260018c01602090815290839020549082015290517f561558fe00000000000000000000000000000000000000000000000000000000815291925073__$PoseidonT3$__9163561558fe916106dc91600401611c18565b602060405180830381865af41580156106f9573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061071d9190611c49565b9350506107f6565b6000610736836103f6600189611bfa565b60408051808201825264ffffffffff8316600090815260018c01602090815290839020548252810187905290517f561558fe00000000000000000000000000000000000000000000000000000000815291925073__$PoseidonT3$__9163561558fe916107b191600401611c18565b6020604051808
30381865af41580156107ce573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107f29190611c49565b9350505b50647fffffffff600194851c1693016105d7565b505050505050565b60008161082660ff851663ffffffff611c7b565b6102169190611bd5565b6000808260ff16116108845760405162461bcd60e51b815260206004820152601a60248201527f4c617a79494d543a206465707468206d757374206265203e20300000000000006044820152606401610305565b602060ff831611156108fe5760405162461bcd60e51b815260206004820152602360248201527f4c617a79494d543a206465707468206d757374206265203c3d204d41585f444560448201527f50544800000000000000000000000000000000000000000000000000000000006064820152608401610305565b825465010000000000900464ffffffffff168061091f60ff85166002611db9565b64ffffffffff1610156109745760405162461bcd60e51b815260206004820152601860248201527f4c617a79494d543a20616d626967756f757320646570746800000000000000006044820152606401610305565b61097f8482856114b2565b949350505050565b825460609064ffffffffff65010000000000909104811690841681116109ef5760405162461bcd60e51b815260206004820152601860248201527f4c617a79494d543a206c656166206d75737420657869737400000000000000006044820152606401610305565b60015b64ffffffffff8216610a0860ff83166002611db9565b64ffffffffff161015610a275780610a1f81611dcf565b9150506109f2565b8060ff168460ff161015610a7d5760405162461bcd60e51b815260206004820152601460248201527f4c617a79494d543a2070726f6f662064657074680000000000000000000000006044820152606401610305565b60008460ff1667ffffffffffffffff811115610a9b57610a9b611dee565b604051908082528060200260200182016040528015610ac4578160200160208202803683370190505b509050610add8784610ad7600186611e1d565b846115d4565b60018616600003610b765764ffffffffff8316610afb876001611bd5565b64ffffffffff1610610b3557610b116000610e15565b81600081518110610b2457610b24611e36565b602002602001018181525050610bc4565b866001016000610b4d60008960016103f69190611bd5565b64ffffffffff1681526020019081526020016000205481600081518110610b2457610b24611e36565b866001016000610b8e600060018a6103f69190611bfa565b64ffffffffff16815260200190815260200
16000205481600081518110610bb757610bb7611e36565b6020026020010181815250505b647fffffffff600196871c16955b8560ff168160ff161015610d185764ffffffffff80851660ff83161c1660018816600003610cb55780610c06896001611bd5565b64ffffffffff161015610c6657886001016000610c2a848b60016103f69190611bd5565b64ffffffffff16815260200190815260200160002054838360ff1681518110610c5557610c55611e36565b602002602001018181525050610d04565b64ffffffffff881660ff8316610c7d600188611bfa565b64ffffffffff16901c64ffffffffff1611610cb057610c9b82610e15565b838360ff1681518110610c5557610c55611e36565b610d04565b886001016000610ccc8460018c6103f69190611bfa565b64ffffffffff16815260200190815260200160002054838360ff1681518110610cf757610cf7611e36565b6020026020010181815250505b50647fffffffff600197881c169601610bd2565b509695505050505050565b602060ff82161115610d775760405162461bcd60e51b815260206004820152601760248201527f4c617a79494d543a205472656520746f6f206c617267650000000000000000006044820152606401610305565b610d88600160ff831681901b611e65565b82547fffffffffffffffffffffffffffffffffffffffffffff000000000000000000001664ffffffffff919091161790915550565b805460009065010000000000900464ffffffffff1660015b64ffffffffff8216610deb60ff83166002611db9565b64ffffffffff161015610e0a5780610e0281611dcf565b915050610dd5565b61097f8483836114b2565b60008160ff16600003610e2a57506000919050565b8160ff16600103610e5c57507f2098f5fb9e239eab3ceac3f27b81e481dc3124d55ffed523a839ee8446b64864919050565b8160ff16600203610e8e57507f1069673dcdb12263df301a6ff584a7ec261a44cb9dc68df067a4774460b1f1e1919050565b8160ff16600303610ec057507f18f43331537ee2af2e3d758d50f72106467c6eea50371dd528d57eb2b856d238919050565b8160ff16600403610ef257507f07f9d837cb17b0d36320ffe93ba52345f1b728571a568265caac97559dbc952a919050565b8160ff16600503610f2457507f2b94cf5e8746b3f5c9631f4c5df32907a699c58c94b2ad4d7b5cec1639183f55919050565b8160ff16600603610f5657507f2dee93c5a666459646ea7d22cca9e1bcfed71e6951b953611d11dda32ea09d78919050565b8160ff16600703610f8857507f078295e5a22b84e982cf601eb639597b8b0515a88cb5ac7fa8a4aabe3c87349d919050565
b8160ff16600803610fba57507f2fa5e5f18f6027a6501bec864564472a616b2e274a41211a444cbe3a99f3cc61919050565b8160ff16600903610fec57507f0e884376d0d8fd21ecb780389e941f66e45e7acce3e228ab3e2156a614fcd747919050565b8160ff16600a0361101e57507f1b7201da72494f1e28717ad1a52eb469f95892f957713533de6175e5da190af2919050565b8160ff16600b0361105057507f1f8d8822725e36385200c0b201249819a6e6e1e4650808b5bebc6bface7d7636919050565b8160ff16600c0361108257507f2c5d82f66c914bafb9701589ba8cfcfb6162b0a12acf88a8d0879a0471b5f85a919050565b8160ff16600d036110b457507f14c54148a0940bb820957f5adf3fa1134ef5c4aaa113f4646458f270e0bfbfd0919050565b8160ff16600e036110e657507f190d33b12f986f961e10c0ee44d8b9af11be25588cad89d416118e4bf4ebe80c919050565b8160ff16600f0361111857507f22f98aa9ce704152ac17354914ad73ed1167ae6596af510aa5b3649325e06c92919050565b8160ff1660100361114a57507f2a7c7c9b6ce5880b9f6f228d72bf6a575a526f29c66ecceef8b753d38bba7323919050565b8160ff1660110361117c57507f2e8186e558698ec1c67af9c14d463ffc470043c9c2988b954d75dd643f36b992919050565b8160ff166012036111ae57507f0f57c5571e9a4eab49e2c8cf050dae948aef6ead647392273546249d1c1ff10f919050565b8160ff166013036111e057507f1830ee67b5fb554ad5f63d4388800e1cfe78e310697d46e43c9ce36134f72cca919050565b8160ff1660140361121257507f2134e76ac5d21aab186c2be1dd8f84ee880a1e46eaf712f9d371b6df22191f3e919050565b8160ff1660150361124457507f19df90ec844ebc4ffeebd866f33859b0c051d8c958ee3aa88f8f8df3db91a5b1919050565b8160ff1660160361127657507f18cca2a66b5c0787981e69aefd84852d74af0e93ef4912b4648c05f722efe52b919050565b8160ff166017036112a857507f2388909415230d1b4d1304d2d54f473a628338f2efad83fadf05644549d2538d919050565b8160ff166018036112da57507f27171fb4a97b6cc0e9e8f543b5294de866a2af2c9c8d0b1d96e673e4529ed540919050565b8160ff1660190361130c57507f2ff6650540f629fd5711a0bc74fc0d28dcb230b9392583e5f8d59696dde6ae21919050565b8160ff16601a0361133e57507f120c58f143d491e95902f7f5277778a2e0ad5168f6add75669932630ce611518919050565b8160ff16601b0361137057507f1f21feb70d3f21b07bf853d5e5db03071ec495a0a565a21da2d665d279483795919050565
b8160ff16601c036113a257507f24be905fa71335e14c638cc0f66a8623a826e768068a9e968bb1a1dde18a72d2919050565b8160ff16601d036113d457507f0f8666b62ed17491c50ceadead57d4cd597ef3821d65c328744c74e553dac26d919050565b8160ff16601e0361140657507f0918d46bf52d98b034413f4a1a1c41594e7a7a3f6ae08cb43d1a2a230e1959ef919050565b8160ff16601f0361143857507f1bbeb01b4c479ecde76917645e404dfa2e26f90d0afc5a65128513ad375c5ff2919050565b8160ff1660200361146a57507f2f68a1c58e257e42a17a6c61dff5551ed560b9922ab119d5ac8e184c9734ead9919050565b60405162461bcd60e51b815260206004820152601e60248201527f4c617a79494d543a2064656661756c745a65726f2062616420696e64657800006044820152606401610305565b6000602060ff8316111561152e5760405162461bcd60e51b815260206004820152602360248201527f4c617a79494d543a206465707468206d757374206265203c3d204d41585f444560448201527f50544800000000000000000000000000000000000000000000000000000000006064820152608401610305565b8264ffffffffff1660000361154d5761154682610e15565b905061023b565b600061155a836001611c62565b60ff1667ffffffffffffffff81111561157557611575611dee565b60405190808252806020026020018201604052801561159e578160200160208202803683370190505b5090506115ad858585846115d4565b808360ff16815181106115c2576115c2611e36565b60200260200101519150509392505050565b602060ff8316111561164e5760405162461bcd60e51b815260206004820152602360248201527f4c617a79494d543a206465707468206d757374206265203c3d204d41585f444560448201527f50544800000000000000000000000000000000000000000000000000000000006064820152608401610305565b60008364ffffffffff16116116cb5760405162461bcd60e51b815260206004820152602560248201527f4c617a79494d543a206e756d626572206f66206c6561766573206d757374206260448201527f65203e20300000000000000000000000000000000000000000000000000000006064820152608401610305565b60006116d8600185611bfa565b905060018116600003611730578460010160006116f6600084610812565b64ffffffffff168152602001908152602001600020548260008151811061171f5761171f611e36565b60200260200101818152505061175a565b61173a6000610e15565b8260008151811061174d5761174d611e36565b602002602001018181525
0505b60005b8360ff168160ff16101561080a57600182166000036118565773__$PoseidonT3$__63561558fe6040518060400160405280868560ff16815181106117b0576117b0611e36565b602002602001015181526020016117c685610e15565b8152506040518263ffffffff1660e01b81526004016117e59190611c18565b602060405180830381865af4158015611802573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118269190611c49565b83611832836001611c62565b60ff168151811061184557611845611e36565b602002602001018181525050611a10565b6000611863826001611c62565b60ff168664ffffffffff16901c64ffffffffff16905060018364ffffffffff16901c64ffffffffff168111156119085760008760010160006118bc8560016118ab9190611c62565b60018864ffffffffff16901c610812565b64ffffffffff16815260200190815260200160002054905080858460016118e39190611c62565b60ff16815181106118f6576118f6611e36565b60200260200101818152505050611a0e565b6000876001016000611921856001886103f69190611bfa565b64ffffffffff16815260200190815260200160002054905073__$PoseidonT3$__63561558fe6040518060400160405280848152602001888760ff168151811061197957611979611e36565b60200260200101518152506040518263ffffffff1660e01b81526004016119a09190611c18565b602060405180830381865af41580156119bd573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906119e19190611c49565b856119ed856001611c62565b60ff1681518110611a0057611a00611e36565b602002602001018181525050505b505b647fffffffff600192831c16910161175d565b60008060408385031215611a3657600080fd5b50508035926020909101359150565b803564ffffffffff81168114611a5a57600080fd5b919050565b600080600060608486031215611a7457600080fd5b8335925060208401359150611a8b60408501611a45565b90509250925092565b803560ff81168114611a5a57600080fd5b60008060408385031215611ab857600080fd5b611ac183611a94565b9150611acf60208401611a45565b90509250929050565b60008060408385031215611aeb57600080fd5b82359150611acf60208401611a94565b600080600060608486031215611b1057600080fd5b83359250611b2060208501611a45565b9150611a8b60408501611a94565b6020808252825182820181905260009190848201906040850190845b81811015611b665783518352928
4019291840191600101611b4a565b50909695505050505050565b600060208284031215611b8457600080fd5b5035919050565b600060208284031215611b9d57600080fd5b61021682611a94565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b64ffffffffff818116838216019080821115611bf357611bf3611ba6565b5092915050565b64ffffffffff828116828216039080821115611bf357611bf3611ba6565b60408101818360005b6002811015611c40578151835260209283019290910190600101611c21565b50505092915050565b600060208284031215611c5b57600080fd5b5051919050565b60ff818116838216019081111561021957610219611ba6565b64ffffffffff818116838216028082169190828114611c9c57611c9c611ba6565b505092915050565b600181815b80851115611ce2578164ffffffffff04821115611cc857611cc8611ba6565b80851615611cd557918102915b93841c9390800290611ca9565b509250929050565b600082611cf957506001610219565b81611d0657506000610219565b8160018114611d1c5760028114611d2657611d58565b6001915050610219565b60ff841115611d3757611d37611ba6565b6001841b915064ffffffffff821115611d5257611d52611ba6565b50610219565b5060208310610133831016604e8410600b8410161715611d90575081810a64ffffffffff811115611d8b57611d8b611ba6565b610219565b611d9a8383611ca4565b8064ffffffffff04821115611db157611db1611ba6565b029392505050565b600064ffffffffff61097f818516828516611cea565b600060ff821660ff8103611de557611de5611ba6565b60010192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60ff828116828216039081111561021957610219611ba6565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b8181038181111561021957610219611ba656" +const Erc1967Proxy* = + 
"0x60806040526040516103c73803806103c78339810160408190526100229161025e565b61002c8282610033565b5050610341565b61003c82610091565b6040516001600160a01b038316907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b905f90a280511561008557610080828261010c565b505050565b61008d61017f565b5050565b806001600160a01b03163b5f036100cb57604051634c9c8ce360e01b81526001600160a01b03821660048201526024015b60405180910390fd5b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc80546001600160a01b0319166001600160a01b0392909216919091179055565b60605f80846001600160a01b0316846040516101289190610326565b5f60405180830381855af49150503d805f8114610160576040519150601f19603f3d011682016040523d82523d5f602084013e610165565b606091505b5090925090506101768583836101a0565b95945050505050565b341561019e5760405163b398979f60e01b815260040160405180910390fd5b565b6060826101b5576101b0826101ff565b6101f8565b81511580156101cc57506001600160a01b0384163b155b156101f557604051639996b31560e01b81526001600160a01b03851660048201526024016100c2565b50805b9392505050565b80511561020f5780518082602001fd5b60405163d6bda27560e01b815260040160405180910390fd5b634e487b7160e01b5f52604160045260245ffd5b5f5b8381101561025657818101518382015260200161023e565b50505f910152565b5f806040838503121561026f575f80fd5b82516001600160a01b0381168114610285575f80fd5b60208401519092506001600160401b03808211156102a1575f80fd5b818501915085601f8301126102b4575f80fd5b8151818111156102c6576102c6610228565b604051601f8201601f19908116603f011681019083821181831017156102ee576102ee610228565b81604052828152886020848701011115610306575f80fd5b61031783602083016020880161023c565b80955050505050509250929050565b5f825161033781846020870161023c565b9190910192915050565b607a8061034d5f395ff3fe6080604052600a600c565b005b60186014601a565b605d565b565b5f60587f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b905090565b365f80375f80365f845af43d5f803e8080156076573d5ff35b3d5ffd" +const Erc1967ProxyContractInput* = + 
"000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000248d8965bd0000000000000000000000000000000000000000000000000000000000000064" + +## The following bytecode contract is derived from the code in the following commit: https://github.com/waku-org/waku-rlnv2-contract/blob/a576a8949ca20e310f2fbb4ec0bd05a57ac3045f/src/WakuRlnV2.sol +const WakuRlnV2Contract* = + "0x60a06040523060805234801561001457600080fd5b5061001d610022565b6100e1565b600054610100900460ff161561008e5760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b60005460ff908116146100df576000805460ff191660ff9081179091556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b6080516121ca6200011960003960008181610598015281816106330152818161076e01528181610804015261093301526121ca6000f3fe6080604052600436106101805760003560e01c806374e942fa116100d6578063af7b42101161007f578063e493ef8c11610059578063e493ef8c146104e2578063ebf0c71714610516578063f2fde38b1461052b57600080fd5b8063af7b421014610441578063d0383d6814610461578063d90d0ee61461048657600080fd5b806398366e35116100b057806398366e35146103b75780639ac21345146103de578063a45d5e591461042157600080fd5b806374e942fa146103355780638d8965bd146103625780638da5cb5b1461038257600080fd5b80634add651e11610138578063679537f911610112578063679537f9146102d35780636bdcc8ab14610300578063715018a61461032057600080fd5b80634add651e146102805780634f1ef2861461029d57806352d1902d146102b057600080fd5b806322d9730c1161016957806322d9730c146101e55780633659cfe6146102155780633c979b5f1461023757600080fd5b8063037a791a1461018557806309aeb04c146101c8575b600080fd5b34801561019157600080fd5b5060c9546101ae9068010000000000000000900463ffffffff1681565b60405163ffffffff90911681526020015b60405180910390f35b3480156101d457600080fd5b5060c9546101ae9063ffffffff1681565b3480156101f157600080fd5b506102
05610200366004611c81565b61054b565b60405190151581526020016101bf565b34801561022157600080fd5b50610235610230366004611cc3565b610581565b005b34801561024357600080fd5b5060cc546102629064ffffffffff808216916501000000000090041682565b6040805164ffffffffff9384168152929091166020830152016101bf565b34801561028c57600080fd5b5060cb546101ae9063ffffffff1681565b6102356102ab366004611d5c565b610757565b3480156102bc57600080fd5b506102c5610919565b6040519081526020016101bf565b3480156102df57600080fd5b506102f36102ee366004611e34565b6109eb565b6040516101bf9190611e67565b34801561030c57600080fd5b5061020561031b366004611c81565b610b6a565b34801561032c57600080fd5b50610235610b81565b34801561034157600080fd5b50610355610350366004611ebe565b610b95565b6040516101bf9190611edb565b34801561036e57600080fd5b5061023561037d366004611f0d565b610cc4565b34801561038e57600080fd5b5060335460405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101bf565b3480156103c357600080fd5b506103cc601481565b60405160ff90911681526020016101bf565b3480156103ea57600080fd5b506103fe6103f9366004611c81565b610f5f565b6040805163ffffffff9485168152939092166020840152908201526060016101bf565b34801561042d57600080fd5b5061020561043c366004611f0d565b610fd0565b34801561044d57600080fd5b5061023561045c366004611f28565b610ff5565b34801561046d57600080fd5b5060c9546101ae90640100000000900463ffffffff1681565b34801561049257600080fd5b506104c56104a1366004611c81565b60ca6020526000908152604090205463ffffffff8082169164010000000090041682565b6040805163ffffffff9384168152929091166020830152016101bf565b3480156104ee57600080fd5b506102c57f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000181565b34801561052257600080fd5b506102c5611090565b34801561053757600080fd5b50610235610546366004611cc3565b61112c565b6000811580159061057b57507f30644e72e131a029b85045b68181585d2833e84879b9709143e1f593f000000182105b92915050565b73ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001630036106315760405162461bcd60e51b815260206004820152602c60248201527f46756e
6374696f6e206d7573742062652063616c6c6564207468726f7567682060448201527f64656c656761746563616c6c000000000000000000000000000000000000000060648201526084015b60405180910390fd5b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166106a67f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff161461072f5760405162461bcd60e51b815260206004820152602c60248201527f46756e6374696f6e206d7573742062652063616c6c6564207468726f7567682060448201527f6163746976652070726f787900000000000000000000000000000000000000006064820152608401610628565b610738816111c6565b60408051600080825260208201909252610754918391906111ce565b50565b73ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001630036108025760405162461bcd60e51b815260206004820152602c60248201527f46756e6374696f6e206d7573742062652063616c6c6564207468726f7567682060448201527f64656c656761746563616c6c00000000000000000000000000000000000000006064820152608401610628565b7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff166108777f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc5473ffffffffffffffffffffffffffffffffffffffff1690565b73ffffffffffffffffffffffffffffffffffffffff16146109005760405162461bcd60e51b815260206004820152602c60248201527f46756e6374696f6e206d7573742062652063616c6c6564207468726f7567682060448201527f6163746976652070726f787900000000000000000000000000000000000000006064820152608401610628565b610909826111c6565b610915828260016111ce565b5050565b60003073ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146109c65760405162461bcd60e51b815260206004820152603860248201527f555550535570677261646561626c653a206d757374206e6f742062652063616c60448201527f6c6564207468726f7567682064656c656761746563616c6c00000000000000006064820152608401610628
565b507f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc90565b60608163ffffffff168363ffffffff161115610a43576040517f9ffcd53d00000000000000000000000000000000000000000000000000000000815263ffffffff808516600483015283166024820152604401610628565b60c95463ffffffff6801000000000000000090910481169083161115610aa5576040517f9ffcd53d00000000000000000000000000000000000000000000000000000000815263ffffffff808516600483015283166024820152604401610628565b6000610ab18484611f7a565b610abc906001611f9e565b63ffffffff1667ffffffffffffffff811115610ada57610ada611cde565b604051908082528060200260200182016040528015610b03578160200160208202803683370190505b509050835b8363ffffffff168163ffffffff1611610b6257610b248161139e565b82610b2f8784611f7a565b63ffffffff1681518110610b4557610b45611fbb565b602090810291909101015280610b5a81611fea565b915050610b08565b509392505050565b600080610b7683610f5f565b151595945050505050565b610b8961145c565b610b9360006114c3565b565b610b9d611c62565b610ba5611c62565b6040517f3c251db100000000000000000000000000000000000000000000000000000000815260cc600482015264ffffffffff841660248201526014604482015260009073__$LazyIMT$__90633c251db190606401600060405180830381865af4158015610c25573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052610c6b919081019061200d565b905060005b601460ff82161015610cbb57818160ff1681518110610c9157610c91611fbb565b6020026020010151838260ff1660148110610cae57610cae611fbb565b6020020152600101610c70565b50909392505050565b600054610100900460ff1615808015610ce45750600054600160ff909116105b80610cfe5750303b158015610cfe575060005460ff166001145b610d705760405162461bcd60e51b815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a65640000000000000000000000000000000000006064820152608401610628565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790558015610dce57600080547ffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffff00ff166101001790555b610dd661153a565b610dde6115bf565b60c9805463ffffffff8481167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009092169190911766100000000000001790915560cb80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000016439092169190911790556040517f61136cec00000000000000000000000000000000000000000000000000000000815260cc60048201526014602482015273__$LazyIMT$__906361136cec9060440160006040518083038186803b158015610eb957600080fd5b505af4158015610ecd573d6000803e3d6000fd5b505060c980547fffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff1690555050801561091557600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15050565b600081815260ca6020908152604080832081518083019092525463ffffffff808216808452640100000000909204169282019290925282918291908203610fb157600080600093509350935050610fc9565b80516020820151610fc18161139e565b935093509350505b9193909250565b6000808263ffffffff1611801561057b57505060c95463ffffffff9081169116111590565b81610fff8161054b565b611038576040517f7f3e75af00000000000000000000000000000000000000000000000000000000815260048101829052602401610628565b8161104281610fd0565b611080576040517f6677a0c700000000000000000000000000000000000000000000000000000000815263ffffffff82166004820152602401610628565b61108a848461163c565b50505050565b6040517f39ebe6e300000000000000000000000000000000000000000000000000000000815260cc60048201526014602482015260009073__$LazyIMT$__906339ebe6e390604401602060405180830381865af4158015611103573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061112791906120b3565b905090565b61113461145c565b73ffffffffffffffffffffffffffffffffffffffff81166111bd5760405162461bcd60e51b815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f64647265737300000000000000000000000000000000000000000000000000006064820152608401610628565
b610754816114c3565b61075461145c565b7f4910fdfa16fed3260ed0e7147f7cc6da11a60208b5b9406d12a635614ffd91435460ff16156112065761120183611911565b505050565b8273ffffffffffffffffffffffffffffffffffffffff166352d1902d6040518163ffffffff1660e01b8152600401602060405180830381865afa92505050801561128b575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201909252611288918101906120b3565b60015b6112fd5760405162461bcd60e51b815260206004820152602e60248201527f45524331393637557067726164653a206e657720696d706c656d656e7461746960448201527f6f6e206973206e6f7420555550530000000000000000000000000000000000006064820152608401610628565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc81146113925760405162461bcd60e51b815260206004820152602960248201527f45524331393637557067726164653a20756e737570706f727465642070726f7860448201527f6961626c655555494400000000000000000000000000000000000000000000006064820152608401610628565b50611201838383611a01565b6040517f0c26d68300000000000000000000000000000000000000000000000000000000815260006004820181905263ffffffff831660248301529060cd90829073__$LazyIMT$__90630c26d68390604401602060405180830381865af415801561141b573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061143f91906120cc565b64ffffffffff168152602001908152602001600020549050919050565b60335473ffffffffffffffffffffffffffffffffffffffff163314610b935760405162461bcd60e51b815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65726044820152606401610628565b6033805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b600054610100900460ff166115b75760405162461bcd60e51b815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401
610628565b610b93611a26565b600054610100900460ff16610b935760405162461bcd60e51b815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610628565b61164582610b6a565b1561167b576040517e0a60f700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60c95463ffffffff640100000000820481166801000000000000000090920416106116d2576040517f57f6953100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60408051808201825283815263ffffffff8316602082015290517f561558fe00000000000000000000000000000000000000000000000000000000815260009173__$PoseidonT3$__9163561558fe9161173a916004016120e9565b602060405180830381865af4158015611757573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061177b91906120b3565b60408051808201825263ffffffff858116825260c95468010000000000000000900416602082015290517f0224ef5e00000000000000000000000000000000000000000000000000000000815260cc6004820152602481018390529192509073__$LazyIMT$__90630224ef5e9060440160006040518083038186803b15801561181157600080fd5b505af4158015611825573d6000803e3d6000fd5b505050600085815260ca6020908152604091829020845181548684015163ffffffff908116640100000000027fffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000909216928116929092171790915560c954835187815268010000000000000000909104909116918101919091527fb84da12e8207adb5ee3e5686338302ffe6634fbb95a9fd52f8a466ea2010152d92500160405180910390a1600160c960088282829054906101000a900463ffffffff166118ed9190611f9e565b92506101000a81548163ffffffff021916908363ffffffff16021790555050505050565b73ffffffffffffffffffffffffffffffffffffffff81163b61199b5760405162461bcd60e51b815260206004820152602d60248201527f455243313936373a206e657720696d706c656d656e746174696f6e206973206e60448201527f6f74206120636f6e7472616374000000000000000000000000000000000000006064820152608401610628565b7f360894a13ba1a3210667c828492db98dca3e2076cc3735a
920a3ca505d382bbc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b611a0a83611aac565b600082511180611a175750805b156112015761108a8383611af9565b600054610100900460ff16611aa35760405162461bcd60e51b815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610628565b610b93336114c3565b611ab581611911565b60405173ffffffffffffffffffffffffffffffffffffffff8216907fbc7cd75a20ee27fd9adebab32041f755214dbc6bffa90cc0225b39da2e5c2d3b90600090a250565b6060611b1e83836040518060600160405280602781526020016121a360279139611b25565b9392505050565b60606000808573ffffffffffffffffffffffffffffffffffffffff1685604051611b4f9190612135565b600060405180830381855af49150503d8060008114611b8a576040519150601f19603f3d011682016040523d82523d6000602084013e611b8f565b606091505b5091509150611ba086838387611baa565b9695505050505050565b60608315611c26578251600003611c1f5773ffffffffffffffffffffffffffffffffffffffff85163b611c1f5760405162461bcd60e51b815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e74726163740000006044820152606401610628565b5081611c30565b611c308383611c38565b949350505050565b815115611c485781518083602001fd5b8060405162461bcd60e51b81526004016106289190612151565b6040518061028001604052806014906020820280368337509192915050565b600060208284031215611c9357600080fd5b5035919050565b803573ffffffffffffffffffffffffffffffffffffffff81168114611cbe57600080fd5b919050565b600060208284031215611cd557600080fd5b611b1e82611c9a565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff81118282101715611d5457611d54611cde565b604052919050565b60008060408385031215611d6f57600080fd5b611d7883611c9a565b915060208084013567ffffffffffffffff80821115611d9657600080fd5b81860191508
6601f830112611daa57600080fd5b813581811115611dbc57611dbc611cde565b611dec847fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f84011601611d0d565b91508082528784828501011115611e0257600080fd5b80848401858401376000848284010152508093505050509250929050565b803563ffffffff81168114611cbe57600080fd5b60008060408385031215611e4757600080fd5b611e5083611e20565b9150611e5e60208401611e20565b90509250929050565b6020808252825182820181905260009190848201906040850190845b81811015611e9f57835183529284019291840191600101611e83565b50909695505050505050565b64ffffffffff8116811461075457600080fd5b600060208284031215611ed057600080fd5b8135611b1e81611eab565b6102808101818360005b6014811015611f04578151835260209283019290910190600101611ee5565b50505092915050565b600060208284031215611f1f57600080fd5b611b1e82611e20565b60008060408385031215611f3b57600080fd5b82359150611e5e60208401611e20565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b63ffffffff828116828216039080821115611f9757611f97611f4b565b5092915050565b63ffffffff818116838216019080821115611f9757611f97611f4b565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600063ffffffff80831681810361200357612003611f4b565b6001019392505050565b6000602080838503121561202057600080fd5b825167ffffffffffffffff8082111561203857600080fd5b818501915085601f83011261204c57600080fd5b81518181111561205e5761205e611cde565b8060051b915061206f848301611d0d565b818152918301840191848101908884111561208957600080fd5b938501935b838510156120a75784518252938501939085019061208e565b98975050505050505050565b6000602082840312156120c557600080fd5b5051919050565b6000602082840312156120de57600080fd5b8151611b1e81611eab565b60408101818360005b6002811015611f045781518352602092830192909101906001016120f2565b60005b8381101561212c578181015183820152602001612114565b50506000910152565b60008251612147818460208701612111565b9190910192915050565b6020815260008251806020840152612170816040850160208701612111565b601f017ffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffe016919091016040019291505056fe416464726573733a206c6f772d6c6576656c2064656c65676174652063616c6c206661696c6564" diff --git a/third-party/nwaku/waku/waku_rln_relay/conversion_utils.nim b/third-party/nwaku/waku/waku_rln_relay/conversion_utils.nim new file mode 100644 index 0000000..4a168eb --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/conversion_utils.nim @@ -0,0 +1,179 @@ +{.push raises: [].} + +import + std/[sequtils, strutils, algorithm], + web3, + chronicles, + stew/[arrayops, endians2], + stint +import ./constants, ./protocol_types +import ../waku_keystore + +export web3, chronicles, stint, constants, endians2 + +logScope: + topics = "waku rln_relay conversion_utils" + +proc inHex*( + value: + IdentityTrapdoor or IdentityNullifier or IdentitySecretHash or IDCommitment or + MerkleNode or Nullifier or Epoch or RlnIdentifier +): string = + var valueHex = "" #UInt256.fromBytesLE(value) + for b in value.reversed(): + valueHex = valueHex & b.toHex() + # We pad leading zeroes + while valueHex.len < value.len * 2: + valueHex = "0" & valueHex + return toLowerAscii(valueHex) + +proc encodeLengthPrefix*(input: openArray[byte]): seq[byte] = + ## returns length prefixed version of the input + ## with the following format [len<8>|input] + ## len: 8-byte value that represents the number of bytes in the `input` + ## len is serialized in little-endian + ## input: the supplied `input` + let + # the length should be serialized in little-endian + len = toBytes(uint64(input.len), Endianness.littleEndian) + output = concat(@len, @input) + return output + +proc serialize*(v: uint64): array[32, byte] = + ## a private proc to convert uint64 to a byte seq + ## this conversion is used in the proofGen proc + + ## converts `v` to a byte seq in little-endian order + let bytes = toBytes(v, Endianness.littleEndian) + var output: array[32, byte] + discard output.copyFrom(bytes) + return output + +proc serialize*( + idSecretHash: IdentitySecretHash, + 
memIndex: MembershipIndex, + userMessageLimit: UserMessageLimit, + messageId: MessageId, + externalNullifier: ExternalNullifier, + msg: openArray[byte], +): seq[byte] = + ## a private proc to convert RateLimitProof and the data to a byte seq + ## this conversion is used in the proofGen proc + ## the serialization is done as instructed in https://github.com/kilic/rln/blob/7ac74183f8b69b399e3bc96c1ae8ab61c026dc43/src/public.rs#L146 + ## [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal ] + let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian) + let userMessageLimitBytes = userMessageLimit.serialize() + let messageIdBytes = messageId.serialize() + let lenPrefMsg = encodeLengthPrefix(msg) + let output = concat( + @idSecretHash, + @memIndexBytes, + @userMessageLimitBytes, + @messageIdBytes, + @externalNullifier, + lenPrefMsg, + ) + return output + +proc serialize*(witness: RLNWitnessInput): seq[byte] = + ## Serializes the RLN witness into a byte array following zerokit's expected format. 
+ ## The serialized format includes: + ## - identity_secret (32 bytes, little-endian with zero padding) + ## - user_message_limit (32 bytes, little-endian with zero padding) + ## - message_id (32 bytes, little-endian with zero padding) + ## - merkle tree depth (8 bytes, little-endian) = path_elements.len / 32 + ## - path_elements (each 32 bytes, ordered bottom-to-top) + ## - merkle tree depth again (8 bytes, little-endian) + ## - identity_path_index (sequence of bits as bytes, 0 = left, 1 = right) + ## - x (32 bytes, little-endian with zero padding) + ## - external_nullifier (32 bytes, little-endian with zero padding) + var buffer: seq[byte] + buffer.add(@(witness.identity_secret)) + buffer.add(@(witness.user_message_limit)) + buffer.add(@(witness.message_id)) + buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian)) + for element in witness.path_elements: + buffer.add(element) + buffer.add(toBytes(uint64(witness.path_elements.len / 32), Endianness.littleEndian)) + buffer.add(witness.identity_path_index) + buffer.add(@(witness.x)) + buffer.add(@(witness.external_nullifier)) + return buffer + +proc serialize*(proof: RateLimitProof, data: openArray[byte]): seq[byte] = + ## a private proc to convert RateLimitProof and data to a byte seq + ## this conversion is used in the proof verification proc + ## [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal ] + let lenPrefMsg = encodeLengthPrefix(@data) + var proofBytes = concat( + @(proof.proof), + @(proof.merkleRoot), + @(proof.externalNullifier), + @(proof.shareX), + @(proof.shareY), + @(proof.nullifier), + lenPrefMsg, + ) + + return proofBytes + +# Serializes a sequence of MerkleNodes +proc serialize*(roots: seq[MerkleNode]): seq[byte] = + var rootsBytes: seq[byte] = @[] + for root in roots: + rootsBytes = concat(rootsBytes, @root) + return rootsBytes + +# Serializes a sequence of MembershipIndex's +proc 
serialize*(memIndices: seq[MembershipIndex]): seq[byte] = + var memIndicesBytes = newSeq[byte]() + + # serialize the memIndices, with its length prefixed + let len = toBytes(uint64(memIndices.len), Endianness.littleEndian) + memIndicesBytes.add(len) + + for memIndex in memIndices: + let memIndexBytes = toBytes(uint64(memIndex), Endianness.littleEndian) + memIndicesBytes = concat(memIndicesBytes, @memIndexBytes) + + return memIndicesBytes + +proc toEpoch*(t: uint64): Epoch = + ## converts `t` to `Epoch` in little-endian order + let bytes = toBytes(t, Endianness.littleEndian) + trace "epoch bytes", bytes = bytes + var epoch: Epoch + discard epoch.copyFrom(bytes) + return epoch + +proc fromEpoch*(epoch: Epoch): uint64 = + ## decodes bytes of `epoch` (in little-endian) to uint64 + let t = fromBytesLE(uint64, array[32, byte](epoch)) + return t + +func `+`*(a, b: Quantity): Quantity {.borrow.} + +func u256*(n: Quantity): UInt256 {.inline.} = + n.uint64.stuint(256) + +proc uint64ToField*(n: uint64): array[32, byte] = + var output: array[32, byte] + let bytes = toBytes(n, Endianness.littleEndian) + output[0 ..< bytes.len] = bytes + return output + +proc UInt256ToField*(v: UInt256): array[32, byte] = + return cast[array[32, byte]](v) # already doesn't use `result` + +proc seqToField*(s: seq[byte]): array[32, byte] = + var output: array[32, byte] + let len = min(s.len, 32) + for i in 0 ..< len: + output[i] = s[i] + return output + +proc uint64ToIndex*(index: MembershipIndex, depth: int): seq[byte] = + var output = newSeq[byte](depth) + for i in 0 ..< depth: + output[i] = byte((index shr i) and 1) # LSB-first bit decomposition + return output diff --git a/third-party/nwaku/waku/waku_rln_relay/group_manager.nim b/third-party/nwaku/waku/waku_rln_relay/group_manager.nim new file mode 100644 index 0000000..14fda54 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/group_manager.nim @@ -0,0 +1,3 @@ +import group_manager/[on_chain] + +export on_chain diff --git 
a/third-party/nwaku/waku/waku_rln_relay/group_manager/group_manager_base.nim b/third-party/nwaku/waku/waku_rln_relay/group_manager/group_manager_base.nim new file mode 100644 index 0000000..9ddcdee --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/group_manager/group_manager_base.nim @@ -0,0 +1,153 @@ +import + ../../common/error_handling, + ../protocol_types, + ../protocol_metrics, + ../constants, + ../rln +import options, chronos, results, std/[deques, sequtils] + +export options, chronos, results, protocol_types, protocol_metrics, deques + +# This module contains the GroupManager interface +# The GroupManager is responsible for managing the group state +# It should be used to register new members, and withdraw existing members +# It should also be used to sync the group state with the rest of the group members + +type Membership* = object + index*: MembershipIndex + rateCommitment*: RawRateCommitment + +type OnRegisterCallback* = proc(registrations: seq[Membership]): Future[void] {.gcsafe.} +type OnWithdrawCallback* = proc(withdrawals: seq[Membership]): Future[void] {.gcsafe.} + +type GroupManagerResult*[T] = Result[T, string] + +type GroupManager* = ref object of RootObj + idCredentials*: Option[IdentityCredential] + membershipIndex*: Option[MembershipIndex] + registerCb*: Option[OnRegisterCallback] + withdrawCb*: Option[OnWithdrawCallback] + rlnInstance*: ptr RLN + initialized*: bool + latestIndex*: MembershipIndex + validRoots*: Deque[MerkleNode] + onFatalErrorAction*: OnFatalErrorHandler + userMessageLimit*: Option[UserMessageLimit] + rlnRelayMaxMessageLimit*: uint64 + +# This proc is used to initialize the group manager +# Any initialization logic should be implemented here +method init*(g: GroupManager): Future[GroupManagerResult[void]] {.base, async.} = + return err("init proc for " & $g.type & " is not implemented yet") + +# This proc is used to start the group sync process +# It should be used to sync the group state with the rest of the group 
members +method startGroupSync*( + g: GroupManager +): Future[GroupManagerResult[void]] {.base, async.} = + return err("startGroupSync proc for " & $g.type & " is not implemented yet") + +# This proc is used to register a new identity commitment into the merkle tree +# The user may or may not have the identity secret to this commitment +# It should be used when detecting new members in the group, and syncing the group state +method register*( + g: GroupManager, rateCommitment: RateCommitment +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "register proc for " & $g.type & " is not implemented yet" + ) + +# This proc is used to register a new identity commitment into the merkle tree +# The user should have the identity secret to this commitment +# It should be used when the user wants to join the group +method register*( + g: GroupManager, credentials: IdentityCredential, userMessageLimit: UserMessageLimit +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "register proc for " & $g.type & " is not implemented yet" + ) + +# This proc is used to register a batch of new identity commitments into the merkle tree +# The user may or may not have the identity secret to these commitments +# It should be used when detecting a batch of new members in the group, and syncing the group state +method registerBatch*( + g: GroupManager, rateCommitments: seq[RawRateCommitment] +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "registerBatch proc for " & $g.type & " is not implemented yet" + ) + +# This proc is used to set a callback that will be called when a new identity commitment is registered +# The callback may be called multiple times, and should be used to for any post processing +method onRegister*(g: GroupManager, cb: OnRegisterCallback) {.base, gcsafe.} = + g.registerCb = some(cb) + +# This proc is used to withdraw/remove an 
identity commitment from the merkle tree +# The user should have the identity secret hash to this commitment, by either deriving it, or owning it +method withdraw*( + g: GroupManager, identitySecretHash: IdentitySecretHash +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "withdraw proc for " & $g.type & " is not implemented yet" + ) + +# This proc is used to withdraw/remove a batch of identity commitments from the merkle tree +# The user should have the identity secret hash to these commitments, by either deriving them, or owning them +method withdrawBatch*( + g: GroupManager, identitySecretHashes: seq[IdentitySecretHash] +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "withdrawBatch proc for " & $g.type & " is not implemented yet" + ) + +# This proc is used to insert and remove a set of commitments from the merkle tree +method atomicBatch*( + g: GroupManager, + rateCommitments: seq[RateCommitment], + toRemoveIndices: seq[MembershipIndex], +): Future[void] {.base, async: (raises: [Exception]).} = + raise newException( + CatchableError, "atomicBatch proc for " & $g.type & " is not implemented yet" + ) + +method stop*(g: GroupManager): Future[void] {.base, async.} = + raise + newException(CatchableError, "stop proc for " & $g.type & " is not implemented yet") + +# This proc is used to set a callback that will be called when an identity commitment is withdrawn +# The callback may be called multiple times, and should be used to for any post processing +method onWithdraw*(g: GroupManager, cb: OnWithdrawCallback) {.base, gcsafe.} = + g.withdrawCb = some(cb) + +method indexOfRoot*( + g: GroupManager, root: MerkleNode +): int {.base, gcsafe, raises: [].} = + ## returns the index of the root in the merkle tree and returns -1 if the root is not found + return g.validRoots.find(root) + +method validateRoot*( + g: GroupManager, root: MerkleNode +): bool {.base, gcsafe, raises: 
[].} = + ## validates the root against the valid roots queue + return g.indexOfRoot(root) >= 0 + +method verifyProof*( + g: GroupManager, input: seq[byte], proof: RateLimitProof +): GroupManagerResult[bool] {.base, gcsafe, raises: [].} = + ## Dummy implementation for verifyProof + return err("verifyProof is not implemented") + +method generateProof*( + g: GroupManager, + data: seq[byte], + epoch: Epoch, + messageId: MessageId, + rlnIdentifier = DefaultRlnIdentifier, +): GroupManagerResult[RateLimitProof] {.base, gcsafe, raises: [].} = + ## Dummy implementation for generateProof + return err("generateProof is not implemented") + +method isReady*(g: GroupManager): Future[bool] {.base, async.} = + raise newException( + CatchableError, "isReady proc for " & $g.type & " is not implemented yet" + ) diff --git a/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain.nim b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain.nim new file mode 100644 index 0000000..68651e7 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain.nim @@ -0,0 +1,3 @@ +import on_chain/group_manager + +export group_manager diff --git a/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim new file mode 100644 index 0000000..d1a7a13 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/group_manager.nim @@ -0,0 +1,649 @@ +{.push raises: [].} + +import + os, + web3, + web3/eth_api_types, + web3/primitives, + eth/keys as keys, + chronicles, + nimcrypto/keccak as keccak, + stint, + json, + std/[strutils, tables, algorithm, strformat], + stew/[byteutils, arrayops], + sequtils + +import + ../../../waku_keystore, + ../../rln, + ../../rln/rln_interface, + ../../conversion_utils, + ../group_manager_base, + ./retry_wrapper, + ./rpc_wrapper + +export group_manager_base + +logScope: + topics = "waku rln_relay onchain_group_manager" + 
+type + WakuRlnContractWithSender = Sender[WakuRlnContract] + OnchainGroupManager* = ref object of GroupManager + ethClientUrls*: seq[string] + ethPrivateKey*: Option[string] + ethContractAddress*: string + ethRpc*: Option[Web3] + wakuRlnContract*: Option[WakuRlnContractWithSender] + registrationTxHash*: Option[TxHash] + chainId*: UInt256 + keystorePath*: Option[string] + keystorePassword*: Option[string] + registrationHandler*: Option[RegistrationHandler] + latestProcessedBlock*: BlockNumber + merkleProofCache*: seq[byte] + +# The below code is not working with the latest web3 version due to chainId being null (specifically on linea-sepolia) +# TODO: find better solution than this custom sendEthCallWithoutParams call + +proc fetchMerkleProofElements*( + g: OnchainGroupManager +): Future[Result[seq[byte], string]] {.async.} = + try: + let membershipIndex = g.membershipIndex.get() + let index40 = stuint(membershipIndex, 40) + + let methodSig = "getMerkleProof(uint40)" + var paddedParam = newSeq[byte](32) + let indexBytes = index40.toBytesBE() + for i in 0 ..< min(indexBytes.len, paddedParam.len): + paddedParam[paddedParam.len - indexBytes.len + i] = indexBytes[i] + + let response = await sendEthCallWithParams( + ethRpc = g.ethRpc.get(), + functionSignature = methodSig, + params = paddedParam, + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + + return response + except CatchableError: + error "Failed to fetch Merkle proof elements", error = getCurrentExceptionMsg() + return err("Failed to fetch merkle proof elements: " & getCurrentExceptionMsg()) + +proc fetchMerkleRoot*( + g: OnchainGroupManager +): Future[Result[UInt256, string]] {.async.} = + try: + let merkleRoot = await sendEthCallWithoutParams( + ethRpc = g.ethRpc.get(), + functionSignature = "root()", + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + 
return merkleRoot + except CatchableError: + error "Failed to fetch Merkle root", error = getCurrentExceptionMsg() + return err("Failed to fetch merkle root: " & getCurrentExceptionMsg()) + +proc fetchNextFreeIndex*( + g: OnchainGroupManager +): Future[Result[UInt256, string]] {.async.} = + try: + let nextFreeIndex = await sendEthCallWithoutParams( + ethRpc = g.ethRpc.get(), + functionSignature = "nextFreeIndex()", + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + return nextFreeIndex + except CatchableError: + error "Failed to fetch next free index", error = getCurrentExceptionMsg() + return err("Failed to fetch next free index: " & getCurrentExceptionMsg()) + +proc fetchMembershipStatus*( + g: OnchainGroupManager, idCommitment: IDCommitment +): Future[Result[bool, string]] {.async.} = + try: + let params = idCommitment.reversed() + let resultBytes = await sendEthCallWithParams( + ethRpc = g.ethRpc.get(), + functionSignature = "isInMembershipSet(uint256)", + params = params, + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + if resultBytes.isErr(): + return err("Failed to check membership: " & resultBytes.error) + let responseBytes = resultBytes.get() + + return ok(responseBytes.len == 32 and responseBytes[^1] == 1'u8) + except CatchableError: + error "Failed to fetch membership set membership", error = getCurrentExceptionMsg() + return err("Failed to fetch membership set membership: " & getCurrentExceptionMsg()) + +proc fetchMaxMembershipRateLimit*( + g: OnchainGroupManager +): Future[Result[UInt256, string]] {.async.} = + try: + let maxMembershipRateLimit = await sendEthCallWithoutParams( + ethRpc = g.ethRpc.get(), + functionSignature = "maxMembershipRateLimit()", + fromAddress = g.ethRpc.get().defaultAccount, + toAddress = fromHex(Address, g.ethContractAddress), + chainId = g.chainId, + ) + return 
maxMembershipRateLimit + except CatchableError: + error "Failed to fetch max membership rate limit", error = getCurrentExceptionMsg() + return err("Failed to fetch max membership rate limit: " & getCurrentExceptionMsg()) + +proc setMetadata*( + g: OnchainGroupManager, lastProcessedBlock = none(BlockNumber) +): GroupManagerResult[void] = + let normalizedBlock = lastProcessedBlock.get(g.latestProcessedBlock) + try: + let metadataSetRes = g.rlnInstance.setMetadata( + RlnMetadata( + lastProcessedBlock: normalizedBlock.uint64, + chainId: g.chainId, + contractAddress: g.ethContractAddress, + validRoots: g.validRoots.toSeq(), + ) + ) + if metadataSetRes.isErr(): + return err("failed to persist rln metadata: " & metadataSetRes.error) + except CatchableError: + return err("failed to persist rln metadata: " & getCurrentExceptionMsg()) + return ok() + +template initializedGuard(g: OnchainGroupManager): untyped = + if not g.initialized: + raise newException(CatchableError, "OnchainGroupManager is not initialized") + +template retryWrapper( + g: OnchainGroupManager, res: auto, errStr: string, body: untyped +): auto = + retryWrapper(res, RetryStrategy.new(), errStr, g.onFatalErrorAction): + body + +proc updateRoots*(g: OnchainGroupManager): Future[bool] {.async.} = + let rootRes = await g.fetchMerkleRoot() + if rootRes.isErr(): + return false + + let merkleRoot = UInt256ToField(rootRes.get()) + + if g.validRoots.len == 0: + g.validRoots.addLast(merkleRoot) + return true + + if g.validRoots[g.validRoots.len - 1] != merkleRoot: + if g.validRoots.len > AcceptableRootWindowSize: + discard g.validRoots.popFirst() + g.validRoots.addLast(merkleRoot) + return true + + return false + +proc trackRootChanges*(g: OnchainGroupManager) {.async: (raises: [CatchableError]).} = + try: + initializedGuard(g) + const rpcDelay = 5.seconds + + while true: + await sleepAsync(rpcDelay) + let rootUpdated = await g.updateRoots() + + if rootUpdated: + ## The membership set on-chain has changed (some new 
members have joined or some members have left) + if g.membershipIndex.isSome(): + ## A membership index exists only if the node has registered with RLN. + ## Non-registered nodes cannot have Merkle proof elements. + let proofResult = await g.fetchMerkleProofElements() + if proofResult.isErr(): + error "Failed to fetch Merkle proof", error = proofResult.error + else: + g.merkleProofCache = proofResult.get() + + let nextFreeIndex = await g.fetchNextFreeIndex() + if nextFreeIndex.isErr(): + error "Failed to fetch next free index", error = nextFreeIndex.error + raise newException( + CatchableError, "Failed to fetch next free index: " & nextFreeIndex.error + ) + + let memberCount = cast[int64](nextFreeIndex.get()) + waku_rln_number_registered_memberships.set(float64(memberCount)) + except CatchableError: + error "Fatal error in trackRootChanges", error = getCurrentExceptionMsg() + +method register*( + g: OnchainGroupManager, rateCommitment: RateCommitment +): Future[void] {.async: (raises: [Exception]).} = + initializedGuard(g) + + try: + let leaf = rateCommitment.toLeaf().get() + if g.registerCb.isSome(): + let idx = g.latestIndex + debug "registering member via callback", rateCommitment = leaf, index = idx + await g.registerCb.get()(@[Membership(rateCommitment: leaf, index: idx)]) + g.latestIndex.inc() + except CatchableError: + raise newException(ValueError, getCurrentExceptionMsg()) + +method register*( + g: OnchainGroupManager, + identityCredential: IdentityCredential, + userMessageLimit: UserMessageLimit, +): Future[void] {.async: (raises: [Exception]).} = + initializedGuard(g) + + let ethRpc = g.ethRpc.get() + let wakuRlnContract = g.wakuRlnContract.get() + + var gasPrice: int + g.retryWrapper(gasPrice, "Failed to get gas price"): + int(await ethRpc.provider.eth_gasPrice()) * 2 + let idCommitmentHex = identityCredential.idCommitment.inHex() + debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex + let idCommitment = 
identityCredential.idCommitment.toUInt256() + let idCommitmentsToErase: seq[UInt256] = @[] + debug "registering the member", + idCommitment = idCommitment, + userMessageLimit = userMessageLimit, + idCommitmentsToErase = idCommitmentsToErase + var txHash: TxHash + g.retryWrapper(txHash, "Failed to register the member"): + await wakuRlnContract + .register(idCommitment, userMessageLimit.stuint(32), idCommitmentsToErase) + .send(gasPrice = gasPrice) + + # wait for the transaction to be mined + var tsReceipt: ReceiptObject + g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"): + await ethRpc.getMinedTransactionReceipt(txHash) + debug "registration transaction mined", txHash = txHash + g.registrationTxHash = some(txHash) + # the receipt topic holds the hash of signature of the raised events + # TODO: make this robust. search within the event list for the event + debug "ts receipt", receipt = tsReceipt[] + + if tsReceipt.status.isNone(): + raise newException(ValueError, "Transaction failed: status is None") + if tsReceipt.status.get() != 1.Quantity: + raise newException( + ValueError, "Transaction failed with status: " & $tsReceipt.status.get() + ) + + ## Extract MembershipRegistered event from transaction logs (third event) + let thirdTopic = tsReceipt.logs[2].topics[0] + debug "third topic", thirdTopic = thirdTopic + if thirdTopic != + cast[FixedBytes[32]](keccak.keccak256.digest( + "MembershipRegistered(uint256,uint256,uint32)" + ).data): + raise newException(ValueError, "register: unexpected event signature") + + ## Parse MembershipRegistered event data: rateCommitment(256) || membershipRateLimit(256) || index(32) + let arguments = tsReceipt.logs[2].data + debug "tx log data", arguments = arguments + let + ## Extract membership index from transaction log data (big endian) + membershipIndex = UInt256.fromBytesBE(arguments[64 .. 
95]) + + trace "parsed membershipIndex", membershipIndex + g.userMessageLimit = some(userMessageLimit) + g.membershipIndex = some(membershipIndex.toMembershipIndex()) + g.idCredentials = some(identityCredential) + + let rateCommitment = RateCommitment( + idCommitment: identityCredential.idCommitment, userMessageLimit: userMessageLimit + ) + .toLeaf() + .get() + + if g.registerCb.isSome(): + let member = Membership(rateCommitment: rateCommitment, index: g.latestIndex) + await g.registerCb.get()(@[member]) + g.latestIndex.inc() + + return + +method withdraw*( + g: OnchainGroupManager, idCommitment: IDCommitment +): Future[void] {.async: (raises: [Exception]).} = + initializedGuard(g) # TODO: after slashing is enabled on the contract + +method withdrawBatch*( + g: OnchainGroupManager, idCommitments: seq[IDCommitment] +): Future[void] {.async: (raises: [Exception]).} = + initializedGuard(g) + +proc getRootFromProofAndIndex( + g: OnchainGroupManager, elements: seq[byte], bits: seq[byte] +): GroupManagerResult[array[32, byte]] = + # this is a helper function to get root from merkle proof elements and index + # it's currently not used anywhere, but can be used to verify the root from the proof and index + # Compute leaf hash from idCommitment and messageLimit + let messageLimitField = uint64ToField(g.userMessageLimit.get()) + let leafHashRes = poseidon(@[g.idCredentials.get().idCommitment, @messageLimitField]) + if leafHashRes.isErr(): + return err("Failed to compute leaf hash: " & leafHashRes.error) + + var hash = leafHashRes.get() + for i in 0 ..< bits.len: + let sibling = elements[i * 32 .. 
(i + 1) * 32 - 1] + + let hashRes = + if bits[i] == 0: + poseidon(@[@hash, sibling]) + else: + poseidon(@[sibling, @hash]) + + hash = hashRes.valueOr: + return err("Failed to compute poseidon hash: " & error) + hash = hashRes.get() + + return ok(hash) + +method generateProof*( + g: OnchainGroupManager, + data: seq[byte], + epoch: Epoch, + messageId: MessageId, + rlnIdentifier = DefaultRlnIdentifier, +): GroupManagerResult[RateLimitProof] {.gcsafe, raises: [].} = + ## Generates an RLN proof using the cached Merkle proof and custom witness + # Ensure identity credentials and membership index are set + if g.idCredentials.isNone(): + return err("identity credentials are not set") + if g.membershipIndex.isNone(): + return err("membership index is not set") + if g.userMessageLimit.isNone(): + return err("user message limit is not set") + + if (g.merkleProofCache.len mod 32) != 0: + return err("Invalid merkle proof cache length") + + let identity_secret = seqToField(g.idCredentials.get().idSecretHash) + let user_message_limit = uint64ToField(g.userMessageLimit.get()) + let message_id = uint64ToField(messageId) + var path_elements = newSeq[byte](0) + + let identity_path_index = uint64ToIndex(g.membershipIndex.get(), 20) + for i in 0 ..< g.merkleProofCache.len div 32: + let chunk = g.merkleProofCache[i * 32 .. 
(i + 1) * 32 - 1] + path_elements.add(chunk.reversed()) + + let x = keccak.keccak256.digest(data) + + let extNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr: + return err("Failed to compute external nullifier: " & error) + + let witness = RLNWitnessInput( + identity_secret: identity_secret, + user_message_limit: user_message_limit, + message_id: message_id, + path_elements: path_elements, + identity_path_index: identity_path_index, + x: x, + external_nullifier: extNullifier, + ) + + let serializedWitness = serialize(witness) + + var input_witness_buffer = toBuffer(serializedWitness) + + # Generate the proof using the zerokit API + var output_witness_buffer: Buffer + let witness_success = generate_proof_with_witness( + g.rlnInstance, addr input_witness_buffer, addr output_witness_buffer + ) + + if not witness_success: + return err("Failed to generate proof") + + # Parse the proof into a RateLimitProof object + var proofValue = cast[ptr array[320, byte]](output_witness_buffer.`ptr`) + let proofBytes: array[320, byte] = proofValue[] + + ## Parse the proof as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] + let + proofOffset = 128 + rootOffset = proofOffset + 32 + externalNullifierOffset = rootOffset + 32 + shareXOffset = externalNullifierOffset + 32 + shareYOffset = shareXOffset + 32 + nullifierOffset = shareYOffset + 32 + + var + zkproof: ZKSNARK + proofRoot, shareX, shareY: MerkleNode + externalNullifier: ExternalNullifier + nullifier: Nullifier + + discard zkproof.copyFrom(proofBytes[0 .. proofOffset - 1]) + discard proofRoot.copyFrom(proofBytes[proofOffset .. rootOffset - 1]) + discard + externalNullifier.copyFrom(proofBytes[rootOffset .. externalNullifierOffset - 1]) + discard shareX.copyFrom(proofBytes[externalNullifierOffset .. shareXOffset - 1]) + discard shareY.copyFrom(proofBytes[shareXOffset .. shareYOffset - 1]) + discard nullifier.copyFrom(proofBytes[shareYOffset .. 
nullifierOffset - 1]) + + # Create the RateLimitProof object + let output = RateLimitProof( + proof: zkproof, + merkleRoot: proofRoot, + externalNullifier: externalNullifier, + epoch: epoch, + rlnIdentifier: rlnIdentifier, + shareX: shareX, + shareY: shareY, + nullifier: nullifier, + ) + + debug "Proof generated successfully", proof = output + + waku_rln_remaining_proofs_per_epoch.dec() + waku_rln_total_generated_proofs.inc() + return ok(output) + +method verifyProof*( + g: OnchainGroupManager, input: seq[byte], proof: RateLimitProof +): GroupManagerResult[bool] {.gcsafe, raises: [].} = + ## -- Verifies an RLN rate-limit proof against the set of valid Merkle roots -- + + var normalizedProof = proof + + normalizedProof.externalNullifier = poseidon( + @[@(proof.epoch), @(proof.rlnIdentifier)] + ).valueOr: + return err("Failed to compute external nullifier: " & error) + + let proofBytes = serialize(normalizedProof, input) + let proofBuffer = proofBytes.toBuffer() + + let rootsBytes = serialize(g.validRoots.items().toSeq()) + let rootsBuffer = rootsBytes.toBuffer() + + var validProof: bool # out-param + let ffiOk = verify_with_roots( + g.rlnInstance, # RLN context created at init() + addr proofBuffer, # (proof + signal) + addr rootsBuffer, # valid Merkle roots + addr validProof # will be set by the FFI call + , + ) + + if not ffiOk: + return err("could not verify the proof") + else: + debug "Proof verified successfully" + + return ok(validProof) + +method onRegister*(g: OnchainGroupManager, cb: OnRegisterCallback) {.gcsafe.} = + g.registerCb = some(cb) + +method onWithdraw*(g: OnchainGroupManager, cb: OnWithdrawCallback) {.gcsafe.} = + g.withdrawCb = some(cb) + +proc establishConnection( + g: OnchainGroupManager +): Future[GroupManagerResult[Web3]] {.async.} = + var ethRpc: Web3 + + g.retryWrapper(ethRpc, "Failed to connect to the Ethereum client"): + var innerEthRpc: Web3 + var connected = false + for clientUrl in g.ethClientUrls: + ## We give a chance to the user to 
provide multiple clients + ## and we try to connect to each of them + try: + innerEthRpc = await newWeb3(clientUrl) + connected = true + break + except CatchableError: + error "failed connect Eth client", error = getCurrentExceptionMsg() + + if not connected: + raise newException(CatchableError, "all failed") + + innerEthRpc + + return ok(ethRpc) + +method init*(g: OnchainGroupManager): Future[GroupManagerResult[void]] {.async.} = + # check if the Ethereum client is reachable + let ethRpc: Web3 = (await establishConnection(g)).valueOr: + return err("failed to connect to Ethereum clients: " & $error) + + var fetchedChainId: UInt256 + g.retryWrapper(fetchedChainId, "Failed to get the chain id"): + await ethRpc.provider.eth_chainId() + + # Set the chain id + if g.chainId == 0: + warn "Chain ID not set in config, using RPC Provider's Chain ID", + providerChainId = fetchedChainId + + if g.chainId != 0 and g.chainId != fetchedChainId: + return err( + "The RPC Provided a Chain ID which is different than the provided Chain ID: provided = " & + $g.chainId & ", actual = " & $fetchedChainId + ) + + g.chainId = fetchedChainId + + if g.ethPrivateKey.isSome(): + let pk = g.ethPrivateKey.get() + let parsedPk = keys.PrivateKey.fromHex(pk).valueOr: + return err("failed to parse the private key" & ": " & $error) + ethRpc.privateKey = Opt.some(parsedPk) + ethRpc.defaultAccount = + ethRpc.privateKey.get().toPublicKey().toCanonicalAddress().Address + + let contractAddress = web3.fromHex(web3.Address, g.ethContractAddress) + let wakuRlnContract = ethRpc.contractSender(WakuRlnContract, contractAddress) + + g.ethRpc = some(ethRpc) + g.wakuRlnContract = some(wakuRlnContract) + + if g.keystorePath.isSome() and g.keystorePassword.isSome(): + if not fileExists(g.keystorePath.get()): + error "File provided as keystore path does not exist", path = g.keystorePath.get() + return err("File provided as keystore path does not exist") + + var keystoreQuery = KeystoreMembership( + membershipContract: 
+ MembershipContract(chainId: $g.chainId, address: g.ethContractAddress) + ) + if g.membershipIndex.isSome(): + keystoreQuery.treeIndex = MembershipIndex(g.membershipIndex.get()) + waku_rln_membership_credentials_import_duration_seconds.nanosecondTime: + let keystoreCred = getMembershipCredentials( + path = g.keystorePath.get(), + password = g.keystorePassword.get(), + query = keystoreQuery, + appInfo = RLNAppInfo, + ).valueOr: + return err("failed to get the keystore credentials: " & $error) + + g.membershipIndex = some(keystoreCred.treeIndex) + g.userMessageLimit = some(keystoreCred.userMessageLimit) + # now we check on the contract if the commitment actually has a membership + let idCommitmentBytes = keystoreCred.identityCredential.idCommitment + let idCommitmentUInt256 = keystoreCred.identityCredential.idCommitment.toUInt256() + let idCommitmentHex = idCommitmentBytes.inHex() + debug "Keystore idCommitment in bytes", idCommitmentBytes = idCommitmentBytes + debug "Keystore idCommitment in UInt256 ", idCommitmentUInt256 = idCommitmentUInt256 + debug "Keystore idCommitment in hex ", idCommitmentHex = idCommitmentHex + let idCommitment = keystoreCred.identityCredential.idCommitment + let membershipExists = (await g.fetchMembershipStatus(idCommitment)).valueOr: + return err("the commitment does not have a membership: " & error) + debug "membershipExists", membershipExists = membershipExists + + g.idCredentials = some(keystoreCred.identityCredential) + + let metadataGetOptRes = g.rlnInstance.getMetadata() + if metadataGetOptRes.isErr(): + warn "could not initialize with persisted rln metadata" + elif metadataGetOptRes.get().isSome(): + let metadata = metadataGetOptRes.get().get() + if metadata.chainId != g.chainId: + return err( + fmt"chain id mismatch. 
persisted={metadata.chainId}, smart_contract_chainId={g.chainId}" + ) + if metadata.contractAddress != g.ethContractAddress.toLower(): + return err("persisted data: contract address mismatch") + + let maxMembershipRateLimitRes = await g.fetchMaxMembershipRateLimit() + let maxMembershipRateLimit = maxMembershipRateLimitRes.valueOr: + return err("failed to fetch max membership rate limit: " & error) + + g.rlnRelayMaxMessageLimit = cast[uint64](maxMembershipRateLimit) + + proc onDisconnect() {.async.} = + error "Ethereum client disconnected" + + var newEthRpc: Web3 = (await g.establishConnection()).valueOr: + g.onFatalErrorAction("failed to connect to Ethereum clients onDisconnect") + return + + newEthRpc.ondisconnect = ethRpc.ondisconnect + g.ethRpc = some(newEthRpc) + + ethRpc.ondisconnect = proc() = + asyncSpawn onDisconnect() + + g.initialized = true + return ok() + +method stop*(g: OnchainGroupManager): Future[void] {.async, gcsafe.} = + if g.ethRpc.isSome(): + g.ethRpc.get().ondisconnect = nil + await g.ethRpc.get().close() + let flushed = g.rlnInstance.flush() + if not flushed: + error "failed to flush to the tree db" + + g.initialized = false + +method isReady*(g: OnchainGroupManager): Future[bool] {.async.} = + initializedGuard(g) + + if g.ethRpc.isNone(): + return false + + if g.wakuRlnContract.isNone(): + return false + + return true diff --git a/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim new file mode 100644 index 0000000..df87162 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/retry_wrapper.nim @@ -0,0 +1,36 @@ +import ../../../common/error_handling +import chronos +import results + +type RetryStrategy* = object + shouldRetry*: bool + retryDelay*: Duration + retryCount*: uint + +proc new*(T: type RetryStrategy): RetryStrategy = + return RetryStrategy(shouldRetry: true, retryDelay: 4000.millis, retryCount: 
15) + +template retryWrapper*( + res: auto, + retryStrategy: RetryStrategy, + errStr: string, + errCallback: OnFatalErrorHandler, + body: untyped, +): auto = + if errCallback == nil: + raise newException(CatchableError, "Ensure that the errCallback is set") + var retryCount = retryStrategy.retryCount + var shouldRetry = retryStrategy.shouldRetry + var exceptionMessage = "" + + while shouldRetry and retryCount > 0: + try: + res = body + shouldRetry = false + except: + retryCount -= 1 + exceptionMessage = getCurrentExceptionMsg() + await sleepAsync(retryStrategy.retryDelay) + if shouldRetry: + errCallback(errStr & ": " & exceptionMessage) + return diff --git a/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/rpc_wrapper.nim b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/rpc_wrapper.nim new file mode 100644 index 0000000..867e9e7 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/group_manager/on_chain/rpc_wrapper.nim @@ -0,0 +1,101 @@ +import + os, + web3, + web3/eth_api_types, + web3/primitives, + eth/keys as keys, + chronicles, + nimcrypto/keccak as keccak, + stint, + json, + std/[strutils, tables, algorithm], + stew/[byteutils, arrayops], + sequtils + +import + ../../../waku_keystore, + ../../rln, + ../../rln/rln_interface, + ../../conversion_utils, + ../../protocol_types, + ../group_manager_base + +logScope: + topics = "waku rln_relay onchain rpc_wrapper" + +# using the when predicate does not work within the contract macro, hence need to dupe +contract(WakuRlnContract): + # this serves as an entrypoint into the rln membership set + proc register( + idCommitment: UInt256, userMessageLimit: UInt32, idCommitmentsToErase: seq[UInt256] + ) + + # this event is emitted when a new member is registered + proc MembershipRegistered( + idCommitment: UInt256, membershipRateLimit: UInt256, index: UInt32 + ) {.event.} + + # Initializes the implementation contract (only used in unit tests) + proc initialize(maxMessageLimit: UInt256) + # this 
function denotes existence of a given user + proc isInMembershipSet(idCommitment: Uint256): bool {.view.} + # this constant describes the next index of a new member + proc nextFreeIndex(): UInt256 {.view.} + # this constant describes the block number this contract was deployed on + proc deployedBlockNumber(): UInt256 {.view.} + # this constant describes max message limit of rln contract + proc maxMembershipRateLimit(): UInt256 {.view.} + # this function returns the merkleProof for a given index + proc getMerkleProof(index: UInt256): seq[byte] {.view.} + # this function returns the Merkle root + proc root(): Uint256 {.view.} + +proc sendEthCallWithoutParams*( + ethRpc: Web3, + functionSignature: string, + fromAddress: Address, + toAddress: Address, + chainId: UInt256, +): Future[Result[UInt256, string]] {.async.} = + ## Workaround for web3 chainId=null issue on some networks (e.g., linea-sepolia) + ## Makes contract calls with explicit chainId for view functions with no parameters + let functionHash = + keccak256.digest(functionSignature.toOpenArrayByte(0, functionSignature.len - 1)) + let functionSelector = functionHash.data[0 .. 
3] + let dataSignature = "0x" & functionSelector.mapIt(it.toHex(2)).join("") + + var tx: TransactionArgs + tx.`from` = Opt.some(fromAddress) + tx.to = Opt.some(toAddress) + tx.value = Opt.some(0.u256) + tx.data = Opt.some(byteutils.hexToSeqByte(dataSignature)) + tx.chainId = Opt.some(chainId) + + let resultBytes = await ethRpc.provider.eth_call(tx, "latest") + if resultBytes.len == 0: + return err("No result returned for function call: " & functionSignature) + return ok(UInt256.fromBytesBE(resultBytes)) + +proc sendEthCallWithParams*( + ethRpc: Web3, + functionSignature: string, + params: seq[byte], + fromAddress: Address, + toAddress: Address, + chainId: UInt256, +): Future[Result[seq[byte], string]] {.async.} = + ## Workaround for web3 chainId=null issue with parameterized contract calls + let functionHash = + keccak256.digest(functionSignature.toOpenArrayByte(0, functionSignature.len - 1)) + let functionSelector = functionHash.data[0 .. 3] + let callData = functionSelector & params + + var tx: TransactionArgs + tx.`from` = Opt.some(fromAddress) + tx.to = Opt.some(toAddress) + tx.value = Opt.some(0.u256) + tx.data = Opt.some(callData) + tx.chainId = Opt.some(chainId) + + let resultBytes = await ethRpc.provider.eth_call(tx, "latest") + return ok(resultBytes) diff --git a/third-party/nwaku/waku/waku_rln_relay/nonce_manager.nim b/third-party/nwaku/waku/waku_rln_relay/nonce_manager.nim new file mode 100644 index 0000000..257d969 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/nonce_manager.nim @@ -0,0 +1,59 @@ +{.push raises: [].} + +import chronos, results, times +import ./constants + +export chronos, times, results, constants + +# This module contains the NonceManager interface +# The NonceManager is responsible for managing the messageId used to generate RLN proofs +# It should be used to fetch a new messageId every time a proof is generated +# It refreshes the messageId every `epoch` seconds + +type + Nonce* = uint64 + NonceManager* = ref object of 
RootObj + epoch*: float64 + nextNonce*: Nonce + lastNonceTime*: float64 + nonceLimit*: Nonce + + NonceManagerErrorKind* = enum + NonceLimitReached + + NonceManagerError* = object + kind*: NonceManagerErrorKind + error*: string + + NonceManagerResult*[T] = Result[T, NonceManagerError] + +proc `$`*(ne: NonceManagerError): string = + case ne.kind + of NonceLimitReached: + return "NonceLimitReached: " & ne.error + +proc init*(T: type NonceManager, nonceLimit: Nonce, epoch = 1.float64): T = + return + NonceManager(epoch: epoch, nextNonce: 0, lastNonceTime: 0, nonceLimit: nonceLimit) + +proc getNonce*(n: NonceManager): NonceManagerResult[Nonce] = + let now = getTime().toUnixFloat() + var retNonce = n.nextNonce + + if now - n.lastNonceTime >= n.epoch: + retNonce = 0 + n.lastNonceTime = now + + n.nextNonce = retNonce + 1 + + if retNonce >= n.nonceLimit: + return err( + NonceManagerError( + kind: NonceLimitReached, + error: + "Nonce limit reached. Please wait for the next epoch. requested nonce: " & + $retNonce & " & nonceLimit: " & $n.nonceLimit, + ) + ) + + return ok(retNonce) diff --git a/third-party/nwaku/waku/waku_rln_relay/protocol_metrics.nim b/third-party/nwaku/waku/waku_rln_relay/protocol_metrics.nim new file mode 100644 index 0000000..6a21146 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/protocol_metrics.nim @@ -0,0 +1,126 @@ +{.push raises: [].} + +import chronicles, metrics, metrics/chronos_httpserver, ./constants, ../utils/collector + +export metrics + +logScope: + topics = "waku rln_relay" + +func generateBucketsForHistogram*(length: int): seq[float64] = + ## Generate a custom set of 5 buckets for a given length + let numberOfBuckets = 5 + let stepSize = length / numberOfBuckets + var buckets: seq[float64] + for i in 1 .. 
numberOfBuckets: + buckets.add(stepSize * i.toFloat()) + return buckets + +declarePublicCounter( + waku_rln_messages_total, "number of messages seen by the rln relay" +) + +declarePublicCounter(waku_rln_spam_messages_total, "number of spam messages detected") +declarePublicCounter( + waku_rln_invalid_messages_total, "number of invalid messages detected", ["type"] +) +# This metric will be useful in detecting the index of the root in the acceptable window of roots +declarePublicCounter( + waku_rln_valid_messages_total, + "number of valid messages with their roots tracked", + ["shard"], +) +declarePublicCounter( + waku_rln_errors_total, + "number of errors detected while operating the rln relay", + ["type"], +) +declarePublicCounter( + waku_rln_proof_verification_total, "number of times the rln proofs are verified" +) +# this is a gauge so that we can set it based on the events we receive +declarePublicGauge( + waku_rln_number_registered_memberships, + "number of registered and active rln memberships", +) + +# Timing metrics +declarePublicGauge( + waku_rln_proof_verification_duration_seconds, "time taken to verify a proof" +) +declarePublicGauge( + waku_rln_proof_generation_duration_seconds, "time taken to generate a proof" +) +declarePublicGauge( + waku_rln_instance_creation_duration_seconds, "time taken to create an rln instance" +) +declarePublicGauge( + waku_rln_membership_insertion_duration_seconds, + "time taken to insert a new member into the local merkle tree", +) +declarePublicGauge( + waku_rln_membership_credentials_import_duration_seconds, + "time taken to import membership credentials", +) + +declarePublicGauge( + waku_rln_remaining_proofs_per_epoch, + "number of proofs remaining to be generated for the current epoch", +) + +declarePublicGauge( + waku_rln_total_generated_proofs, + "total number of proofs generated since the node started", +) + +type RLNMetricsLogger = proc() {.gcsafe, raises: [Defect].} + +proc getRlnMetricsLogger*(): RLNMetricsLogger = + 
var logMetrics: RLNMetricsLogger + + var cumulativeErrors = 0.float64 + var cumulativeMessages = 0.float64 + var cumulativeSpamMessages = 0.float64 + var cumulativeInvalidMessages = 0.float64 + var cumulativeValidMessages = 0.float64 + var cumulativeProofsVerified = 0.float64 + var cumulativeProofsGenerated = 0.float64 + var cumulativeProofsRemaining = 100.float64 + var cumulativeRegisteredMember = 0.float64 + + when defined(metrics): + logMetrics = proc() = + {.gcsafe.}: + let freshErrorCount = + parseAndAccumulate(waku_rln_errors_total, cumulativeErrors) + let freshMsgCount = + parseAndAccumulate(waku_rln_messages_total, cumulativeMessages) + let freshSpamCount = + parseAndAccumulate(waku_rln_spam_messages_total, cumulativeSpamMessages) + let freshInvalidMsgCount = + parseAndAccumulate(waku_rln_invalid_messages_total, cumulativeInvalidMessages) + let freshValidMsgCount = + parseAndAccumulate(waku_rln_valid_messages_total, cumulativeValidMessages) + let freshProofsVerifiedCount = parseAndAccumulate( + waku_rln_proof_verification_total, cumulativeProofsVerified + ) + let freshProofsGeneratedCount = + parseAndAccumulate(waku_rln_total_generated_proofs, cumulativeProofsGenerated) + let freshProofsRemainingCount = parseAndAccumulate( + waku_rln_remaining_proofs_per_epoch, cumulativeProofsRemaining + ) + let freshRegisteredMemberCount = parseAndAccumulate( + waku_rln_number_registered_memberships, cumulativeRegisteredMember + ) + + info "Total messages", count = freshMsgCount + info "Total spam messages", count = freshSpamCount + info "Total invalid messages", count = freshInvalidMsgCount + info "Total valid messages", count = freshValidMsgCount + info "Total errors", count = freshErrorCount + info "Total proofs verified", count = freshProofsVerifiedCount + info "Total proofs generated", count = freshProofsGeneratedCount + info "Total proofs remaining", count = freshProofsRemainingCount + info "Total registered members", count = freshRegisteredMemberCount + + return 
logMetrics diff --git a/third-party/nwaku/waku/waku_rln_relay/protocol_types.nim b/third-party/nwaku/waku/waku_rln_relay/protocol_types.nim new file mode 100644 index 0000000..8678788 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/protocol_types.nim @@ -0,0 +1,145 @@ +{.push raises: [].} + +import std/[options, tables, deques], stew/arrayops, stint, chronos, web3, eth/keys +import ../waku_core, ../waku_keystore, ../common/protobuf + +export waku_keystore, waku_core + +type RlnRelayResult*[T] = Result[T, string] + +## RLN is a Nim wrapper for the data types used in zerokit RLN +type RLN* {.incompleteStruct.} = object +type RLNResult* = RlnRelayResult[ptr RLN] + +type + MerkleNode* = array[32, byte] + # Each node of the Merkle tree is a Poseidon hash which is a 32 byte value + Nullifier* = array[32, byte] + Epoch* = array[32, byte] + RlnIdentifier* = array[32, byte] + ZKSNARK* = array[128, byte] + MessageId* = uint64 + ExternalNullifier* = array[32, byte] + RateCommitment* = object + idCommitment*: IDCommitment + userMessageLimit*: UserMessageLimit + + RawRateCommitment* = seq[byte] + +proc toRateCommitment*(rateCommitmentUint: UInt256): RawRateCommitment = + return RawRateCommitment(@(rateCommitmentUint.toBytesLE())) + +# Custom data types defined for waku rln relay ------------------------- +type RateLimitProof* = object + ## RateLimitProof holds the public inputs to rln circuit as + ## defined in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Public-Inputs + ## the `proof` field carries the actual zkSNARK proof + proof*: ZKSNARK + ## the root of Merkle tree used for the generation of the `proof` + merkleRoot*: MerkleNode + ## shareX and shareY are shares of user's identity key + ## these shares are created using Shamir secret sharing scheme + ## see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Linear-Equation-amp-SSS + shareX*: MerkleNode + shareY*: MerkleNode + ## nullifier enables linking two messages published during the same epoch + ## 
see details in https://hackmd.io/tMTLMYmTR5eynw2lwK9n1w?view#Nullifiers + nullifier*: Nullifier + ## the epoch used for the generation of the `proof` + epoch*: Epoch + ## Application specific RLN Identifier + rlnIdentifier*: RlnIdentifier + ## the external nullifier used for the generation of the `proof` (derived from poseidon([epoch, rln_identifier])) + externalNullifier*: ExternalNullifier + +type UInt40* = StUint[40] +type UInt32* = StUint[32] + +type + Field = array[32, byte] # Field element representation (256 bits) + RLNWitnessInput* = object + identity_secret*: Field + user_message_limit*: Field + message_id*: Field + path_elements*: seq[byte] + identity_path_index*: seq[byte] + x*: Field + external_nullifier*: Field + +type ProofMetadata* = object + nullifier*: Nullifier + shareX*: MerkleNode + shareY*: MerkleNode + externalNullifier*: Nullifier + +type + MessageValidationResult* {.pure.} = enum + Valid + Invalid + Spam + + MerkleNodeResult* = RlnRelayResult[MerkleNode] + RateLimitProofResult* = RlnRelayResult[RateLimitProof] + +# Protobufs enc and init +proc init*(T: type RateLimitProof, buffer: seq[byte]): ProtoResult[T] = + var nsp: RateLimitProof + + let pb = initProtoBuffer(buffer) + + var proof: seq[byte] + discard ?pb.getField(1, proof) + discard nsp.proof.copyFrom(proof) + + var merkleRoot: seq[byte] + discard ?pb.getField(2, merkleRoot) + discard nsp.merkleRoot.copyFrom(merkleRoot) + + var epoch: seq[byte] + discard ?pb.getField(3, epoch) + discard nsp.epoch.copyFrom(epoch) + + var shareX: seq[byte] + discard ?pb.getField(4, shareX) + discard nsp.shareX.copyFrom(shareX) + + var shareY: seq[byte] + discard ?pb.getField(5, shareY) + discard nsp.shareY.copyFrom(shareY) + + var nullifier: seq[byte] + discard ?pb.getField(6, nullifier) + discard nsp.nullifier.copyFrom(nullifier) + + var rlnIdentifier: seq[byte] + discard ?pb.getField(7, rlnIdentifier) + discard nsp.rlnIdentifier.copyFrom(rlnIdentifier) + + return ok(nsp) + +proc encode*(nsp: 
RateLimitProof): ProtoBuffer = + var output = initProtoBuffer() + + output.write3(1, nsp.proof) + output.write3(2, nsp.merkleRoot) + output.write3(3, nsp.epoch) + output.write3(4, nsp.shareX) + output.write3(5, nsp.shareY) + output.write3(6, nsp.nullifier) + output.write3(7, nsp.rlnIdentifier) + + output.finish3() + return output + +func encode*(x: UInt32): seq[byte] = + ## the Ethereum ABI imposes a 32 byte width for every type + let numTargetBytes = 32 div 8 + let paddingBytes = 32 - numTargetBytes + let paddingZeros = newSeq[byte](paddingBytes) + paddingZeros & @(stint.toBytesBE(x)) + +type + SpamHandler* = + proc(wakuMessage: WakuMessage): void {.gcsafe, closure, raises: [Defect].} + RegistrationHandler* = + proc(txHash: string): void {.gcsafe, closure, raises: [Defect].} diff --git a/third-party/nwaku/waku/waku_rln_relay/rln.nim b/third-party/nwaku/waku/waku_rln_relay/rln.nim new file mode 100644 index 0000000..59f6b1d --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/rln.nim @@ -0,0 +1,3 @@ +import rln/rln_interface, rln/wrappers + +export rln_interface, wrappers diff --git a/third-party/nwaku/waku/waku_rln_relay/rln/rln_interface.nim b/third-party/nwaku/waku/waku_rln_relay/rln/rln_interface.nim new file mode 100644 index 0000000..98d4156 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/rln/rln_interface.nim @@ -0,0 +1,196 @@ +## Nim wrappers for the functions defined in librln +import ../protocol_types + +{.push raises: [].} + +## Buffer struct is taken from +# https://github.com/celo-org/celo-threshold-bls-rs/blob/master/crates/threshold-bls-ffi/src/ffi.rs +type Buffer* = object + `ptr`*: ptr uint8 + len*: uint + +proc toBuffer*(x: openArray[byte]): Buffer = + ## converts the input to a Buffer object + ## the Buffer object is used to communicate data with the rln lib + var temp = @x + let baseAddr = cast[pointer](x) + let output = Buffer(`ptr`: cast[ptr uint8](baseAddr), len: uint(temp.len)) + return output + 
+###################################################################### +## RLN Zerokit module APIs +###################################################################### + +#-------------------------------- zkSNARKs operations ----------------------------------------- +proc key_gen*( + ctx: ptr RLN, output_buffer: ptr Buffer +): bool {.importc: "extended_key_gen".} + +## generates identity trapdoor, identity nullifier, identity secret hash and id commitment tuple serialized inside output_buffer as | identity_trapdoor<32> | identity_nullifier<32> | identity_secret_hash<32> | id_commitment<32> | +## identity secret hash is the poseidon hash of [identity_trapdoor, identity_nullifier] +## id commitment is the poseidon hash of the identity secret hash +## the return bool value indicates the success or failure of the operation + +proc seeded_key_gen*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "seeded_extended_key_gen".} + +## generates identity trapdoor, identity nullifier, identity secret hash and id commitment tuple serialized inside output_buffer as | identity_trapdoor<32> | identity_nullifier<32> | identity_secret_hash<32> | id_commitment<32> | using ChaCha20 +## seeded with an arbitrary long seed serialized in input_buffer +## The input seed provided by the user is hashed using Keccak256 before being passed to ChaCha20 as seed. 
+## identity secret hash is the poseidon hash of [identity_trapdoor, identity_nullifier] +## id commitment is the poseidon hash of the identity secret hash +## the return bool value indicates the success or failure of the operation + +proc generate_proof*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "generate_rln_proof".} + +## rln-v2 +## input_buffer has to be serialized as [ identity_secret<32> | identity_index<8> | user_message_limit<32> | message_id<32> | external_nullifier<32> | signal_len<8> | signal ] +## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] +## rln-v1 +## input_buffer has to be serialized as [ id_key<32> | id_index<8> | epoch<32> | signal_len<8> | signal ] +## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> ] +## integers wrapped in <> indicate value sizes in bytes +## the return bool value indicates the success or failure of the operation + +proc generate_proof_with_witness*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "generate_rln_proof_with_witness".} + +## rln-v2 +## "witness" term refer to collection of secret inputs with proper serialization +## input_buffer has to be serialized as [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements> | identity_path_index> | x<32> | external_nullifier<32> ] +## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> ] +## rln-v1 +## input_buffer has to be serialized as [ id_key<32> | path_elements> | identity_path_index> | x<32> | epoch<32> | rln_identifier<32> ] +## output_buffer holds the proof data and should be parsed as [ proof<128> | root<32> | epoch<32> | share_x<32> | 
share_y<32> | nullifier<32> | rln_identifier<32> ] +## integers wrapped in <> indicate value sizes in bytes +## path_elements and identity_path_index serialize a merkle proof and are vectors of elements of 32 and 1 bytes respectively +## the return bool value indicates the success or failure of the operation + +proc verify*( + ctx: ptr RLN, proof_buffer: ptr Buffer, proof_is_valid_ptr: ptr bool +): bool {.importc: "verify_rln_proof".} + +## rln-v2 +## proof_buffer has to be serialized as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> | signal_len<8> | signal ] +## rln-v1 +## ## proof_buffer has to be serialized as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal ] +## the return bool value indicates the success or failure of the call to the verify function +## the verification of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure + +proc verify_with_roots*( + ctx: ptr RLN, + proof_buffer: ptr Buffer, + roots_buffer: ptr Buffer, + proof_is_valid_ptr: ptr bool, +): bool {.importc: "verify_with_roots".} + +## rln-v2 +## proof_buffer has to be serialized as [ proof<128> | root<32> | external_nullifier<32> | share_x<32> | share_y<32> | nullifier<32> | signal_len<8> | signal ] +## rln-v1 +## proof_buffer has to be serialized as [ proof<128> | root<32> | epoch<32> | share_x<32> | share_y<32> | nullifier<32> | rln_identifier<32> | signal_len<8> | signal ] +## roots_buffer contains the concatenation of 32 bytes long serializations in little endian of root values +## the return bool value indicates the success or failure of the call to the verify function +## the verification of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure + +proc zk_prove*( + ctx: ptr RLN, input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: 
"prove".} + +## Computes the zkSNARK proof and stores it in output_buffer for input values stored in input_buffer +## rln-v2 +## input_buffer is serialized as input_data as [ identity_secret<32> | user_message_limit<32> | message_id<32> | path_elements> | identity_path_index> | x<32> | external_nullifier<32> ] +## rln-v1 +## input_buffer is serialized as input_data as [ id_key<32> | path_elements> | identity_path_index> | x<32> | epoch<32> | rln_identifier<32> ] +## output_buffer holds the proof data and should be parsed as [ proof<128> ] +## path_elements and indentity_path elements serialize a merkle proof for id_key and are vectors of elements of 32 and 1 bytes, respectively (not. Vec<>). +## x is the x coordinate of the Shamir's secret share for which the proof is computed +## epoch is the input epoch (equivalently, the nullifier) +## the return bool value indicates the success or failure of the operation + +proc zk_verify*( + ctx: ptr RLN, proof_buffer: ptr Buffer, proof_is_valid_ptr: ptr bool +): bool {.importc: "verify".} + +## Verifies the zkSNARK proof passed in proof_buffer +## input_buffer is serialized as input_data as [ proof<128> ] +## the verification of the zk proof is available in proof_is_valid_ptr, where a value of true indicates success and false a failure +## the return bool value indicates the success or failure of the operation + +#---------------------------------------------------------------------------------------------- + +#-------------------------------- Common procedures ------------------------------------------- +proc new_circuit*( + tree_height: uint, input_buffer: ptr Buffer, ctx: ptr (ptr RLN) +): bool {.importc: "new".} + +## creates an instance of rln object as defined by the zerokit RLN lib +## tree_height represent the depth of the Merkle tree +## input_buffer contains a serialization of the path where the circuit resources can be found (.r1cs, .wasm, .zkey and optionally the verification_key.json) +## ctx holds the final 
created rln object +## the return bool value indicates the success or failure of the operation + +proc new_circuit_from_data*( + tree_height: uint, + circom_buffer: ptr Buffer, + zkey_buffer: ptr Buffer, + vk_buffer: ptr Buffer, + ctx: ptr (ptr RLN), +): bool {.importc: "new_with_params".} + +## creates an instance of rln object as defined by the zerokit RLN lib by passing the required inputs as byte arrays +## tree_height represent the depth of the Merkle tree +## circom_buffer contains the bytes read from the Circom .wasm circuit +## zkey_buffer contains the bytes read from the .zkey proving key +## vk_buffer contains the bytes read from the verification_key.json +## ctx holds the final created rln object +## the return bool value indicates the success or failure of the operation + +#-------------------------------- Hashing utils ------------------------------------------- + +proc sha256*( + input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "hash".} + +## it hashes (sha256) the plain text supplied in inputs_buffer and then maps it to a field element +## this proc is used to map arbitrary signals to field element for the sake of proof generation +## inputs_buffer holds the hash input as a byte seq +## the hash output is generated and populated inside output_buffer +## the output_buffer contains 32 bytes hash output + +proc poseidon*( + input_buffer: ptr Buffer, output_buffer: ptr Buffer +): bool {.importc: "poseidon_hash".} + +## it hashes (poseidon) the plain text supplied in inputs_buffer +## this proc is used to compute the identity secret hash, and external nullifier +## inputs_buffer holds the hash input as a byte seq +## the hash output is generated and populated inside output_buffer +## the output_buffer contains 32 bytes hash output + +#-------------------------------- Persistent Metadata utils ------------------------------------------- + +proc set_metadata*( + ctx: ptr RLN, input_buffer: ptr Buffer +): bool {.importc: 
"set_metadata".} + +## sets the metadata stored by ctx to the value passed by input_buffer +## the input_buffer holds a serialized representation of the metadata (format to be defined) +## input_buffer holds the metadata as a byte seq +## the return bool value indicates the success or failure of the operation + +proc get_metadata*( + ctx: ptr RLN, output_buffer: ptr Buffer +): bool {.importc: "get_metadata".} + +## gets the metadata stored by ctx and populates the passed pointer output_buffer with it +## the output_buffer holds the metadata as a byte seq +## the return bool value indicates the success or failure of the operation + +proc flush*(ctx: ptr RLN): bool {.importc: "flush".} +## flushes the write buffer to the database +## the return bool value indicates the success or failure of the operation +## This allows more robust and graceful handling of the database connection diff --git a/third-party/nwaku/waku/waku_rln_relay/rln/wrappers.nim b/third-party/nwaku/waku/waku_rln_relay/rln/wrappers.nim new file mode 100644 index 0000000..a5f8701 --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/rln/wrappers.nim @@ -0,0 +1,305 @@ +import std/json +import + chronicles, + options, + eth/keys, + stew/[arrayops, byteutils, endians2], + stint, + results, + std/[sequtils, strutils, tables, tempfiles] + +import ./rln_interface, ../conversion_utils, ../protocol_types, ../protocol_metrics +import ../../waku_core, ../../waku_keystore + +logScope: + topics = "waku rln_relay ffi" + +proc membershipKeyGen*(ctxPtr: ptr RLN): RlnRelayResult[IdentityCredential] = + ## generates a IdentityCredential that can be used for the registration into the rln membership contract + ## Returns an error if the key generation fails + + # keysBufferPtr will hold the generated identity tuple i.e., trapdoor, nullifier, secret hash and commitment + var + keysBuffer: Buffer + keysBufferPtr = addr(keysBuffer) + done = key_gen(ctxPtr, keysBufferPtr) + + # check whether the keys are generated 
successfully + if (done == false): + return err("error in key generation") + + if (keysBuffer.len != 4 * 32): + return err("keysBuffer is of invalid length") + + var generatedKeys = cast[ptr array[4 * 32, byte]](keysBufferPtr.`ptr`)[] + # the public and secret keys together are 64 bytes + + # TODO define a separate proc to decode the generated keys to the secret and public components + var + idTrapdoor: array[32, byte] + idNullifier: array[32, byte] + idSecretHash: array[32, byte] + idCommitment: array[32, byte] + for (i, x) in idTrapdoor.mpairs: + x = generatedKeys[i + 0 * 32] + for (i, x) in idNullifier.mpairs: + x = generatedKeys[i + 1 * 32] + for (i, x) in idSecretHash.mpairs: + x = generatedKeys[i + 2 * 32] + for (i, x) in idCommitment.mpairs: + x = generatedKeys[i + 3 * 32] + + var identityCredential = IdentityCredential( + idTrapdoor: @idTrapdoor, + idNullifier: @idNullifier, + idSecretHash: @idSecretHash, + idCommitment: @idCommitment, + ) + + return ok(identityCredential) + +type RlnTreeConfig = ref object of RootObj + cache_capacity: int + mode: string + compression: bool + flush_every_ms: int + +type RlnConfig = ref object of RootObj + resources_folder: string + tree_config: RlnTreeConfig + +proc `%`(c: RlnConfig): JsonNode = + ## wrapper around the generic JObject constructor. 
+ ## We don't need to have a separate proc for the tree_config field + let tree_config = + %{ + "cache_capacity": %c.tree_config.cache_capacity, + "mode": %c.tree_config.mode, + "compression": %c.tree_config.compression, + "flush_every_ms": %c.tree_config.flush_every_ms, + } + return %[("resources_folder", %c.resources_folder), ("tree_config", %tree_config)] + +proc createRLNInstanceLocal(d = MerkleTreeDepth): RLNResult = + ## generates an instance of RLN + ## An RLN instance supports both zkSNARKs logics and Merkle tree data structure and operations + ## d indicates the depth of Merkle tree + ## Returns an error if the instance creation fails + + let rln_config = RlnConfig( + resources_folder: "tree_height_" & $d & "/", + tree_config: RlnTreeConfig( + cache_capacity: 15_000, + mode: "high_throughput", + compression: false, + flush_every_ms: 500, + ), + ) + + var serialized_rln_config = $(%rln_config) + + var + rlnInstance: ptr RLN + merkleDepth: csize_t = uint(d) + configBuffer = + serialized_rln_config.toOpenArrayByte(0, serialized_rln_config.high).toBuffer() + + # create an instance of RLN + let res = new_circuit(merkleDepth, addr configBuffer, addr rlnInstance) + # check whether the circuit parameters are generated successfully + if (res == false): + debug "error in parameters generation" + return err("error in parameters generation") + return ok(rlnInstance) + +proc createRLNInstance*(d = MerkleTreeDepth): RLNResult = + ## Wraps the rln instance creation for metrics + ## Returns an error if the instance creation fails + var res: RLNResult + waku_rln_instance_creation_duration_seconds.nanosecondTime: + res = createRLNInstanceLocal(d) + return res + +proc sha256*(data: openArray[byte]): RlnRelayResult[MerkleNode] = + ## a thin layer on top of the Nim wrapper of the sha256 hasher + var lenPrefData = encodeLengthPrefix(data) + var + hashInputBuffer = lenPrefData.toBuffer() + outputBuffer: Buffer # will holds the hash output + + trace "sha256 hash input buffer 
length", bufflen = hashInputBuffer.len + let hashSuccess = sha256(addr hashInputBuffer, addr outputBuffer) + + # check whether the hash call is done successfully + if not hashSuccess: + return err("error in sha256 hash") + + let output = cast[ptr MerkleNode](outputBuffer.`ptr`)[] + + return ok(output) + +proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] = + ## a thin layer on top of the Nim wrapper of the poseidon hasher + var inputBytes = serialize(data) + var + hashInputBuffer = inputBytes.toBuffer() + outputBuffer: Buffer # will holds the hash output + + let hashSuccess = poseidon(addr hashInputBuffer, addr outputBuffer) + + # check whether the hash call is done successfully + if not hashSuccess: + return err("error in poseidon hash") + + let output = cast[ptr array[32, byte]](outputBuffer.`ptr`)[] + + return ok(output) + +proc toLeaf*(rateCommitment: RateCommitment): RlnRelayResult[seq[byte]] = + let idCommitment = rateCommitment.idCommitment + var userMessageLimit: array[32, byte] + try: + discard userMessageLimit.copyFrom( + toBytes(rateCommitment.userMessageLimit, Endianness.littleEndian) + ) + except CatchableError: + return err( + "could not convert the user message limit to bytes: " & getCurrentExceptionMsg() + ) + let leaf = poseidon(@[@idCommitment, @userMessageLimit]).valueOr: + return err("could not convert the rate commitment to a leaf") + var retLeaf = newSeq[byte](leaf.len) + for i in 0 ..< leaf.len: + retLeaf[i] = leaf[i] + return ok(retLeaf) + +proc toLeaves*(rateCommitments: seq[RateCommitment]): RlnRelayResult[seq[seq[byte]]] = + var leaves = newSeq[seq[byte]]() + for rateCommitment in rateCommitments: + let leaf = toLeaf(rateCommitment).valueOr: + return err("could not convert the rate commitment to a leaf: " & $error) + leaves.add(leaf) + return ok(leaves) + +proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] = + let externalNullifier = poseidon(@[@(proof.epoch), @(proof.rlnIdentifier)]).valueOr: + 
return err("could not construct the external nullifier") + return ok( + ProofMetadata( + nullifier: proof.nullifier, + shareX: proof.shareX, + shareY: proof.shareY, + externalNullifier: externalNullifier, + ) + ) + +type RlnMetadata* = object + lastProcessedBlock*: uint64 + chainId*: UInt256 + contractAddress*: string + validRoots*: seq[MerkleNode] + +proc serialize(metadata: RlnMetadata): seq[byte] = + ## serializes the metadata + ## returns the serialized metadata + return concat( + @(metadata.lastProcessedBlock.toBytes()), + @(metadata.chainId.toBytes(Endianness.littleEndian)[0 .. 7]), + @(hexToSeqByte(toLower(metadata.contractAddress))), + @(uint64(metadata.validRoots.len()).toBytes()), + @(serialize(metadata.validRoots)), + ) + +type MerkleNodeSeq = seq[MerkleNode] + +proc deserialize(T: type MerkleNodeSeq, merkleNodeByteSeq: seq[byte]): T = + ## deserializes a byte seq to a seq of MerkleNodes + ## the order of serialization is |merkle_node_len<8>|merkle_node[len]| + + var roots = newSeq[MerkleNode]() + let len = uint64.fromBytes(merkleNodeByteSeq[0 .. 7], Endianness.littleEndian) + trace "length of valid roots", len + for i in 0'u64 ..< len: + # convert seq[byte] to array[32, byte] + let fromByte = 8 + i * 32 + let toByte = fromByte + 31 + let rawRoot = merkleNodeByteSeq[fromByte .. 
toByte] + trace "raw root", rawRoot = rawRoot + var root: MerkleNode + discard root.copyFrom(rawRoot) + roots.add(root) + return roots + +proc setMetadata*(rlnInstance: ptr RLN, metadata: RlnMetadata): RlnRelayResult[void] = + ## sets the metadata of the RLN instance + ## returns an error if the metadata could not be set + ## returns void if the metadata is set successfully + + # serialize the metadata + let metadataBytes = serialize(metadata) + trace "setting metadata", + metadata = metadata, metadataBytes = metadataBytes, len = metadataBytes.len + var metadataBuffer = metadataBytes.toBuffer() + let metadataBufferPtr = addr metadataBuffer + + # set the metadata + let metadataSet = set_metadata(rlnInstance, metadataBufferPtr) + + if not metadataSet: + return err("could not set the metadata") + return ok() + +proc getMetadata*(rlnInstance: ptr RLN): RlnRelayResult[Option[RlnMetadata]] = + ## gets the metadata of the RLN instance + ## returns an error if the metadata could not be retrieved + ## returns the metadata if the metadata is retrieved successfully + + # read the metadata + var + metadata {.noinit.}: Buffer = Buffer() + metadataPtr = addr(metadata) + getMetadataSuccessful = get_metadata(rlnInstance, metadataPtr) + if not getMetadataSuccessful: + return err("could not get the metadata") + trace "metadata length", metadataLen = metadata.len + + if metadata.len == 0: + return ok(none(RlnMetadata)) + + let + lastProcessedBlockOffset = 0 + chainIdOffset = lastProcessedBlockOffset + 8 + contractAddressOffset = chainIdOffset + 8 + validRootsOffset = contractAddressOffset + 20 + + var + lastProcessedBlock: uint64 + chainId: UInt256 + contractAddress: string + validRoots: MerkleNodeSeq + + # 8 + 8 + 20 + 8 + (5*32) = 204 + var metadataBytes = cast[ptr array[204, byte]](metadata.`ptr`)[] + trace "received metadata bytes", + metadataBytes = metadataBytes, len = metadataBytes.len + + lastProcessedBlock = + uint64.fromBytes(metadataBytes[lastProcessedBlockOffset .. 
chainIdOffset - 1]) + chainId = UInt256.fromBytes( + metadataBytes[chainIdOffset .. contractAddressOffset - 1], Endianness.littleEndian + ) + contractAddress = + byteutils.toHex(metadataBytes[contractAddressOffset .. validRootsOffset - 1]) + let validRootsBytes = metadataBytes[validRootsOffset .. metadataBytes.high] + validRoots = MerkleNodeSeq.deserialize(validRootsBytes) + + return ok( + some( + RlnMetadata( + lastProcessedBlock: lastProcessedBlock, + chainId: chainId, + contractAddress: "0x" & contractAddress, + validRoots: validRoots, + ) + ) + ) diff --git a/third-party/nwaku/waku/waku_rln_relay/rln_relay.nim b/third-party/nwaku/waku/waku_rln_relay/rln_relay.nim new file mode 100644 index 0000000..510760f --- /dev/null +++ b/third-party/nwaku/waku/waku_rln_relay/rln_relay.nim @@ -0,0 +1,480 @@ +{.push raises: [].} + +import + std/[sequtils, tables, times, deques], + chronicles, + options, + chronos, + stint, + web3, + json, + web3/eth_api_types, + eth/keys, + libp2p/protocols/pubsub/rpc/messages, + libp2p/protocols/pubsub/pubsub, + results, + stew/[byteutils, arrayops] +import + ./group_manager, + ./rln, + ./conversion_utils, + ./constants, + ./protocol_types, + ./protocol_metrics, + ./nonce_manager + +import + ../common/error_handling, + ../waku_relay, # for WakuRelayHandler + ../waku_core, + ../waku_keystore + +logScope: + topics = "waku rln_relay" + +type RlnRelayCreds* {.requiresInit.} = object + path*: string + password*: string + +type RlnRelayConf* = object of RootObj + # TODO: severals parameters are only needed when it's dynamic + # change the config to either nest or use enum/type variant so it's obvious + # and then it can be set to `requiresInit` + dynamic*: bool + credIndex*: Option[uint] + ethContractAddress*: string + ethClientUrls*: seq[string] + chainId*: UInt256 + creds*: Option[RlnRelayCreds] + epochSizeSec*: uint64 + userMessageLimit*: uint64 + ethPrivateKey*: Option[string] + +type WakuRlnConfig* = object of RlnRelayConf + 
onFatalErrorAction*: OnFatalErrorHandler + +type WakuRLNRelay* = ref object of RootObj + # the log of nullifiers and Shamir shares of the past messages grouped per epoch + nullifierLog*: OrderedTable[Epoch, Table[Nullifier, ProofMetadata]] + lastEpoch*: Epoch # the epoch of the last published rln message + rlnEpochSizeSec*: uint64 + rlnMaxTimestampGap*: uint64 + rlnMaxEpochGap*: uint64 + groupManager*: GroupManager + onFatalErrorAction*: OnFatalErrorHandler + nonceManager*: NonceManager + epochMonitorFuture*: Future[void] + rootChangesFuture*: Future[void] + +proc calcEpoch*(rlnPeer: WakuRLNRelay, t: float64): Epoch = + ## gets time `t` as `flaot64` with subseconds resolution in the fractional part + ## and returns its corresponding rln `Epoch` value + + let e = uint64(t / rlnPeer.rlnEpochSizeSec.float64) + return toEpoch(e) + +proc nextEpoch*(rlnPeer: WakuRLNRelay, time: float64): float64 = + let + currentEpoch = uint64(time / rlnPeer.rlnEpochSizeSec.float64) + nextEpochTime = float64(currentEpoch + 1) * rlnPeer.rlnEpochSizeSec.float64 + currentTime = epochTime() + + # Ensure we always return a future time + if nextEpochTime > currentTime: + return nextEpochTime + else: + return epochTime() + +proc stop*(rlnPeer: WakuRLNRelay) {.async: (raises: [Exception]).} = + ## stops the rln-relay protocol + ## Throws an error if it cannot stop the rln-relay protocol + + # stop the group sync, and flush data to tree db + info "stopping rln-relay" + await rlnPeer.groupManager.stop() + +proc hasDuplicate*( + rlnPeer: WakuRLNRelay, epoch: Epoch, proofMetadata: ProofMetadata +): RlnRelayResult[bool] = + ## returns true if there is another message in the `nullifierLog` of the `rlnPeer` with the same + ## epoch and nullifier as `proofMetadata`'s epoch and nullifier + ## otherwise, returns false + ## Returns an error if it cannot check for duplicates + + # check if the epoch exists + let nullifier = proofMetadata.nullifier + if not rlnPeer.nullifierLog.hasKey(epoch): + return 
ok(false) + try: + if rlnPeer.nullifierLog[epoch].hasKey(nullifier): + # there is an identical record, mark it as spam + return ok(true) + + # there is no duplicate + return ok(false) + except KeyError: + return err("the epoch was not found: " & getCurrentExceptionMsg()) + +proc updateLog*( + rlnPeer: WakuRLNRelay, epoch: Epoch, proofMetadata: ProofMetadata +): RlnRelayResult[void] = + ## saves supplied proofMetadata `proofMetadata` + ## in the `nullifierLog` of the `rlnPeer` + ## Returns an error if it cannot update the log + + # check if the epoch exists + if not rlnPeer.nullifierLog.hasKeyOrPut( + epoch, {proofMetadata.nullifier: proofMetadata}.toTable() + ): + return ok() + + try: + # check if an identical record exists + if rlnPeer.nullifierLog[epoch].hasKeyOrPut(proofMetadata.nullifier, proofMetadata): + # the above condition could be `discarded` but it is kept for clarity, that slashing will + # be implemented here + # TODO: slashing logic + return ok() + return ok() + except KeyError: + return + err("the epoch was not found: " & getCurrentExceptionMsg()) # should never happen + +proc getCurrentEpoch*(rlnPeer: WakuRLNRelay): Epoch = + return rlnPeer.calcEpoch(epochTime()) + +proc absDiff*(e1, e2: Epoch): uint64 = + ## returns the absolute difference between the two rln `Epoch`s `e1` and `e2` + ## i.e., e1 - e2 + + # convert epochs to their corresponding unsigned numerical values + let + epoch1 = fromEpoch(e1) + epoch2 = fromEpoch(e2) + + # Manually perform an `abs` calculation + if epoch1 > epoch2: + return epoch1 - epoch2 + else: + return epoch2 - epoch1 + +proc toRLNSignal*(wakumessage: WakuMessage): seq[byte] = + ## it is a utility proc that prepares the `data` parameter of the proof generation procedure i.e., `proofGen` that resides in the current module + ## it extracts the `contentTopic`, `timestamp` and the `payload` of the supplied `wakumessage` and serializes them into a byte sequence + + let + contentTopicBytes = toBytes(wakumessage.contentTopic) + 
timestampBytes = toBytes(wakumessage.timestamp.uint64) + output = concat(wakumessage.payload, contentTopicBytes, @(timestampBytes)) + return output + +proc validateMessage*( + rlnPeer: WakuRLNRelay, msg: WakuMessage +): MessageValidationResult = + ## validate the supplied `msg` based on the waku-rln-relay routing protocol i.e., + ## the `msg`'s epoch is within MaxEpochGap of the current epoch + ## the `msg` has valid rate limit proof + ## the `msg` does not violate the rate limit + ## `timeOption` indicates Unix epoch time (fractional part holds sub-seconds) + ## if `timeOption` is supplied, then the current epoch is calculated based on that + + let decodeRes = RateLimitProof.init(msg.proof) + if decodeRes.isErr(): + return MessageValidationResult.Invalid + + let proof = decodeRes.get() + + # track message count for metrics + waku_rln_messages_total.inc() + + # checks if the message's timestamp is within acceptable range + let currentTime = getTime().toUnixFloat() + let messageTime = msg.timestamp.float64 / 1e9 + + let timeDiff = uint64(abs(currentTime - messageTime)) + + debug "time info", + currentTime = currentTime, messageTime = messageTime, msgHash = msg.hash + + if timeDiff > rlnPeer.rlnMaxTimestampGap: + warn "invalid message: timestamp difference exceeds threshold", + timeDiff = timeDiff, maxTimestampGap = rlnPeer.rlnMaxTimestampGap + waku_rln_invalid_messages_total.inc(labelValues = ["invalid_timestamp"]) + return MessageValidationResult.Invalid + + let computedEpoch = rlnPeer.calcEpoch(messageTime) + if proof.epoch != computedEpoch: + warn "invalid message: timestamp mismatches epoch", + proofEpoch = fromEpoch(proof.epoch), computedEpoch = fromEpoch(computedEpoch) + waku_rln_invalid_messages_total.inc(labelValues = ["timestamp_mismatch"]) + return MessageValidationResult.Invalid + + let rootValidationRes = rlnPeer.groupManager.validateRoot(proof.merkleRoot) + if not rootValidationRes: + warn "invalid message: provided root does not belong to acceptable 
window of roots", + provided = proof.merkleRoot.inHex(), + validRoots = rlnPeer.groupManager.validRoots.mapIt(it.inHex()) + waku_rln_invalid_messages_total.inc(labelValues = ["invalid_root"]) + return MessageValidationResult.Invalid + + # verify the proof + let + contentTopicBytes = toBytes(msg.contentTopic) + timestampBytes = toBytes(msg.timestamp.uint64) + input = concat(msg.payload, contentTopicBytes, @(timestampBytes)) + + waku_rln_proof_verification_total.inc() + waku_rln_proof_verification_duration_seconds.nanosecondTime: + let proofVerificationRes = + rlnPeer.groupManager.verifyProof(msg.toRLNSignal(), proof) + + if proofVerificationRes.isErr(): + waku_rln_errors_total.inc(labelValues = ["proof_verification"]) + warn "invalid message: proof verification failed", payloadLen = msg.payload.len + return MessageValidationResult.Invalid + + if not proofVerificationRes.value(): + # invalid proof + warn "invalid message: invalid proof", payloadLen = msg.payload.len + waku_rln_invalid_messages_total.inc(labelValues = ["invalid_proof"]) + return MessageValidationResult.Invalid + + # check if double messaging has happened + let proofMetadataRes = proof.extractMetadata() + if proofMetadataRes.isErr(): + waku_rln_errors_total.inc(labelValues = ["proof_metadata_extraction"]) + return MessageValidationResult.Invalid + + let msgEpoch = proof.epoch + let hasDup = rlnPeer.hasDuplicate(msgEpoch, proofMetadataRes.get()) + if hasDup.isErr(): + waku_rln_errors_total.inc(labelValues = ["duplicate_check"]) + elif hasDup.value == true: + trace "invalid message: message is spam", payloadLen = msg.payload.len + waku_rln_spam_messages_total.inc() + return MessageValidationResult.Spam + + trace "message is valid", payloadLen = msg.payload.len + # Metric increment moved to validator to include shard label + return MessageValidationResult.Valid + +proc validateMessageAndUpdateLog*( + rlnPeer: WakuRLNRelay, msg: WakuMessage +): MessageValidationResult = + ## validates the message and 
updates the log to prevent double messaging + ## in future messages + + let isValidMessage = rlnPeer.validateMessage(msg) + + let decodeRes = RateLimitProof.init(msg.proof) + if decodeRes.isErr(): + return MessageValidationResult.Invalid + + let msgProof = decodeRes.get() + let proofMetadataRes = msgProof.extractMetadata() + + if proofMetadataRes.isErr(): + return MessageValidationResult.Invalid + + # insert the message to the log (never errors) only if the + # message is valid. + if isValidMessage == MessageValidationResult.Valid: + discard rlnPeer.updateLog(msgProof.epoch, proofMetadataRes.get()) + + return isValidMessage + +proc appendRLNProof*( + rlnPeer: WakuRLNRelay, msg: var WakuMessage, senderEpochTime: float64 +): RlnRelayResult[void] = + ## returns true if it can create and append a `RateLimitProof` to the supplied `msg` + ## returns false otherwise + ## `senderEpochTime` indicates the number of seconds passed since Unix epoch. The fractional part holds sub-seconds. + ## The `epoch` field of `RateLimitProof` is derived from the provided `senderEpochTime` (using `calcEpoch()`) + + let input = msg.toRLNSignal() + let epoch = rlnPeer.calcEpoch(senderEpochTime) + + let nonce = rlnPeer.nonceManager.getNonce().valueOr: + return err("could not get new message id to generate an rln proof: " & $error) + let proof = rlnPeer.groupManager.generateProof(input, epoch, nonce).valueOr: + return err("could not generate rln-v2 proof: " & $error) + + msg.proof = proof.encode().buffer + return ok() + +proc clearNullifierLog*(rlnPeer: WakuRlnRelay) = + # clear the first MaxEpochGap epochs of the nullifer log + # if more than MaxEpochGap epochs are in the log + let currentEpoch = fromEpoch(rlnPeer.getCurrentEpoch()) + + var epochsToRemove: seq[Epoch] = @[] + for epoch in rlnPeer.nullifierLog.keys(): + let epochInt = fromEpoch(epoch) + + # clean all epochs that are +- rlnMaxEpochGap from the current epoch + if (currentEpoch + rlnPeer.rlnMaxEpochGap) <= epochInt or + epochInt <= 
(currentEpoch - rlnPeer.rlnMaxEpochGap): + epochsToRemove.add(epoch) + + for epochRemove in epochsToRemove: + trace "clearing epochs from the nullifier log", + currentEpoch = currentEpoch, cleanedEpoch = fromEpoch(epochRemove) + rlnPeer.nullifierLog.del(epochRemove) + +proc generateRlnValidator*( + wakuRlnRelay: WakuRLNRelay, spamHandler = none(SpamHandler) +): WakuValidatorHandler = + ## this procedure is a thin wrapper for the pubsub addValidator method + ## it sets a validator for waku messages, acting in the registered pubsub topic + ## the message validation logic is according to https://rfc.vac.dev/spec/17/ + proc validator( + topic: string, message: WakuMessage + ): Future[pubsub.ValidationResult] {.async.} = + trace "rln-relay topic validator is called" + wakuRlnRelay.clearNullifierLog() + + let decodeRes = RateLimitProof.init(message.proof) + + if decodeRes.isErr(): + trace "generateRlnValidator reject", error = decodeRes.error + return pubsub.ValidationResult.Reject + + let msgProof = decodeRes.get() + + # validate the message and update log + let validationRes = wakuRlnRelay.validateMessageAndUpdateLog(message) + + let + proof = toHex(msgProof.proof) + epoch = fromEpoch(msgProof.epoch) + root = inHex(msgProof.merkleRoot) + shareX = inHex(msgProof.shareX) + shareY = inHex(msgProof.shareY) + nullifier = inHex(msgProof.nullifier) + payload = string.fromBytes(message.payload) + case validationRes + of Valid: + trace "message validity is verified, relaying", + proof = proof, + root = root, + shareX = shareX, + shareY = shareY, + nullifier = nullifier + waku_rln_valid_messages_total.inc(labelValues = [topic]) + return pubsub.ValidationResult.Accept + of Invalid: + trace "message validity could not be verified, discarding", + proof = proof, + root = root, + shareX = shareX, + shareY = shareY, + nullifier = nullifier + return pubsub.ValidationResult.Reject + of Spam: + trace "A spam message is found! yay! 
discarding:", + proof = proof, + root = root, + shareX = shareX, + shareY = shareY, + nullifier = nullifier + if spamHandler.isSome(): + let handler = spamHandler.get() + handler(message) + return pubsub.ValidationResult.Reject + + return validator + +proc monitorEpochs(wakuRlnRelay: WakuRLNRelay) {.async.} = + while true: + try: + if wakuRlnRelay.groupManager.userMessageLimit.isSome(): + waku_rln_remaining_proofs_per_epoch.set( + wakuRlnRelay.groupManager.userMessageLimit.get().float64 + ) + else: + error "userMessageLimit is not set in monitorEpochs" + except CatchableError: + error "Error in epoch monitoring", error = getCurrentExceptionMsg() + + let nextEpochTime = wakuRlnRelay.nextEpoch(epochTime()) + let sleepDuration = int((nextEpochTime - epochTime()) * 1000) + await sleepAsync(sleepDuration) + +proc mount( + conf: WakuRlnConfig, registrationHandler = none(RegistrationHandler) +): Future[RlnRelayResult[WakuRlnRelay]] {.async.} = + var + groupManager: GroupManager + wakuRlnRelay: WakuRLNRelay + # create an RLN instance + let rlnInstance = createRLNInstance().valueOr: + return err("could not create RLN instance: " & $error) + + let (rlnRelayCredPath, rlnRelayCredPassword) = + if conf.creds.isSome: + (some(conf.creds.get().path), some(conf.creds.get().password)) + else: + (none(string), none(string)) + + groupManager = OnchainGroupManager( + userMessageLimit: some(conf.userMessageLimit), + ethClientUrls: conf.ethClientUrls, + ethContractAddress: $conf.ethContractAddress, + chainId: conf.chainId, + rlnInstance: rlnInstance, + registrationHandler: registrationHandler, + keystorePath: rlnRelayCredPath, + keystorePassword: rlnRelayCredPassword, + ethPrivateKey: conf.ethPrivateKey, + membershipIndex: conf.credIndex, + onFatalErrorAction: conf.onFatalErrorAction, + ) + + # Initialize the groupManager + (await groupManager.init()).isOkOr: + return err("could not initialize the group manager: " & $error) + + wakuRlnRelay = WakuRLNRelay( + groupManager: groupManager, + 
nonceManager: NonceManager.init(conf.userMessageLimit, conf.epochSizeSec.float), + rlnEpochSizeSec: conf.epochSizeSec, + rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1), + rlnMaxTimestampGap: uint64(MaxClockGapSeconds), + onFatalErrorAction: conf.onFatalErrorAction, + ) + + # track root changes on smart contract merkle tree + if groupManager of OnchainGroupManager: + let onchainManager = cast[OnchainGroupManager](groupManager) + wakuRlnRelay.rootChangesFuture = onchainManager.trackRootChanges() + + # Start epoch monitoring in the background + wakuRlnRelay.epochMonitorFuture = monitorEpochs(wakuRlnRelay) + return ok(wakuRlnRelay) + +proc isReady*(rlnPeer: WakuRLNRelay): Future[bool] {.async: (raises: [Exception]).} = + ## returns true if the rln-relay protocol is ready to relay messages + ## returns false otherwise + + # could be nil during startup + if rlnPeer.groupManager == nil: + return false + try: + return await rlnPeer.groupManager.isReady() + except CatchableError: + error "could not check if the rln-relay protocol is ready", + err = getCurrentExceptionMsg() + return false + +proc new*( + T: type WakuRlnRelay, + conf: WakuRlnConfig, + registrationHandler = none(RegistrationHandler), +): Future[RlnRelayResult[WakuRlnRelay]] {.async.} = + ## Mounts the rln-relay protocol on the node. + ## The rln-relay protocol can be mounted in two modes: on-chain and off-chain. + ## Returns an error if the rln-relay protocol could not be mounted. 
+ try: + return await mount(conf, registrationHandler) + except CatchableError: + return err("could not mount the rln-relay protocol: " & getCurrentExceptionMsg()) diff --git a/third-party/nwaku/waku/waku_store.nim b/third-party/nwaku/waku/waku_store.nim new file mode 100644 index 0000000..9aba802 --- /dev/null +++ b/third-party/nwaku/waku/waku_store.nim @@ -0,0 +1,3 @@ +import ./waku_store/common, ./waku_store/protocol + +export common, protocol diff --git a/third-party/nwaku/waku/waku_store/client.nim b/third-party/nwaku/waku/waku_store/client.nim new file mode 100644 index 0000000..27e92ab --- /dev/null +++ b/third-party/nwaku/waku/waku_store/client.nim @@ -0,0 +1,93 @@ +{.push raises: [].} + +import std/[options, tables], results, chronicles, chronos, metrics, bearssl/rand +import + ../node/peer_manager, ../utils/requests, ./protocol_metrics, ./common, ./rpc_codec + +logScope: + topics = "waku store client" + +const DefaultPageSize*: uint = 20 + # A recommended default number of waku messages per page + +type WakuStoreClient* = ref object + peerManager: PeerManager + rng: ref rand.HmacDrbgContext + storeMsgMetricsPerShard*: Table[string, float64] + +proc new*( + T: type WakuStoreClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext +): T {.gcsafe.} = + WakuStoreClient(peerManager: peerManager, rng: rng) + +proc sendStoreRequest( + self: WakuStoreClient, request: StoreQueryRequest, connection: Connection +): Future[StoreQueryResult] {.async, gcsafe.} = + var req = request + + if req.requestId == "": + req.requestId = generateRequestId(self.rng) + + let writeRes = catch: + await connection.writeLP(req.encode().buffer) + if writeRes.isErr(): + return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: writeRes.error.msg)) + + let readRes = catch: + await connection.readLp(DefaultMaxRpcSize.int) + + let buf = readRes.valueOr: + return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: error.msg)) + + let res = StoreQueryResponse.decode(buf).valueOr: + 
waku_store_errors.inc(labelValues = [DecodeRpcFailure]) + return err(StoreError(kind: ErrorCode.BAD_RESPONSE, cause: DecodeRpcFailure)) + + if res.statusCode != uint32(StatusCode.SUCCESS): + waku_store_errors.inc(labelValues = [NoSuccessStatusCode]) + return err(StoreError.new(res.statusCode, res.statusDesc)) + + if req.pubsubTopic.isSome(): + let topic = req.pubsubTopic.get() + if not self.storeMsgMetricsPerShard.hasKey(topic): + self.storeMsgMetricsPerShard[topic] = 0 + self.storeMsgMetricsPerShard[topic] += float64(req.encode().buffer.len) + + waku_relay_fleet_store_msg_size_bytes.inc( + self.storeMsgMetricsPerShard[topic], labelValues = [topic] + ) + waku_relay_fleet_store_msg_count.inc(1.0, labelValues = [topic]) + + return ok(res) + +proc query*( + self: WakuStoreClient, request: StoreQueryRequest, peer: RemotePeerInfo | PeerId +): Future[StoreQueryResult] {.async, gcsafe.} = + if request.paginationCursor.isSome() and request.paginationCursor.get() == EmptyCursor: + return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: "invalid cursor")) + + let connection = (await self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr: + waku_store_errors.inc(labelValues = [DialFailure]) + + return err(StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer)) + + return await self.sendStoreRequest(request, connection) + +proc queryToAny*( + self: WakuStoreClient, request: StoreQueryRequest, peerId = none(PeerId) +): Future[StoreQueryResult] {.async.} = + ## This proc is similar to the query one but in this case + ## we don't specify a particular peer and instead we get it from peer manager + + if request.paginationCursor.isSome() and request.paginationCursor.get() == EmptyCursor: + return err(StoreError(kind: ErrorCode.BAD_REQUEST, cause: "invalid cursor")) + + let peer = self.peerManager.selectPeer(WakuStoreCodec).valueOr: + return err(StoreError(kind: BAD_RESPONSE, cause: "no service store peer connected")) + + let connection = (await 
self.peerManager.dialPeer(peer, WakuStoreCodec)).valueOr: + waku_store_errors.inc(labelValues = [DialFailure]) + + return err(StoreError(kind: ErrorCode.PEER_DIAL_FAILURE, address: $peer)) + + return await self.sendStoreRequest(request, connection) diff --git a/third-party/nwaku/waku/waku_store/common.nim b/third-party/nwaku/waku/waku_store/common.nim new file mode 100644 index 0000000..d11c803 --- /dev/null +++ b/third-party/nwaku/waku/waku_store/common.nim @@ -0,0 +1,156 @@ +{.push raises: [].} + +import std/[options, sequtils], results, stew/byteutils +import ../waku_core, ../common/paging + +from ../waku_core/codecs import WakuStoreCodec +export WakuStoreCodec + +const + DefaultPageSize*: uint64 = 20 + + MaxPageSize*: uint64 = 100 + + EmptyCursor*: WakuMessageHash = EmptyWakuMessageHash + +type WakuStoreResult*[T] = Result[T, string] + +## Public API types + +type + StoreQueryRequest* = object + requestId*: string + includeData*: bool + + pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + + messageHashes*: seq[WakuMessageHash] + + paginationCursor*: Option[WakuMessageHash] + paginationForward*: PagingDirection + paginationLimit*: Option[uint64] + + WakuMessageKeyValue* = object + messageHash*: WakuMessageHash + message*: Option[WakuMessage] + pubsubTopic*: Option[PubsubTopic] + + StoreQueryResponse* = object + requestId*: string + + statusCode*: uint32 + statusDesc*: string + + messages*: seq[WakuMessageKeyValue] + + paginationCursor*: Option[WakuMessageHash] + + # Types to be used by clients that use the hash in hex + WakuMessageKeyValueHex* = object + messageHash*: string + message*: Option[WakuMessage] + pubsubTopic*: Option[PubsubTopic] + + StoreQueryResponseHex* = object + requestId*: string + + statusCode*: uint32 + statusDesc*: string + + messages*: seq[WakuMessageKeyValueHex] + + paginationCursor*: Option[string] + + StatusCode* {.pure.} = enum + UNKNOWN = uint32(000) + 
SUCCESS = uint32(200) + BAD_RESPONSE = uint32(300) + BAD_REQUEST = uint32(400) + TOO_MANY_REQUESTS = uint32(429) + SERVICE_UNAVAILABLE = uint32(503) + PEER_DIAL_FAILURE = uint32(504) + + ErrorCode* {.pure.} = enum + UNKNOWN = uint32(000) + BAD_RESPONSE = uint32(300) + BAD_REQUEST = uint32(400) + TOO_MANY_REQUESTS = uint32(429) + SERVICE_UNAVAILABLE = uint32(503) + PEER_DIAL_FAILURE = uint32(504) + + StoreError* = object + case kind*: ErrorCode + of ErrorCode.PEER_DIAL_FAILURE: + address*: string + of ErrorCode.BAD_RESPONSE, ErrorCode.BAD_REQUEST: + cause*: string + else: + discard + + StoreQueryResult* = Result[StoreQueryResponse, StoreError] + +proc into*(errCode: ErrorCode): StatusCode = + cast[StatusCode](uint32(errCode)) + +proc new*(T: type StoreError, code: uint32, desc: string): T = + let kind = ErrorCode.parse(code) + + case kind + of ErrorCode.BAD_RESPONSE: + return StoreError(kind: kind, cause: desc) + of ErrorCode.BAD_REQUEST: + return StoreError(kind: kind, cause: desc) + of ErrorCode.TOO_MANY_REQUESTS: + return StoreError(kind: kind) + of ErrorCode.SERVICE_UNAVAILABLE: + return StoreError(kind: kind) + of ErrorCode.PEER_DIAL_FAILURE: + return StoreError(kind: kind, address: desc) + of ErrorCode.UNKNOWN: + return StoreError(kind: kind) + +proc parse*(T: type ErrorCode, kind: uint32): T = + case kind + of 000, 300, 400, 429, 503, 504: + cast[ErrorCode](kind) + else: + ErrorCode.UNKNOWN + +proc `$`*(err: StoreError): string = + case err.kind + of ErrorCode.PEER_DIAL_FAILURE: + "PEER_DIAL_FAILURE: " & err.address + of ErrorCode.BAD_RESPONSE: + "BAD_RESPONSE: " & err.cause + of ErrorCode.BAD_REQUEST: + "BAD_REQUEST: " & err.cause + of ErrorCode.TOO_MANY_REQUESTS: + "TOO_MANY_REQUESTS" + of ErrorCode.SERVICE_UNAVAILABLE: + "SERVICE_UNAVAILABLE" + of ErrorCode.UNKNOWN: + "UNKNOWN" + +proc toHex*(messageData: WakuMessageKeyValue): WakuMessageKeyValueHex = + WakuMessageKeyValueHex( + messageHash: messageData.messageHash.to0xHex(), + # Assuming WakuMessageHash 
has a toHex method + message: messageData.message, + pubsubTopic: messageData.pubsubTopic, + ) + +proc toHex*(response: StoreQueryResponse): StoreQueryResponseHex = + StoreQueryResponseHex( + requestId: response.requestId, + statusCode: response.statusCode, + statusDesc: response.statusDesc, + messages: response.messages.mapIt(it.toHex()), # Convert each message to hex + paginationCursor: + if response.paginationCursor.isSome: + some(response.paginationCursor.get().to0xHex()) + else: + none[string](), + ) diff --git a/third-party/nwaku/waku/waku_store/protocol.nim b/third-party/nwaku/waku/waku_store/protocol.nim new file mode 100644 index 0000000..5e13c9a --- /dev/null +++ b/third-party/nwaku/waku/waku_store/protocol.nim @@ -0,0 +1,169 @@ +## Waku Store protocol for historical messaging support. +## See spec for more details: +## https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-store.md +{.push raises: [].} + +import + std/[options, times], + results, + chronicles, + chronos, + bearssl/rand, + libp2p/crypto/crypto, + libp2p/protocols/protocol, + libp2p/protobuf/minprotobuf, + libp2p/stream/connection, + metrics +import + ../waku_core, + ../node/peer_manager, + ./common, + ./rpc_codec, + ./protocol_metrics, + ../common/rate_limit/request_limiter + +logScope: + topics = "waku store" + +type StoreQueryRequestHandler* = + proc(req: StoreQueryRequest): Future[StoreQueryResult] {.async, gcsafe.} + +type WakuStore* = ref object of LPProtocol + peerManager: PeerManager + rng: ref rand.HmacDrbgContext + requestHandler*: StoreQueryRequestHandler + requestRateLimiter*: RequestRateLimiter + +## Protocol + +type StoreResp = tuple[resp: seq[byte], requestId: string] + +proc handleQueryRequest( + self: WakuStore, requestor: PeerId, raw_request: seq[byte] +): Future[StoreResp] {.async.} = + var res = StoreQueryResponse() + + let req = StoreQueryRequest.decode(raw_request).valueOr: + error "failed to decode rpc", peerId = requestor, error = $error + 
waku_store_errors.inc(labelValues = [DecodeRpcFailure]) + + res.statusCode = uint32(ErrorCode.BAD_REQUEST) + res.statusDesc = "decoding rpc failed: " & $error + + return (res.encode().buffer, "not_parsed_requestId") + + let requestId = req.requestId + + info "received store query request", + peerId = requestor, requestId = requestId, request = req + waku_store_queries.inc() + + let queryResult = await self.requestHandler(req) + + res = queryResult.valueOr: + error "store query failed", + peerId = requestor, requestId = requestId, error = $error + + res.statusCode = uint32(error.kind) + res.statusDesc = $error + + return (res.encode().buffer, "not_parsed_requestId") + + res.requestId = requestId + res.statusCode = 200 + res.statusDesc = "OK" + + info "sending store query response", + peerId = requestor, requestId = requestId, messages = res.messages.len + + return (res.encode().buffer, requestId) + +proc initProtocolHandler(self: WakuStore) = + let rejectReposnseBuffer = StoreQueryResponse( + ## We will not copy and decode RPC buffer from stream only for requestId + ## in reject case as it is comparably too expensive and opens possible + ## attack surface + requestId: "N/A", + statusCode: uint32(ErrorCode.TOO_MANY_REQUESTS), + statusDesc: $ErrorCode.TOO_MANY_REQUESTS, + ).encode().buffer + + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + var successfulQuery = false ## only consider the correct queries in metrics + var resBuf: StoreResp + var queryDuration: float + self.requestRateLimiter.checkUsageLimit(WakuStoreCodec, conn): + let readRes = catch: + await conn.readLp(DefaultMaxRpcSize.int) + + let reqBuf = readRes.valueOr: + error "Connection read error", error = error.msg + return + + waku_service_network_bytes.inc( + amount = reqBuf.len().int64, labelValues = [WakuStoreCodec, "in"] + ) + + let queryStartTime = getTime().toUnixFloat() + + try: + resBuf = await self.handleQueryRequest(conn.peerId, reqBuf) + except 
CatchableError: + error "store query failed in handler", + remote_peer_id = conn.peerId, + requestId = resBuf.requestId, + error = getCurrentExceptionMsg() + return + + queryDuration = getTime().toUnixFloat() - queryStartTime + waku_store_time_seconds.set(queryDuration, ["query-db-time"]) + successfulQuery = true + do: + debug "store query request rejected due rate limit exceeded", + peerId = conn.peerId, limit = $self.requestRateLimiter.setting + resBuf = (rejectReposnseBuffer, "rejected") + + let writeRespStartTime = getTime().toUnixFloat() + + let writeRes = catch: + await conn.writeLp(resBuf.resp) + + if writeRes.isErr(): + error "Connection write error", error = writeRes.error.msg + return + + if successfulQuery: + let writeDuration = getTime().toUnixFloat() - writeRespStartTime + waku_store_time_seconds.set(writeDuration, ["send-store-resp-time"]) + debug "after sending response", + requestId = resBuf.requestId, + queryDurationSecs = queryDuration, + writeStreamDurationSecs = writeDuration + + waku_service_network_bytes.inc( + amount = resBuf.resp.len().int64, labelValues = [WakuStoreCodec, "out"] + ) + + self.handler = handler + self.codec = WakuStoreCodec + +proc new*( + T: type WakuStore, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + requestHandler: StoreQueryRequestHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): T = + if requestHandler.isNil(): # TODO use an Option instead ??? 
+ raise newException(NilAccessDefect, "history query handler is nil") + + let store = WakuStore( + rng: rng, + peerManager: peerManager, + requestHandler: requestHandler, + requestRateLimiter: newRequestRateLimiter(rateLimitSetting), + ) + + store.initProtocolHandler() + setServiceLimitMetric(WakuStoreCodec, rateLimitSetting) + return store diff --git a/third-party/nwaku/waku/waku_store/protocol_metrics.nim b/third-party/nwaku/waku/waku_store/protocol_metrics.nim new file mode 100644 index 0000000..d453216 --- /dev/null +++ b/third-party/nwaku/waku/waku_store/protocol_metrics.nim @@ -0,0 +1,32 @@ +{.push raises: [].} + +import metrics + +declarePublicCounter waku_store_errors, "number of store protocol errors", ["type"] +declarePublicCounter waku_store_queries, "number of store queries received" + +## "query-db-time" phase considers the time when node performs the query to the database. +## "send-store-resp-time" phase is the time when node writes the store response to the store-client. 
+declarePublicGauge waku_store_time_seconds, + "Time in seconds spent by each store phase", labels = ["phase"] + +declarePublicGauge( + waku_relay_fleet_store_msg_size_bytes, + "Total size of messages stored by fleet store nodes per shard", + labels = ["shard"], +) + +declarePublicGauge( + waku_relay_fleet_store_msg_count, + "Number of messages stored by fleet store nodes per shard", + labels = ["shard"], +) + +# Error types (metric label values) +const + DialFailure* = "dial_failure" + DecodeRpcFailure* = "decode_rpc_failure" + PeerNotFoundFailure* = "peer_not_found_failure" + EmptyRpcQueryFailure* = "empty_rpc_query_failure" + EmptyRpcResponseFailure* = "empty_rpc_response_failure" + NoSuccessStatusCode* = "status_code_no_success" diff --git a/third-party/nwaku/waku/waku_store/resume.nim b/third-party/nwaku/waku/waku_store/resume.nim new file mode 100644 index 0000000..208ba0a --- /dev/null +++ b/third-party/nwaku/waku/waku_store/resume.nim @@ -0,0 +1,220 @@ +{.push raises: [].} + +import + std/options, + sqlite3_abi, + chronicles, + chronos, + metrics, + libp2p/protocols/protocol, + libp2p/stream/connection, + libp2p/crypto/crypto, + eth/p2p/discoveryv5/enr + +import + ../common/databases/db_sqlite, + ../waku_core, + ../waku_archive, + ../common/nimchronos, + ../waku_store/[client, common], + ../node/peer_manager/peer_manager + +logScope: + topics = "waku store resume" + +const + OnlineDbUrl = "lastonline.db" + LastOnlineInterval = chronos.minutes(1) + ResumeRangeLimit = 6 # hours + +type + TransferCallback* = proc( + timestamp: Timestamp, peer: RemotePeerInfo + ): Future[Result[void, string]] {.async: (raises: []), closure.} + + StoreResume* = ref object + handle: Future[void] + + db: SqliteDatabase + replaceStmt: SqliteStmt[(Timestamp), void] + + transferCallBack: Option[TransferCallback] + + peerManager: PeerManager + +proc setupLastOnlineDB(): Result[SqliteDatabase, string] = + let db = SqliteDatabase.new(OnlineDbUrl).valueOr: + return err($error) + + let 
createStmt = db + .prepareStmt( + """CREATE TABLE IF NOT EXISTS last_online (timestamp BIGINT NOT NULL);""", + NoParams, void, + ) + .expect("Valid statement") + + createStmt.exec(()).isOkOr: + return err("failed to exec stmt") + + # We dispose of this prepared statement here, as we never use it again + createStmt.dispose() + + return ok(db) + +proc initTransferHandler( + self: StoreResume, wakuArchive: WakuArchive, wakuStoreClient: WakuStoreClient +) = + # guard clauses to prevent faulty callback + if self.peerManager.isNil(): + error "peer manager unavailable for store resume" + return + + if wakuArchive.isNil(): + error "waku archive unavailable for store resume" + return + + if wakuStoreClient.isNil(): + error "waku store client unavailable for store resume" + return + + # tying archive, store client and resume into one callback and saving it for later + self.transferCallBack = some( + proc( + timestamp: Timestamp, peer: RemotePeerInfo + ): Future[Result[void, string]] {.async: (raises: []), closure.} = + var req = StoreQueryRequest() + req.includeData = true + req.startTime = some(timestamp) + req.endTime = some(getNowInNanosecondTime()) + req.paginationLimit = some(uint64(100)) + + while true: + let catchable = catch: + await wakuStoreClient.query(req, peer) + + if catchable.isErr(): + return err("store client error: " & catchable.error.msg) + + let res = catchable.get() + let response = res.valueOr: + return err("store client error: " & $error) + + req.paginationCursor = response.paginationCursor + + for kv in response.messages: + let handleRes = catch: + await wakuArchive.handleMessage(kv.pubsubTopic.get(), kv.message.get()) + + if handleRes.isErr(): + error "message transfer failed", error = handleRes.error.msg + continue + + if req.paginationCursor.isNone(): + break + + return ok() + ) + +proc new*( + T: type StoreResume, + peerManager: PeerManager, + wakuArchive: WakuArchive, + wakuStoreClient: WakuStoreClient, +): Result[T, string] = + info 
"initializing store resume" + + let db = setupLastOnlineDB().valueOr: + return err("Failed to setup last online DB") + + let replaceStmt = db + .prepareStmt("REPLACE INTO last_online (timestamp) VALUES (?);", (Timestamp), void) + .expect("Valid statement") + + let resume = StoreResume(db: db, replaceStmt: replaceStmt, peerManager: peerManager) + + resume.initTransferHandler(wakuArchive, wakuStoreClient) + + return ok(resume) + +proc getLastOnlineTimestamp*(self: StoreResume): Result[Timestamp, string] = + var timestamp: Timestamp + + proc queryCallback(s: ptr sqlite3_stmt) = + timestamp = sqlite3_column_int64(s, 0) + + self.db.query("SELECT MAX(timestamp) FROM last_online", queryCallback).isOkOr: + return err("failed to query: " & $error) + + return ok(timestamp) + +proc setLastOnlineTimestamp*( + self: StoreResume, timestamp: Timestamp +): Result[void, string] = + self.replaceStmt.exec((timestamp)).isOkOr: + return err("failed to execute replace stmt" & $error) + + return ok() + +proc startStoreResume*( + self: StoreResume, time: Timestamp, peer: RemotePeerInfo +): Future[Result[void, string]] {.async.} = + info "starting store resume", lastOnline = $time, peer = $peer + + # get the callback we saved if possible + let callback = self.transferCallBack.valueOr: + return err("transfer callback uninitialised") + + # run the callback + (await callback(time, peer)).isOkOr: + return err("transfer callback failed: " & $error) + + info "store resume completed" + + return ok() + +proc autoStoreResume*(self: StoreResume): Future[Result[void, string]] {.async.} = + let peer = self.peerManager.selectPeer(WakuStoreCodec).valueOr: + return err("no suitable peer found for store resume") + + let lastOnlineTs = self.getLastOnlineTimestamp().valueOr: + return err("failed to get last online timestamp: " & $error) + + # Limit the resume time range + let now = getNowInNanosecondTime() + let maxTime = now - (ResumeRangeLimit * 3600 * 1_000_000_000) + let ts = max(lastOnlineTs, maxTime) 
+ + return await self.startStoreResume(ts, peer) + +proc periodicSetLastOnline(self: StoreResume) {.async.} = + ## Save a timestamp periodically + ## so that a node can know when it was last online + while true: + await sleepAsync(LastOnlineInterval) + + let ts = getNowInNanosecondTime() + + self.setLastOnlineTimestamp(ts).isOkOr: + error "failed to set last online timestamp", error, time = ts + +proc start*(self: StoreResume) {.async.} = + # start resume process, will try thrice. + var tries = 3 + while tries > 0: + (await self.autoStoreResume()).isOkOr: + tries -= 1 + error "store resume failed", triesLeft = tries, error = $error + await sleepAsync(30.seconds) + continue + + break + + # starting periodic storage of last online timestamp + self.handle = self.periodicSetLastOnline() + +proc stopWait*(self: StoreResume) {.async.} = + if not self.handle.isNil(): + await noCancel(self.handle.cancelAndWait()) + + self.replaceStmt.dispose() + self.db.close() diff --git a/third-party/nwaku/waku/waku_store/rpc_codec.nim b/third-party/nwaku/waku/waku_store/rpc_codec.nim new file mode 100644 index 0000000..a5e5e2d --- /dev/null +++ b/third-party/nwaku/waku/waku_store/rpc_codec.nim @@ -0,0 +1,213 @@ +{.push raises: [].} + +import std/options, stew/arrayops +import ../common/[protobuf, paging], ../waku_core, ./common + +const DefaultMaxRpcSize* = -1 + +### Request ### + +proc encode*(req: StoreQueryRequest): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, req.requestId) + pb.write3(2, uint32(req.includeData)) + + pb.write3(10, req.pubsubTopic) + + for contentTopic in req.contentTopics: + pb.write3(11, contentTopic) + + pb.write3( + 12, + req.startTime.map( + proc(time: int64): zint64 = + zint64(time) + ), + ) + pb.write3( + 13, + req.endTime.map( + proc(time: int64): zint64 = + zint64(time) + ), + ) + + for hash in req.messagehashes: + pb.write3(20, hash) + + pb.write3(51, req.paginationCursor) + pb.write3(52, uint32(req.paginationForward)) + pb.write3(53, 
req.paginationLimit) + + pb.finish3() + + return pb + +proc decode*( + T: type StoreQueryRequest, buffer: seq[byte] +): ProtobufResult[StoreQueryRequest] = + var req = StoreQueryRequest() + let pb = initProtoBuffer(buffer) + + if not ?pb.getField(1, req.requestId): + return err(ProtobufError.missingRequiredField("request_id")) + + var inclData: uint32 + if not ?pb.getField(2, inclData): + req.includeData = false + else: + req.includeData = inclData > 0 + + var pubsubTopic: string + if not ?pb.getField(10, pubsubTopic): + req.pubsubTopic = none(string) + else: + req.pubsubTopic = some(pubsubTopic) + + var topics: seq[string] + if not ?pb.getRepeatedField(11, topics): + req.contentTopics = @[] + else: + req.contentTopics = topics + + var start: zint64 + if not ?pb.getField(12, start): + req.startTime = none(Timestamp) + else: + req.startTime = some(Timestamp(int64(start))) + + var endTime: zint64 + if not ?pb.getField(13, endTime): + req.endTime = none(Timestamp) + else: + req.endTime = some(Timestamp(int64(endTime))) + + var buffer: seq[seq[byte]] + if not ?pb.getRepeatedField(20, buffer): + req.messageHashes = @[] + else: + req.messageHashes = newSeqOfCap[WakuMessageHash](buffer.len) + for buf in buffer: + var hash: WakuMessageHash + discard copyFrom[byte](hash, buf) + req.messageHashes.add(hash) + + var cursor: seq[byte] + if not ?pb.getField(51, cursor): + req.paginationCursor = none(WakuMessageHash) + else: + var hash: WakuMessageHash + discard copyFrom[byte](hash, cursor) + req.paginationCursor = some(hash) + + var paging: uint32 + if not ?pb.getField(52, paging): + req.paginationForward = PagingDirection.default() + else: + req.paginationForward = PagingDirection(paging) + + var limit: uint64 + if not ?pb.getField(53, limit): + req.paginationLimit = none(uint64) + else: + req.paginationLimit = some(limit) + + return ok(req) + +### Response ### + +proc encode*(keyValue: WakuMessageKeyValue): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, 
keyValue.messageHash) + + if keyValue.message.isSome() and keyValue.pubsubTopic.isSome(): + pb.write3(2, keyValue.message.get().encode()) + pb.write3(3, keyValue.pubsubTopic.get()) + + pb.finish3() + + return pb + +proc encode*(res: StoreQueryResponse): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, res.requestId) + + pb.write3(10, res.statusCode) + pb.write3(11, res.statusDesc) + + for msg in res.messages: + pb.write3(20, msg.encode()) + + pb.write3(51, res.paginationCursor) + + pb.finish3() + + return pb + +proc decode*( + T: type WakuMessageKeyValue, buffer: seq[byte] +): ProtobufResult[WakuMessageKeyValue] = + var keyValue = WakuMessageKeyValue() + let pb = initProtoBuffer(buffer) + + var buf: seq[byte] + if not ?pb.getField(1, buf): + return err(ProtobufError.missingRequiredField("message_hash")) + else: + var hash: WakuMessageHash + discard copyFrom[byte](hash, buf) + keyValue.messagehash = hash + + var proto: ProtoBuffer + var topic: string + if ?pb.getField(2, proto) and ?pb.getField(3, topic): + keyValue.message = some(?WakuMessage.decode(proto.buffer)) + keyValue.pubsubTopic = some(topic) + else: + keyValue.message = none(WakuMessage) + keyValue.pubsubTopic = none(string) + + return ok(keyValue) + +proc decode*( + T: type StoreQueryResponse, buffer: seq[byte] +): ProtobufResult[StoreQueryResponse] = + var res = StoreQueryResponse() + let pb = initProtoBuffer(buffer) + + if not ?pb.getField(1, res.requestId): + return err(ProtobufError.missingRequiredField("request_id")) + + var code: uint32 + if not ?pb.getField(10, code): + return err(ProtobufError.missingRequiredField("status_code")) + else: + res.statusCode = code + + var desc: string + if not ?pb.getField(11, desc): + return err(ProtobufError.missingRequiredField("status_desc")) + else: + res.statusDesc = desc + + var buffer: seq[seq[byte]] + if not ?pb.getRepeatedField(20, buffer): + res.messages = @[] + else: + res.messages = newSeqOfCap[WakuMessageKeyValue](buffer.len) + for buf in 
buffer: + let msg = ?WakuMessageKeyValue.decode(buf) + res.messages.add(msg) + + var cursor: seq[byte] + if not ?pb.getField(51, cursor): + res.paginationCursor = none(WakuMessageHash) + else: + var hash: WakuMessageHash + discard copyFrom[byte](hash, cursor) + res.paginationCursor = some(hash) + + return ok(res) diff --git a/third-party/nwaku/waku/waku_store/self_req_handler.nim b/third-party/nwaku/waku/waku_store/self_req_handler.nim new file mode 100644 index 0000000..116946d --- /dev/null +++ b/third-party/nwaku/waku/waku_store/self_req_handler.nim @@ -0,0 +1,37 @@ +## +## This file is aimed to attend the requests that come directly +## from the 'self' node. It is expected to attend the store requests that +## come from REST-store endpoint when those requests don't indicate +## any store-peer address. +## +## Notice that the REST-store requests normally assume that the REST +## server is acting as a store-client. In this module, we allow that +## such REST-store node can act as store-server as well by retrieving +## its own stored messages. The typical use case for that is when +## using `nwaku-compose`, which spawn a Waku node connected to a local +## database, and the user is interested in retrieving the messages +## stored by that local store node. +## + +import results, chronos +import ./protocol, ./common + +proc handleSelfStoreRequest*( + self: WakuStore, req: StoreQueryRequest +): Future[WakuStoreResult[StoreQueryResponse]] {.async.} = + ## Handles the store requests made by the node to itself. 
+ ## Normally used in REST-store requests + + let handlerResult = catch: + await self.requestHandler(req) + + let resResult = + if handlerResult.isErr(): + return err("exception in handleSelfStoreRequest: " & handlerResult.error.msg) + else: + handlerResult.get() + + let res = resResult.valueOr: + return err("error in handleSelfStoreRequest: " & $error) + + return ok(res) diff --git a/third-party/nwaku/waku/waku_store_legacy.nim b/third-party/nwaku/waku/waku_store_legacy.nim new file mode 100644 index 0000000..9dac194 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy.nim @@ -0,0 +1,3 @@ +import ./waku_store_legacy/common, ./waku_store_legacy/protocol + +export common, protocol diff --git a/third-party/nwaku/waku/waku_store_legacy/README.md b/third-party/nwaku/waku/waku_store_legacy/README.md new file mode 100644 index 0000000..f206873 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/README.md @@ -0,0 +1,3 @@ +# Waku Store protocol + +The store protocol implements historical message support. See https://rfc.vac.dev/spec/13/ for more information. 
diff --git a/third-party/nwaku/waku/waku_store_legacy/client.nim b/third-party/nwaku/waku/waku_store_legacy/client.nim new file mode 100644 index 0000000..f26906e --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/client.nim @@ -0,0 +1,245 @@ +{.push raises: [].} + +import std/options, results, chronicles, chronos, metrics, bearssl/rand +import + ../node/peer_manager, + ../utils/requests, + ./protocol_metrics, + ./common, + ./rpc, + ./rpc_codec + +when defined(waku_exp_store_resume): + import std/[sequtils, times] + import ../waku_archive + import ../waku_core/message/digest + +logScope: + topics = "waku legacy store client" + +const DefaultPageSize*: uint = 20 + # A recommended default number of waku messages per page + +type WakuStoreClient* = ref object + peerManager: PeerManager + rng: ref rand.HmacDrbgContext + + # TODO: Move outside of the client + when defined(waku_exp_store_resume): + store: ArchiveDriver + +proc new*( + T: type WakuStoreClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext +): T = + WakuStoreClient(peerManager: peerManager, rng: rng) + +proc sendHistoryQueryRPC( + w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo +): Future[HistoryResult] {.async, gcsafe.} = + let connOpt = await w.peerManager.dialPeer(peer, WakuLegacyStoreCodec) + if connOpt.isNone(): + waku_legacy_store_errors.inc(labelValues = [dialFailure]) + return err(HistoryError(kind: HistoryErrorKind.PEER_DIAL_FAILURE, address: $peer)) + + let connection = connOpt.get() + + let requestId = + if req.requestId != "": + req.requestId + else: + generateRequestId(w.rng) + + let reqRpc = HistoryRPC(requestId: requestId, query: some(req.toRPC())) + await connection.writeLP(reqRpc.encode().buffer) + + #TODO: I see a challenge here, if storeNode uses a different MaxRPCSize this read will fail. + # Need to find a workaround for this. 
+ let buf = await connection.readLp(DefaultMaxRpcSize.int) + let respDecodeRes = HistoryRPC.decode(buf) + if respDecodeRes.isErr(): + waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure]) + return + err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure)) + + let respRpc = respDecodeRes.get() + + # Disabled ,for now, since the default response is a possible case (no messages, pagesize = 0, error = NONE(0)) + # TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK)) + # and rework the protobuf parsing to return Option[T] when empty values are received + if respRpc.response.isNone(): + waku_legacy_store_errors.inc(labelValues = [emptyRpcResponseFailure]) + return err( + HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: emptyRpcResponseFailure) + ) + + let resp = respRpc.response.get() + + return resp.toAPI() + +proc query*( + w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo +): Future[HistoryResult] {.async, gcsafe.} = + return await w.sendHistoryQueryRPC(req, peer) + +# TODO: Move outside of the client +when defined(waku_exp_store_resume): + ## Resume store + + const StoreResumeTimeWindowOffset: Timestamp = getNanosecondTime(20) + ## Adjust the time window with an offset of 20 seconds + + proc new*( + T: type WakuStoreClient, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + store: ArchiveDriver, + ): T = + WakuStoreClient(peerManager: peerManager, rng: rng, store: store) + + proc queryAll( + w: WakuStoreClient, query: HistoryQuery, peer: RemotePeerInfo + ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} = + ## A thin wrapper for query. Sends the query to the given peer. when the query has a valid pagingInfo, + ## it retrieves the historical messages in pages. 
+ ## Returns all the fetched messages, if error occurs, returns an error string + + # Make a copy of the query + var req = query + + var messageList: seq[WakuMessage] = @[] + + while true: + let queryRes = await w.query(req, peer) + if queryRes.isErr(): + return err($queryRes.error) + + let response = queryRes.get() + + messageList.add(response.messages) + + # Check whether it is the last page + if response.cursor.isNone(): + break + + # Update paging cursor + req.cursor = response.cursor + + return ok(messageList) + + proc queryLoop( + w: WakuStoreClient, req: HistoryQuery, peers: seq[RemotePeerInfo] + ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} = + ## Loops through the peers candidate list in order and sends the query to each + ## + ## Once all responses have been received, the retrieved messages are consolidated into one deduplicated list. + ## if no messages have been retrieved, the returned future will resolve into a result holding an empty seq. + let queryFuturesList = peers.mapIt(w.queryAll(req, it)) + + await allFutures(queryFuturesList) + + let messagesList = queryFuturesList + .map( + proc(fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] = + try: + # fut.read() can raise a CatchableError + # These futures have been awaited before using allFutures(). Call completed() just as a sanity check. 
+ if not fut.completed() or fut.read().isErr(): + return @[] + + fut.read().value + except CatchableError: + return @[] + ) + .concat() + .deduplicate() + + return ok(messagesList) + + proc put( + store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage + ): Result[void, string] = + let + digest = waku_archive.computeDigest(message) + messageHash = computeMessageHash(pubsubTopic, message) + receivedTime = + if message.timestamp > 0: + message.timestamp + else: + getNanosecondTime(getTime().toUnixFloat()) + + store.put(pubsubTopic, message, digest, messageHash, receivedTime) + + proc resume*( + w: WakuStoreClient, + peerList = none(seq[RemotePeerInfo]), + pageSize = DefaultPageSize, + pubsubTopic = DefaultPubsubTopic, + ): Future[WakuStoreResult[uint64]] {.async, gcsafe.} = + ## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online + ## messages are stored in the store node's messages field and in the message db + ## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message + ## an offset of 20 second is added to the time window to count for nodes asynchrony + ## peerList indicates the list of peers to query from. + ## The history is fetched from all available peers in this list and then consolidated into one deduplicated list. + ## Such candidates should be found through a discovery method (to be developed). + ## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from. + ## The history gets fetched successfully if the dialed peer has been online during the queried time window. 
+ ## the resume proc returns the number of retrieved messages if no error occurs, otherwise returns the error string + + # If store has not been provided, don't even try + if w.store.isNil(): + return err("store not provided (nil)") + + # NOTE: Original implementation is based on the message's sender timestamp. At the moment + # of writing, the sqlite store implementation returns the last message's receiver + # timestamp. + # lastSeenTime = lastSeenItem.get().msg.timestamp + let + lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0)) + now = getNanosecondTime(getTime().toUnixFloat()) + + debug "resuming with offline time window", + lastSeenTime = lastSeenTime, currentTime = now + + let + queryEndTime = now + StoreResumeTimeWindowOffset + queryStartTime = max(lastSeenTime - StoreResumeTimeWindowOffset, 0) + + let req = HistoryQuery( + pubsubTopic: some(pubsubTopic), + startTime: some(queryStartTime), + endTime: some(queryEndTime), + pageSize: uint64(pageSize), + direction: default(), + ) + + var res: WakuStoreResult[seq[WakuMessage]] + if peerList.isSome(): + debug "trying the candidate list to fetch the history" + res = await w.queryLoop(req, peerList.get()) + else: + debug "no candidate list is provided, selecting a random peer" + # if no peerList is set then query from one of the peers stored in the peer manager + let peerOpt = w.peerManager.selectPeer(WakuLegacyStoreCodec) + if peerOpt.isNone(): + warn "no suitable remote peers" + waku_legacy_store_errors.inc(labelValues = [peerNotFoundFailure]) + return err("no suitable remote peers") + + debug "a peer is selected from peer manager" + res = await w.queryAll(req, peerOpt.get()) + + if res.isErr(): + debug "failed to resume the history" + return err("failed to resume the history") + + # Save the retrieved messages in the store + var added: uint = 0 + for msg in res.get(): + let putStoreRes = w.store.put(pubsubTopic, msg) + if putStoreRes.isErr(): + continue + + added.inc() + + return ok(added) diff 
--git a/third-party/nwaku/waku/waku_store_legacy/common.nim b/third-party/nwaku/waku/waku_store_legacy/common.nim new file mode 100644 index 0000000..c1958f2 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/common.nim @@ -0,0 +1,108 @@ +{.push raises: [].} + +import std/[options, sequtils], results, stew/byteutils, nimcrypto/sha2 +import ../waku_core, ../common/paging + +from ../waku_core/codecs import WakuLegacyStoreCodec +export WakuLegacyStoreCodec + +const + DefaultPageSize*: uint64 = 20 + + MaxPageSize*: uint64 = 100 + +type WakuStoreResult*[T] = Result[T, string] + +## Waku message digest + +type MessageDigest* = MDigest[256] + +proc computeDigest*(msg: WakuMessage): MessageDigest = + var ctx: sha256 + ctx.init() + defer: + ctx.clear() + + ctx.update(msg.contentTopic.toBytes()) + ctx.update(msg.payload) + + # Computes the hash + return ctx.finish() + +## Public API types + +type + HistoryCursor* = object + pubsubTopic*: PubsubTopic + senderTime*: Timestamp + storeTime*: Timestamp + digest*: MessageDigest + + HistoryQuery* = object + pubsubTopic*: Option[PubsubTopic] + contentTopics*: seq[ContentTopic] + cursor*: Option[HistoryCursor] + startTime*: Option[Timestamp] + endTime*: Option[Timestamp] + pageSize*: uint64 + direction*: PagingDirection + requestId*: string + + HistoryResponse* = object + messages*: seq[WakuMessage] + cursor*: Option[HistoryCursor] + + HistoryErrorKind* {.pure.} = enum + UNKNOWN = uint32(000) + BAD_RESPONSE = uint32(300) + BAD_REQUEST = uint32(400) + TOO_MANY_REQUESTS = uint32(429) + SERVICE_UNAVAILABLE = uint32(503) + PEER_DIAL_FAILURE = uint32(504) + + HistoryError* = object + case kind*: HistoryErrorKind + of PEER_DIAL_FAILURE: + address*: string + of BAD_RESPONSE, BAD_REQUEST: + cause*: string + else: + discard + + HistoryResult* = Result[HistoryResponse, HistoryError] + +proc parse*(T: type HistoryErrorKind, kind: uint32): T = + case kind + of 000, 200, 300, 400, 429, 503: + HistoryErrorKind(kind) + else: + 
HistoryErrorKind.UNKNOWN + +proc `$`*(err: HistoryError): string = + case err.kind + of HistoryErrorKind.PEER_DIAL_FAILURE: + "PEER_DIAL_FAILURE: " & err.address + of HistoryErrorKind.BAD_RESPONSE: + "BAD_RESPONSE: " & err.cause + of HistoryErrorKind.BAD_REQUEST: + "BAD_REQUEST: " & err.cause + of HistoryErrorKind.TOO_MANY_REQUESTS: + "TOO_MANY_REQUESTS" + of HistoryErrorKind.SERVICE_UNAVAILABLE: + "SERVICE_UNAVAILABLE" + of HistoryErrorKind.UNKNOWN: + "UNKNOWN" + +proc checkHistCursor*(self: HistoryCursor): Result[void, HistoryError] = + if self.pubsubTopic.len == 0: + return err(HistoryError(kind: BAD_REQUEST, cause: "empty pubsubTopic")) + if self.senderTime == 0: + return err(HistoryError(kind: BAD_REQUEST, cause: "invalid senderTime")) + if self.storeTime == 0: + return err(HistoryError(kind: BAD_REQUEST, cause: "invalid storeTime")) + if self.digest.data.all( + proc(x: byte): bool = + x == 0 + ): + return err(HistoryError(kind: BAD_REQUEST, cause: "empty digest")) + return ok() diff --git a/third-party/nwaku/waku/waku_store_legacy/protocol.nim b/third-party/nwaku/waku/waku_store_legacy/protocol.nim new file mode 100644 index 0000000..79d0f03 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/protocol.nim @@ -0,0 +1,187 @@ +## Waku Store protocol for historical messaging support. 
+## See spec for more details: +## https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-store.md +{.push raises: [].} + +import + std/[options, times], + results, + chronicles, + chronos, + bearssl/rand, + libp2p/crypto/crypto, + libp2p/protocols/protocol, + libp2p/protobuf/minprotobuf, + libp2p/stream/connection, + metrics +import + ../waku_core, + ../node/peer_manager, + ./common, + ./rpc, + ./rpc_codec, + ./protocol_metrics, + ../common/rate_limit/request_limiter + +logScope: + topics = "waku legacy store" + +type HistoryQueryHandler* = + proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} + +type WakuStore* = ref object of LPProtocol + peerManager: PeerManager + rng: ref rand.HmacDrbgContext + queryHandler*: HistoryQueryHandler + requestRateLimiter*: RequestRateLimiter + +## Protocol + +type StoreResp = tuple[resp: seq[byte], requestId: string] + +proc handleLegacyQueryRequest( + self: WakuStore, requestor: PeerId, raw_request: seq[byte] +): Future[StoreResp] {.async.} = + let decodeRes = HistoryRPC.decode(raw_request) + if decodeRes.isErr(): + error "failed to decode rpc", peerId = requestor, error = $decodeRes.error + waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure]) + return (newSeq[byte](), "failed to decode rpc") + + let reqRpc = decodeRes.value + + if reqRpc.query.isNone(): + error "empty query rpc", peerId = requestor, requestId = reqRpc.requestId + waku_legacy_store_errors.inc(labelValues = [emptyRpcQueryFailure]) + return (newSeq[byte](), "empty query rpc") + + let requestId = reqRpc.requestId + var request = reqRpc.query.get().toAPI() + request.requestId = requestId + + info "received history query", + peerId = requestor, requestId = requestId, query = request + waku_legacy_store_queries.inc() + + var responseRes: HistoryResult + try: + responseRes = await self.queryHandler(request) + except Exception: + error "history query failed", + peerId = requestor, requestId = requestId, error = getCurrentExceptionMsg() + + 
let error = HistoryError(kind: HistoryErrorKind.UNKNOWN).toRPC() + let response = HistoryResponseRPC(error: error) + return ( + HistoryRPC(requestId: requestId, response: some(response)).encode().buffer, + requestId, + ) + + if responseRes.isErr(): + error "history query failed", + peerId = requestor, requestId = requestId, error = responseRes.error + + let response = responseRes.toRPC() + return ( + HistoryRPC(requestId: requestId, response: some(response)).encode().buffer, + requestId, + ) + + let response = responseRes.toRPC() + + info "sending history response", + peerId = requestor, requestId = requestId, messages = response.messages.len + + return ( + HistoryRPC(requestId: requestId, response: some(response)).encode().buffer, + requestId, + ) + +proc initProtocolHandler(ws: WakuStore) = + let rejectResponseBuf = HistoryRPC( + ## We will not copy and decode RPC buffer from stream only for requestId + ## in reject case as it is comparably too expensive and opens possible + ## attack surface + requestId: "N/A", + response: some( + HistoryResponseRPC( + error: HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS).toRPC() + ) + ), + ).encode().buffer + + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + var successfulQuery = false ## only consider the correct queries in metrics + var resBuf: StoreResp + var queryDuration: float + ws.requestRateLimiter.checkUsageLimit(WakuLegacyStoreCodec, conn): + let readRes = catch: + await conn.readLp(DefaultMaxRpcSize.int) + + let reqBuf = readRes.valueOr: + error "Connection read error", error = error.msg + return + + waku_service_network_bytes.inc( + amount = reqBuf.len().int64, labelValues = [WakuLegacyStoreCodec, "in"] + ) + + let queryStartTime = getTime().toUnixFloat() + try: + resBuf = await ws.handleLegacyQueryRequest(conn.peerId, reqBuf) + except CatchableError: + error "legacy store query handler failed", + remote_peer_id = conn.peerId, error = getCurrentExceptionMsg() + return 
+ + queryDuration = getTime().toUnixFloat() - queryStartTime + waku_legacy_store_time_seconds.set(queryDuration, ["query-db-time"]) + successfulQuery = true + do: + debug "Legacy store query request rejected due rate limit exceeded", + peerId = conn.peerId, limit = $ws.requestRateLimiter.setting + resBuf = (rejectResponseBuf, "rejected") + + let writeRespStartTime = getTime().toUnixFloat() + let writeRes = catch: + await conn.writeLp(resBuf.resp) + + if writeRes.isErr(): + error "Connection write error", error = writeRes.error.msg + return + + if successfulQuery: + let writeDuration = getTime().toUnixFloat() - writeRespStartTime + waku_legacy_store_time_seconds.set(writeDuration, ["send-store-resp-time"]) + debug "after sending response", + requestId = resBuf.requestId, + queryDurationSecs = queryDuration, + writeStreamDurationSecs = writeDuration + + waku_service_network_bytes.inc( + amount = resBuf.resp.len().int64, labelValues = [WakuLegacyStoreCodec, "out"] + ) + + ws.handler = handler + ws.codec = WakuLegacyStoreCodec + +proc new*( + T: type WakuStore, + peerManager: PeerManager, + rng: ref rand.HmacDrbgContext, + queryHandler: HistoryQueryHandler, + rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](), +): T = + # Raise a defect if history query handler is nil + if queryHandler.isNil(): + raise newException(NilAccessDefect, "history query handler is nil") + + let ws = WakuStore( + rng: rng, + peerManager: peerManager, + queryHandler: queryHandler, + requestRateLimiter: newRequestRateLimiter(rateLimitSetting), + ) + ws.initProtocolHandler() + setServiceLimitMetric(WakuLegacyStoreCodec, rateLimitSetting) + ws diff --git a/third-party/nwaku/waku/waku_store_legacy/protocol_metrics.nim b/third-party/nwaku/waku/waku_store_legacy/protocol_metrics.nim new file mode 100644 index 0000000..45a8489 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/protocol_metrics.nim @@ -0,0 +1,21 @@ +{.push raises: [].} + +import metrics + 
+declarePublicCounter waku_legacy_store_errors, + "number of legacy store protocol errors", ["type"] +declarePublicCounter waku_legacy_store_queries, + "number of legacy store queries received" + +## "query-db-time" phase considers the time when node performs the query to the database. +## "send-store-resp-time" phase is the time when node writes the store response to the store-client. +declarePublicGauge waku_legacy_store_time_seconds, + "Time in seconds spent by each store phase", labels = ["phase"] + +# Error types (metric label values) +const + dialFailure* = "dial_failure_legacy" + decodeRpcFailure* = "decode_rpc_failure_legacy" + peerNotFoundFailure* = "peer_not_found_failure_legacy" + emptyRpcQueryFailure* = "empty_rpc_query_failure_legacy" + emptyRpcResponseFailure* = "empty_rpc_response_failure_legacy" diff --git a/third-party/nwaku/waku/waku_store_legacy/rpc.nim b/third-party/nwaku/waku/waku_store_legacy/rpc.nim new file mode 100644 index 0000000..bce3e60 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/rpc.nim @@ -0,0 +1,223 @@ +{.push raises: [].} + +import std/[options, sequtils], results +import ../waku_core, ../common/paging, ./common + +## Wire protocol + +const HistoryQueryDirectionDefaultValue = default(type HistoryQuery.direction) + +type PagingIndexRPC* = object + ## This type contains the description of an Index used in the pagination of WakuMessages + pubsubTopic*: PubsubTopic + senderTime*: Timestamp # the time at which the message is generated + receiverTime*: Timestamp + digest*: MessageDigest # calculated over payload and content topic + +proc `==`*(x, y: PagingIndexRPC): bool = + ## receiverTime plays no role in index equality + (x.senderTime == y.senderTime) and (x.digest == y.digest) and + (x.pubsubTopic == y.pubsubTopic) + +proc compute*( + T: type PagingIndexRPC, + msg: WakuMessage, + receivedTime: Timestamp, + pubsubTopic: PubsubTopic, +): T = + ## Takes a WakuMessage with received timestamp and returns its Index. 
+ let + digest = computeDigest(msg) + senderTime = msg.timestamp + + PagingIndexRPC( + pubsubTopic: pubsubTopic, + senderTime: senderTime, + receiverTime: receivedTime, + digest: digest, + ) + +type PagingInfoRPC* = object + ## This type holds the information needed for the pagination + pageSize*: Option[uint64] + cursor*: Option[PagingIndexRPC] + direction*: Option[PagingDirection] + +type + HistoryContentFilterRPC* = object + contentTopic*: ContentTopic + + HistoryQueryRPC* = object + contentFilters*: seq[HistoryContentFilterRPC] + pubsubTopic*: Option[PubsubTopic] + pagingInfo*: Option[PagingInfoRPC] + startTime*: Option[int64] + endTime*: Option[int64] + + HistoryResponseErrorRPC* {.pure.} = enum + ## HistoryResponseErrorRPC contains error message to inform the querying node about + ## the state of its request + NONE = uint32(0) + INVALID_CURSOR = uint32(1) + TOO_MANY_REQUESTS = uint32(429) + SERVICE_UNAVAILABLE = uint32(503) + + HistoryResponseRPC* = object + messages*: seq[WakuMessage] + pagingInfo*: Option[PagingInfoRPC] + error*: HistoryResponseErrorRPC + + HistoryRPC* = object + requestId*: string + query*: Option[HistoryQueryRPC] + response*: Option[HistoryResponseRPC] + +proc parse*(T: type HistoryResponseErrorRPC, kind: uint32): T = + case kind + of 0, 1, 429, 503: + cast[HistoryResponseErrorRPC](kind) + else: + # TODO: Improve error variants/move to satus codes + HistoryResponseErrorRPC.INVALID_CURSOR + +## Wire protocol type mappings + +proc toRPC*(cursor: HistoryCursor): PagingIndexRPC {.gcsafe.} = + PagingIndexRPC( + pubsubTopic: cursor.pubsubTopic, + senderTime: cursor.senderTime, + receiverTime: cursor.storeTime, + digest: cursor.digest, + ) + +proc toAPI*(rpc: PagingIndexRPC): HistoryCursor = + HistoryCursor( + pubsubTopic: rpc.pubsubTopic, + senderTime: rpc.senderTime, + storeTime: rpc.receiverTime, + digest: rpc.digest, + ) + +proc toRPC*(query: HistoryQuery): HistoryQueryRPC = + var rpc = HistoryQueryRPC() + + rpc.contentFilters = + 
query.contentTopics.mapIt(HistoryContentFilterRPC(contentTopic: it)) + + rpc.pubsubTopic = query.pubsubTopic + + rpc.pagingInfo = block: + if query.cursor.isNone() and query.pageSize == default(type query.pageSize) and + query.direction == HistoryQueryDirectionDefaultValue: + none(PagingInfoRPC) + else: + let + pageSize = some(query.pageSize) + cursor = query.cursor.map(toRPC) + direction = some(query.direction) + + some(PagingInfoRPC(pageSize: pageSize, cursor: cursor, direction: direction)) + + rpc.startTime = query.startTime + rpc.endTime = query.endTime + + rpc + +proc toAPI*(rpc: HistoryQueryRPC): HistoryQuery = + let + pubsubTopic = rpc.pubsubTopic + + contentTopics = rpc.contentFilters.mapIt(it.contentTopic) + + cursor = + if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().cursor.isNone(): + none(HistoryCursor) + else: + rpc.pagingInfo.get().cursor.map(toAPI) + + startTime = rpc.startTime + + endTime = rpc.endTime + + pageSize = + if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().pageSize.isNone(): + 0'u64 + else: + rpc.pagingInfo.get().pageSize.get() + + direction = + if rpc.pagingInfo.isNone() or rpc.pagingInfo.get().direction.isNone(): + HistoryQueryDirectionDefaultValue + else: + rpc.pagingInfo.get().direction.get() + + HistoryQuery( + pubsubTopic: pubsubTopic, + contentTopics: contentTopics, + cursor: cursor, + startTime: startTime, + endTime: endTime, + pageSize: pageSize, + direction: direction, + ) + +proc toRPC*(err: HistoryError): HistoryResponseErrorRPC = + # TODO: Better error mappings/move to error codes + case err.kind + of HistoryErrorKind.BAD_REQUEST: + # TODO: Respond aksi with the reason + HistoryResponseErrorRPC.INVALID_CURSOR + of HistoryErrorKind.TOO_MANY_REQUESTS: + HistoryResponseErrorRPC.TOO_MANY_REQUESTS + of HistoryErrorKind.SERVICE_UNAVAILABLE: + HistoryResponseErrorRPC.SERVICE_UNAVAILABLE + else: + HistoryResponseErrorRPC.INVALID_CURSOR + +proc toAPI*(err: HistoryResponseErrorRPC): HistoryError = + # TODO: Better error 
mappings/move to error codes + case err + of HistoryResponseErrorRPC.INVALID_CURSOR: + HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: "invalid cursor") + of HistoryResponseErrorRPC.TOO_MANY_REQUESTS: + HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS) + of HistoryResponseErrorRPC.SERVICE_UNAVAILABLE: + HistoryError(kind: HistoryErrorKind.SERVICE_UNAVAILABLE) + else: + HistoryError(kind: HistoryErrorKind.UNKNOWN) + +proc toRPC*(res: HistoryResult): HistoryResponseRPC = + if res.isErr(): + let error = res.error.toRPC() + + HistoryResponseRPC(error: error) + else: + let resp = res.get() + + let + messages = resp.messages + + pagingInfo = block: + if resp.cursor.isNone(): + none(PagingInfoRPC) + else: + some(PagingInfoRPC(cursor: resp.cursor.map(toRPC))) + + error = HistoryResponseErrorRPC.NONE + + HistoryResponseRPC(messages: messages, pagingInfo: pagingInfo, error: error) + +proc toAPI*(rpc: HistoryResponseRPC): HistoryResult = + if rpc.error != HistoryResponseErrorRPC.NONE: + err(rpc.error.toAPI()) + else: + let + messages = rpc.messages + + cursor = + if rpc.pagingInfo.isNone(): + none(HistoryCursor) + else: + rpc.pagingInfo.get().cursor.map(toAPI) + + ok(HistoryResponse(messages: messages, cursor: cursor)) diff --git a/third-party/nwaku/waku/waku_store_legacy/rpc_codec.nim b/third-party/nwaku/waku/waku_store_legacy/rpc_codec.nim new file mode 100644 index 0000000..f9c518e --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/rpc_codec.nim @@ -0,0 +1,255 @@ +{.push raises: [].} + +import std/options, nimcrypto/hash +import ../common/[protobuf, paging], ../waku_core, ./common, ./rpc + +const DefaultMaxRpcSize* = -1 + +## Pagination + +proc encode*(index: PagingIndexRPC): ProtoBuffer = + ## Encode an Index object into a ProtoBuffer + ## returns the resultant ProtoBuffer + var pb = initProtoBuffer() + + pb.write3(1, index.digest.data) + pb.write3(2, zint64(index.receiverTime)) + pb.write3(3, zint64(index.senderTime)) + pb.write3(4, 
index.pubsubTopic) + pb.finish3() + + pb + +proc decode*(T: type PagingIndexRPC, buffer: seq[byte]): ProtobufResult[T] = + ## creates and returns an Index object out of buffer + var rpc = PagingIndexRPC() + let pb = initProtoBuffer(buffer) + + var data: seq[byte] + if not ?pb.getField(1, data): + return err(ProtobufError.missingRequiredField("digest")) + else: + var digest = MessageDigest() + for count, b in data: + digest.data[count] = b + + rpc.digest = digest + + var receiverTime: zint64 + if not ?pb.getField(2, receiverTime): + return err(ProtobufError.missingRequiredField("receiver_time")) + else: + rpc.receiverTime = int64(receiverTime) + + var senderTime: zint64 + if not ?pb.getField(3, senderTime): + return err(ProtobufError.missingRequiredField("sender_time")) + else: + rpc.senderTime = int64(senderTime) + + var pubsubTopic: string + if not ?pb.getField(4, pubsubTopic): + return err(ProtobufError.missingRequiredField("pubsub_topic")) + else: + rpc.pubsubTopic = pubsubTopic + + ok(rpc) + +proc encode*(rpc: PagingInfoRPC): ProtoBuffer = + ## Encodes a PagingInfo object into a ProtoBuffer + ## returns the resultant ProtoBuffer + var pb = initProtoBuffer() + + pb.write3(1, rpc.pageSize) + pb.write3(2, rpc.cursor.map(encode)) + pb.write3( + 3, + rpc.direction.map( + proc(d: PagingDirection): uint32 = + uint32(ord(d)) + ), + ) + pb.finish3() + + pb + +proc decode*(T: type PagingInfoRPC, buffer: seq[byte]): ProtobufResult[T] = + ## creates and returns a PagingInfo object out of buffer + var rpc = PagingInfoRPC() + let pb = initProtoBuffer(buffer) + + var pageSize: uint64 + if not ?pb.getField(1, pageSize): + rpc.pageSize = none(uint64) + else: + rpc.pageSize = some(pageSize) + + var cursorBuffer: seq[byte] + if not ?pb.getField(2, cursorBuffer): + rpc.cursor = none(PagingIndexRPC) + else: + let cursor = ?PagingIndexRPC.decode(cursorBuffer) + rpc.cursor = some(cursor) + + var direction: uint32 + if not ?pb.getField(3, direction): + rpc.direction = 
none(PagingDirection) + else: + rpc.direction = some(PagingDirection(direction)) + + ok(rpc) + +## Wire protocol + +proc encode*(rpc: HistoryContentFilterRPC): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.contentTopic) + pb.finish3() + + pb + +proc decode*(T: type HistoryContentFilterRPC, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + + var contentTopic: ContentTopic + if not ?pb.getField(1, contentTopic): + return err(ProtobufError.missingRequiredField("content_topic")) + ok(HistoryContentFilterRPC(contentTopic: contentTopic)) + +proc encode*(rpc: HistoryQueryRPC): ProtoBuffer = + var pb = initProtoBuffer() + pb.write3(2, rpc.pubsubTopic) + + for filter in rpc.contentFilters: + pb.write3(3, filter.encode()) + + pb.write3(4, rpc.pagingInfo.map(encode)) + pb.write3( + 5, + rpc.startTime.map( + proc(time: int64): zint64 = + zint64(time) + ), + ) + pb.write3( + 6, + rpc.endTime.map( + proc(time: int64): zint64 = + zint64(time) + ), + ) + pb.finish3() + + pb + +proc decode*(T: type HistoryQueryRPC, buffer: seq[byte]): ProtobufResult[T] = + var rpc = HistoryQueryRPC() + let pb = initProtoBuffer(buffer) + + var pubsubTopic: string + if not ?pb.getField(2, pubsubTopic): + rpc.pubsubTopic = none(string) + else: + rpc.pubsubTopic = some(pubsubTopic) + + var buffs: seq[seq[byte]] + if not ?pb.getRepeatedField(3, buffs): + rpc.contentFilters = @[] + else: + for pb in buffs: + let filter = ?HistoryContentFilterRPC.decode(pb) + rpc.contentFilters.add(filter) + + var pagingInfoBuffer: seq[byte] + if not ?pb.getField(4, pagingInfoBuffer): + rpc.pagingInfo = none(PagingInfoRPC) + else: + let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer) + rpc.pagingInfo = some(pagingInfo) + + var startTime: zint64 + if not ?pb.getField(5, startTime): + rpc.startTime = none(int64) + else: + rpc.startTime = some(int64(startTime)) + + var endTime: zint64 + if not ?pb.getField(6, endTime): + rpc.endTime = none(int64) + else: + rpc.endTime = 
some(int64(endTime)) + + ok(rpc) + +proc encode*(response: HistoryResponseRPC): ProtoBuffer = + var pb = initProtoBuffer() + + for rpc in response.messages: + pb.write3(2, rpc.encode()) + + pb.write3(3, response.pagingInfo.map(encode)) + pb.write3(4, uint32(ord(response.error))) + pb.finish3() + + pb + +proc decode*(T: type HistoryResponseRPC, buffer: seq[byte]): ProtobufResult[T] = + var rpc = HistoryResponseRPC() + let pb = initProtoBuffer(buffer) + + var messages: seq[seq[byte]] + if ?pb.getRepeatedField(2, messages): + for pb in messages: + let message = ?WakuMessage.decode(pb) + rpc.messages.add(message) + else: + rpc.messages = @[] + + var pagingInfoBuffer: seq[byte] + if ?pb.getField(3, pagingInfoBuffer): + let pagingInfo = ?PagingInfoRPC.decode(pagingInfoBuffer) + rpc.pagingInfo = some(pagingInfo) + else: + rpc.pagingInfo = none(PagingInfoRPC) + + var error: uint32 + if not ?pb.getField(4, error): + return err(ProtobufError.missingRequiredField("error")) + else: + rpc.error = HistoryResponseErrorRPC.parse(error) + + ok(rpc) + +proc encode*(rpc: HistoryRPC): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, rpc.requestId) + pb.write3(2, rpc.query.map(encode)) + pb.write3(3, rpc.response.map(encode)) + pb.finish3() + + pb + +proc decode*(T: type HistoryRPC, buffer: seq[byte]): ProtobufResult[T] = + var rpc = HistoryRPC() + let pb = initProtoBuffer(buffer) + + if not ?pb.getField(1, rpc.requestId): + return err(ProtobufError.missingRequiredField("request_id")) + + var queryBuffer: seq[byte] + if not ?pb.getField(2, queryBuffer): + rpc.query = none(HistoryQueryRPC) + else: + let query = ?HistoryQueryRPC.decode(queryBuffer) + rpc.query = some(query) + + var responseBuffer: seq[byte] + if not ?pb.getField(3, responseBuffer): + rpc.response = none(HistoryResponseRPC) + else: + let response = ?HistoryResponseRPC.decode(responseBuffer) + rpc.response = some(response) + + ok(rpc) diff --git a/third-party/nwaku/waku/waku_store_legacy/self_req_handler.nim 
b/third-party/nwaku/waku/waku_store_legacy/self_req_handler.nim new file mode 100644 index 0000000..e465d9e --- /dev/null +++ b/third-party/nwaku/waku/waku_store_legacy/self_req_handler.nim @@ -0,0 +1,31 @@ +## +## This file is aimed to attend the requests that come directly +## from the 'self' node. It is expected to attend the store requests that +## come from REST-store endpoint when those requests don't indicate +## any store-peer address. +## +## Notice that the REST-store requests normally assume that the REST +## server is acting as a store-client. In this module, we allow that +## such REST-store node can act as store-server as well by retrieving +## its own stored messages. The typical use case for that is when +## using `nwaku-compose`, which spawn a Waku node connected to a local +## database, and the user is interested in retrieving the messages +## stored by that local store node. +## + +import results, chronos +import ./protocol, ./common + +proc handleSelfStoreRequest*( + self: WakuStore, histQuery: HistoryQuery +): Future[WakuStoreResult[HistoryResponse]] {.async.} = + ## Handles the store requests made by the node to itself. 
+ ## Normally used in REST-store requests + + try: + let resp: HistoryResponse = (await self.queryHandler(histQuery)).valueOr: + return err("error in handleSelfStoreRequest: " & $error) + + return WakuStoreResult[HistoryResponse].ok(resp) + except Exception: + return err("exception in handleSelfStoreRequest: " & getCurrentExceptionMsg()) diff --git a/third-party/nwaku/waku/waku_store_sync.nim b/third-party/nwaku/waku/waku_store_sync.nim new file mode 100644 index 0000000..03c1b33 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_sync.nim @@ -0,0 +1,6 @@ +{.push raises: [].} + +import + ./waku_store_sync/reconciliation, ./waku_store_sync/transfer, ./waku_store_sync/common + +export reconciliation, transfer, common diff --git a/third-party/nwaku/waku/waku_store_sync/codec.nim b/third-party/nwaku/waku/waku_store_sync/codec.nim new file mode 100644 index 0000000..a00de8e --- /dev/null +++ b/third-party/nwaku/waku/waku_store_sync/codec.nim @@ -0,0 +1,351 @@ +{.push raises: [].} + +import std/sequtils, stew/[leb128, byteutils] + +import ../common/protobuf, ../waku_core/message, ../waku_core/time, ./common + +const + HashLen = 32 + VarIntLen = 9 + AvgCapacity = 1000 + +proc encode*(value: WakuMessageAndTopic): ProtoBuffer = + var pb = initProtoBuffer() + + pb.write3(1, value.pubsub) + pb.write3(2, value.message.encode()) + + return pb + +proc deltaEncode*(itemSet: ItemSet): seq[byte] = + # 1 byte for resolved bool and 32 bytes hash plus 9 bytes varint per elements + let capacity = 1 + (itemSet.elements.len * (VarIntLen + HashLen)) + + var + output = newSeqOfCap[byte](capacity) + lastTime = Timestamp(0) + buf = Leb128Buf[uint64]() + + for id in itemSet.elements: + let timeDiff = uint64(id.time) - uint64(lastTime) + lastTime = id.time + + # encode timestamp + buf = timediff.toBytes(Leb128) + output &= @buf + + output &= id.hash + + output &= byte(itemSet.reconciled) + + return output + +proc deltaEncode*(value: RangesData): seq[byte] = + if value.ranges.len == 0: + 
return @[0] + + var + output = newSeqOfCap[byte](AvgCapacity) + buf = Leb128Buf[uint64]() + lastTimestamp: Timestamp + lastHash: Fingerprint + i = 0 + j = 0 + + # encode pubsub topics + buf = uint64(value.pubsubTopics.len).toBytes(Leb128) + output &= @buf + + for topic in value.pubsubTopics: + buf = uint64(topic.len).toBytes(Leb128) + output &= @buf + + output &= topic.toBytes() + + # encode content topics + buf = uint64(value.contentTopics.len).toBytes(Leb128) + output &= @buf + + for topic in value.contentTopics: + buf = uint64(topic.len).toBytes(Leb128) + output &= @buf + + output &= topic.toBytes() + + # the first range is implicit but must be explicit when encoded + let (bound, _) = value.ranges[0] + + lastTimestamp = bound.a.time + lastHash = bound.a.hash + + # encode first timestamp + buf = uint64(lastTimestamp).toBytes(Leb128) + output &= @buf + + # implicit first hash is always 0 and range type is always skip + + for (bound, rangeType) in value.ranges: + let timeDiff = uint64(bound.b.time) - uint64(lastTimestamp) + lastTimestamp = bound.b.time + + # encode timestamp + buf = timeDiff.toBytes(Leb128) + output &= @buf + + if timeDiff == 0: + var sameBytes = 0 + for (byte1, byte2) in zip(lastHash, bound.b.hash): + sameBytes.inc() + + if byte1 != byte2: + break + + # encode number of same bytes + output &= byte(sameBytes) + + # encode hash bytes + output &= bound.b.hash[0 ..< sameBytes] + + # encode rangeType + output &= byte(rangeType) + + case rangeType + of RangeType.Skip: + continue + of RangeType.Fingerprint: + output &= value.fingerprints[i] + i.inc() + of RangeType.ItemSet: + let itemSet = value.itemSets[j] + j.inc() + + # encode how many elements are in the set + buf = uint64(itemSet.elements.len).toBytes(Leb128) + output &= @buf + + let encodedSet = itemSet.deltaEncode() + + output &= encodedSet + + continue + + return output + +proc getItemSetLength(idx: var int, buffer: seq[byte]): int = + let min = min(idx + VarIntLen, buffer.len) + let slice = 
buffer[idx ..< min] + let (val, len) = uint64.fromBytes(slice, Leb128) + idx += len + + return int(val) + +proc getFingerprint(idx: var int, buffer: seq[byte]): Result[Fingerprint, string] = + if idx + HashLen > buffer.len: + return err("Cannot decode fingerprint") + + let slice = buffer[idx ..< idx + HashLen] + idx += HashLen + var fingerprint = EmptyFingerprint + for i, bytes in slice: + fingerprint[i] = bytes + + return ok(fingerprint) + +proc getRangeType(idx: var int, buffer: seq[byte]): Result[RangeType, string] = + if idx >= buffer.len: + return err("Cannot decode range type") + + let val = buffer[idx] + + if val > 2 or val < 0: + return err("Cannot decode range type") + + let rangeType = RangeType(val) + idx += 1 + + return ok(rangeType) + +proc updateHash(idx: var int, buffer: seq[byte], hash: var WakuMessageHash) = + if idx >= buffer.len: + return + + let sameBytes = int(buffer[idx]) + + if sameBytes > 32: + return + + idx += 1 + + if idx + sameBytes > buffer.len: + return + + let slice = buffer[idx ..< idx + sameBytes] + idx += sameBytes + + for i, bytes in slice: + hash[i] = bytes + +proc getTimeDiff(idx: var int, buffer: seq[byte]): Timestamp = + let min = min(idx + VarIntLen, buffer.len) + let slice = buffer[idx ..< min] + let (val, len) = uint64.fromBytes(slice, Leb128) + idx += len + + return Timestamp(val) + +proc getTimestamp(idx: var int, buffer: seq[byte]): Result[Timestamp, string] = + if idx + VarIntLen > buffer.len: + return err("Cannot decode timestamp") + + let slice = buffer[idx ..< idx + VarIntLen] + let (val, len) = uint64.fromBytes(slice, Leb128) + idx += len + + return ok(Timestamp(val)) + +proc getHash(idx: var int, buffer: seq[byte]): Result[WakuMessageHash, string] = + if idx + HashLen > buffer.len: + return err("Cannot decode hash") + + let slice = buffer[idx ..< idx + HashLen] + idx += HashLen + var hash = EmptyWakuMessageHash + for i, bytes in slice: + hash[i] = bytes + + return ok(hash) + +proc getReconciled(idx: var int, 
buffer: seq[byte]): Result[bool, string] = + if idx >= buffer.len: + return err("Cannot decode reconciled") + + let val = buffer[idx] + + if val > 1 or val < 0: + return err("Cannot decode reconciled") + + let recon = bool(val) + idx += 1 + + return ok(recon) + +proc getTopics(idx: var int, buffer: seq[byte]): Result[seq[string], string] = + if idx + VarIntLen > buffer.len: + return err("Cannot decode topic count") + + let slice = buffer[idx ..< idx + VarIntLen] + let (val, len) = uint64.fromBytes(slice, Leb128) + idx += len + let topicCount = int(val) + + var topics: seq[string] + for i in 0 ..< topicCount: + if idx + VarIntLen > buffer.len: + return err("Cannot decode length. Topic index: " & $i) + + let slice = buffer[idx ..< idx + VarIntLen] + let (val, len) = uint64.fromBytes(slice, Leb128) + idx += len + let topicLen = int(val) + + if idx + topicLen > buffer.len: + return err("Cannot decode bytes. Topic index: " & $i) + + let topic = string.fromBytes(buffer[idx ..< idx + topicLen]) + idx += topicLen + + topics.add(topic) + + return ok(topics) + +proc deltaDecode*( + itemSet: var ItemSet, buffer: seq[byte], setLength: int +): Result[int, string] = + var + lastTime = Timestamp(0) + idx = 0 + + while itemSet.elements.len < setLength: + let timeDiff = ?getTimestamp(idx, buffer) + let time = lastTime + timeDiff + lastTime = time + + let hash = ?getHash(idx, buffer) + + let id = SyncID(time: time, hash: hash) + + itemSet.elements.add(id) + + itemSet.reconciled = ?getReconciled(idx, buffer) + + return ok(idx) + +proc getItemSet( + idx: var int, buffer: seq[byte], itemSetLength: int +): Result[ItemSet, string] = + var itemSet = ItemSet() + let slice = buffer[idx ..< buffer.len] + let count = ?deltaDecode(itemSet, slice, itemSetLength) + idx += count + + return ok(itemSet) + +proc deltaDecode*(T: type RangesData, buffer: seq[byte]): Result[T, string] = + if buffer.len <= 1: + return ok(RangesData()) + + var + payload = RangesData() + lastTime = Timestamp(0) + idx = 0 
+ + payload.pubsubTopics = ?getTopics(idx, buffer) + payload.contentTopics = ?getTopics(idx, buffer) + + lastTime = ?getTimestamp(idx, buffer) + + # implicit first hash is always 0 + # implicit first range mode is alway skip + + while idx < buffer.len - 1: + let lowerRangeBound = SyncID(time: lastTime, hash: EmptyWakuMessageHash) + + let timeDiff = getTimeDiff(idx, buffer) + + var hash = EmptyWakuMessageHash + if timeDiff == 0: + updateHash(idx, buffer, hash) + + let thisTime = lastTime + timeDiff + lastTime = thisTime + + let upperRangeBound = SyncID(time: thisTime, hash: hash) + let bounds = lowerRangeBound .. upperRangeBound + + let rangeType = ?getRangeType(idx, buffer) + payload.ranges.add((bounds, rangeType)) + + if rangeType == RangeType.Fingerprint: + let fingerprint = ?getFingerprint(idx, buffer) + payload.fingerprints.add(fingerprint) + elif rangeType == RangeType.ItemSet: + let itemSetLength = getItemSetLength(idx, buffer) + let itemSet = ?getItemSet(idx, buffer, itemSetLength) + payload.itemSets.add(itemSet) + + return ok(payload) + +proc decode*(T: type WakuMessageAndTopic, buffer: seq[byte]): ProtobufResult[T] = + let pb = initProtoBuffer(buffer) + + var pubsub: string + if not ?pb.getField(1, pubsub): + return err(ProtobufError.missingRequiredField("pubsub")) + + var proto: ProtoBuffer + if not ?pb.getField(2, proto): + return err(ProtobufError.missingRequiredField("msg")) + + let message = ?WakuMessage.decode(proto.buffer) + + return ok(WakuMessageAndTopic(pubsub: pubsub, message: message)) diff --git a/third-party/nwaku/waku/waku_store_sync/common.nim b/third-party/nwaku/waku/waku_store_sync/common.nim new file mode 100644 index 0000000..da8a5df --- /dev/null +++ b/third-party/nwaku/waku/waku_store_sync/common.nim @@ -0,0 +1,84 @@ +{.push raises: [].} + +import std/[options], chronos, stew/[byteutils] + +import ../waku_core + +const + DefaultSyncInterval*: Duration = 5.minutes + DefaultSyncRange*: Duration = 1.hours + DefaultGossipSubJitter*: 
Duration = 20.seconds + +type + Fingerprint* = array[32, byte] + + SyncID* = object + time*: Timestamp + hash*: WakuMessageHash + + ItemSet* = object + elements*: seq[SyncID] + reconciled*: bool + + RangeType* {.pure.} = enum + Skip = 0 + Fingerprint = 1 + ItemSet = 2 + + RangesData* = object + pubsubTopics*: seq[PubsubTopic] + contentTopics*: seq[ContentTopic] + + ranges*: seq[(Slice[SyncID], RangeType)] + fingerprints*: seq[Fingerprint] # Range type fingerprint stored here in order + itemSets*: seq[ItemSet] # Range type itemset stored here in order + + WakuMessageAndTopic* = object + pubsub*: PubSubTopic + message*: WakuMessage + +const EmptyFingerprint*: Fingerprint = [ + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, +] + +const FullFingerprint*: Fingerprint = [ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +] + +proc high*(T: type SyncID): T = + ## Same as high(int) but for IDs + + return SyncID(time: Timestamp(high(int64)), fingerprint: FullFingerprint) + +proc low*(T: type SyncID): T = + ## Same as low(int) but for IDs + + return SyncID(time: Timestamp(low(int64)), fingerprint: EmptyFingerprint) + +proc `$`*(value: SyncID): string = + return '(' & $value.time & ", " & $value.hash & ')' + +proc cmp(x, y: Fingerprint): int = + if x < y: + return -1 + elif x == y: + return 0 + + return 1 + +proc cmp*(x, y: SyncID): int = + if x.time == y.time: + return cmp(x.hash, y.hash) + + if x.time < y.time: + return -1 + + return 1 + +proc `<`*(x, y: SyncID): bool = + cmp(x, y) == -1 + +proc `>`*(x, y: SyncID): bool = + cmp(x, y) == 1 diff --git a/third-party/nwaku/waku/waku_store_sync/protocols_metrics.nim b/third-party/nwaku/waku/waku_store_sync/protocols_metrics.nim new file mode 100644 index 0000000..53595f9 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_sync/protocols_metrics.nim @@ -0,0 +1,24 
@@ +import metrics + +const + Reconciliation* = "reconciliation" + Transfer* = "transfer" + Receiving* = "receive" + Sending* = "sent" + +declarePublicHistogram reconciliation_roundtrips, + "the nubmer of roundtrips for each reconciliation", + buckets = [1.0, 2.0, 3.0, 5.0, 8.0, 13.0, Inf] + +declarePublicHistogram reconciliation_differences, + "the nubmer of differences for each reconciliation", + buckets = [0.0, 10.0, 50.0, 100.0, 500.0, 1000.0, 5000.0, Inf] + +declarePublicCounter total_bytes_exchanged, + "the number of bytes sent and received by the protocols", ["protocol", "direction"] + +declarePublicCounter total_transfer_messages_exchanged, + "the number of messages sent and received by the transfer protocol", ["direction"] + +declarePublicGauge total_messages_cached, + "the number of messages cached by the node after prunning" diff --git a/third-party/nwaku/waku/waku_store_sync/reconciliation.nim b/third-party/nwaku/waku/waku_store_sync/reconciliation.nim new file mode 100644 index 0000000..838c494 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_sync/reconciliation.nim @@ -0,0 +1,496 @@ +{.push raises: [].} + +import + std/[sequtils, options, sets], + stew/byteutils, + results, + chronicles, + chronos, + metrics, + libp2p/utility, + libp2p/protocols/protocol, + libp2p/stream/connection, + libp2p/crypto/crypto, + eth/p2p/discoveryv5/enr +import + ../common/nimchronos, + ../common/protobuf, + ../common/paging, + ../waku_enr, + ../waku_core/codecs, + ../waku_core/time, + ../waku_core/topics/pubsub_topic, + ../waku_core/topics/content_topic, + ../waku_core/message/digest, + ../waku_core/message/message, + ../node/peer_manager/peer_manager, + ../waku_archive, + ./common, + ./codec, + ./storage/storage, + ./storage/seq_storage, + ./storage/range_processing, + ./protocols_metrics + +logScope: + topics = "waku reconciliation" + +const DefaultStorageCap = 50_000 + +type SyncReconciliation* = ref object of LPProtocol + pubsubTopics: HashSet[PubsubTopic] # 
Empty set means accept all. See spec. + contentTopics: HashSet[ContentTopic] # Empty set means accept all. See spec. + + peerManager: PeerManager + + wakuArchive: WakuArchive + + storage: SyncStorage + + # Receive IDs from transfer protocol for storage + idsRx: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)] + + # Send Hashes to transfer protocol for reception + localWantsTx: AsyncQueue[(PeerId)] + + # Send Hashes to transfer protocol for transmission + remoteNeedsTx: AsyncQueue[(PeerId, WakuMessageHash)] + + # params + syncInterval: timer.Duration # Time between each synchronization attempt + syncRange: timer.Duration # Amount of time in the past to sync + relayJitter: Duration # Amount of time since the present to ignore when syncing + + # futures + periodicSyncFut: Future[void] + periodicPruneFut: Future[void] + idsReceiverFut: Future[void] + +proc messageIngress*( + self: SyncReconciliation, pubsubTopic: PubsubTopic, msg: WakuMessage +) = + trace "message ingress", pubsub_topic = pubsubTopic, msg = msg + + if msg.ephemeral: + return + + let msgHash = computeMessageHash(pubsubTopic, msg) + + let id = SyncID(time: msg.timestamp, hash: msgHash) + + self.storage.insert(id, pubsubTopic, msg.contentTopic).isOkOr: + error "failed to insert new message", msg_hash = $id.hash.toHex(), error = $error + +proc messageIngress*( + self: SyncReconciliation, + msgHash: WakuMessageHash, + pubsubTopic: PubsubTopic, + msg: WakuMessage, +) = + trace "message ingress", msg_hash = msgHash.toHex(), msg = msg + + if msg.ephemeral: + return + + let id = SyncID(time: msg.timestamp, hash: msgHash) + + self.storage.insert(id, pubsubTopic, msg.contentTopic).isOkOr: + error "failed to insert new message", msg_hash = $id.hash.toHex(), error = $error + +proc messageIngress*( + self: SyncReconciliation, + id: SyncID, + pubsubTopic: PubsubTopic, + contentTopic: ContentTopic, +) = + self.storage.insert(id, pubsubTopic, contentTopic).isOkOr: + error "failed to insert new message", msg_hash = 
$id.hash.toHex(), error = $error + +proc preProcessPayload( + self: SyncReconciliation, payload: RangesData +): Option[RangesData] = + ## Check the received payload for topics and/or time mismatch. + + var payload = payload + + # Always use the smallest pubsub topic scope possible + if payload.pubsubTopics.len > 0 and self.pubsubTopics.len > 0: + let pubsubIntersection = self.pubsubTopics * payload.pubsubTopics.toHashSet() + + if pubsubIntersection.len < 1: + return none(RangesData) + + payload.pubsubTopics = pubsubIntersection.toSeq() + elif self.pubsubTopics.len > 0: + payload.pubsubTopics = self.pubsubTopics.toSeq() + + # Always use the smallest content topic scope possible + if payload.contentTopics.len > 0 and self.contentTopics.len > 0: + let contentIntersection = self.contentTopics * payload.contentTopics.toHashSet() + + if contentIntersection.len < 1: + return none(RangesData) + + payload.contentTopics = contentIntersection.toSeq() + elif self.contentTopics.len > 0: + payload.contentTopics = self.contentTopics.toSeq() + + let timeRange = calculateTimeRange(self.relayJitter, self.syncRange) + let selfLowerBound = timeRange.a + + # for non skip ranges check if they happen before any of our ranges + # convert to skip range before processing + for i in 0 ..< payload.ranges.len: + let rangeType = payload.ranges[i][1] + if rangeType != RangeType.Skip: + continue + + let upperBound = payload.ranges[i][0].b.time + if selfLowerBound > upperBound: + payload.ranges[i][1] = RangeType.Skip + + if rangeType == RangeType.Fingerprint: + payload.fingerprints.delete(0) + elif rangeType == RangeType.ItemSet: + payload.itemSets.delete(0) + else: + break + + return some(payload) + +proc processRequest( + self: SyncReconciliation, conn: Connection +): Future[Result[void, string]] {.async.} = + var + roundTrips = 0 + diffs = 0 + + # Signal to transfer protocol that this reconciliation is starting + await self.localWantsTx.addLast(conn.peerId) + + while true: + let readRes = 
catch: + await conn.readLp(int.high) + + let buffer: seq[byte] = readRes.valueOr: + await conn.close() + return err("remote " & $conn.peerId & " connection read error: " & error.msg) + + total_bytes_exchanged.inc(buffer.len, labelValues = [Reconciliation, Receiving]) + + let recvPayload = RangesData.deltaDecode(buffer).valueOr: + await conn.close() + return err("remote " & $conn.peerId & " payload decoding error: " & error) + + roundTrips.inc() + + trace "sync payload received", + local = self.peerManager.switch.peerInfo.peerId, + remote = conn.peerId, + payload = recvPayload + + if recvPayload.ranges.len == 0 or recvPayload.ranges.allIt(it[1] == RangeType.Skip): + break + + var + hashToRecv: seq[WakuMessageHash] + hashToSend: seq[WakuMessageHash] + sendPayload: RangesData + rawPayload: seq[byte] + + let preProcessedPayloadRes = self.preProcessPayload(recvPayload) + if preProcessedPayloadRes.isSome(): + let preProcessedPayload = preProcessedPayloadRes.get() + + trace "pre-processed payload", + local = self.peerManager.switch.peerInfo.peerId, + remote = conn.peerId, + payload = preProcessedPayload + + sendPayload = + self.storage.processPayload(preProcessedPayload, hashToSend, hashToRecv) + + trace "sync payload processed", + hash_to_send = hashToSend, hash_to_recv = hashToRecv + + sendPayload.pubsubTopics = self.pubsubTopics.toSeq() + sendPayload.contentTopics = self.contentTopics.toSeq() + + for hash in hashToSend: + self.remoteNeedsTx.addLastNoWait((conn.peerId, hash)) + diffs.inc() + + for hash in hashToRecv: + diffs.inc() + + rawPayload = sendPayload.deltaEncode() + + total_bytes_exchanged.inc(rawPayload.len, labelValues = [Reconciliation, Sending]) + + let writeRes = catch: + await conn.writeLP(rawPayload) + + if writeRes.isErr(): + await conn.close() + return + err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg) + + trace "sync payload sent", + local = self.peerManager.switch.peerInfo.peerId, + remote = conn.peerId, + payload = 
sendPayload + + if sendPayload.ranges.len == 0 or sendPayload.ranges.allIt(it[1] == RangeType.Skip): + break + + continue + + # Signal to transfer protocol that this reconciliation is done + await self.localWantsTx.addLast(conn.peerId) + + reconciliation_roundtrips.observe(roundTrips) + reconciliation_differences.observe(diffs) + + await conn.close() + + return ok() + +proc initiate( + self: SyncReconciliation, + connection: Connection, + offset: Duration, + syncRange: Duration, + pubsubTopics: seq[PubsubTopic], + contentTopics: seq[ContentTopic], +): Future[Result[void, string]] {.async.} = + let + timeRange = calculateTimeRange(offset, syncRange) + lower = SyncID(time: timeRange.a, hash: EmptyFingerprint) + upper = SyncID(time: timeRange.b, hash: FullFingerprint) + bounds = lower .. upper + + fingerprint = self.storage.computeFingerprint(bounds, pubsubTopics, contentTopics) + + initPayload = RangesData( + pubsubTopics: pubsubTopics, + contentTopics: contentTopics, + ranges: @[(bounds, RangeType.Fingerprint)], + fingerprints: @[fingerprint], + itemSets: @[], + ) + + let sendPayload = initPayload.deltaEncode() + + total_bytes_exchanged.inc(sendPayload.len, labelValues = [Reconciliation, Sending]) + + let writeRes = catch: + await connection.writeLP(sendPayload) + + if writeRes.isErr(): + await connection.close() + return err( + "remote " & $connection.peerId & " connection write error: " & writeRes.error.msg + ) + + trace "sync payload sent", + local = self.peerManager.switch.peerInfo.peerId, + remote = connection.peerId, + payload = initPayload + + ?await self.processRequest(connection) + + return ok() + +proc storeSynchronization*( + self: SyncReconciliation, + peerInfo: Option[RemotePeerInfo] = none(RemotePeerInfo), + offset: Duration = self.relayJitter, + syncRange: Duration = self.syncRange, + pubsubTopics: HashSet[PubsubTopic] = self.pubsubTopics, + contentTopics: HashSet[ContentTopic] = self.contentTopics, +): Future[Result[void, string]] {.async.} = + let 
peer = peerInfo.valueOr: + self.peerManager.selectPeer(WakuReconciliationCodec).valueOr: + return err("no suitable peer found for sync") + + let connOpt = await self.peerManager.dialPeer(peer, WakuReconciliationCodec) + + let conn: Connection = connOpt.valueOr: + return err("fail to dial remote " & $peer.peerId) + + debug "sync session initialized", + local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId + + ( + await self.initiate( + conn, offset, syncRange, pubsubTopics.toSeq(), contentTopics.toSeq() + ) + ).isOkOr: + error "sync session failed", + local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId, err = error + + return err("sync request error: " & error) + + debug "sync session ended gracefully", + local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId + + return ok() + +proc initFillStorage( + syncRange: timer.Duration, wakuArchive: WakuArchive +): Future[Result[SeqStorage, string]] {.async.} = + if wakuArchive.isNil(): + return err("waku archive unavailable") + + let endTime = getNowInNanosecondTime() + let starTime = endTime - syncRange.nanos + + var query = ArchiveQuery( + includeData: true, + cursor: none(ArchiveCursor), + startTime: some(starTime), + endTime: some(endTime), + pageSize: 100, + direction: PagingDirection.FORWARD, + ) + + debug "initial storage filling started" + + var storage = SeqStorage.new(DefaultStorageCap) + + while true: + let response = (await wakuArchive.findMessages(query)).valueOr: + return err("archive retrival failed: " & $error) + + # we assume IDs are already in order + for i in 0 ..< response.hashes.len: + let hash = response.hashes[i] + let msg = response.messages[i] + let pubsubTopic = response.topics[i] + + let id = SyncID(time: msg.timestamp, hash: hash) + discard storage.insert(id, pubsubTopic, msg.contentTopic) + + if response.cursor.isNone(): + break + + query.cursor = response.cursor + + debug "initial storage filling done", elements = storage.length() + + return 
ok(storage) + +proc new*( + T: type SyncReconciliation, + pubsubTopics: seq[PubSubTopic], + contentTopics: seq[ContentTopic], + peerManager: PeerManager, + wakuArchive: WakuArchive, + syncRange: timer.Duration = DefaultSyncRange, + syncInterval: timer.Duration = DefaultSyncInterval, + relayJitter: timer.Duration = DefaultGossipSubJitter, + idsRx: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)], + localWantsTx: AsyncQueue[PeerId], + remoteNeedsTx: AsyncQueue[(PeerId, WakuMessageHash)], +): Future[Result[T, string]] {.async.} = + let res = await initFillStorage(syncRange, wakuArchive) + let storage = + if res.isErr(): + warn "will not sync messages before this point in time", error = res.error + SeqStorage.new(DefaultStorageCap) + else: + res.get() + + var sync = SyncReconciliation( + pubsubTopics: pubsubTopics.toHashSet(), + contentTopics: contentTopics.toHashSet(), + peerManager: peerManager, + storage: storage, + syncRange: syncRange, + syncInterval: syncInterval, + relayJitter: relayJitter, + idsRx: idsRx, + localWantsTx: localWantsTx, + remoteNeedsTx: remoteNeedsTx, + ) + + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + try: + (await sync.processRequest(conn)).isOkOr: + error "request processing error", error = error + except CatchableError: + error "exception in reconciliation handler", error = getCurrentExceptionMsg() + + sync.handler = handler + sync.codec = WakuReconciliationCodec + + info "Store Reconciliation protocol initialized", + sync_range = syncRange, sync_interval = syncInterval, relay_jitter = relayJitter + + return ok(sync) + +proc periodicSync(self: SyncReconciliation) {.async.} = + debug "periodic sync initialized", interval = $self.syncInterval + + while true: # infinite loop + await sleepAsync(self.syncInterval) + + debug "periodic sync started" + + (await self.storeSynchronization()).isOkOr: + error "periodic sync failed", err = error + continue + + debug "periodic sync done" + +proc 
proc periodicPrune(self: SyncReconciliation) {.async.} =
  ## Background loop: drop storage entries older than `syncRange`
  ## every `syncInterval`.
  debug "periodic prune initialized", interval = $self.syncInterval

  # Offset by half an interval to prevent the sync and prune loops
  # from running at the same time. (typo fix: "of happening" -> "from")
  await sleepAsync((self.syncInterval div 2))

  while true: # infinite loop
    await sleepAsync(self.syncInterval)

    debug "periodic prune started"

    let cutoff = getNowInNanosecondTime() - self.syncRange.nanos

    let count = self.storage.prune(cutoff)

    total_messages_cached.set(self.storage.length())

    debug "periodic prune done", elements_pruned = count

proc idsReceiverLoop(self: SyncReconciliation) {.async.} =
  ## Background loop: feed locally observed message IDs into the storage.
  while true: # infinite loop
    let (id, pubsub, content) = await self.idsRx.popFirst()

    self.messageIngress(id, pubsub, content)

proc start*(self: SyncReconciliation) =
  ## Start the background loops; idempotent.
  if self.started:
    return

  self.started = true

  # Sync and prune only make sense with a positive interval;
  # ID ingestion always runs.
  if self.syncInterval > ZeroDuration:
    self.periodicSyncFut = self.periodicSync()
    self.periodicPruneFut = self.periodicPrune()

  self.idsReceiverFut = self.idsReceiverLoop()

  info "Store Sync Reconciliation protocol started"

proc stop*(self: SyncReconciliation) =
  ## Stop the background loops; idempotent and safe before `start`.
  if not self.started:
    # guard: cancelling never-created (nil) futures would crash
    return

  # reset the flag so start/stop can cycle — consistent with SyncTransfer.stop
  self.started = false

  if self.syncInterval > ZeroDuration:
    self.periodicSyncFut.cancelSoon()
    self.periodicPruneFut.cancelSoon()

  self.idsReceiverFut.cancelSoon()

  info "Store Sync Reconciliation protocol stopped"
proc calculateTimeRange*(
    jitter: Duration = 20.seconds, syncRange: Duration = 1.hours
): Slice[Timestamp] =
  ## Calculates the start and end time of a sync session.
  var now = getNowInNanosecondTime()

  # Shift the window back because of message jitter inherent to the
  # Relay protocol.
  now -= jitter.nanos

  let syncStart = now - syncRange.nanos
  let syncEnd = now

  return Timestamp(syncStart) .. Timestamp(syncEnd)

proc equalPartitioning*(slice: Slice[SyncID], count: int): seq[Slice[SyncID]] =
  ## Partition a range into `count` consecutive time slices.
  ## The remainder is distributed equally to the first slices.
  ## Returns an empty seq when the range is smaller than `count` or when
  ## `count` is not positive (guards the `div`/`mod` below against a
  ## division-by-zero Defect, previously reachable with count == 0).
  if count <= 0:
    return @[]

  let totalLength: int64 = slice.b.time - slice.a.time

  if totalLength < count:
    return @[]

  let parts = totalLength div count
  var rem = totalLength mod count

  var bounds = newSeqOfCap[Slice[SyncID]](count)

  var lb = slice.a.time

  for i in 0 ..< count:
    var ub = lb + parts

    # spread the remainder one nanosecond at a time over the first slices
    if rem > 0:
      ub += 1
      rem -= 1

    let lower = SyncID(time: lb, hash: EmptyFingerprint)
    let upper = SyncID(time: ub, hash: EmptyFingerprint)

    bounds.add(lower .. upper)

    # consecutive slices share their boundary point by design
    lb = ub

  return bounds

#TODO implement exponential partitioning
method length*(self: SeqStorage): int {.raises: [].} =
  ## Total number of message IDs currently stored.
  return self.elements.len

proc pubsubTopicsLen*(self: SeqStorage): int {.raises: [].} =
  return self.pubsubTopics.len

proc contentTopicsLen*(self: SeqStorage): int {.raises: [].} =
  return self.contentTopics.len

proc unusedPubsubTopicsLen*(self: SeqStorage): int {.raises: [].} =
  return self.unusedPubsubTopicSet.len

proc unusedContentTopicsLen*(self: SeqStorage): int {.raises: [].} =
  return self.unusedContentTopicSet.len

proc getPubsubTopicIndex(self: SeqStorage, pubsubTopic: PubsubTopic): int =
  ## Return the interned index for `pubsubTopic`: an existing index if known,
  ## a recycled unused slot if available, otherwise a freshly appended one.
  for i, knownTopic in self.pubsubTopics:
    if pubsubTopic == knownTopic:
      return i

  if self.unusedPubsubTopicSet.len > 0:
    let recycledIdx = self.unusedPubsubTopicSet.toSeq()[0]
    self.unusedPubsubTopicSet.excl(recycledIdx)
    self.pubsubTopics[recycledIdx] = pubsubTopic
    return recycledIdx

  self.pubsubTopics.add(pubsubTopic)
  return self.pubsubTopics.len - 1

proc getContentTopicIndex(self: SeqStorage, contentTopic: ContentTopic): int =
  ## Same interning scheme as getPubsubTopicIndex, for content topics.
  for i, knownTopic in self.contentTopics:
    if contentTopic == knownTopic:
      return i

  if self.unusedContentTopicSet.len > 0:
    let recycledIdx = self.unusedContentTopicSet.toSeq()[0]
    self.unusedContentTopicSet.excl(recycledIdx)
    self.contentTopics[recycledIdx] = contentTopic
    return recycledIdx

  self.contentTopics.add(contentTopic)
  return self.contentTopics.len - 1

proc insertAt(
    self: SeqStorage,
    idx: int,
    element: SyncID,
    pubsubTopic: PubsubTopic,
    contentTopic: ContentTopic,
) =
  ## Insert `element` at absolute position `idx`, keeping the three parallel
  ## seqs (elements / pubsub indexes / content indexes) in lockstep.
  ## Duplicates (same element already at `idx`) are silently ignored.
  if idx < self.elements.len and self.elements[idx] == element:
    # duplicate element, ignore
    return

  self.elements.insert(element, idx)

  let pubsubIndex = self.getPubsubTopicIndex(pubsubTopic)
  let contentIndex = self.getContentTopicIndex(contentTopic)

  self.pubsubTopicIndexes.insert(pubsubIndex, idx)
  self.contentTopicIndexes.insert(contentIndex, idx)

method insert*(
    self: SeqStorage,
    element: SyncID,
    pubsubTopic: PubsubTopic,
    contentTopic: ContentTopic,
): Result[void, string] {.raises: [].} =
  ## Insert one element at its sorted position.
  let idx = self.elements.lowerBound(element, common.cmp)
  self.insertAt(idx, element, pubsubTopic, contentTopic)

  return ok()

method batchInsert*(
    self: SeqStorage,
    elements: seq[SyncID],
    pubsubTopics: seq[PubsubTopic],
    contentTopics: seq[ContentTopic],
): Result[void, string] {.raises: [].} =
  ## Insert a sorted seq of new elements with their parallel topic seqs.
  if elements.len != pubsubTopics.len or elements.len != contentTopics.len:
    # explicit error instead of an IndexDefect crash on malformed input
    return err("input seqs length mismatch")

  if elements.len == 1:
    return self.insert(elements[0], pubsubTopics[0], contentTopics[0])

  if not elements.isSorted(common.cmp):
    return err("seq not sorted")

  var idx = 0
  for i in 0 ..< elements.len:
    # BUG FIX: lowerBound on a slice returns a slice-RELATIVE position; it was
    # previously used directly as the absolute insertion index, corrupting the
    # order of every element after the first. Offset by `idx` to make it
    # absolute. Searching from `idx` is valid because both seqs are sorted.
    idx += self.elements[idx ..< self.elements.len].lowerBound(elements[i], common.cmp)

    self.insertAt(idx, elements[i], pubsubTopics[i], contentTopics[i])

  return ok()
method prune*(self: SeqStorage, timestamp: Timestamp): int {.raises: [].} =
  ## Remove all elements before the timestamp.
  ## Returns the number of elements pruned.
  if self.elements.len == 0:
    return 0

  let bound = SyncID(time: timestamp, hash: EmptyWakuMessageHash)

  let idx = self.elements.lowerBound(bound, common.cmp)

  self.elements.delete(0 ..< idx)
  self.pubsubTopicIndexes.delete(0 ..< idx)
  self.contentTopicIndexes.delete(0 ..< idx)

  # Recompute which interned topic slots are no longer referenced so that
  # the get*TopicIndex procs can recycle them.
  var allContent: PackedSet[int]
  for i in 0 ..< self.contentTopics.len:
    allContent.incl(i)
  self.unusedContentTopicSet = allContent - self.contentTopicIndexes.toPackedSet()

  var allPubsub: PackedSet[int]
  for i in 0 ..< self.pubsubTopics.len:
    allPubsub.incl(i)
  self.unusedPubsubTopicSet = allPubsub - self.pubsubTopicIndexes.toPackedSet()

  return idx

proc computeFingerprintFromSlice(
    self: SeqStorage,
    sliceOpt: Option[Slice[int]],
    pubsubTopicSet: PackedSet[int],
    contentTopicSet: PackedSet[int],
): Fingerprint =
  ## XOR together the hashes of the given index range, restricted to the given
  ## topic-index filters (an empty set means "no filtering on that axis").
  ## (Casing fixed from `computefingerprintFromSlice`; Nim identifiers are
  ## style-insensitive past the first character, so existing callers resolve.)
  var fingerprint = EmptyFingerprint

  if sliceOpt.isNone():
    return fingerprint

  # Iterate indices directly instead of materializing three slice copies
  # (the previous version allocated elements/pubsub/content sub-seqs per call).
  for i in sliceOpt.get():
    if pubsubTopicSet.len > 0 and self.pubsubTopicIndexes[i] notin pubsubTopicSet:
      continue

    if contentTopicSet.len > 0 and self.contentTopicIndexes[i] notin contentTopicSet:
      continue

    fingerprint = fingerprint xor self.elements[i].hash

  return fingerprint

proc findIdxBounds(self: SeqStorage, slice: Slice[SyncID]): Option[Slice[int]] =
  ## Given ID bounds, find the corresponding index range in this storage.
  ## Returns none when the whole range falls outside our elements.
  let lower = self.elements.lowerBound(slice.a, common.cmp)
  var upper = self.elements.upperBound(slice.b, common.cmp)

  if upper < 1:
    # entire range is before any of our elements
    return none(Slice[int])

  if lower >= self.elements.len:
    # entire range is after any of our elements
    return none(Slice[int])

  # half-open: upper is one past the last matching element
  return some(lower ..< upper)

method computeFingerprint*(
    self: SeqStorage,
    bounds: Slice[SyncID],
    pubsubTopics: seq[PubsubTopic],
    contentTopics: seq[ContentTopic],
): Fingerprint {.raises: [].} =
  ## Fingerprint of the elements within `bounds`, filtered by the given topics.
  let idxSliceOpt = self.findIdxBounds(bounds)

  # translate topic strings to interned indexes; unknown topics simply
  # contribute nothing to the filter set
  var pubsubTopicSet = initPackedSet[int]()
  for inputTopic in pubsubTopics:
    for i, localTopic in self.pubsubTopics:
      if inputTopic == localTopic:
        pubsubTopicSet.incl(i)

  var contentTopicSet = initPackedSet[int]()
  for inputTopic in contentTopics:
    for i, localTopic in self.contentTopics:
      if inputTopic == localTopic:
        contentTopicSet.incl(i)

  return self.computeFingerprintFromSlice(idxSliceOpt, pubsubTopicSet, contentTopicSet)
slice: + if pubsubTopicSet.len > 0 and self.pubsubTopicIndexes[i] notin pubsubTopicSet: + continue + + if contentTopicSet.len > 0 and self.contentTopicIndexes[i] notin contentTopicSet: + continue + + self.elements[i] + + elements + +proc processFingerprintRange*( + self: SeqStorage, + inputBounds: Slice[SyncID], + pubsubTopicSet: PackedSet[int], + contentTopicSet: PackedSet[int], + inputFingerprint: Fingerprint, + output: var RangesData, +) {.raises: [].} = + ## Compares fingerprints and partition new ranges. + + let idxSlice = self.findIdxBounds(inputBounds) + let ourFingerprint = + self.computeFingerprintFromSlice(idxSlice, pubsubTopicSet, contentTopicSet) + + if ourFingerprint == inputFingerprint: + output.ranges.add((inputBounds, RangeType.Skip)) + return + + if idxSlice.isNone(): + output.ranges.add((inputBounds, RangeType.ItemSet)) + let state = ItemSet(elements: @[], reconciled: true) + output.itemSets.add(state) + return + + let slice = idxSlice.get() + + if slice.len <= self.lengthThreshold: + output.ranges.add((inputBounds, RangeType.ItemSet)) + let elements = self.getFilteredElements(slice, pubsubTopicSet, contentTopicSet) + let state = ItemSet(elements: elements, reconciled: false) + output.itemSets.add(state) + return + + let partitions = equalPartitioning(inputBounds, self.partitionCount) + for partitionBounds in partitions: + let sliceOpt = self.findIdxBounds(partitionBounds) + + if sliceOpt.isNone(): + output.ranges.add((partitionBounds, RangeType.ItemSet)) + let state = ItemSet(elements: @[], reconciled: true) + output.itemSets.add(state) + continue + + let slice = sliceOpt.get() + + if slice.len <= self.lengthThreshold: + output.ranges.add((partitionBounds, RangeType.ItemSet)) + let elements = self.getFilteredElements(slice, pubsubTopicSet, contentTopicSet) + let state = ItemSet(elements: elements, reconciled: false) + output.itemSets.add(state) + continue + + let fingerprint = + self.computeFingerprintFromSlice(some(slice), pubsubTopicSet, 
contentTopicSet) + output.ranges.add((partitionBounds, RangeType.Fingerprint)) + output.fingerprints.add(fingerprint) + continue + +proc processItemSetRange*( + self: SeqStorage, + inputBounds: Slice[SyncID], + pubsubTopicSet: PackedSet[int], + contentTopicSet: PackedSet[int], + inputItemSet: ItemSet, + hashToSend: var seq[Fingerprint], + hashToRecv: var seq[Fingerprint], + output: var RangesData, +) {.raises: [].} = + ## Compares item sets and outputs differences + + let idxSlice = self.findIdxBounds(inputBounds) + + if idxSlice.isNone(): + if not inputItemSet.reconciled: + output.ranges.add((inputBounds, RangeType.ItemSet)) + let state = ItemSet(elements: @[], reconciled: true) + output.itemSets.add(state) + else: + output.ranges.add((inputBounds, RangeType.Skip)) + + return + + let slice = idxSlice.get() + + var i = 0 + let n = inputItemSet.elements.len + + var j = slice.a + let m = slice.b + 1 + + while (j < m): + let ourElement = self.elements[j] + let pubsub = self.pubsubTopicIndexes[j] + let content = self.contentTopicIndexes[j] + + if pubsubTopicSet.len > 0 and pubsub notin pubsubTopicSet: + j.inc() + continue + + if contentTopicSet.len > 0 and content notin contentTopicSet: + j.inc() + continue + + if i >= n: + # in case we have more elements + hashToSend.add(ourElement.hash) + j.inc() + continue + + let theirElement = inputItemSet.elements[i] + + if theirElement < ourElement: + hashToRecv.add(theirElement.hash) + i.inc() + elif theirElement > ourElement: + hashToSend.add(ourElement.hash) + j.inc() + else: + i.inc() + j.inc() + + while (i < n): + # in case they have more elements + let theirElement = inputItemSet.elements[i] + i.inc() + hashToRecv.add(theirElement.hash) + + if not inputItemSet.reconciled: + output.ranges.add((inputBounds, RangeType.ItemSet)) + let elements = self.getFilteredElements(slice, pubsubTopicSet, contentTopicSet) + let state = ItemSet(elements: elements, reconciled: true) + output.itemSets.add(state) + else: + 
output.ranges.add((inputBounds, RangeType.Skip)) + +method processPayload*( + self: SeqStorage, + input: RangesData, + hashToSend: var seq[Fingerprint], + hashToRecv: var seq[Fingerprint], +): RangesData {.raises: [].} = + var output = RangesData() + + var + i = 0 + j = 0 + + var pubsubTopicSet = initPackedSet[int]() + for inputTopic in input.pubsubTopics: + for i, localTopic in self.pubsubTopics: + if inputTopic == localTopic: + pubsubTopicSet.incl(i) + + var contentTopicSet = initPackedSet[int]() + for inputTopic in input.contentTopics: + for i, localTopic in self.contentTopics: + if inputTopic == localTopic: + contentTopicSet.incl(i) + + for (bounds, rangeType) in input.ranges: + case rangeType + of RangeType.Skip: + output.ranges.add((bounds, RangeType.Skip)) + + continue + of RangeType.Fingerprint: + let fingerprint = input.fingerprints[i] + i.inc() + + self.processFingerprintRange( + bounds, pubsubTopicSet, contentTopicSet, fingerprint, output + ) + + continue + of RangeType.ItemSet: + let itemSet = input.itemsets[j] + j.inc() + + self.processItemSetRange( + bounds, pubsubTopicSet, contentTopicSet, itemSet, hashToSend, hashToRecv, output + ) + + continue + + # merge consecutive skip ranges + var allSkip = true + i = output.ranges.len - 1 + while i >= 0: + let currRange = output.ranges[i] + + if allSkip and currRange[1] != RangeType.Skip: + allSkip = false + + if i <= 0: + break + + let prevRange = output.ranges[i - 1] + + if currRange[1] != RangeType.Skip or prevRange[1] != RangeType.Skip: + i.dec() + continue + + let lb = prevRange[0].a + let ub = currRange[0].b + let newRange = (lb .. 
ub, RangeType.Skip) + + output.ranges.delete(i) + output.ranges[i - 1] = newRange + + i.dec() + + if allSkip: + output = RangesData() + + return output + +proc new*(T: type SeqStorage, capacity: int, threshold = 100, partitions = 8): T = + return SeqStorage( + elements: newSeqOfCap[SyncID](capacity), + lengthThreshold: threshold, + partitionCount: partitions, + ) + +proc new*( + T: type SeqStorage, + elements: seq[SyncID], + pubsubTopics: seq[PubsubTopic], + contentTopics: seq[ContentTopic], + threshold = 100, + partitions = 8, +): T = + var idx = 0 + var uniquePubsubTopics = initOrderedTable[PubsubTopic, int]() + for pubsub in pubsubTopics: + if pubsub notin uniquePubsubTopics: + uniquePubsubTopics[pubsub] = idx + idx.inc() + + let pubsubTopicIndexes = collect(newSeq): + for pubsub in pubsubTopics: + uniquePubsubTopics[pubsub] + + idx = 0 + var uniqueContentTopics = initOrderedTable[ContentTopic, int]() + for content in contentTopics: + if content notin uniqueContentTopics: + uniqueContentTopics[content] = idx + idx.inc() + + let contentTopicIndexes = collect(newSeq): + for content in contentTopics: + uniqueContentTopics[content] + + return SeqStorage( + elements: elements, + pubsubTopics: uniquePubsubTopics.keys.toSeq(), + contentTopics: uniqueContentTopics.keys.toSeq(), + pubsubTopicIndexes: pubsubTopicIndexes, + contentTopicIndexes: contentTopicIndexes, + lengthThreshold: threshold, + partitionCount: partitions, + ) diff --git a/third-party/nwaku/waku/waku_store_sync/storage/storage.nim b/third-party/nwaku/waku/waku_store_sync/storage/storage.nim new file mode 100644 index 0000000..4d22d01 --- /dev/null +++ b/third-party/nwaku/waku/waku_store_sync/storage/storage.nim @@ -0,0 +1,52 @@ +import results + +import + ../../waku_core/time, + ../../waku_core/topics/content_topic, + ../../waku_core/topics/pubsub_topic, + ../common + +type SyncStorage* = ref object of RootObj + +method insert*( + self: SyncStorage, element: SyncID, pubsubTopic: PubsubTopic, topic: 
ContentTopic +): Result[void, string] {.base, gcsafe, raises: [].} = + return err("insert method not implemented for SyncStorage") + +method batchInsert*( + self: SyncStorage, + elements: seq[SyncID], + pubsubTopics: seq[PubsubTopic], + contentTopics: seq[ContentTopic], +): Result[void, string] {.base, gcsafe, raises: [].} = + return err("batchInsert method not implemented for SyncStorage") + +method prune*( + self: SyncStorage, timestamp: Timestamp +): int {.base, gcsafe, raises: [].} = + -1 + +method computeFingerprint*( + self: SyncStorage, + bounds: Slice[SyncID], + pubsubTopics: seq[PubsubTopic], + contentTopics: seq[ContentTopic], +): Fingerprint {.base, gcsafe, raises: [].} = + return FullFingerprint + +method processPayload*( + self: SyncStorage, + input: RangesData, + hashToSend: var seq[Fingerprint], + hashToRecv: var seq[Fingerprint], +): RangesData {.base, gcsafe, raises: [].} = + return RangesData( + pubsubTopics: @["InsertPubsubTopicHere"], + contentTopics: @["InsertContentTopicHere"], + ranges: @[], + fingerprints: @[FullFingerprint], + itemSets: @[], + ) + +method length*(self: SyncStorage): int {.base, gcsafe, raises: [].} = + -1 diff --git a/third-party/nwaku/waku/waku_store_sync/transfer.nim b/third-party/nwaku/waku/waku_store_sync/transfer.nim new file mode 100644 index 0000000..f7f06bf --- /dev/null +++ b/third-party/nwaku/waku/waku_store_sync/transfer.nim @@ -0,0 +1,238 @@ +{.push raises: [].} + +import + std/[sets, tables], + results, + chronicles, + chronos, + metrics, + libp2p/utility, + libp2p/protocols/protocol, + libp2p/stream/connection, + libp2p/crypto/crypto, + eth/p2p/discoveryv5/enr +import + ../common/nimchronos, + ../common/protobuf, + ../waku_enr, + ../waku_core/codecs, + ../waku_core/topics/pubsub_topic, + ../waku_core/topics/content_topic, + ../waku_core/message/digest, + ../waku_core/message/message, + ../waku_core/message/default_values, + ../node/peer_manager/peer_manager, + ../waku_archive, + ../waku_archive/common, + 
./common, + ./codec, + ./protocols_metrics + +logScope: + topics = "waku transfer" + +type SyncTransfer* = ref object of LPProtocol + wakuArchive: WakuArchive + peerManager: PeerManager + + # Send IDs to reconciliation protocol for storage + idsTx: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)] + + # Receive Hashes from reconciliation protocol for reception + localWantsRx: AsyncQueue[PeerId] + localWantsRxFut: Future[void] + inSessions: HashSet[PeerId] + + # Receive Hashes from reconciliation protocol for transmission + remoteNeedsRx: AsyncQueue[(PeerId, WakuMessageHash)] + remoteNeedsRxFut: Future[void] + outSessions: Table[PeerId, Connection] + +proc sendMessage( + conn: Connection, payload: WakuMessageAndTopic +): Future[Result[void, string]] {.async.} = + let rawPayload = payload.encode().buffer + + total_bytes_exchanged.inc(rawPayload.len, labelValues = [Transfer, Sending]) + + let writeRes = catch: + await conn.writeLP(rawPayload) + + if writeRes.isErr(): + return + err("remote " & $conn.peerId & " connection write error: " & writeRes.error.msg) + + total_transfer_messages_exchanged.inc(labelValues = [Sending]) + + return ok() + +proc openConnection( + self: SyncTransfer, peerId: PeerId +): Future[Result[Connection, string]] {.async.} = + let connOpt = await self.peerManager.dialPeer(peerId, WakuTransferCodec) + + let conn: Connection = connOpt.valueOr: + return err("fail to dial remote " & $peerId) + + debug "transfer session initialized", + local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId + + return ok(conn) + +proc wantsReceiverLoop(self: SyncTransfer) {.async.} = + ## Waits for peer ids of nodes + ## we are reconciliating with + + while true: # infinite loop + let peerId = await self.localWantsRx.popFirst() + + if self.inSessions.containsOrIncl(peerId): + self.inSessions.excl(peerId) + + return + +proc needsReceiverLoop(self: SyncTransfer) {.async.} = + ## Waits for message hashes, + ## open connection to the other peers, + ## get 
the messages from DB and then send them. + + while true: # infinite loop + let (peerId, fingerprint) = await self.remoteNeedsRx.popFirst() + + if (not self.outSessions.hasKey(peerId)) or self.outSessions[peerId].closed() or + ## sanity check, should not be possible + self.outSessions[peerId].isClosedRemotely: + ## quite possibly remote end has closed the connection, believing transfer to be done + debug "opening transfer connection to remote peer", + my_peer_id = self.peerManager.switch.peerInfo.peerId, remote_peer_id = peerId + + let connection = (await self.openConnection(peerId)).valueOr: + error "failed to establish transfer connection", error = error + continue + + self.outSessions[peerid] = connection + + let connection = self.outSessions[peerId] + + var query = ArchiveQuery() + query.includeData = true + query.hashes = @[fingerprint] + + let response = (await self.wakuArchive.findMessages(query)).valueOr: + error "failed to query archive", error = error + continue + + if response.messages.len < 1: + error "failed to fetch message from db" + continue + + let msg = + WakuMessageAndTopic(pubsub: response.topics[0], message: response.messages[0]) + + trace "sending transfer message", + my_peer_id = self.peerManager.switch.peerInfo.peerId, + remote_peer_id = peerId, + msg = msg + + (await sendMessage(connection, msg)).isOkOr: + self.outSessions.del(peerId) + await connection.close() + error "failed to send message", error = error + continue + + return + +proc initProtocolHandler(self: SyncTransfer) = + proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = + while true: + ## removed DOS prototection until we can design something better + #[ if not self.inSessions.contains(conn.peerId): + error "unwanted peer, disconnecting", remote = conn.peerId + break ]# + + let readRes = catch: + await conn.readLp(int64(DefaultMaxWakuMessageSize)) + + let buffer: seq[byte] = readRes.valueOr: + # connection closed normally + break + + 
total_bytes_exchanged.inc(buffer.len, labelValues = [Transfer, Receiving]) + + let payload = WakuMessageAndTopic.decode(buffer).valueOr: + error "decoding error", error = $error + continue + + total_transfer_messages_exchanged.inc(labelValues = [Receiving]) + + let msg = payload.message + let pubsub = payload.pubsub + + let hash = computeMessageHash(pubsub, msg) + + try: + #TODO verify msg RLN proof... + (await self.wakuArchive.syncMessageIngress(hash, pubsub, msg)).isOkOr: + error "failed to archive message", error = $error + continue + except CatchableError: + error "syncMessageIngress failed", + remote_peer_id = conn.peerId, error = getCurrentExceptionMsg() + continue + + let id = SyncID(time: msg.timestamp, hash: hash) + + await self.idsTx.addLast((id, pubsub, msg.contentTopic)) + + continue + + await conn.close() + + debug "transfer session ended", + local = self.peerManager.switch.peerInfo.peerId, remote = conn.peerId + + return + + self.handler = handler + self.codec = WakuTransferCodec + +proc new*( + T: type SyncTransfer, + peerManager: PeerManager, + wakuArchive: WakuArchive, + idsTx: AsyncQueue[(SyncID, PubsubTopic, ContentTopic)], + localWantsRx: AsyncQueue[PeerId], + remoteNeedsRx: AsyncQueue[(PeerId, WakuMessageHash)], +): T = + var transfer = SyncTransfer( + peerManager: peerManager, + wakuArchive: wakuArchive, + idsTx: idsTx, + localWantsRx: localWantsRx, + remoteNeedsRx: remoteNeedsRx, + ) + + transfer.initProtocolHandler() + + info "Store Transfer protocol initialized" + + return transfer + +proc start*(self: SyncTransfer) = + if self.started: + return + + self.started = true + + self.localWantsRxFut = self.wantsReceiverLoop() + self.remoteNeedsRxFut = self.needsReceiverLoop() + + info "Store Sync Transfer protocol started" + +proc stop*(self: SyncTransfer) = + self.started = false + + self.localWantsRxFut.cancelSoon() + self.remoteNeedsRxFut.cancelSoon() + + info "Store Sync Transfer protocol stopped"