mirror of
https://github.com/logos-messaging/logos-messaging-go-bindings.git
synced 2026-01-03 06:23:09 +00:00
Merge commit '2f6de9187f2962e717ef9edcc4ae549461dd515f' as 'third-party/nwaku'
This commit is contained in:
commit
e0e1dbcba6
9
third-party/nwaku/.dockerignore
vendored
Normal file
9
third-party/nwaku/.dockerignore
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
/README.md
|
||||
/Dockerfile
|
||||
/.*ignore
|
||||
/LICENSE*
|
||||
/tests
|
||||
/metrics
|
||||
/nimcache
|
||||
librln*
|
||||
**/vendor/*
|
||||
18
third-party/nwaku/.editorconfig
vendored
Normal file
18
third-party/nwaku/.editorconfig
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
root = true
|
||||
|
||||
[*]
|
||||
charset = utf-8
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
end_of_line = lf
|
||||
trim_trailing_whitespace = true
|
||||
insert_final_newline = true
|
||||
|
||||
|
||||
[{Makefile, *.sh}]
|
||||
indent_style = tab
|
||||
|
||||
# Trailing spaces in markdown indicate word wrap
|
||||
[{*.markdown,*.md}]
|
||||
trim_trailing_spaces = false
|
||||
max_line_length = 80
|
||||
34
third-party/nwaku/.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
34
third-party/nwaku/.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Report any bugs or unexpected behavior
|
||||
title: 'bug: '
|
||||
labels: bug, track:maintenance
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Problem
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
### Impact
|
||||
Indicate how significant you believe the impact of the bug is. Bugs that lead to data loss or corruption would be considered `critical`. In such cases, please also add the `critical` label.
|
||||
|
||||
### To reproduce
|
||||
If you can reproduce the behavior, steps to reproduce:
|
||||
1. Go to '...'
|
||||
2. Click on '....'
|
||||
3. Scroll down to '....'
|
||||
4. See error
|
||||
|
||||
### Expected behavior
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
### Screenshots/logs
|
||||
If applicable, add screenshots or logs to help explain your problem.
|
||||
|
||||
### nwaku version/commit hash
|
||||
State the version of `nwaku` where you've encountered the bug or, if built off a specific commit, the relevant commit hash. You can check the version by running `./wakunode2 --version`.
|
||||
- e.g. `v0.9` or `ed53bcd`
|
||||
|
||||
### Additional context
|
||||
Add any other context about the problem here.
|
||||
48
third-party/nwaku/.github/ISSUE_TEMPLATE/bump_dependencies.md
vendored
Normal file
48
third-party/nwaku/.github/ISSUE_TEMPLATE/bump_dependencies.md
vendored
Normal file
@ -0,0 +1,48 @@
|
||||
---
|
||||
name: Bump dependencies
|
||||
about: Bump vendor dependencies for release
|
||||
title: 'Bump vendor dependencies for release 0.0.0'
|
||||
labels: dependencies
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!-- Add appropriate release number to title! -->
|
||||
|
||||
Update `nwaku` "vendor" dependencies.
|
||||
|
||||
### Items to bump
|
||||
- [ ] dnsclient.nim ( update to the latest tag version )
|
||||
- [ ] nim-bearssl
|
||||
- [ ] nimbus-build-system
|
||||
- [ ] nim-chronicles
|
||||
- [ ] nim-chronos
|
||||
- [ ] nim-confutils
|
||||
- [ ] nimcrypto
|
||||
- [ ] nim-dnsdisc
|
||||
- [ ] nim-eth
|
||||
- [ ] nim-faststreams
|
||||
- [ ] nim-http-utils
|
||||
- [ ] nim-json-rpc
|
||||
- [ ] nim-json-serialization
|
||||
- [ ] nim-libbacktrace
|
||||
- [ ] nim-libp2p ( update to the latest tag version )
|
||||
- [ ] nim-metrics
|
||||
- [ ] nim-nat-traversal
|
||||
- [ ] nim-presto
|
||||
- [ ] nim-regex ( update to the latest tag version )
|
||||
- [ ] nim-results
|
||||
- [ ] nim-secp256k1
|
||||
- [ ] nim-serialization
|
||||
- [ ] nim-sqlite3-abi ( update to the latest tag version )
|
||||
- [ ] nim-stew
|
||||
- [ ] nim-stint
|
||||
- [ ] nim-taskpools ( update to the latest tag version )
|
||||
- [ ] nim-testutils ( update to the latest tag version )
|
||||
- [ ] nim-toml-serialization
|
||||
- [ ] nim-unicodedb
|
||||
- [ ] nim-unittest2 ( update to the latest tag version )
|
||||
- [ ] nim-web3 ( update to the latest tag version )
|
||||
- [ ] nim-websock ( update to the latest tag version )
|
||||
- [ ] nim-zlib
|
||||
- [ ] zerokit ( this should be kept in version `v0.7.0` )
|
||||
26
third-party/nwaku/.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
26
third-party/nwaku/.github/ISSUE_TEMPLATE/feature_request.md
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
---
|
||||
name: Feature request
|
||||
about: Suggest an idea for the nwaku implementation
|
||||
title: 'feat: '
|
||||
labels: track:production
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Problem
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
### Suggested solution
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
### Alternatives considered
|
||||
A clear and concise description of any alternative solutions or features you've considered.
|
||||
|
||||
### Additional context
|
||||
Add any other context or screenshots about the feature request here.
|
||||
|
||||
### Acceptance criteria
|
||||
A list of tasks that need to be done for the issue to be considered resolved.
|
||||
|
||||
### Epic
|
||||
Epic title and link the feature refers to.
|
||||
17
third-party/nwaku/.github/ISSUE_TEMPLATE/improvement.md
vendored
Normal file
17
third-party/nwaku/.github/ISSUE_TEMPLATE/improvement.md
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
---
|
||||
name: Improvement
|
||||
about: Suggest improvements to the codebase or processes. This includes refactoring,
|
||||
docs and any other chores.
|
||||
title: 'chore:'
|
||||
labels: track:maintenance
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
### Background
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]; There is a spelling error in [...]; It's difficult to read the code in module [...]
|
||||
|
||||
### Details
|
||||
A clear and concise description of what you want to happen.
|
||||
|
||||
### Acceptance criteria
|
||||
A list of tasks that need to be done for the issue to be considered resolved.
|
||||
41
third-party/nwaku/.github/ISSUE_TEMPLATE/milestone.md
vendored
Normal file
41
third-party/nwaku/.github/ISSUE_TEMPLATE/milestone.md
vendored
Normal file
@ -0,0 +1,41 @@
|
||||
---
|
||||
name: Milestone Issue Template
|
||||
about: Track Milestones
|
||||
title: "[Milestone] "
|
||||
labels: milestone
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!-- Please ensure you are assigning the matching epic label to the milestone -->
|
||||
<!-- All _active_ (being worked on) milestones MUST have an owner (GitHub assignee) -->
|
||||
|
||||
**Planned start date**:
|
||||
**Due date**:
|
||||
|
||||
# Summary
|
||||
|
||||
# Acceptance Criteria
|
||||
|
||||
<!-- describe the deliverable of this milestone and its attributes in plain English -->
|
||||
|
||||
## Tasks
|
||||
|
||||
<!--
|
||||
|
||||
Breakdown of the work
|
||||
|
||||
- [ ] Task 1
|
||||
- [ ] Link to GitHub issue tracking task 2
|
||||
|
||||
-->
|
||||
|
||||
# RAID (Risks, Assumptions, Issues and Dependencies)
|
||||
|
||||
<!-- List dependencies on other milestones (avoid dependencies on tasks) -->
|
||||
|
||||
<!-- List dependencies on other teams -->
|
||||
|
||||
<!-- List any risks or assumptions that will be cleared as work progresses -->
|
||||
|
||||
<!-- List any GitHub issues that tracks any blocker or any of the items above -->
|
||||
72
third-party/nwaku/.github/ISSUE_TEMPLATE/prepare_release.md
vendored
Normal file
72
third-party/nwaku/.github/ISSUE_TEMPLATE/prepare_release.md
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
---
|
||||
name: Prepare release
|
||||
about: Execute tasks for the creation and publishing of a new release
|
||||
title: 'Prepare release 0.0.0'
|
||||
labels: release
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
Add appropriate release number to title!
|
||||
|
||||
For detailed info on the release process refer to https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md
|
||||
-->
|
||||
|
||||
### Items to complete
|
||||
|
||||
All items below are to be completed by the owner of the given release.
|
||||
|
||||
- [ ] Create release branch
|
||||
- [ ] Assign release candidate tag to the release branch HEAD. e.g. v0.30.0-rc.0
|
||||
- [ ] Generate and edit releases notes in CHANGELOG.md
|
||||
- [ ] Review possible update of [config-options](https://github.com/waku-org/docs.waku.org/blob/develop/docs/guides/nwaku/config-options.md)
|
||||
- [ ] _End user impact_: Summarize impact of changes on Status end users (can be a comment in this issue).
|
||||
- [ ] **Validate release candidate**
|
||||
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/waku-org/waku-rust-bindings) and make sure all examples and tests work
|
||||
|
||||
- [ ] Automated testing
|
||||
- [ ] Ensures js-waku tests are green against release candidate
|
||||
- [ ] Ask Vac-QA and Vac-DST to perform available tests against release candidate
|
||||
- [ ] Vac-QA
|
||||
- [ ] Vac-DST (we need additional report. see [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f))
|
||||
|
||||
- [ ] **On Waku fleets**
|
||||
- [ ] Lock `waku.test` fleet to release candidate version
|
||||
- [ ] Continuously stress `waku.test` fleet for a week (e.g. from `wakudev`)
|
||||
- [ ] Search _Kibana_ logs from the previous month (since last release was deployed), for possible crashes or errors in `waku.test` and `waku.sandbox`.
|
||||
- Most relevant logs are `(fleet: "waku.test" OR fleet: "waku.sandbox") AND message: "SIGSEGV"`
|
||||
- [ ] Run release candidate with `waku-simulator`, ensure that nodes connected to each other
|
||||
- [ ] Unlock `waku.test` to resume auto-deployment of latest `master` commit
|
||||
|
||||
- [ ] **On Status fleet**
|
||||
- [ ] Deploy release candidate to `status.staging`
|
||||
- [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue.
|
||||
- [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client.
|
||||
- [ ] 1:1 Chats with each other
|
||||
- [ ] Send and receive messages in a community
|
||||
- [ ] Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store
|
||||
- [ ] Perform checks based _end user impact_
|
||||
- [ ] Inform other (Waku and Status) CCs to point their instance to `status.staging` for a few days. Ping Status colleagues from their Discord server or [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (not blocking point.)
|
||||
- [ ] Ask Status-QA to perform sanity checks (as described above) + checks based on _end user impact_; do specify the version being tested
|
||||
- [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`
|
||||
- [ ] Get other CCs sign-off: they comment on this PR "used app for a week, no problem", or problem reported, resolved and new RC
|
||||
- [ ] **Get Status-QA sign-off**. Ensuring that `status.test` update will not disturb ongoing activities.
|
||||
|
||||
- [ ] **Proceed with release**
|
||||
|
||||
- [ ] Assign a release tag to the same commit that contains the validated release-candidate tag
|
||||
- [ ] Create GitHub release
|
||||
- [ ] Deploy the release to DockerHub
|
||||
- [ ] Announce the release
|
||||
|
||||
- [ ] **Promote release to fleets**.
|
||||
- [ ] Update infra config with any deprecated arguments or changed options
|
||||
- [ ] [Deploy final release to `waku.sandbox` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox)
|
||||
- [ ] [Deploy final release to `status.staging` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-staging/)
|
||||
- [ ] [Deploy final release to `status.prod` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-test/)
|
||||
|
||||
- [ ] **Post release**
|
||||
- [ ] Submit a PR from the release branch to master. Important to commit the PR with "create a merge commit" option.
|
||||
- [ ] Update waku-org/nwaku-compose with the new release version.
|
||||
- [ ] Update version in js-waku repo. [update only this](https://github.com/waku-org/js-waku/blob/7c0ce7b2eca31cab837da0251e1e4255151be2f7/.github/workflows/ci.yml#L135) by submitting a PR.
|
||||
19
third-party/nwaku/.github/ISSUE_TEMPLATE/research-related-issue.md
vendored
Normal file
19
third-party/nwaku/.github/ISSUE_TEMPLATE/research-related-issue.md
vendored
Normal file
@ -0,0 +1,19 @@
|
||||
---
|
||||
name: Research-related issue
|
||||
about: Use this template if your issue is related to any Vac research tracks
|
||||
title: 'research:'
|
||||
labels: ''
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
### Problem
|
||||
|
||||
### Acceptance criteria
|
||||
|
||||
### Details
|
||||
|
||||
### Possible Solutions
|
||||
|
||||
### Research track
|
||||
Indicate the Vac research track that this issue relates to. Please also add the relevant track as a label.
|
||||
8
third-party/nwaku/.github/pull_request_template.md
vendored
Normal file
8
third-party/nwaku/.github/pull_request_template.md
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
|
||||
## Description
|
||||
|
||||
## Changes
|
||||
|
||||
## Issue
|
||||
|
||||
closes #
|
||||
12
third-party/nwaku/.github/workflows/auto_assign_pr.yml
vendored
Normal file
12
third-party/nwaku/.github/workflows/auto_assign_pr.yml
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
name: Auto Assign PR to Creator
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types:
|
||||
- opened
|
||||
|
||||
jobs:
|
||||
assign_creator:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: toshimaru/auto-author-assign@v1.6.2
|
||||
191
third-party/nwaku/.github/workflows/ci.yml
vendored
Normal file
191
third-party/nwaku/.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
name: ci
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
NPROC: 2
|
||||
MAKEFLAGS: "-j${NPROC}"
|
||||
NIMFLAGS: "--parallelBuild:${NPROC} --colors:off -d:chronicles_colors:none"
|
||||
|
||||
jobs:
|
||||
changes: # changes detection
|
||||
runs-on: ubuntu-22.04
|
||||
permissions:
|
||||
pull-requests: read
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
name: Checkout code
|
||||
id: checkout
|
||||
- uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
common:
|
||||
- '.github/workflows/**'
|
||||
- 'vendor/**'
|
||||
- 'Makefile'
|
||||
- 'waku.nimble'
|
||||
- 'library/**'
|
||||
v2:
|
||||
- 'waku/**'
|
||||
- 'apps/**'
|
||||
- 'tools/**'
|
||||
- 'tests/all_tests_v2.nim'
|
||||
- 'tests/**'
|
||||
docker:
|
||||
- 'docker/**'
|
||||
|
||||
outputs:
|
||||
common: ${{ steps.filter.outputs.common }}
|
||||
v2: ${{ steps.filter.outputs.v2 }}
|
||||
docker: ${{ steps.filter.outputs.docker }}
|
||||
|
||||
build:
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: build-${{ matrix.os }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Build binaries
|
||||
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
|
||||
|
||||
build-windows:
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
|
||||
uses: ./.github/workflows/windows-build.yml
|
||||
with:
|
||||
branch: ${{ github.ref }}
|
||||
|
||||
test:
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: test-${{ matrix.os }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
postgres_enabled=0
|
||||
if [ ${{ runner.os }} == "Linux" ]; then
|
||||
sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18
|
||||
postgres_enabled=1
|
||||
fi
|
||||
|
||||
export MAKEFLAGS="-j1"
|
||||
export NIMFLAGS="--colors:off -d:chronicles_colors:none"
|
||||
export USE_LIBBACKTRACE=0
|
||||
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled testwakunode2
|
||||
|
||||
build-docker-image:
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }}
|
||||
uses: waku-org/nwaku/.github/workflows/container-image.yml@master
|
||||
secrets: inherit
|
||||
|
||||
nwaku-nwaku-interop-tests:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/waku-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1
|
||||
with:
|
||||
node_nwaku: ${{ needs.build-docker-image.outputs.image }}
|
||||
|
||||
secrets: inherit
|
||||
|
||||
js-waku-node:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node
|
||||
|
||||
js-waku-node-optional:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node-optional
|
||||
|
||||
lint:
|
||||
name: "Lint"
|
||||
runs-on: ubuntu-22.04
|
||||
needs: build
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Build nph
|
||||
run: |
|
||||
make build-nph
|
||||
|
||||
- name: Check nph formatting
|
||||
run: |
|
||||
shopt -s extglob # Enable extended globbing
|
||||
NPH=$(make print-nph-path)
|
||||
echo "using nph at ${NPH}"
|
||||
"${NPH}" examples waku tests tools apps *.@(nim|nims|nimble)
|
||||
git diff --exit-code
|
||||
99
third-party/nwaku/.github/workflows/container-image.yml
vendored
Normal file
99
third-party/nwaku/.github/workflows/container-image.yml
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
name: container-image-build
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
image_tag:
|
||||
type: string
|
||||
default: ${{ github.event.number }}
|
||||
outputs:
|
||||
image:
|
||||
description: The resulting image link
|
||||
value: ${{ jobs.build-docker-image.outputs.image }}
|
||||
|
||||
env:
|
||||
NPROC: 2
|
||||
MAKEFLAGS: "-j${NPROC}"
|
||||
NIMFLAGS: "--parallelBuild:${NPROC}"
|
||||
|
||||
# This workflow should not run for outside contributors
|
||||
# If org secrets are not available, we'll avoid building and publishing the docker image and we'll pass the workflow
|
||||
jobs:
|
||||
build-docker-image:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: docker-build-${{ matrix.os }}
|
||||
outputs:
|
||||
image: ${{ steps.build.outputs.image }}
|
||||
steps:
|
||||
- name: Check secrets
|
||||
id: secrets
|
||||
continue-on-error: true
|
||||
run: |
|
||||
if [[ -z "$QUAY_PASSWORD" || -z "$QUAY_USER" ]]; then
|
||||
echo "User does not have access to secrets, skipping workflow"
|
||||
exit 1
|
||||
fi
|
||||
env:
|
||||
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
|
||||
QUAY_USER: ${{ secrets.QUAY_USER }}
|
||||
|
||||
- name: Checkout code
|
||||
if: ${{ steps.secrets.outcome == 'success' }}
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
if: ${{ steps.secrets.outcome == 'success' }}
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
if: ${{ steps.secrets.outcome == 'success' }}
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Build binaries
|
||||
id: build
|
||||
if: ${{ steps.secrets.outcome == 'success' }}
|
||||
run: |
|
||||
|
||||
make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" wakunode2
|
||||
|
||||
SHORT_REF=$(git rev-parse --short HEAD)
|
||||
|
||||
TAG=$([ "${PR_NUMBER}" == "" ] && echo "${SHORT_REF}" || echo "${PR_NUMBER}")
|
||||
IMAGE=quay.io/wakuorg/nwaku-pr:${TAG}
|
||||
|
||||
echo "image=${IMAGE}" >> $GITHUB_OUTPUT
|
||||
echo "commit_hash=$(git rev-parse HEAD)" >> $GITHUB_OUTPUT
|
||||
|
||||
docker login -u ${QUAY_USER} -p ${QUAY_PASSWORD} quay.io
|
||||
docker build -t ${IMAGE} -f docker/binaries/Dockerfile.bn.amd64 --label quay.expires-after=30d .
|
||||
docker push ${IMAGE}
|
||||
env:
|
||||
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
|
||||
QUAY_USER: ${{ secrets.QUAY_USER }}
|
||||
PR_NUMBER: ${{ inputs.image_tag}}
|
||||
|
||||
- name: Comment PR
|
||||
uses: thollander/actions-comment-pull-request@v2
|
||||
if: ${{ github.event_name == 'pull_request' && steps.secrets.outcome == 'success' }}
|
||||
with:
|
||||
message: |
|
||||
You can find the image built from this PR at
|
||||
|
||||
```
|
||||
${{steps.build.outputs.image}}
|
||||
```
|
||||
|
||||
Built from ${{ steps.build.outputs.commit_hash }}
|
||||
comment_tag: execution-rln-v${{ matrix.rln_version }}
|
||||
54
third-party/nwaku/.github/workflows/pr-lint.yml
vendored
Normal file
54
third-party/nwaku/.github/workflows/pr-lint.yml
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
name: "Lint PR"
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
types:
|
||||
- opened
|
||||
- edited
|
||||
- synchronize
|
||||
|
||||
jobs:
|
||||
labels:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
name: Checkout code
|
||||
id: checkout
|
||||
- uses: dorny/paths-filter@v2
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
config:
|
||||
- 'apps/wakunode2/external_config.nim'
|
||||
- 'apps/networkmonitor/networkmonitor_config.nim'
|
||||
- 'apps/chat2/config_chat2.nim'
|
||||
- 'apps/chat2bridge/config_chat2bridge.nim'
|
||||
|
||||
db_schema:
|
||||
- 'waku/waku_archive/driver/postgres_driver/postgres_driver.nim'
|
||||
- 'waku/waku_archive/driver/sqlite_driver/queries.nim'
|
||||
- name: Comment config change
|
||||
uses: thollander/actions-comment-pull-request@v2
|
||||
if: ${{steps.filter.outputs.config == 'true'}}
|
||||
with:
|
||||
message: |
|
||||
This PR may contain changes to **configuration options** of one of the apps.
|
||||
|
||||
If you are introducing a breaking change (i.e. the set of options in latest release would no longer be applicable) make sure the original option is preserved with a *deprecation* note for 2 following releases before it is actually removed.
|
||||
|
||||
Please also make sure the label `release-notes` is added to make sure any changes to the user interface are properly announced in changelog and release notes.
|
||||
comment_tag: configs
|
||||
|
||||
- name: Comment DB schema change
|
||||
uses: thollander/actions-comment-pull-request@v2
|
||||
if: ${{steps.filter.outputs.db_schema == 'true'}}
|
||||
with:
|
||||
header: pr-title-lint-error
|
||||
message: |
|
||||
This PR may contain changes to **database schema** of one of the drivers.
|
||||
|
||||
If you are introducing any changes to the schema, make sure the upgrade from the latest release to this change passes without any errors/issues.
|
||||
|
||||
Please make sure the label `release-notes` is added to make sure upgrade instructions properly highlight this change.
|
||||
comment_tag: db_schema
|
||||
163
third-party/nwaku/.github/workflows/pre-release.yml
vendored
Normal file
163
third-party/nwaku/.github/workflows/pre-release.yml
vendored
Normal file
@ -0,0 +1,163 @@
|
||||
name: Pre-Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*-rc.*'
|
||||
schedule:
|
||||
- cron: 13 3 * * *
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
RELEASE_NAME: nightly
|
||||
|
||||
NPROC: 2
|
||||
MAKEFLAGS: "-j${NPROC}"
|
||||
NIMFLAGS: "--parallelBuild:${NPROC}"
|
||||
|
||||
jobs:
|
||||
tag-name:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Vars
|
||||
id: vars
|
||||
run: |
|
||||
TAG=$([[ "${{github.ref}}" == "refs/heads/master" ]] && echo "${{env.RELEASE_NAME}}" || echo ${{github.ref}} | sed 's#refs/tags/##')
|
||||
echo "tag=${TAG}" >> $GITHUB_OUTPUT
|
||||
outputs:
|
||||
tag: ${{steps.vars.outputs.tag}}
|
||||
|
||||
build-and-publish:
|
||||
needs: tag-name
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
arch: [amd64]
|
||||
include:
|
||||
- os: macos-13
|
||||
arch: arm64
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: prep variables
|
||||
id: vars
|
||||
run: |
|
||||
ARCH=${{matrix.arch}}
|
||||
|
||||
echo "arch=${ARCH}" >> $GITHUB_OUTPUT
|
||||
|
||||
NWAKU_ARTIFACT_NAME=$(echo "nwaku-${ARCH}-${{runner.os}}-${{ needs.tag-name.outputs.tag }}.tar.gz" | tr "[:upper:]" "[:lower:]")
|
||||
NWAKU_TOOLS_ARTIFACT_NAME=$(echo "nwaku-tools-${ARCH}-${{runner.os}}-${{ needs.tag-name.outputs.tag }}.tar.gz" | tr "[:upper:]" "[:lower:]")
|
||||
|
||||
echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
echo "nwakutools=${NWAKU_TOOLS_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
|
||||
- name: build artifacts
|
||||
id: build
|
||||
run: |
|
||||
OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux")
|
||||
|
||||
make QUICK_AND_DIRTY_COMPILER=1 V=1 CI=false NIMFLAGS="-d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" \
|
||||
update
|
||||
|
||||
make QUICK_AND_DIRTY_COMPILER=1 V=1 CI=false\
|
||||
NIMFLAGS="-d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" \
|
||||
wakunode2\
|
||||
chat2\
|
||||
tools
|
||||
|
||||
tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/wakunode2 ./build/chat2
|
||||
tar -cvzf ${{steps.vars.outputs.nwakutools}} ./build/wakucanary ./build/networkmonitor
|
||||
|
||||
- name: upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: wakunode2
|
||||
path: ${{steps.vars.outputs.nwaku}}
|
||||
retention-days: 2
|
||||
|
||||
- name: upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: wakutools
|
||||
path: ${{steps.vars.outputs.nwakutools}}
|
||||
retention-days: 2
|
||||
|
||||
build-docker-image:
|
||||
needs: tag-name
|
||||
uses: waku-org/nwaku/.github/workflows/container-image.yml@master
|
||||
with:
|
||||
image_tag: ${{ needs.tag-name.outputs.tag }}
|
||||
secrets: inherit
|
||||
|
||||
js-waku-node:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node
|
||||
debug: waku*
|
||||
|
||||
js-waku-node-optional:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node-optional
|
||||
debug: waku*
|
||||
|
||||
create-release-candidate:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [ tag-name, build-and-publish ]
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
ref: master
|
||||
|
||||
- name: download artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
|
||||
- name: prep variables
|
||||
id: vars
|
||||
run: |
|
||||
REF=$(echo ${{github.ref}} | sed 's#.*/##')
|
||||
|
||||
echo "ref=${REF}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: generate release notes
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
set -x
|
||||
gh release view ${{ needs.tag-name.outputs.tag }} &>/dev/null &&\
|
||||
gh release delete -y ${{ needs.tag-name.outputs.tag }} &&\
|
||||
[[ "${{ needs.tag-name.outputs.tag }}" == "nightly" ]] && git tag -d ${{ needs.tag-name.outputs.tag }}
|
||||
|
||||
RELEASE_NOTES_TAG=$([[ "${{ needs.tag-name.outputs.tag }}" != "nightly" ]] && echo "-t ${{steps.vars.outputs.ref}}" || echo "")
|
||||
|
||||
docker run \
|
||||
-t \
|
||||
--rm \
|
||||
-v ${PWD}:/opt/sv4git/repo:z \
|
||||
-u $(id -u) \
|
||||
docker.io/wakuorg/sv4git:latest \
|
||||
release-notes ${RELEASE_NOTES_TAG} --previous $(git tag -l --sort -creatordate | grep -e "^v[0-9]*\.[0-9]*\.[0-9]*$") |\
|
||||
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' > release_notes.md
|
||||
|
||||
sed -i "s/^## .*/Generated at $(date)/" release_notes.md
|
||||
|
||||
cat release_notes.md
|
||||
|
||||
TARGET=$([[ "${{ needs.tag-name.outputs.tag }}" == "nightly" ]] && echo "--target ${{steps.vars.outputs.ref}}" || echo "")
|
||||
|
||||
gh release create ${{ needs.tag-name.outputs.tag }} --prerelease ${TARGET} \
|
||||
--title ${{ needs.tag-name.outputs.tag }} --notes-file release_notes.md \
|
||||
wakunode2/* wakutools/*
|
||||
65
third-party/nwaku/.github/workflows/release-assets.yml
vendored
Normal file
65
third-party/nwaku/.github/workflows/release-assets.yml
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
name: Upload Release Asset
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*' # "e.g. v0.4"
|
||||
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
NPROC: 2
|
||||
|
||||
jobs:
|
||||
build-and-upload:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
arch: [amd64]
|
||||
include:
|
||||
- os: macos-13
|
||||
arch: arm64
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
|
||||
name: ${{ matrix.os }} - ${{ matrix.arch }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Get submodules hash
|
||||
id: submodules
|
||||
run: |
|
||||
echo "hash=$(git submodule status | awk '{print $1}' | sort | shasum -a 256 | sed 's/[ -]*//g')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Cache submodules
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
vendor/
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: prep variables
|
||||
id: vars
|
||||
run: |
|
||||
NWAKU_ARTIFACT_NAME=$(echo "nwaku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
|
||||
|
||||
echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux")
|
||||
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2
|
||||
tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/
|
||||
|
||||
- name: Upload asset
|
||||
uses: actions/upload-artifact@v4.4.0
|
||||
with:
|
||||
name: ${{steps.vars.outputs.nwaku}}
|
||||
path: ${{steps.vars.outputs.nwaku}}
|
||||
if-no-files-found: error
|
||||
17
third-party/nwaku/.github/workflows/sync-labels.yml
vendored
Normal file
17
third-party/nwaku/.github/workflows/sync-labels.yml
vendored
Normal file
@ -0,0 +1,17 @@
|
||||
name: Sync labels
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- .github/labels.yml
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: micnncim/action-label-syncer@v1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
with:
|
||||
manifest: .github/labels.yml
|
||||
104
third-party/nwaku/.github/workflows/windows-build.yml
vendored
Normal file
104
third-party/nwaku/.github/workflows/windows-build.yml
vendored
Normal file
@ -0,0 +1,104 @@
|
||||
name: ci / build-windows
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
branch:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: windows-latest
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: msys2 {0}
|
||||
|
||||
env:
|
||||
MSYSTEM: MINGW64
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Setup MSYS2
|
||||
uses: msys2/setup-msys2@v2
|
||||
with:
|
||||
update: true
|
||||
install: >-
|
||||
git
|
||||
base-devel
|
||||
mingw-w64-x86_64-toolchain
|
||||
make
|
||||
cmake
|
||||
upx
|
||||
mingw-w64-x86_64-rust
|
||||
mingw-w64-x86_64-postgresql
|
||||
mingw-w64-x86_64-gcc
|
||||
mingw-w64-x86_64-gcc-libs
|
||||
mingw-w64-x86_64-libwinpthread-git
|
||||
mingw-w64-x86_64-zlib
|
||||
mingw-w64-x86_64-openssl
|
||||
mingw-w64-x86_64-python
|
||||
mingw-w64-x86_64-cmake
|
||||
mingw-w64-x86_64-llvm
|
||||
mingw-w64-x86_64-clang
|
||||
|
||||
- name: Add UPX to PATH
|
||||
run: |
|
||||
echo "/usr/bin:$PATH" >> $GITHUB_PATH
|
||||
echo "/mingw64/bin:$PATH" >> $GITHUB_PATH
|
||||
echo "/usr/lib:$PATH" >> $GITHUB_PATH
|
||||
echo "/mingw64/lib:$PATH" >> $GITHUB_PATH
|
||||
|
||||
- name: Verify dependencies
|
||||
run: |
|
||||
which upx gcc g++ make cmake cargo rustc python
|
||||
|
||||
- name: Updating submodules
|
||||
run: git submodule update --init --recursive
|
||||
|
||||
- name: Creating tmp directory
|
||||
run: mkdir -p tmp
|
||||
|
||||
- name: Building Nim
|
||||
run: |
|
||||
cd vendor/nimbus-build-system/vendor/Nim
|
||||
./build_all.bat
|
||||
cd ../../../..
|
||||
|
||||
- name: Building miniupnpc
|
||||
run: |
|
||||
cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc
|
||||
make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1
|
||||
cd ../../../../..
|
||||
|
||||
- name: Building libnatpmp
|
||||
run: |
|
||||
cd ./vendor/nim-nat-traversal/vendor/libnatpmp-upstream
|
||||
make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1
|
||||
cd ../../../../
|
||||
|
||||
- name: Building wakunode2.exe
|
||||
run: |
|
||||
make wakunode2 LOG_LEVEL=DEBUG V=3 -j8
|
||||
|
||||
- name: Building libwaku.dll
|
||||
run: |
|
||||
make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j
|
||||
|
||||
- name: Check Executable
|
||||
run: |
|
||||
if [ -f "./build/wakunode2.exe" ]; then
|
||||
echo "wakunode2.exe build successful"
|
||||
else
|
||||
echo "Build failed: wakunode2.exe not found"
|
||||
exit 1
|
||||
fi
|
||||
if [ -f "./build/libwaku.dll" ]; then
|
||||
echo "libwaku.dll build successful"
|
||||
else
|
||||
echo "Build failed: libwaku.dll not found"
|
||||
exit 1
|
||||
fi
|
||||
81
third-party/nwaku/.gitignore
vendored
Normal file
81
third-party/nwaku/.gitignore
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
/nimcache
|
||||
|
||||
# Executables shall be put in an ignored build/ directory
|
||||
/build
|
||||
|
||||
# Nimble packages
|
||||
/vendor/.nimble
|
||||
|
||||
# Generated Files
|
||||
*.generated.nim
|
||||
|
||||
# ntags/ctags output
|
||||
/tags
|
||||
|
||||
# a symlink that can't be added to the repo because of Windows
|
||||
/waku.nims
|
||||
|
||||
# Ignore dynamic, static libs and libtool archive files
|
||||
*.so
|
||||
*.dylib
|
||||
*.a
|
||||
*.la
|
||||
*.exe
|
||||
*.dll
|
||||
|
||||
.DS_Store
|
||||
|
||||
# Ignore simulation generated metrics files
|
||||
/metrics/prometheus
|
||||
/metrics/waku-sim-all-nodes-grafana-dashboard.json
|
||||
|
||||
*.log
|
||||
/package-lock.json
|
||||
/package.json
|
||||
node_modules/
|
||||
/.update.timestamp
|
||||
|
||||
# Ignore Jetbrains IDE files
|
||||
.idea/
|
||||
|
||||
# ignore vscode files
|
||||
.vscode/
|
||||
|
||||
# RLN / keystore
|
||||
rlnKeystore.json
|
||||
*.tar.gz
|
||||
|
||||
# Nimbus Build System
|
||||
nimbus-build-system.paths
|
||||
|
||||
# sqlite db
|
||||
*.db
|
||||
*.db-shm
|
||||
*.db-wal
|
||||
*.sqlite3
|
||||
*.sqlite3-shm
|
||||
*.sqlite3-wal
|
||||
|
||||
/examples/nodejs/build/
|
||||
/examples/rust/target/
|
||||
|
||||
|
||||
# Coverage
|
||||
coverage_html_report/
|
||||
*.info
|
||||
|
||||
# Wildcard
|
||||
*.ignore.*
|
||||
|
||||
# Ignore all possible node runner directories
|
||||
**/keystore/
|
||||
**/rln_tree/
|
||||
**/certs/
|
||||
|
||||
# simple qt example
|
||||
.qmake.stash
|
||||
main-qt
|
||||
waku_handler.moc.cpp
|
||||
|
||||
# Nix build result
|
||||
result
|
||||
190
third-party/nwaku/.gitmodules
vendored
Normal file
190
third-party/nwaku/.gitmodules
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
[submodule "vendor/nim-eth"]
|
||||
path = vendor/nim-eth
|
||||
url = https://github.com/status-im/nim-eth.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-secp256k1"]
|
||||
path = vendor/nim-secp256k1
|
||||
url = https://github.com/status-im/nim-secp256k1.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-libp2p"]
|
||||
path = vendor/nim-libp2p
|
||||
url = https://github.com/vacp2p/nim-libp2p.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-stew"]
|
||||
path = vendor/nim-stew
|
||||
url = https://github.com/status-im/nim-stew.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nimbus-build-system"]
|
||||
path = vendor/nimbus-build-system
|
||||
url = https://github.com/status-im/nimbus-build-system.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-nat-traversal"]
|
||||
path = vendor/nim-nat-traversal
|
||||
url = https://github.com/status-im/nim-nat-traversal.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-libbacktrace"]
|
||||
path = vendor/nim-libbacktrace
|
||||
url = https://github.com/status-im/nim-libbacktrace.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-confutils"]
|
||||
path = vendor/nim-confutils
|
||||
url = https://github.com/status-im/nim-confutils.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-chronicles"]
|
||||
path = vendor/nim-chronicles
|
||||
url = https://github.com/status-im/nim-chronicles.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-faststreams"]
|
||||
path = vendor/nim-faststreams
|
||||
url = https://github.com/status-im/nim-faststreams.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-chronos"]
|
||||
path = vendor/nim-chronos
|
||||
url = https://github.com/status-im/nim-chronos.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-json-serialization"]
|
||||
path = vendor/nim-json-serialization
|
||||
url = https://github.com/status-im/nim-json-serialization.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-serialization"]
|
||||
path = vendor/nim-serialization
|
||||
url = https://github.com/status-im/nim-serialization.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nimcrypto"]
|
||||
path = vendor/nimcrypto
|
||||
url = https://github.com/cheatfate/nimcrypto.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-metrics"]
|
||||
path = vendor/nim-metrics
|
||||
url = https://github.com/status-im/nim-metrics.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-stint"]
|
||||
path = vendor/nim-stint
|
||||
url = https://github.com/status-im/nim-stint.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-json-rpc"]
|
||||
path = vendor/nim-json-rpc
|
||||
url = https://github.com/status-im/nim-json-rpc.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-http-utils"]
|
||||
path = vendor/nim-http-utils
|
||||
url = https://github.com/status-im/nim-http-utils.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-bearssl"]
|
||||
path = vendor/nim-bearssl
|
||||
url = https://github.com/status-im/nim-bearssl.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-sqlite3-abi"]
|
||||
path = vendor/nim-sqlite3-abi
|
||||
url = https://github.com/arnetheduck/nim-sqlite3-abi.git
|
||||
ignore = dirty
|
||||
branch = master
|
||||
[submodule "vendor/nim-web3"]
|
||||
path = vendor/nim-web3
|
||||
url = https://github.com/status-im/nim-web3.git
|
||||
[submodule "vendor/nim-testutils"]
|
||||
path = vendor/nim-testutils
|
||||
url = https://github.com/status-im/nim-testutils.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-unittest2"]
|
||||
path = vendor/nim-unittest2
|
||||
url = https://github.com/status-im/nim-unittest2.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-websock"]
|
||||
path = vendor/nim-websock
|
||||
url = https://github.com/status-im/nim-websock.git
|
||||
ignore = untracked
|
||||
branch = main
|
||||
[submodule "vendor/nim-zlib"]
|
||||
path = vendor/nim-zlib
|
||||
url = https://github.com/status-im/nim-zlib.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-dnsdisc"]
|
||||
path = vendor/nim-dnsdisc
|
||||
url = https://github.com/status-im/nim-dnsdisc.git
|
||||
ignore = untracked
|
||||
branch = main
|
||||
[submodule "vendor/dnsclient.nim"]
|
||||
path = vendor/dnsclient.nim
|
||||
url = https://github.com/ba0f3/dnsclient.nim.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-toml-serialization"]
|
||||
path = vendor/nim-toml-serialization
|
||||
url = https://github.com/status-im/nim-toml-serialization.git
|
||||
[submodule "vendor/nim-presto"]
|
||||
path = vendor/nim-presto
|
||||
url = https://github.com/status-im/nim-presto.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/zerokit"]
|
||||
path = vendor/zerokit
|
||||
url = https://github.com/vacp2p/zerokit.git
|
||||
ignore = dirty
|
||||
branch = v0.5.1
|
||||
[submodule "vendor/nim-regex"]
|
||||
path = vendor/nim-regex
|
||||
url = https://github.com/nitely/nim-regex.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-unicodedb"]
|
||||
path = vendor/nim-unicodedb
|
||||
url = https://github.com/nitely/nim-unicodedb.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-taskpools"]
|
||||
path = vendor/nim-taskpools
|
||||
url = https://github.com/status-im/nim-taskpools.git
|
||||
ignore = untracked
|
||||
branch = stable
|
||||
[submodule "vendor/nim-results"]
|
||||
ignore = untracked
|
||||
branch = master
|
||||
path = vendor/nim-results
|
||||
url = https://github.com/arnetheduck/nim-results.git
|
||||
[submodule "vendor/db_connector"]
|
||||
path = vendor/db_connector
|
||||
url = https://github.com/nim-lang/db_connector.git
|
||||
ignore = untracked
|
||||
branch = devel
|
||||
[submodule "vendor/nph"]
|
||||
ignore = untracked
|
||||
branch = master
|
||||
path = vendor/nph
|
||||
url = https://github.com/arnetheduck/nph.git
|
||||
[submodule "vendor/nim-minilru"]
|
||||
path = vendor/nim-minilru
|
||||
url = https://github.com/status-im/nim-minilru.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/waku-rlnv2-contract"]
|
||||
path = vendor/waku-rlnv2-contract
|
||||
url = https://github.com/waku-org/waku-rlnv2-contract.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/mix"]
|
||||
path = vendor/mix
|
||||
url = https://github.com/vacp2p/mix/
|
||||
branch = main
|
||||
22
third-party/nwaku/.sv4git.yml
vendored
Normal file
22
third-party/nwaku/.sv4git.yml
vendored
Normal file
@ -0,0 +1,22 @@
|
||||
version: "1.1" #config version
|
||||
|
||||
tag:
|
||||
pattern: "v%d.%d.%d"
|
||||
filter: "v*"
|
||||
|
||||
release-notes:
|
||||
sections: # Array with each section of release note. Check template section for more information.
|
||||
- name: Features # Name used on section.
|
||||
section-type: commits # Type of the section, supported types: commits, breaking-changes.
|
||||
commit-types: [feat] # Commit types for commit section-type, one commit type cannot be in more than one section.
|
||||
- name: Bug Fixes
|
||||
section-type: commits
|
||||
commit-types: [fix, bug]
|
||||
- name: Changes
|
||||
section-type: commits
|
||||
commit-types: [chore, docs, build, refactor, docker]
|
||||
|
||||
commit-message:
|
||||
|
||||
issue:
|
||||
regex: '#[0-9]+' # Regex for issue id.
|
||||
8
third-party/nwaku/.sv4git/templates/releasenotes-md.tpl
vendored
Normal file
8
third-party/nwaku/.sv4git/templates/releasenotes-md.tpl
vendored
Normal file
@ -0,0 +1,8 @@
|
||||
## {{if .Release}}{{.Release}}{{end}}{{if and (not .Date.IsZero) .Release}} ({{end}}{{timefmt .Date "2006-01-02"}}{{if and (not .Date.IsZero) .Release}}){{end}}
|
||||
{{- range $section := .Sections }}
|
||||
{{- if (eq $section.SectionType "commits") }}
|
||||
{{- template "rn-md-section-commits.tpl" $section }}
|
||||
{{- else if (eq $section.SectionType "breaking-changes")}}
|
||||
{{- template "rn-md-section-breaking-changes.tpl" $section }}
|
||||
{{- end}}
|
||||
{{- end}}
|
||||
7
third-party/nwaku/.sv4git/templates/rn-md-section-commits.tpl
vendored
Normal file
7
third-party/nwaku/.sv4git/templates/rn-md-section-commits.tpl
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
{{- if .}}{{- if ne .SectionName ""}}
|
||||
|
||||
### {{.SectionName}}
|
||||
{{range $k,$v := .Items}}
|
||||
- {{if $v.Message.Scope}}**{{$v.Message.Scope}}:** {{end}}{{$v.Message.Description}} ([{{$v.Hash}}](https://github.com/waku-org/nwaku/commit/{{$v.Hash}})){{if $v.Message.Metadata.issue}} ([https://github.com/waku-org/nwaku/issues/{{$v.Message.Metadata.issue}}]({{$v.Message.Metadata.issue}})){{end}}
|
||||
{{- end}}
|
||||
{{- end}}{{- end}}
|
||||
2495
third-party/nwaku/CHANGELOG.md
vendored
Normal file
2495
third-party/nwaku/CHANGELOG.md
vendored
Normal file
File diff suppressed because it is too large
Load Diff
93
third-party/nwaku/Dockerfile
vendored
Normal file
93
third-party/nwaku/Dockerfile
vendored
Normal file
@ -0,0 +1,93 @@
|
||||
# BUILD NIM APP ----------------------------------------------------------------
|
||||
FROM rust:1.81.0-alpine3.19 AS nim-build
|
||||
|
||||
ARG NIMFLAGS
|
||||
ARG MAKE_TARGET=wakunode2
|
||||
ARG NIM_COMMIT
|
||||
ARG LOG_LEVEL=TRACE
|
||||
ARG HEAPTRACK_BUILD=0
|
||||
|
||||
# Get build tools and required header files
|
||||
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
|
||||
# workaround for alpine issue: https://github.com/alpinelinux/docker-alpine/issues/383
|
||||
RUN apk update && apk upgrade
|
||||
|
||||
# Ran separately from 'make' to avoid re-doing
|
||||
RUN git submodule update --init --recursive
|
||||
|
||||
RUN if [ "$HEAPTRACK_BUILD" = "1" ]; then \
|
||||
git apply --directory=vendor/nimbus-build-system/vendor/Nim docs/tutorial/nim.2.2.4_heaptracker_addon.patch; \
|
||||
fi
|
||||
|
||||
# Slowest build step for the sake of caching layers
|
||||
RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT}
|
||||
|
||||
# Build the final node binary
|
||||
RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}"
|
||||
|
||||
|
||||
# PRODUCTION IMAGE -------------------------------------------------------------
|
||||
|
||||
FROM alpine:3.18 AS prod
|
||||
|
||||
ARG MAKE_TARGET=wakunode2
|
||||
|
||||
LABEL maintainer="jakub@status.im"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Wakunode: Waku client"
|
||||
LABEL commit="unknown"
|
||||
LABEL version="unknown"
|
||||
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apk add --no-cache libgcc libpq-dev bind-tools
|
||||
|
||||
# Copy to separate location to accomodate different MAKE_TARGET values
|
||||
COPY --from=nim-build /app/build/$MAKE_TARGET /usr/local/bin/
|
||||
|
||||
# Copy migration scripts for DB upgrades
|
||||
COPY --from=nim-build /app/migrations/ /app/migrations/
|
||||
|
||||
# Symlink the correct wakunode binary
|
||||
RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode
|
||||
|
||||
ENTRYPOINT ["/usr/bin/wakunode"]
|
||||
|
||||
# By default just show help if called without arguments
|
||||
CMD ["--help"]
|
||||
|
||||
|
||||
# DEBUG IMAGE ------------------------------------------------------------------
|
||||
|
||||
# Build debug tools: heaptrack
|
||||
FROM alpine:3.18 AS heaptrack-build
|
||||
|
||||
RUN apk update
|
||||
RUN apk add -- gdb git g++ make cmake zlib-dev boost-dev libunwind-dev
|
||||
RUN git clone https://github.com/KDE/heaptrack.git /heaptrack
|
||||
|
||||
WORKDIR /heaptrack/build
|
||||
# going to a commit that builds properly. We will revisit this for new releases
|
||||
RUN git reset --hard f9cc35ebbdde92a292fe3870fe011ad2874da0ca
|
||||
RUN cmake -DCMAKE_BUILD_TYPE=Release ..
|
||||
RUN make -j$(nproc)
|
||||
|
||||
|
||||
# Debug image
|
||||
FROM prod AS debug-with-heaptrack
|
||||
|
||||
RUN apk add --no-cache gdb libunwind
|
||||
|
||||
# Add heaptrack
|
||||
COPY --from=heaptrack-build /heaptrack/build/ /heaptrack/build/
|
||||
|
||||
ENV LD_LIBRARY_PATH=/heaptrack/build/lib/heaptrack/
|
||||
RUN ln -s /heaptrack/build/bin/heaptrack /usr/local/bin/heaptrack
|
||||
|
||||
ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"]
|
||||
58
third-party/nwaku/Dockerfile.lightpushWithMix.compile
vendored
Normal file
58
third-party/nwaku/Dockerfile.lightpushWithMix.compile
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
# BUILD NIM APP ----------------------------------------------------------------
|
||||
FROM rust:1.81.0-alpine3.19 AS nim-build
|
||||
|
||||
ARG NIMFLAGS
|
||||
ARG MAKE_TARGET=lightpushwithmix
|
||||
ARG NIM_COMMIT
|
||||
ARG LOG_LEVEL=TRACE
|
||||
|
||||
# Get build tools and required header files
|
||||
RUN apk add --no-cache bash git build-base openssl-dev pcre-dev linux-headers curl jq
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
|
||||
# workaround for alpine issue: https://github.com/alpinelinux/docker-alpine/issues/383
|
||||
RUN apk update && apk upgrade
|
||||
|
||||
# Ran separately from 'make' to avoid re-doing
|
||||
RUN git submodule update --init --recursive
|
||||
|
||||
# Slowest build step for the sake of caching layers
|
||||
RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT}
|
||||
|
||||
# Build the final node binary
|
||||
RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}"
|
||||
|
||||
|
||||
# REFERENCE IMAGE as BASE for specialized PRODUCTION IMAGES----------------------------------------
|
||||
FROM alpine:3.18 AS base_lpt
|
||||
|
||||
ARG MAKE_TARGET=lightpushwithmix
|
||||
|
||||
LABEL maintainer="prem@waku.org"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Lite Push With Mix: Waku light-client"
|
||||
LABEL commit="unknown"
|
||||
LABEL version="unknown"
|
||||
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apk add --no-cache libgcc pcre-dev libpq-dev \
|
||||
wget \
|
||||
iproute2 \
|
||||
python3 \
|
||||
jq
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
|
||||
COPY --from=nim-build /app/build/lightpush_publisher_mix /usr/bin/
|
||||
RUN chmod +x /usr/bin/lightpush_publisher_mix
|
||||
|
||||
# Standalone image to be used manually and in lpt-runner -------------------------------------------
|
||||
FROM base_lpt AS standalone_lpt
|
||||
|
||||
ENTRYPOINT ["/usr/bin/lightpush_publisher_mix"]
|
||||
205
third-party/nwaku/LICENSE-APACHEv2
vendored
Normal file
205
third-party/nwaku/LICENSE-APACHEv2
vendored
Normal file
@ -0,0 +1,205 @@
|
||||
nim-waku is licensed under the Apache License version 2
|
||||
Copyright (c) 2018 Status Research & Development GmbH
|
||||
-----------------------------------------------------
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2018 Status Research & Development GmbH
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
25
third-party/nwaku/LICENSE-MIT
vendored
Normal file
25
third-party/nwaku/LICENSE-MIT
vendored
Normal file
@ -0,0 +1,25 @@
|
||||
nim-waku is licensed under the MIT License
|
||||
Copyright (c) 2018 Status Research & Development GmbH
|
||||
-----------------------------------------------------
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2018 Status Research & Development GmbH
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
546
third-party/nwaku/Makefile
vendored
Normal file
546
third-party/nwaku/Makefile
vendored
Normal file
@ -0,0 +1,546 @@
|
||||
# Copyright (c) 2022 Status Research & Development GmbH. Licensed under
|
||||
# either of:
|
||||
# - Apache License, version 2.0
|
||||
# - MIT license
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
export BUILD_SYSTEM_DIR := vendor/nimbus-build-system
|
||||
export EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor
|
||||
LINK_PCRE := 0
|
||||
FORMAT_MSG := "\\x1B[95mFormatting:\\x1B[39m"
|
||||
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
|
||||
-include $(BUILD_SYSTEM_DIR)/makefiles/variables.mk
|
||||
|
||||
|
||||
ifeq ($(NIM_PARAMS),)
|
||||
# "variables.mk" was not included, so we update the submodules.
|
||||
GIT_SUBMODULE_UPDATE := git submodule update --init --recursive
|
||||
.DEFAULT:
|
||||
+@ echo -e "Git submodules not found. Running '$(GIT_SUBMODULE_UPDATE)'.\n"; \
|
||||
$(GIT_SUBMODULE_UPDATE); \
|
||||
echo
|
||||
# Now that the included *.mk files appeared, and are newer than this file, Make will restart itself:
|
||||
# https://www.gnu.org/software/make/manual/make.html#Remaking-Makefiles
|
||||
#
|
||||
# After restarting, it will execute its original goal, so we don't have to start a child Make here
|
||||
# with "$(MAKE) $(MAKECMDGOALS)". Isn't hidden control flow great?
|
||||
|
||||
else # "variables.mk" was included. Business as usual until the end of this file.
|
||||
|
||||
# Determine the OS
|
||||
detected_OS := $(shell uname -s)
|
||||
ifneq (,$(findstring MINGW,$(detected_OS)))
|
||||
detected_OS := Windows
|
||||
endif
|
||||
|
||||
ifeq ($(detected_OS),Windows)
|
||||
# Update MINGW_PATH to standard MinGW location
|
||||
MINGW_PATH = /mingw64
|
||||
NIM_PARAMS += --passC:"-I$(MINGW_PATH)/include"
|
||||
NIM_PARAMS += --passL:"-L$(MINGW_PATH)/lib"
|
||||
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc"
|
||||
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream"
|
||||
|
||||
LIBS = -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq
|
||||
NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)")
|
||||
endif
|
||||
|
||||
##########
|
||||
## Main ##
|
||||
##########
|
||||
.PHONY: all test update clean
|
||||
|
||||
# default target, because it's the first one that doesn't start with '.'
|
||||
all: | wakunode2 example2 chat2 chat2bridge libwaku
|
||||
|
||||
test_file := $(word 2,$(MAKECMDGOALS))
|
||||
define test_name
|
||||
$(shell echo '$(MAKECMDGOALS)' | cut -d' ' -f3-)
|
||||
endef
|
||||
|
||||
test:
|
||||
ifeq ($(strip $(test_file)),)
|
||||
$(MAKE) testcommon
|
||||
$(MAKE) testwaku
|
||||
else
|
||||
$(MAKE) compile-test TEST_FILE="$(test_file)" TEST_NAME="$(call test_name)"
|
||||
endif
|
||||
# this prevents make from erroring on unknown targets like "Index"
|
||||
%:
|
||||
@true
|
||||
|
||||
waku.nims:
|
||||
ln -s waku.nimble $@
|
||||
|
||||
update: | update-common
|
||||
rm -rf waku.nims && \
|
||||
$(MAKE) waku.nims $(HANDLE_OUTPUT)
|
||||
$(MAKE) build-nph
|
||||
|
||||
clean:
|
||||
rm -rf build
|
||||
|
||||
# must be included after the default target
|
||||
-include $(BUILD_SYSTEM_DIR)/makefiles/targets.mk
|
||||
|
||||
## Possible values: prod; debug
|
||||
TARGET ?= prod
|
||||
|
||||
## Git version
|
||||
GIT_VERSION ?= $(shell git describe --abbrev=6 --always --tags)
|
||||
## Compilation parameters. If defined in the CLI the assignments won't be executed
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:git_version=\"$(GIT_VERSION)\"
|
||||
|
||||
## Heaptracker options
|
||||
HEAPTRACKER ?= 0
|
||||
HEAPTRACKER_INJECT ?= 0
|
||||
ifeq ($(HEAPTRACKER), 1)
|
||||
# Assumes Nim's lib/system/alloc.nim is patched!
|
||||
TARGET := debug-with-heaptrack
|
||||
|
||||
ifeq ($(HEAPTRACKER_INJECT), 1)
|
||||
# the Nim compiler will load 'libheaptrack_inject.so'
|
||||
HEAPTRACK_PARAMS := -d:heaptracker -d:heaptracker_inject
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker -d:heaptracker_inject
|
||||
else
|
||||
# the Nim compiler will load 'libheaptrack_preload.so'
|
||||
HEAPTRACK_PARAMS := -d:heaptracker
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker
|
||||
endif
|
||||
|
||||
endif
|
||||
## end of Heaptracker options
|
||||
|
||||
##################
|
||||
## Dependencies ##
|
||||
##################
|
||||
.PHONY: deps libbacktrace
|
||||
|
||||
rustup:
|
||||
ifeq (, $(shell which cargo))
|
||||
# Install Rustup if it's not installed
|
||||
# -y: Assume "yes" for all prompts
|
||||
# --default-toolchain stable: Install the stable toolchain
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable
|
||||
endif
|
||||
|
||||
rln-deps: rustup
|
||||
./scripts/install_rln_tests_dependencies.sh
|
||||
|
||||
deps: | deps-common nat-libs waku.nims
|
||||
|
||||
|
||||
### nim-libbacktrace
|
||||
|
||||
# "-d:release" implies "--stacktrace:off" and it cannot be added to config.nims
|
||||
ifeq ($(DEBUG), 0)
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:release
|
||||
else
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:debug
|
||||
endif
|
||||
|
||||
ifeq ($(USE_LIBBACKTRACE), 0)
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:disable_libbacktrace
|
||||
endif
|
||||
|
||||
libbacktrace:
|
||||
+ $(MAKE) -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
|
||||
|
||||
clean-libbacktrace:
|
||||
+ $(MAKE) -C vendor/nim-libbacktrace clean $(HANDLE_OUTPUT)
|
||||
|
||||
# Extend deps and clean targets
|
||||
ifneq ($(USE_LIBBACKTRACE), 0)
|
||||
deps: | libbacktrace
|
||||
endif
|
||||
|
||||
ifeq ($(POSTGRES), 1)
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:postgres -d:nimDebugDlOpen
|
||||
endif
|
||||
|
||||
ifeq ($(DEBUG_DISCV5), 1)
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:debugDiscv5
|
||||
endif
|
||||
|
||||
clean: | clean-libbacktrace
|
||||
|
||||
### Create nimble links (used when building with Nix)
|
||||
|
||||
nimbus-build-system-nimble-dir:
|
||||
NIMBLE_DIR="$(CURDIR)/$(NIMBLE_DIR)" \
|
||||
PWD_CMD="$(PWD)" \
|
||||
$(CURDIR)/scripts/generate_nimble_links.sh
|
||||
|
||||
##################
|
||||
## RLN ##
|
||||
##################
|
||||
.PHONY: librln
|
||||
|
||||
LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
|
||||
LIBRLN_VERSION := v0.7.0
|
||||
|
||||
ifeq ($(detected_OS),Windows)
|
||||
LIBRLN_FILE := rln.lib
|
||||
else
|
||||
LIBRLN_FILE := librln_$(LIBRLN_VERSION).a
|
||||
endif
|
||||
|
||||
$(LIBRLN_FILE):
|
||||
echo -e $(BUILD_MSG) "$@" && \
|
||||
./scripts/build_rln.sh $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(LIBRLN_FILE)
|
||||
|
||||
librln: | $(LIBRLN_FILE)
|
||||
$(eval NIM_PARAMS += --passL:$(LIBRLN_FILE) --passL:-lm)
|
||||
|
||||
clean-librln:
|
||||
cargo clean --manifest-path vendor/zerokit/rln/Cargo.toml
|
||||
rm -f $(LIBRLN_FILE)
|
||||
|
||||
# Extend clean target
|
||||
clean: | clean-librln
|
||||
|
||||
#################
|
||||
## Waku Common ##
|
||||
#################
|
||||
.PHONY: testcommon
|
||||
|
||||
testcommon: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim testcommon $(NIM_PARAMS) waku.nims
|
||||
|
||||
|
||||
##########
|
||||
## Waku ##
|
||||
##########
|
||||
.PHONY: testwaku wakunode2 testwakunode2 example2 chat2 chat2bridge liteprotocoltester
|
||||
|
||||
# install rln-deps only for the testwaku target
|
||||
testwaku: | build deps rln-deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim test -d:os=$(shell uname) $(NIM_PARAMS) waku.nims
|
||||
|
||||
wakunode2: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
\
|
||||
$(ENV_SCRIPT) nim wakunode2 $(NIM_PARAMS) waku.nims
|
||||
|
||||
benchmarks: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim benchmarks $(NIM_PARAMS) waku.nims
|
||||
|
||||
testwakunode2: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim testwakunode2 $(NIM_PARAMS) waku.nims
|
||||
|
||||
example2: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim example2 $(NIM_PARAMS) waku.nims
|
||||
|
||||
chat2: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim chat2 $(NIM_PARAMS) waku.nims
|
||||
|
||||
chat2mix: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim chat2mix $(NIM_PARAMS) waku.nims
|
||||
|
||||
rln-db-inspector: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim rln_db_inspector $(NIM_PARAMS) waku.nims
|
||||
|
||||
chat2bridge: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim chat2bridge $(NIM_PARAMS) waku.nims
|
||||
|
||||
liteprotocoltester: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim liteprotocoltester $(NIM_PARAMS) waku.nims
|
||||
|
||||
lightpushwithmix: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim lightpushwithmix $(NIM_PARAMS) waku.nims
|
||||
|
||||
build/%: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$*" && \
|
||||
$(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $*
|
||||
|
||||
compile-test: | build deps librln
|
||||
echo -e $(BUILD_MSG) "$(TEST_FILE)" "\"$(TEST_NAME)\"" && \
|
||||
$(ENV_SCRIPT) nim buildTest $(NIM_PARAMS) waku.nims $(TEST_FILE) && \
|
||||
$(ENV_SCRIPT) nim execTest $(NIM_PARAMS) waku.nims $(TEST_FILE) "\"$(TEST_NAME)\""; \
|
||||
|
||||
################
|
||||
## Waku tools ##
|
||||
################
|
||||
.PHONY: tools wakucanary networkmonitor
|
||||
|
||||
tools: networkmonitor wakucanary
|
||||
|
||||
wakucanary: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim wakucanary $(NIM_PARAMS) waku.nims
|
||||
|
||||
networkmonitor: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim networkmonitor $(NIM_PARAMS) waku.nims
|
||||
|
||||
############
|
||||
## Format ##
|
||||
############
|
||||
.PHONY: build-nph install-nph clean-nph print-nph-path
|
||||
|
||||
# Default location for nph binary shall be next to nim binary to make it available on the path.
|
||||
NPH:=$(shell dirname $(NIM_BINARY))/nph
|
||||
|
||||
build-nph: | build deps
|
||||
ifeq ("$(wildcard $(NPH))","")
|
||||
$(ENV_SCRIPT) nim c --skipParentCfg:on vendor/nph/src/nph.nim && \
|
||||
mv vendor/nph/src/nph $(shell dirname $(NPH))
|
||||
echo "nph utility is available at " $(NPH)
|
||||
else
|
||||
echo "nph utility already exists at " $(NPH)
|
||||
endif
|
||||
|
||||
GIT_PRE_COMMIT_HOOK := .git/hooks/pre-commit
|
||||
|
||||
install-nph: build-nph
|
||||
ifeq ("$(wildcard $(GIT_PRE_COMMIT_HOOK))","")
|
||||
cp ./scripts/git_pre_commit_format.sh $(GIT_PRE_COMMIT_HOOK)
|
||||
else
|
||||
echo "$(GIT_PRE_COMMIT_HOOK) already present, will NOT override"
|
||||
exit 1
|
||||
endif
|
||||
|
||||
nph/%: | build-nph
|
||||
echo -e $(FORMAT_MSG) "nph/$*" && \
|
||||
$(NPH) $*
|
||||
|
||||
clean-nph:
|
||||
rm -f $(NPH)
|
||||
|
||||
# To avoid hardcoding nph binary location in several places
|
||||
print-nph-path:
|
||||
echo "$(NPH)"
|
||||
|
||||
clean: | clean-nph
|
||||
|
||||
###################
|
||||
## Documentation ##
|
||||
###################
|
||||
.PHONY: docs coverage
|
||||
|
||||
# TODO: Remove unused target
|
||||
docs: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim doc --run --index:on --project --out:.gh-pages waku/waku.nim waku.nims
|
||||
|
||||
coverage:
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) ./scripts/run_cov.sh -y
|
||||
|
||||
|
||||
#####################
|
||||
## Container image ##
|
||||
#####################
|
||||
# -d:insecure - Necessary to enable Prometheus HTTP endpoint for metrics
|
||||
# -d:chronicles_colors:none - Necessary to disable colors in logs for Docker
|
||||
DOCKER_IMAGE_NIMFLAGS ?= -d:chronicles_colors:none -d:insecure -d:postgres
|
||||
DOCKER_IMAGE_NIMFLAGS := $(DOCKER_IMAGE_NIMFLAGS) $(HEAPTRACK_PARAMS)
|
||||
|
||||
# build a docker image for the fleet
|
||||
docker-image: MAKE_TARGET ?= wakunode2
|
||||
docker-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION)
|
||||
docker-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG)
|
||||
docker-image:
|
||||
docker build \
|
||||
--build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
|
||||
--build-arg="NIMFLAGS=$(DOCKER_IMAGE_NIMFLAGS)" \
|
||||
--build-arg="NIM_COMMIT=$(DOCKER_NIM_COMMIT)" \
|
||||
--build-arg="LOG_LEVEL=$(LOG_LEVEL)" \
|
||||
--build-arg="HEAPTRACK_BUILD=$(HEAPTRACKER)" \
|
||||
--label="commit=$(shell git rev-parse HEAD)" \
|
||||
--label="version=$(GIT_VERSION)" \
|
||||
--target $(TARGET) \
|
||||
--tag $(DOCKER_IMAGE_NAME) .
|
||||
|
||||
docker-quick-image: MAKE_TARGET ?= wakunode2
|
||||
docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION)
|
||||
docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG)
|
||||
docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm
|
||||
docker-quick-image: | build deps librln wakunode2
|
||||
docker build \
|
||||
--build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
|
||||
--tag $(DOCKER_IMAGE_NAME) \
|
||||
--target $(TARGET) \
|
||||
--file docker/binaries/Dockerfile.bn.local \
|
||||
.
|
||||
|
||||
docker-push:
|
||||
docker push $(DOCKER_IMAGE_NAME)
|
||||
|
||||
####################################
|
||||
## Container lite-protocol-tester ##
|
||||
####################################
|
||||
# -d:insecure - Necessary to enable Prometheus HTTP endpoint for metrics
|
||||
# -d:chronicles_colors:none - Necessary to disable colors in logs for Docker
|
||||
DOCKER_LPT_NIMFLAGS ?= -d:chronicles_colors:none -d:insecure
|
||||
|
||||
# build a docker image for the fleet
|
||||
docker-liteprotocoltester: DOCKER_LPT_TAG ?= latest
|
||||
docker-liteprotocoltester: DOCKER_LPT_NAME ?= wakuorg/liteprotocoltester:$(DOCKER_LPT_TAG)
|
||||
# --no-cache
|
||||
docker-liteprotocoltester:
|
||||
docker build \
|
||||
--build-arg="MAKE_TARGET=liteprotocoltester" \
|
||||
--build-arg="NIMFLAGS=$(DOCKER_LPT_NIMFLAGS)" \
|
||||
--build-arg="NIM_COMMIT=$(DOCKER_NIM_COMMIT)" \
|
||||
--build-arg="LOG_LEVEL=TRACE" \
|
||||
--label="commit=$(shell git rev-parse HEAD)" \
|
||||
--label="version=$(GIT_VERSION)" \
|
||||
--target $(if $(filter deploy,$(DOCKER_LPT_TAG)),deployment_lpt,standalone_lpt) \
|
||||
--tag $(DOCKER_LPT_NAME) \
|
||||
--file apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile \
|
||||
.
|
||||
|
||||
docker-quick-liteprotocoltester: DOCKER_LPT_TAG ?= latest
|
||||
docker-quick-liteprotocoltester: DOCKER_LPT_NAME ?= wakuorg/liteprotocoltester:$(DOCKER_LPT_TAG)
|
||||
docker-quick-liteprotocoltester: | liteprotocoltester
|
||||
docker build \
|
||||
--tag $(DOCKER_LPT_NAME) \
|
||||
--file apps/liteprotocoltester/Dockerfile.liteprotocoltester \
|
||||
.
|
||||
|
||||
docker-liteprotocoltester-push:
|
||||
docker push $(DOCKER_LPT_NAME)
|
||||
|
||||
|
||||
################
|
||||
## C Bindings ##
|
||||
################
|
||||
.PHONY: cbindings cwaku_example libwaku
|
||||
|
||||
STATIC ?= 0
|
||||
|
||||
|
||||
libwaku: | build deps librln
|
||||
rm -f build/libwaku*
|
||||
|
||||
ifeq ($(STATIC), 1)
|
||||
echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
|
||||
else ifeq ($(detected_OS),Windows)
|
||||
echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
|
||||
else
|
||||
echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
|
||||
endif
|
||||
|
||||
#####################
|
||||
## Mobile Bindings ##
|
||||
#####################
|
||||
.PHONY: libwaku-android \
|
||||
libwaku-android-precheck \
|
||||
libwaku-android-arm64 \
|
||||
libwaku-android-amd64 \
|
||||
libwaku-android-x86 \
|
||||
libwaku-android-arm \
|
||||
rebuild-nat-libs \
|
||||
build-libwaku-for-android-arch
|
||||
|
||||
ANDROID_TARGET ?= 30
|
||||
ifeq ($(detected_OS),Darwin)
|
||||
ANDROID_TOOLCHAIN_DIR := $(ANDROID_NDK_HOME)/toolchains/llvm/prebuilt/darwin-x86_64
|
||||
else
|
||||
ANDROID_TOOLCHAIN_DIR := $(ANDROID_NDK_HOME)/toolchains/llvm/prebuilt/linux-x86_64
|
||||
endif
|
||||
|
||||
rebuild-nat-libs: | clean-cross nat-libs
|
||||
|
||||
libwaku-android-precheck:
|
||||
ifndef ANDROID_NDK_HOME
|
||||
$(error ANDROID_NDK_HOME is not set)
|
||||
endif
|
||||
|
||||
build-libwaku-for-android-arch:
|
||||
$(MAKE) rebuild-nat-libs CC=$(ANDROID_TOOLCHAIN_DIR)/bin/$(ANDROID_COMPILER) && \
|
||||
./scripts/build_rln_android.sh $(CURDIR)/build $(LIBRLN_BUILDDIR) $(LIBRLN_VERSION) $(CROSS_TARGET) $(ABIDIR) && \
|
||||
CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_ARCH=$(ANDROID_ARCH) ANDROID_COMPILER=$(ANDROID_COMPILER) ANDROID_TOOLCHAIN_DIR=$(ANDROID_TOOLCHAIN_DIR) $(ENV_SCRIPT) nim libWakuAndroid $(NIM_PARAMS) waku.nims
|
||||
|
||||
libwaku-android-arm64: ANDROID_ARCH=aarch64-linux-android
|
||||
libwaku-android-arm64: CPU=arm64
|
||||
libwaku-android-arm64: ABIDIR=arm64-v8a
|
||||
libwaku-android-arm64: | libwaku-android-precheck build deps
|
||||
$(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=$(ANDROID_ARCH) CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang
|
||||
|
||||
libwaku-android-amd64: ANDROID_ARCH=x86_64-linux-android
|
||||
libwaku-android-amd64: CPU=amd64
|
||||
libwaku-android-amd64: ABIDIR=x86_64
|
||||
libwaku-android-amd64: | libwaku-android-precheck build deps
|
||||
$(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=$(ANDROID_ARCH) CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang
|
||||
|
||||
libwaku-android-x86: ANDROID_ARCH=i686-linux-android
|
||||
libwaku-android-x86: CPU=i386
|
||||
libwaku-android-x86: ABIDIR=x86
|
||||
libwaku-android-x86: | libwaku-android-precheck build deps
|
||||
$(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=$(ANDROID_ARCH) CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang
|
||||
|
||||
libwaku-android-arm: ANDROID_ARCH=armv7a-linux-androideabi
|
||||
libwaku-android-arm: CPU=arm
|
||||
libwaku-android-arm: ABIDIR=armeabi-v7a
|
||||
libwaku-android-arm: | libwaku-android-precheck build deps
|
||||
# cross-rs target architecture name does not match the one used in android
|
||||
$(MAKE) build-libwaku-for-android-arch ANDROID_ARCH=$(ANDROID_ARCH) CROSS_TARGET=armv7-linux-androideabi CPU=$(CPU) ABIDIR=$(ABIDIR) ANDROID_COMPILER=$(ANDROID_ARCH)$(ANDROID_TARGET)-clang
|
||||
|
||||
libwaku-android:
|
||||
$(MAKE) libwaku-android-amd64
|
||||
$(MAKE) libwaku-android-arm64
|
||||
$(MAKE) libwaku-android-x86
|
||||
# This target is disabled because on recent versions of cross-rs complain with the following error
|
||||
# relocation R_ARM_THM_ALU_PREL_11_0 cannot be used against symbol 'stack_init_trampoline_return'; recompile with -fPIC
|
||||
# It's likely this architecture is not used so we might just not support it.
|
||||
# $(MAKE) libwaku-android-arm
|
||||
|
||||
cwaku_example: | build libwaku
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
cc -o "build/$@" \
|
||||
./examples/cbindings/waku_example.c \
|
||||
./examples/cbindings/base64.c \
|
||||
-lwaku -Lbuild/ \
|
||||
-pthread -ldl -lm \
|
||||
-lminiupnpc -Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build/ \
|
||||
-lnatpmp -Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream/ \
|
||||
vendor/nim-libbacktrace/libbacktrace_wrapper.o \
|
||||
vendor/nim-libbacktrace/install/usr/lib/libbacktrace.a
|
||||
|
||||
cppwaku_example: | build libwaku
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
g++ -o "build/$@" \
|
||||
./examples/cpp/waku.cpp \
|
||||
./examples/cpp/base64.cpp \
|
||||
-lwaku -Lbuild/ \
|
||||
-pthread -ldl -lm \
|
||||
-lminiupnpc -Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build/ \
|
||||
-lnatpmp -Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream/ \
|
||||
vendor/nim-libbacktrace/libbacktrace_wrapper.o \
|
||||
vendor/nim-libbacktrace/install/usr/lib/libbacktrace.a
|
||||
|
||||
nodejswaku: | build deps
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
node-gyp build --directory=examples/nodejs/
|
||||
|
||||
endif # "variables.mk" was not included
|
||||
|
||||
###################
|
||||
# Release Targets #
|
||||
###################
|
||||
|
||||
release-notes:
|
||||
docker run \
|
||||
-it \
|
||||
--rm \
|
||||
-v $${PWD}:/opt/sv4git/repo:z \
|
||||
-u $(shell id -u) \
|
||||
docker.io/wakuorg/sv4git:latest \
|
||||
release-notes |\
|
||||
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g'
|
||||
# I could not get the tool to replace issue ids with links, so using sed for now,
|
||||
# asked here: https://github.com/bvieira/sv4git/discussions/101
|
||||
|
||||
186
third-party/nwaku/README.md
vendored
Normal file
186
third-party/nwaku/README.md
vendored
Normal file
@ -0,0 +1,186 @@
|
||||
# Nwaku
|
||||
|
||||
## Introduction
|
||||
|
||||
The nwaku repository implements Waku, and provides tools related to it.
|
||||
|
||||
- A Nim implementation of the [Waku (v2) protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
|
||||
- CLI application `wakunode2` that allows you to run a Waku node.
|
||||
- Examples of Waku usage.
|
||||
- Various tests of above.
|
||||
|
||||
For more details see the [source code](waku/README.md)
|
||||
|
||||
## How to Build & Run ( Linux, MacOS & WSL )
|
||||
|
||||
These instructions are generic. For more detailed instructions, see the Waku source code above.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
The standard developer tools, including a C compiler, GNU Make, Bash, and Git. More information on these installations can be found [here](https://docs.waku.org/guides/nwaku/build-source#install-dependencies).
|
||||
|
||||
> In some distributions (Fedora linux for example), you may need to install `which` utility separately. Nimbus build system is relying on it.
|
||||
|
||||
You'll also need an installation of Rust and its toolchain (specifically `rustc` and `cargo`).
|
||||
The easiest way to install these, is using `rustup`:
|
||||
|
||||
```bash
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
||||
```
|
||||
|
||||
### Wakunode
|
||||
|
||||
```bash
|
||||
# The first `make` invocation will update all Git submodules.
|
||||
# You'll run `make update` after each `git pull` in the future to keep those submodules updated.
|
||||
make wakunode2
|
||||
|
||||
# Build with custom compilation flags. Do not use NIM_PARAMS unless you know what you are doing.
|
||||
# Replace with your own flags
|
||||
make wakunode2 NIMFLAGS="-d:chronicles_colors:none -d:disableMarchNative"
|
||||
|
||||
# Run with DNS bootstrapping
|
||||
./build/wakunode2 --dns-discovery --dns-discovery-url=DNS_BOOTSTRAP_NODE_URL
|
||||
|
||||
# See available command line options
|
||||
./build/wakunode2 --help
|
||||
```
|
||||
To join the network, you need to know the address of at least one bootstrap node.
|
||||
Please refer to the [Waku README](https://github.com/waku-org/nwaku/blob/master/waku/README.md) for more information.
|
||||
|
||||
For more on how to run `wakunode2`, refer to:
|
||||
- [Run using binaries](https://docs.waku.org/guides/nwaku/build-source)
|
||||
- [Run using docker](https://docs.waku.org/guides/nwaku/run-docker)
|
||||
- [Run using docker-compose](https://docs.waku.org/guides/nwaku/run-docker-compose)
|
||||
|
||||
#### Issues
|
||||
##### WSL
|
||||
If you encounter difficulties building the project on WSL, consider placing the project within WSL's filesystem, avoiding the `/mnt/` directory.
|
||||
|
||||
### How to Build & Run ( Windows )
|
||||
|
||||
### Windows Build Instructions
|
||||
|
||||
#### 1. Install Required Tools
|
||||
- **Git Bash Terminal**: Download and install from https://git-scm.com/download/win
|
||||
- **MSYS2**:
|
||||
a. Download installer from https://www.msys2.org
|
||||
b. Install at "C:\" (default location). Remove/rename the msys folder in case of previous installation.
|
||||
c. Use the mingw64 terminal from msys64 directory for package installation.
|
||||
|
||||
#### 2. Install Dependencies
|
||||
Open MSYS2 mingw64 terminal and run the following one-by-one :
|
||||
```bash
|
||||
pacman -Syu --noconfirm
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-toolchain
|
||||
pacman -S --noconfirm --needed base-devel make cmake upx
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-rust
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-postgresql
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-gcc
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-gcc-libs
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-libwinpthread-git
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-zlib
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-openssl
|
||||
pacman -S --noconfirm --needed mingw-w64-x86_64-python
|
||||
```
|
||||
|
||||
#### 3. Build Wakunode
|
||||
- Open Git Bash as administrator
|
||||
- clone nwaku and cd nwaku
|
||||
- Execute: `./scripts/build_windows.sh`
|
||||
|
||||
#### 4. Troubleshooting
|
||||
If `wakunode2.exe` isn't generated:
|
||||
- **Missing Dependencies**: Verify with:
|
||||
`which make cmake gcc g++ rustc cargo python3 upx`
|
||||
If missing, revisit Step 2 or ensure MSYS2 is at `C:\`
|
||||
- **Installation Conflicts**: Remove existing MinGW/MSYS2/Git Bash installations and perform fresh install
|
||||
|
||||
### Developing
|
||||
|
||||
#### Nim Runtime
|
||||
This repository is bundled with a Nim runtime that includes the necessary dependencies for the project.
|
||||
|
||||
Before you can utilize the runtime you'll need to build the project, as detailed in a previous section.
|
||||
This will generate a `vendor` directory containing various dependencies, including the `nimbus-build-system` which has the bundled nim runtime.
|
||||
|
||||
After successfully building the project, you may bring the bundled runtime into scope by running:
|
||||
```bash
|
||||
source env.sh
|
||||
```
|
||||
If everything went well, you should see your prompt suffixed with `[Nimbus env]$`. Now you can run `nim` commands as usual.
|
||||
|
||||
### Test Suite
|
||||
|
||||
```bash
|
||||
# Run all the Waku tests
|
||||
make test
|
||||
|
||||
# Run a specific test file
|
||||
make test <test_file_path>
|
||||
# e.g. : make test tests/wakunode2/test_all.nim
|
||||
|
||||
# Run a specific test name from a specific test file
|
||||
make test <test_file_path> <test_name>
|
||||
# e.g. : make test tests/wakunode2/test_all.nim "node setup is successful with default configuration"
|
||||
```
|
||||
|
||||
### Building single test files
|
||||
|
||||
During development it is helpful to build and run a single test file.
|
||||
To support this make has a specific target:
|
||||
|
||||
targets:
|
||||
- `build/<relative path to your test file.nim>`
|
||||
- `test/<relative path to your test file.nim>`
|
||||
|
||||
Binary will be created as `<path to your test file.nim>.bin` under the `build` directory .
|
||||
|
||||
```bash
|
||||
# Build and run your test file separately
|
||||
make test/tests/common/test_enr_builder.nim
|
||||
```
|
||||
|
||||
### Testing against `js-waku`
|
||||
Refer to [js-waku repo](https://github.com/waku-org/js-waku/tree/master/packages/tests) for instructions.
|
||||
|
||||
## Formatting
|
||||
|
||||
Nim files are expected to be formatted using the [`nph`](https://github.com/arnetheduck/nph) version present in `vendor/nph`.
|
||||
|
||||
You can easily format file with the `make nph/<relative path to nim> file` command.
|
||||
For example:
|
||||
|
||||
```
|
||||
make nph/waku/waku_core.nim
|
||||
```
|
||||
|
||||
A convenient git hook is provided to automatically format file at commit time.
|
||||
Run the following command to install it:
|
||||
|
||||
```shell
|
||||
make install-nph
|
||||
```
|
||||
|
||||
### Examples
|
||||
|
||||
Examples can be found in the examples folder.
|
||||
This includes a fully featured chat example.
|
||||
|
||||
### Tools
|
||||
|
||||
Different tools and their corresponding how-to guides can be found in the `tools` folder.
|
||||
|
||||
### Bugs, Questions & Features
|
||||
|
||||
For an inquiry, or if you would like to propose new features, feel free to [open a general issue](https://github.com/waku-org/nwaku/issues/new).
|
||||
|
||||
For bug reports, please [tag your issue with the `bug` label](https://github.com/waku-org/nwaku/issues/new).
|
||||
|
||||
If you believe the reported issue requires critical attention, please [use the `critical` label](https://github.com/waku-org/nwaku/issues/new?labels=critical,bug) to assist with triaging.
|
||||
|
||||
To get help, or participate in the conversation, join the [Waku Discord](https://discord.waku.org/) server.
|
||||
|
||||
### Docs
|
||||
|
||||
* [REST API Documentation](https://waku-org.github.io/waku-rest-api/)
|
||||
74
third-party/nwaku/apps/benchmarks/benchmarks.nim
vendored
Normal file
74
third-party/nwaku/apps/benchmarks/benchmarks.nim
vendored
Normal file
@ -0,0 +1,74 @@
|
||||
import
|
||||
std/[strutils, times, sequtils, osproc], math, results, options, testutils/unittests
|
||||
|
||||
import
|
||||
waku/[
|
||||
waku_rln_relay/protocol_types,
|
||||
waku_rln_relay/rln,
|
||||
waku_rln_relay,
|
||||
waku_rln_relay/conversion_utils,
|
||||
waku_rln_relay/group_manager/on_chain/group_manager,
|
||||
],
|
||||
tests/waku_rln_relay/utils_onchain
|
||||
|
||||
proc benchmark(
|
||||
manager: OnChainGroupManager, registerCount: int, messageLimit: int
|
||||
): Future[string] {.async, gcsafe.} =
|
||||
# Register a new member so that we can later generate proofs
|
||||
let idCredentials = generateCredentials(manager.rlnInstance, registerCount)
|
||||
|
||||
var start_time = getTime()
|
||||
for i in 0 .. registerCount - 1:
|
||||
try:
|
||||
await manager.register(idCredentials[i], UserMessageLimit(messageLimit + 1))
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
debug "registration finished",
|
||||
iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds
|
||||
|
||||
discard await manager.updateRoots()
|
||||
let proofResult = await manager.fetchMerkleProofElements()
|
||||
if proofResult.isErr():
|
||||
error "Failed to fetch Merkle proof", error = proofResult.error
|
||||
manager.merkleProofCache = proofResult.get()
|
||||
|
||||
let epoch = default(Epoch)
|
||||
debug "epoch in bytes", epochHex = epoch.inHex()
|
||||
let data: seq[byte] = newSeq[byte](1024)
|
||||
|
||||
var proofGenTimes: seq[times.Duration] = @[]
|
||||
var proofVerTimes: seq[times.Duration] = @[]
|
||||
|
||||
start_time = getTime()
|
||||
for i in 1 .. messageLimit:
|
||||
var generate_time = getTime()
|
||||
let proof = manager.generateProof(data, epoch, MessageId(i.uint8)).valueOr:
|
||||
raiseAssert $error
|
||||
proofGenTimes.add(getTime() - generate_time)
|
||||
|
||||
let verify_time = getTime()
|
||||
let ok = manager.verifyProof(data, proof).valueOr:
|
||||
raiseAssert $error
|
||||
proofVerTimes.add(getTime() - verify_time)
|
||||
debug "iteration finished",
|
||||
iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds
|
||||
|
||||
echo "Proof generation times: ", sum(proofGenTimes) div len(proofGenTimes)
|
||||
echo "Proof verification times: ", sum(proofVerTimes) div len(proofVerTimes)
|
||||
|
||||
proc main() =
|
||||
# Start a local Ethereum JSON-RPC (Anvil) so that the group-manager setup can connect.
|
||||
let anvilProc = runAnvil()
|
||||
defer:
|
||||
stopAnvil(anvilProc)
|
||||
|
||||
# Set up an On-chain group manager (includes contract deployment)
|
||||
let manager = waitFor setupOnchainGroupManager()
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
discard waitFor benchmark(manager, 200, 20)
|
||||
|
||||
when isMainModule:
|
||||
main()
|
||||
626
third-party/nwaku/apps/chat2/chat2.nim
vendored
Normal file
626
third-party/nwaku/apps/chat2/chat2.nim
vendored
Normal file
@ -0,0 +1,626 @@
|
||||
## chat2 is an example of usage of Waku v2. For suggested usage options, please
|
||||
## see dingpu tutorial in docs folder.
|
||||
|
||||
when not (compileOption("threads")):
|
||||
{.fatal: "Please, compile this program with the --threads:on option!".}
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[strformat, strutils, times, options, random, sequtils]
|
||||
import
|
||||
confutils,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/keys,
|
||||
bearssl,
|
||||
stew/[byteutils, results],
|
||||
metrics,
|
||||
metrics/chronos_httpserver
|
||||
import
|
||||
libp2p/[
|
||||
switch, # manage transports, a single entry point for dialing and listening
|
||||
crypto/crypto, # cryptographic functions
|
||||
stream/connection, # create and close stream read / write connections
|
||||
multiaddress,
|
||||
# encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP
|
||||
peerinfo,
|
||||
# manage the information of a peer, such as peer ID and public / private key
|
||||
peerid, # Implement how peers interact
|
||||
protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs
|
||||
nameresolving/dnsresolver,
|
||||
] # define DNS resolution
|
||||
import
|
||||
waku/[
|
||||
waku_core,
|
||||
waku_lightpush_legacy/common,
|
||||
waku_lightpush_legacy/rpc,
|
||||
waku_enr,
|
||||
discovery/waku_dnsdisc,
|
||||
waku_store_legacy,
|
||||
waku_node,
|
||||
node/waku_metrics,
|
||||
node/peer_manager,
|
||||
factory/builder,
|
||||
common/utils/nat,
|
||||
waku_relay,
|
||||
waku_store/common,
|
||||
],
|
||||
./config_chat2
|
||||
|
||||
import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub
|
||||
import ../../waku/waku_rln_relay
|
||||
|
||||
const Help =
|
||||
"""
|
||||
Commands: /[?|help|connect|nick|exit]
|
||||
help: Prints this help
|
||||
connect: dials a remote peer
|
||||
nick: change nickname for current chat session
|
||||
exit: exits chat session
|
||||
"""
|
||||
|
||||
# XXX Connected is a bit annoying, because incoming connections don't trigger state change
|
||||
# Could poll connection pool or something here, I suppose
|
||||
# TODO Ensure connected turns true on incoming connections, or get rid of it
|
||||
type Chat = ref object
|
||||
node: WakuNode # waku node for publishing, subscribing, etc
|
||||
transp: StreamTransport # transport streams between read & write file descriptor
|
||||
subscribed: bool # indicates if a node is subscribed or not to a topic
|
||||
connected: bool # if the node is connected to another peer
|
||||
started: bool # if the node has started
|
||||
nick: string # nickname for this chat session
|
||||
prompt: bool # chat prompt is showing
|
||||
contentTopic: string # default content topic for chat messages
|
||||
|
||||
type
|
||||
PrivateKey* = crypto.PrivateKey
|
||||
Topic* = waku_core.PubsubTopic
|
||||
|
||||
#####################
|
||||
## chat2 protobufs ##
|
||||
#####################
|
||||
|
||||
type
|
||||
SelectResult*[T] = Result[T, string]
|
||||
|
||||
Chat2Message* = object
|
||||
timestamp*: int64
|
||||
nick*: string
|
||||
payload*: seq[byte]
|
||||
|
||||
proc init*(T: type Chat2Message, buffer: seq[byte]): ProtoResult[T] =
|
||||
var msg = Chat2Message()
|
||||
let pb = initProtoBuffer(buffer)
|
||||
|
||||
var timestamp: uint64
|
||||
discard ?pb.getField(1, timestamp)
|
||||
msg.timestamp = int64(timestamp)
|
||||
|
||||
discard ?pb.getField(2, msg.nick)
|
||||
discard ?pb.getField(3, msg.payload)
|
||||
|
||||
ok(msg)
|
||||
|
||||
proc encode*(message: Chat2Message): ProtoBuffer =
|
||||
var serialised = initProtoBuffer()
|
||||
|
||||
serialised.write(1, uint64(message.timestamp))
|
||||
serialised.write(2, message.nick)
|
||||
serialised.write(3, message.payload)
|
||||
|
||||
return serialised
|
||||
|
||||
proc toString*(message: Chat2Message): string =
|
||||
# Get message date and timestamp in local time
|
||||
let time = message.timestamp.fromUnix().local().format("'<'MMM' 'dd,' 'HH:mm'>'")
|
||||
|
||||
return time & " " & message.nick & ": " & string.fromBytes(message.payload)
|
||||
|
||||
#####################
|
||||
|
||||
proc connectToNodes(c: Chat, nodes: seq[string]) {.async.} =
|
||||
echo "Connecting to nodes"
|
||||
await c.node.connectToNodes(nodes)
|
||||
c.connected = true
|
||||
|
||||
proc showChatPrompt(c: Chat) =
|
||||
if not c.prompt:
|
||||
try:
|
||||
stdout.write(">> ")
|
||||
stdout.flushFile()
|
||||
c.prompt = true
|
||||
except IOError:
|
||||
discard
|
||||
|
||||
proc getChatLine(c: Chat, msg: WakuMessage): Result[string, string] =
|
||||
# No payload encoding/encryption from Waku
|
||||
let
|
||||
pb = Chat2Message.init(msg.payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(msg.payload)
|
||||
return ok(chatline)
|
||||
|
||||
proc printReceivedMessage(c: Chat, msg: WakuMessage) =
|
||||
let
|
||||
pb = Chat2Message.init(msg.payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(msg.payload)
|
||||
try:
|
||||
echo &"{chatLine}"
|
||||
except ValueError:
|
||||
# Formatting fail. Print chat line in any case.
|
||||
echo chatLine
|
||||
|
||||
c.prompt = false
|
||||
showChatPrompt(c)
|
||||
trace "Printing message",
|
||||
topic = DefaultPubsubTopic, chatLine, contentTopic = msg.contentTopic
|
||||
|
||||
proc readNick(transp: StreamTransport): Future[string] {.async.} =
|
||||
# Chat prompt
|
||||
stdout.write("Choose a nickname >> ")
|
||||
stdout.flushFile()
|
||||
return await transp.readLine()
|
||||
|
||||
proc startMetricsServer(
|
||||
serverIp: IpAddress, serverPort: Port
|
||||
): Result[MetricsHttpServerRef, string] =
|
||||
info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort
|
||||
|
||||
let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort)
|
||||
if metricsServerRes.isErr():
|
||||
return err("metrics HTTP server start failed: " & $metricsServerRes.error)
|
||||
|
||||
let server = metricsServerRes.value
|
||||
try:
|
||||
waitFor server.start()
|
||||
except CatchableError:
|
||||
return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())
|
||||
|
||||
info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort
|
||||
ok(metricsServerRes.value)
|
||||
|
||||
proc publish(c: Chat, line: string) =
|
||||
# First create a Chat2Message protobuf with this line of text
|
||||
let time = getTime().toUnix()
|
||||
let chat2pb =
|
||||
Chat2Message(timestamp: time, nick: c.nick, payload: line.toBytes()).encode()
|
||||
|
||||
## @TODO: error handling on failure
|
||||
proc handler(response: PushResponse) {.gcsafe, closure.} =
|
||||
trace "lightpush response received", response = response
|
||||
|
||||
var message = WakuMessage(
|
||||
payload: chat2pb.buffer,
|
||||
contentTopic: c.contentTopic,
|
||||
version: 0,
|
||||
timestamp: getNanosecondTime(time),
|
||||
)
|
||||
if not isNil(c.node.wakuRlnRelay):
|
||||
# for future version when we support more than one rln protected content topic,
|
||||
# we should check the message content topic as well
|
||||
let appendRes = c.node.wakuRlnRelay.appendRLNProof(message, float64(time))
|
||||
if appendRes.isErr():
|
||||
debug "could not append rate limit proof to the message"
|
||||
else:
|
||||
debug "rate limit proof is appended to the message"
|
||||
let decodeRes = RateLimitProof.init(message.proof)
|
||||
if decodeRes.isErr():
|
||||
error "could not decode the RLN proof"
|
||||
|
||||
let proof = decodeRes.get()
|
||||
# TODO move it to log after dogfooding
|
||||
let msgEpoch = fromEpoch(proof.epoch)
|
||||
if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == msgEpoch:
|
||||
echo "--rln epoch: ",
|
||||
msgEpoch, " ⚠️ message rate violation! you are spamming the network!"
|
||||
else:
|
||||
echo "--rln epoch: ", msgEpoch
|
||||
# update the last epoch
|
||||
c.node.wakuRlnRelay.lastEpoch = proof.epoch
|
||||
|
||||
try:
|
||||
if not c.node.wakuLegacyLightPush.isNil():
|
||||
# Attempt lightpush
|
||||
(waitFor c.node.legacyLightpushPublish(some(DefaultPubsubTopic), message)).isOkOr:
|
||||
error "failed to publish lightpush message", error = error
|
||||
else:
|
||||
(waitFor c.node.publish(some(DefaultPubsubTopic), message)).isOkOr:
|
||||
error "failed to publish message", error = error
|
||||
except CatchableError:
|
||||
error "caught error publishing message: ", error = getCurrentExceptionMsg()
|
||||
|
||||
# TODO This should read or be subscribe handler subscribe
|
||||
proc readAndPrint(c: Chat) {.async.} =
|
||||
while true:
|
||||
# while p.connected:
|
||||
# # TODO: echo &"{p.id} -> "
|
||||
#
|
||||
# echo cast[string](await p.conn.readLp(1024))
|
||||
#echo "readAndPrint subscribe NYI"
|
||||
await sleepAsync(100.millis)
|
||||
|
||||
# TODO Implement
|
||||
proc writeAndPrint(c: Chat) {.async.} =
|
||||
while true:
|
||||
# Connect state not updated on incoming WakuRelay connections
|
||||
# if not c.connected:
|
||||
# echo "type an address or wait for a connection:"
|
||||
# echo "type /[help|?] for help"
|
||||
|
||||
# Chat prompt
|
||||
showChatPrompt(c)
|
||||
|
||||
let line = await c.transp.readLine()
|
||||
if line.startsWith("/help") or line.startsWith("/?") or not c.started:
|
||||
echo Help
|
||||
continue
|
||||
|
||||
# if line.startsWith("/disconnect"):
|
||||
# echo "Ending current session"
|
||||
# if p.connected and p.conn.closed.not:
|
||||
# await p.conn.close()
|
||||
# p.connected = false
|
||||
elif line.startsWith("/connect"):
|
||||
# TODO Should be able to connect to multiple peers for Waku chat
|
||||
if c.connected:
|
||||
echo "already connected to at least one peer"
|
||||
continue
|
||||
|
||||
echo "enter address of remote peer"
|
||||
let address = await c.transp.readLine()
|
||||
if address.len > 0:
|
||||
await c.connectToNodes(@[address])
|
||||
elif line.startsWith("/nick"):
|
||||
# Set a new nickname
|
||||
c.nick = await readNick(c.transp)
|
||||
echo "You are now known as " & c.nick
|
||||
elif line.startsWith("/exit"):
|
||||
echo "quitting..."
|
||||
|
||||
try:
|
||||
await c.node.stop()
|
||||
except:
|
||||
echo "exception happened when stopping: " & getCurrentExceptionMsg()
|
||||
|
||||
quit(QuitSuccess)
|
||||
else:
|
||||
# XXX connected state problematic
|
||||
if c.started:
|
||||
c.publish(line)
|
||||
# TODO Connect to peer logic?
|
||||
else:
|
||||
try:
|
||||
if line.startsWith("/") and "p2p" in line:
|
||||
await c.connectToNodes(@[line])
|
||||
except:
|
||||
echo &"unable to dial remote peer {line}"
|
||||
echo getCurrentExceptionMsg()
|
||||
|
||||
proc readWriteLoop(c: Chat) {.async.} =
|
||||
asyncSpawn c.writeAndPrint() # execute the async function but does not block
|
||||
asyncSpawn c.readAndPrint()
|
||||
|
||||
proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} =
|
||||
## This procedure performs reading from `stdin` and sends data over
|
||||
## pipe to main thread.
|
||||
let transp = fromPipe(wfd)
|
||||
|
||||
while true:
|
||||
let line = stdin.readLine()
|
||||
discard waitFor transp.write(line & "\r\n")
|
||||
|
||||
{.pop.}
|
||||
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
|
||||
proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
let
|
||||
transp = fromPipe(rfd)
|
||||
conf = Chat2Conf.load()
|
||||
nodekey =
|
||||
if conf.nodekey.isSome():
|
||||
conf.nodekey.get()
|
||||
else:
|
||||
PrivateKey.random(Secp256k1, rng[]).tryGet()
|
||||
|
||||
# set log level
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
|
||||
let natRes = setupNat(
|
||||
conf.nat,
|
||||
clientId,
|
||||
Port(uint16(conf.tcpPort) + conf.portsShift),
|
||||
Port(uint16(conf.udpPort) + conf.portsShift),
|
||||
)
|
||||
|
||||
if natRes.isErr():
|
||||
raise newException(ValueError, "setupNat error " & natRes.error)
|
||||
|
||||
let (extIp, extTcpPort, extUdpPort) = natRes.get()
|
||||
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
|
||||
let node = block:
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
builder.withRecord(record)
|
||||
|
||||
builder
|
||||
.withNetworkConfigurationDetails(
|
||||
conf.listenAddress,
|
||||
Port(uint16(conf.tcpPort) + conf.portsShift),
|
||||
extIp,
|
||||
extTcpPort,
|
||||
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
|
||||
wsEnabled = conf.websocketSupport,
|
||||
wssEnabled = conf.websocketSecureSupport,
|
||||
)
|
||||
.tryGet()
|
||||
builder.build().tryGet()
|
||||
|
||||
await node.start()
|
||||
|
||||
if conf.rlnRelayCredPath == "":
|
||||
raise newException(ConfigurationError, "rln-relay-cred-path MUST be passed")
|
||||
|
||||
if conf.relay:
|
||||
let shards =
|
||||
conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
|
||||
(await node.mountRelay()).isOkOr:
|
||||
echo "failed to mount relay: " & error
|
||||
return
|
||||
|
||||
await node.mountLibp2pPing()
|
||||
|
||||
let nick = await readNick(transp)
|
||||
echo "Welcome, " & nick & "!"
|
||||
|
||||
var chat = Chat(
|
||||
node: node,
|
||||
transp: transp,
|
||||
subscribed: true,
|
||||
connected: false,
|
||||
started: true,
|
||||
nick: nick,
|
||||
prompt: false,
|
||||
contentTopic: conf.contentTopic,
|
||||
)
|
||||
|
||||
if conf.staticnodes.len > 0:
|
||||
echo "Connecting to static peers..."
|
||||
await connectToNodes(chat, conf.staticnodes)
|
||||
|
||||
var dnsDiscoveryUrl = none(string)
|
||||
|
||||
if conf.fleet != Fleet.none:
|
||||
# Use DNS discovery to connect to selected fleet
|
||||
echo "Connecting to " & $conf.fleet & " fleet using DNS discovery..."
|
||||
|
||||
if conf.fleet == Fleet.test:
|
||||
dnsDiscoveryUrl = some(
|
||||
"enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im"
|
||||
)
|
||||
else:
|
||||
# Connect to sandbox by default
|
||||
dnsDiscoveryUrl = some(
|
||||
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
|
||||
)
|
||||
elif conf.dnsDiscoveryUrl != "":
|
||||
# No pre-selected fleet. Discover nodes via DNS using user config
|
||||
debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
|
||||
dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)
|
||||
|
||||
var discoveredNodes: seq[RemotePeerInfo]
|
||||
|
||||
if dnsDiscoveryUrl.isSome:
|
||||
var nameServers: seq[TransportAddress]
|
||||
for ip in conf.dnsAddrsNameServers:
|
||||
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
|
||||
|
||||
let dnsResolver = DnsResolver.new(nameServers)
|
||||
|
||||
proc resolver(domain: string): Future[string] {.async, gcsafe.} =
|
||||
trace "resolving", domain = domain
|
||||
let resolved = await dnsResolver.resolveTxt(domain)
|
||||
return resolved[0] # Use only first answer
|
||||
|
||||
var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver)
|
||||
if wakuDnsDiscovery.isOk:
|
||||
let discoveredPeers = await wakuDnsDiscovery.get().findPeers()
|
||||
if discoveredPeers.isOk:
|
||||
info "Connecting to discovered peers"
|
||||
discoveredNodes = discoveredPeers.get()
|
||||
echo "Discovered and connecting to " & $discoveredNodes
|
||||
waitFor chat.node.connectToNodes(discoveredNodes)
|
||||
else:
|
||||
warn "Failed to init Waku DNS discovery"
|
||||
|
||||
let peerInfo = node.switch.peerInfo
|
||||
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
|
||||
echo &"Listening on\n {listenStr}"
|
||||
|
||||
if (conf.storenode != "") or (conf.store == true):
|
||||
await node.mountStore()
|
||||
|
||||
var storenode: Option[RemotePeerInfo]
|
||||
|
||||
if conf.storenode != "":
|
||||
let peerInfo = parsePeerInfo(conf.storenode)
|
||||
if peerInfo.isOk():
|
||||
storenode = some(peerInfo.value)
|
||||
else:
|
||||
error "Incorrect conf.storenode", error = peerInfo.error
|
||||
elif discoveredNodes.len > 0:
|
||||
echo "Store enabled, but no store nodes configured. Choosing one at random from discovered peers"
|
||||
storenode = some(discoveredNodes[rand(0 .. len(discoveredNodes) - 1)])
|
||||
|
||||
if storenode.isSome():
|
||||
# We have a viable storenode. Let's query it for historical messages.
|
||||
echo "Connecting to storenode: " & $(storenode.get())
|
||||
|
||||
node.mountStoreClient()
|
||||
node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec)
|
||||
|
||||
proc storeHandler(response: StoreQueryResponse) {.gcsafe.} =
|
||||
for msg in response.messages:
|
||||
let payload =
|
||||
if msg.message.isSome():
|
||||
msg.message.get().payload
|
||||
else:
|
||||
newSeq[byte](0)
|
||||
|
||||
let
|
||||
pb = Chat2Message.init(payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(payload)
|
||||
echo &"{chatLine}"
|
||||
info "Hit store handler"
|
||||
|
||||
let queryRes = await node.query(
|
||||
StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get()
|
||||
)
|
||||
if queryRes.isOk():
|
||||
storeHandler(queryRes.value)
|
||||
|
||||
# NOTE Must be mounted after relay
|
||||
if conf.lightpushnode != "":
|
||||
let peerInfo = parsePeerInfo(conf.lightpushnode)
|
||||
if peerInfo.isOk():
|
||||
await mountLegacyLightPush(node)
|
||||
node.mountLegacyLightPushClient()
|
||||
node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec)
|
||||
else:
|
||||
error "LightPush not mounted. Couldn't parse conf.lightpushnode",
|
||||
error = peerInfo.error
|
||||
|
||||
if conf.filternode != "":
|
||||
let peerInfo = parsePeerInfo(conf.filternode)
|
||||
if peerInfo.isOk():
|
||||
await node.mountFilter()
|
||||
await node.mountFilterClient()
|
||||
|
||||
proc filterHandler(
|
||||
pubsubTopic: PubsubTopic, msg: WakuMessage
|
||||
) {.async, gcsafe, closure.} =
|
||||
trace "Hit filter handler", contentTopic = msg.contentTopic
|
||||
chat.printReceivedMessage(msg)
|
||||
|
||||
# TODO: Here to support FilterV2 relevant subscription.
|
||||
else:
|
||||
error "Filter not mounted. Couldn't parse conf.filternode", error = peerInfo.error
|
||||
|
||||
# Subscribe to a topic, if relay is mounted
|
||||
if conf.relay:
|
||||
proc handler(topic: PubsubTopic, msg: WakuMessage): Future[void] {.async, gcsafe.} =
|
||||
trace "Hit subscribe handler", topic
|
||||
|
||||
if msg.contentTopic == chat.contentTopic:
|
||||
chat.printReceivedMessage(msg)
|
||||
|
||||
node.subscribe(
|
||||
(kind: PubsubSub, topic: DefaultPubsubTopic), WakuRelayHandler(handler)
|
||||
).isOkOr:
|
||||
error "failed to subscribe to pubsub topic",
|
||||
topic = DefaultPubsubTopic, error = error
|
||||
|
||||
if conf.rlnRelay:
|
||||
info "WakuRLNRelay is enabled"
|
||||
|
||||
proc spamHandler(wakuMessage: WakuMessage) {.gcsafe, closure.} =
|
||||
debug "spam handler is called"
|
||||
let chatLineResult = chat.getChatLine(wakuMessage)
|
||||
if chatLineResult.isOk():
|
||||
echo "A spam message is found and discarded : ", chatLineResult.value
|
||||
else:
|
||||
echo "A spam message is found and discarded"
|
||||
chat.prompt = false
|
||||
showChatPrompt(chat)
|
||||
|
||||
echo "rln-relay preparation is in progress..."
|
||||
|
||||
let rlnConf = WakuRlnConfig(
|
||||
dynamic: conf.rlnRelayDynamic,
|
||||
credIndex: conf.rlnRelayCredIndex,
|
||||
chainId: UInt256.fromBytesBE(conf.rlnRelayChainId.toBytesBE()),
|
||||
ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
|
||||
creds: some(
|
||||
RlnRelayCreds(
|
||||
path: conf.rlnRelayCredPath, password: conf.rlnRelayCredPassword
|
||||
)
|
||||
),
|
||||
userMessageLimit: conf.rlnRelayUserMessageLimit,
|
||||
epochSizeSec: conf.rlnEpochSizeSec,
|
||||
)
|
||||
|
||||
waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))
|
||||
|
||||
let membershipIndex = node.wakuRlnRelay.groupManager.membershipIndex.get()
|
||||
let identityCredential = node.wakuRlnRelay.groupManager.idCredentials.get()
|
||||
echo "your membership index is: ", membershipIndex
|
||||
echo "your rln identity commitment key is: ",
|
||||
identityCredential.idCommitment.inHex()
|
||||
else:
|
||||
info "WakuRLNRelay is disabled"
|
||||
echo "WakuRLNRelay is disabled, please enable it by passing in the --rln-relay flag"
|
||||
if conf.metricsLogging:
|
||||
startMetricsLog()
|
||||
|
||||
if conf.metricsServer:
|
||||
let metricsServer = startMetricsServer(
|
||||
conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift)
|
||||
)
|
||||
|
||||
await chat.readWriteLoop()
|
||||
|
||||
runForever()
|
||||
|
||||
proc main(rng: ref HmacDrbgContext) {.async.} =
|
||||
let (rfd, wfd) = createAsyncPipe()
|
||||
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
|
||||
raise newException(ValueError, "Could not initialize pipe!")
|
||||
|
||||
var thread: Thread[AsyncFD]
|
||||
thread.createThread(readInput, wfd)
|
||||
try:
|
||||
await processInput(rfd, rng)
|
||||
# Handle only ConfigurationError for now
|
||||
# TODO: Throw other errors from the mounting procedure
|
||||
except ConfigurationError as e:
|
||||
raise e
|
||||
|
||||
when isMainModule: # isMainModule = true when the module is compiled as the main file
|
||||
let rng = crypto.newRng()
|
||||
try:
|
||||
waitFor(main(rng))
|
||||
except CatchableError as e:
|
||||
raise e
|
||||
|
||||
## Dump of things that can be improved:
|
||||
##
|
||||
## - Incoming dialed peer does not change connected state (not relying on it for now)
|
||||
## - Unclear if staticnode argument works (can enter manually)
|
||||
## - Don't trigger self / double publish own messages
|
||||
## - Integrate store protocol (fetch messages in beginning)
|
||||
## - Integrate filter protocol (default/option to be light node, connect to filter node)
|
||||
## - Test/default to cluster node connection (diff protocol version)
|
||||
## - Redirect logs to separate file
|
||||
## - Expose basic publish/subscribe etc commands with /syntax
|
||||
## - Show part of peerid to know who sent message
|
||||
## - Deal with protobuf messages (e.g. other chat protocol, or encrypted)
|
||||
351
third-party/nwaku/apps/chat2/config_chat2.nim
vendored
Normal file
351
third-party/nwaku/apps/chat2/config_chat2.nim
vendored
Normal file
@ -0,0 +1,351 @@
|
||||
import
|
||||
chronicles,
|
||||
chronos,
|
||||
confutils,
|
||||
confutils/defs,
|
||||
confutils/std/net,
|
||||
eth/keys,
|
||||
libp2p/crypto/crypto,
|
||||
libp2p/crypto/secp,
|
||||
nimcrypto/utils,
|
||||
std/strutils,
|
||||
regex
|
||||
import waku/waku_core
|
||||
|
||||
type
|
||||
Fleet* = enum
|
||||
none
|
||||
prod
|
||||
test
|
||||
|
||||
EthRpcUrl* = distinct string
|
||||
|
||||
Chat2Conf* = object ## General node config
|
||||
logLevel* {.
|
||||
desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level"
|
||||
.}: LogLevel
|
||||
|
||||
nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}:
|
||||
Option[crypto.PrivateKey]
|
||||
|
||||
listenAddress* {.
|
||||
defaultValue: defaultListenAddress(config),
|
||||
desc: "Listening address for the LibP2P traffic.",
|
||||
name: "listen-address"
|
||||
.}: IpAddress
|
||||
|
||||
tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}:
|
||||
Port
|
||||
|
||||
udpPort* {.desc: "UDP listening port.", defaultValue: 60000, name: "udp-port".}:
|
||||
Port
|
||||
|
||||
portsShift* {.
|
||||
desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift"
|
||||
.}: uint16
|
||||
|
||||
nat* {.
|
||||
desc:
|
||||
"Specify method to use for determining public address. " &
|
||||
"Must be one of: any, none, upnp, pmp, extip:<IP>.",
|
||||
defaultValue: "any"
|
||||
.}: string
|
||||
|
||||
## Persistence config
|
||||
dbPath* {.
|
||||
desc: "The database path for peristent storage", defaultValue: "", name: "db-path"
|
||||
.}: string
|
||||
|
||||
persistPeers* {.
|
||||
desc: "Enable peer persistence: true|false",
|
||||
defaultValue: false,
|
||||
name: "persist-peers"
|
||||
.}: bool
|
||||
|
||||
persistMessages* {.
|
||||
desc: "Enable message persistence: true|false",
|
||||
defaultValue: false,
|
||||
name: "persist-messages"
|
||||
.}: bool
|
||||
|
||||
## Relay config
|
||||
relay* {.
|
||||
desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay"
|
||||
.}: bool
|
||||
|
||||
staticnodes* {.
|
||||
desc: "Peer multiaddr to directly connect with. Argument may be repeated.",
|
||||
name: "staticnode"
|
||||
.}: seq[string]
|
||||
|
||||
keepAlive* {.
|
||||
desc: "Enable keep-alive for idle connections: true|false",
|
||||
defaultValue: false,
|
||||
name: "keep-alive"
|
||||
.}: bool
|
||||
|
||||
clusterId* {.
|
||||
desc:
|
||||
"Cluster id that the node is running in. Node in a different cluster id is disconnected.",
|
||||
defaultValue: 0,
|
||||
name: "cluster-id"
|
||||
.}: uint16
|
||||
|
||||
shards* {.
|
||||
desc:
|
||||
"Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
|
||||
defaultValue: @[uint16(0)],
|
||||
name: "shard"
|
||||
.}: seq[uint16]
|
||||
|
||||
## Store config
|
||||
store* {.
|
||||
desc: "Enable store protocol: true|false", defaultValue: true, name: "store"
|
||||
.}: bool
|
||||
|
||||
storenode* {.
|
||||
desc: "Peer multiaddr to query for storage.", defaultValue: "", name: "storenode"
|
||||
.}: string
|
||||
|
||||
## Filter config
|
||||
filter* {.
|
||||
desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter"
|
||||
.}: bool
|
||||
|
||||
filternode* {.
|
||||
desc: "Peer multiaddr to request content filtering of messages.",
|
||||
defaultValue: "",
|
||||
name: "filternode"
|
||||
.}: string
|
||||
|
||||
## Lightpush config
|
||||
lightpush* {.
|
||||
desc: "Enable lightpush protocol: true|false",
|
||||
defaultValue: false,
|
||||
name: "lightpush"
|
||||
.}: bool
|
||||
|
||||
lightpushnode* {.
|
||||
desc: "Peer multiaddr to request lightpush of published messages.",
|
||||
defaultValue: "",
|
||||
name: "lightpushnode"
|
||||
.}: string
|
||||
|
||||
## Metrics config
|
||||
metricsServer* {.
|
||||
desc: "Enable the metrics server: true|false",
|
||||
defaultValue: false,
|
||||
name: "metrics-server"
|
||||
.}: bool
|
||||
|
||||
metricsServerAddress* {.
|
||||
desc: "Listening address of the metrics server.",
|
||||
defaultValue: parseIpAddress("127.0.0.1"),
|
||||
name: "metrics-server-address"
|
||||
.}: IpAddress
|
||||
|
||||
metricsServerPort* {.
|
||||
desc: "Listening HTTP port of the metrics server.",
|
||||
defaultValue: 8008,
|
||||
name: "metrics-server-port"
|
||||
.}: uint16
|
||||
|
||||
metricsLogging* {.
|
||||
desc: "Enable metrics logging: true|false",
|
||||
defaultValue: true,
|
||||
name: "metrics-logging"
|
||||
.}: bool
|
||||
|
||||
## DNS discovery config
|
||||
dnsDiscovery* {.
|
||||
desc:
|
||||
"Deprecated, please set dns-discovery-url instead. Enable discovering nodes via DNS",
|
||||
defaultValue: false,
|
||||
name: "dns-discovery"
|
||||
.}: bool
|
||||
|
||||
dnsDiscoveryUrl* {.
|
||||
desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
|
||||
defaultValue: "",
|
||||
name: "dns-discovery-url"
|
||||
.}: string
|
||||
|
||||
dnsAddrsNameServers* {.
|
||||
desc:
|
||||
"DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated.",
|
||||
defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
|
||||
name: "dns-addrs-name-server"
|
||||
.}: seq[IpAddress]
|
||||
|
||||
## Chat2 configuration
|
||||
fleet* {.
|
||||
desc:
|
||||
"Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.",
|
||||
defaultValue: Fleet.prod,
|
||||
name: "fleet"
|
||||
.}: Fleet
|
||||
|
||||
contentTopic* {.
|
||||
desc: "Content topic for chat messages.",
|
||||
defaultValue: "/toy-chat/2/huilong/proto",
|
||||
name: "content-topic"
|
||||
.}: string
|
||||
|
||||
## Websocket Configuration
|
||||
websocketSupport* {.
|
||||
desc: "Enable websocket: true|false",
|
||||
defaultValue: false,
|
||||
name: "websocket-support"
|
||||
.}: bool
|
||||
|
||||
websocketPort* {.
|
||||
desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port"
|
||||
.}: Port
|
||||
|
||||
websocketSecureSupport* {.
|
||||
desc: "WebSocket Secure Support.",
|
||||
defaultValue: false,
|
||||
name: "websocket-secure-support"
|
||||
.}: bool
|
||||
|
||||
## rln-relay configuration
|
||||
rlnRelay* {.
|
||||
desc: "Enable spam protection through rln-relay: true|false",
|
||||
defaultValue: false,
|
||||
name: "rln-relay"
|
||||
.}: bool
|
||||
|
||||
rlnRelayChainId* {.
|
||||
desc:
|
||||
"Chain ID of the provided contract (optional, will fetch from RPC provider if not used)",
|
||||
defaultValue: 0,
|
||||
name: "rln-relay-chain-id"
|
||||
.}: uint
|
||||
|
||||
rlnRelayCredPath* {.
|
||||
desc: "The path for peristing rln-relay credential",
|
||||
defaultValue: "",
|
||||
name: "rln-relay-cred-path"
|
||||
.}: string
|
||||
|
||||
rlnRelayCredIndex* {.
|
||||
desc: "the index of the onchain commitment to use", name: "rln-relay-cred-index"
|
||||
.}: Option[uint]
|
||||
|
||||
rlnRelayDynamic* {.
|
||||
desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false",
|
||||
defaultValue: false,
|
||||
name: "rln-relay-dynamic"
|
||||
.}: bool
|
||||
|
||||
rlnRelayIdKey* {.
|
||||
desc: "Rln relay identity secret key as a Hex string",
|
||||
defaultValue: "",
|
||||
name: "rln-relay-id-key"
|
||||
.}: string
|
||||
|
||||
rlnRelayIdCommitmentKey* {.
|
||||
desc: "Rln relay identity commitment key as a Hex string",
|
||||
defaultValue: "",
|
||||
name: "rln-relay-id-commitment-key"
|
||||
.}: string
|
||||
|
||||
ethClientUrls* {.
|
||||
desc:
|
||||
"HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
|
||||
defaultValue: newSeq[EthRpcUrl](0),
|
||||
name: "rln-relay-eth-client-address"
|
||||
.}: seq[EthRpcUrl]
|
||||
|
||||
rlnRelayEthContractAddress* {.
|
||||
desc: "Address of membership contract on an Ethereum testnet",
|
||||
defaultValue: "",
|
||||
name: "rln-relay-eth-contract-address"
|
||||
.}: string
|
||||
|
||||
rlnRelayCredPassword* {.
|
||||
desc: "Password for encrypting RLN credentials",
|
||||
defaultValue: "",
|
||||
name: "rln-relay-cred-password"
|
||||
.}: string
|
||||
|
||||
rlnRelayUserMessageLimit* {.
|
||||
desc:
|
||||
"Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.",
|
||||
defaultValue: 1,
|
||||
name: "rln-relay-user-message-limit"
|
||||
.}: uint64
|
||||
|
||||
rlnEpochSizeSec* {.
|
||||
desc:
|
||||
"Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.",
|
||||
defaultValue: 1,
|
||||
name: "rln-relay-epoch-sec"
|
||||
.}: uint64
|
||||
|
||||
# NOTE: Keys are different in nim-libp2p
|
||||
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
|
||||
try:
|
||||
let key = SkPrivateKey.init(utils.fromHex(p)).tryGet()
|
||||
# XXX: Here at the moment
|
||||
result = crypto.PrivateKey(scheme: Secp256k1, skkey: key)
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid private key")
|
||||
|
||||
proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type IpAddress, p: string): T =
|
||||
try:
|
||||
result = parseIpAddress(p)
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid IP address")
|
||||
|
||||
proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type Port, p: string): T =
|
||||
try:
|
||||
result = Port(parseInt(p))
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid Port number")
|
||||
|
||||
proc completeCmdArg*(T: type Port, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type Option[uint], p: string): T =
|
||||
try:
|
||||
some(parseUint(p))
|
||||
except CatchableError:
|
||||
raise newException(ValueError, "Invalid unsigned integer")
|
||||
|
||||
proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type EthRpcUrl, s: string): T =
|
||||
## allowed patterns:
|
||||
## http://url:port
|
||||
## https://url:port
|
||||
## http://url:port/path
|
||||
## https://url:port/path
|
||||
## http://url/with/path
|
||||
## http://url:port/path?query
|
||||
## https://url:port/path?query
|
||||
## disallowed patterns:
|
||||
## any valid/invalid ws or wss url
|
||||
var httpPattern =
|
||||
re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
|
||||
var wsPattern =
|
||||
re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
|
||||
if regex.match(s, wsPattern):
|
||||
raise newException(
|
||||
ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL"
|
||||
)
|
||||
if not regex.match(s, httpPattern):
|
||||
raise newException(ValueError, "Invalid HTTP RPC URL")
|
||||
return EthRpcUrl(s)
|
||||
|
||||
func defaultListenAddress*(conf: Chat2Conf): IpAddress =
|
||||
# TODO: How should we select between IPv4 and IPv6
|
||||
# Maybe there should be a config option for this.
|
||||
(static parseIpAddress("0.0.0.0"))
|
||||
4
third-party/nwaku/apps/chat2/nim.cfg
vendored
Normal file
4
third-party/nwaku/apps/chat2/nim.cfg
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
-d:chronicles_line_numbers
|
||||
-d:chronicles_runtime_filtering:on
|
||||
-d:discv5_protocol_id:d5waku
|
||||
path = "../.."
|
||||
328
third-party/nwaku/apps/chat2bridge/chat2bridge.nim
vendored
Normal file
328
third-party/nwaku/apps/chat2bridge/chat2bridge.nim
vendored
Normal file
@ -0,0 +1,328 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[tables, times, strutils, hashes, sequtils, json],
|
||||
chronos,
|
||||
confutils,
|
||||
chronicles,
|
||||
chronicles/topics_registry,
|
||||
chronos/streams/tlsstream,
|
||||
metrics,
|
||||
metrics/chronos_httpserver,
|
||||
stew/byteutils,
|
||||
eth/net/nat,
|
||||
# Matterbridge client imports
|
||||
# Waku v2 imports
|
||||
libp2p/crypto/crypto,
|
||||
libp2p/errors,
|
||||
waku/[
|
||||
waku_core,
|
||||
waku_node,
|
||||
node/peer_manager,
|
||||
waku_filter_v2,
|
||||
waku_store,
|
||||
factory/builder,
|
||||
common/utils/matterbridge_client,
|
||||
common/rate_limit/setting,
|
||||
],
|
||||
# Chat 2 imports
|
||||
../chat2/chat2,
|
||||
# Common cli config
|
||||
./config_chat2bridge
|
||||
|
||||
declarePublicCounter chat2_mb_transfers,
|
||||
"Number of messages transferred between chat2 and Matterbridge", ["type"]
|
||||
declarePublicCounter chat2_mb_dropped, "Number of messages dropped", ["reason"]
|
||||
|
||||
logScope:
|
||||
topics = "chat2bridge"
|
||||
|
||||
##################
|
||||
# Default values #
|
||||
##################
|
||||
|
||||
const DeduplQSize = 20 # Maximum number of seen messages to keep in deduplication queue
|
||||
|
||||
#########
|
||||
# Types #
|
||||
#########
|
||||
|
||||
type
|
||||
Chat2MatterBridge* = ref object of RootObj
|
||||
mbClient*: MatterbridgeClient
|
||||
nodev2*: WakuNode
|
||||
running: bool
|
||||
pollPeriod: chronos.Duration
|
||||
seen: seq[Hash] #FIFO queue
|
||||
contentTopic: string
|
||||
|
||||
MbMessageHandler = proc(jsonNode: JsonNode) {.async.}
|
||||
|
||||
###################
|
||||
# Helper functions #
|
||||
###################S
|
||||
|
||||
proc containsOrAdd(sequence: var seq[Hash], hash: Hash): bool =
|
||||
if sequence.contains(hash):
|
||||
return true
|
||||
|
||||
if sequence.len >= DeduplQSize:
|
||||
trace "Deduplication queue full. Removing oldest item."
|
||||
sequence.delete 0, 0 # Remove first item in queue
|
||||
|
||||
sequence.add(hash)
|
||||
|
||||
return false
|
||||
|
||||
proc toWakuMessage(
|
||||
cmb: Chat2MatterBridge, jsonNode: JsonNode
|
||||
): WakuMessage {.raises: [Defect, KeyError].} =
|
||||
# Translates a Matterbridge API JSON response to a Waku v2 message
|
||||
let msgFields = jsonNode.getFields()
|
||||
|
||||
# @TODO error handling here - verify expected fields
|
||||
|
||||
let chat2pb = Chat2Message(
|
||||
timestamp: getTime().toUnix(), # @TODO use provided timestamp
|
||||
nick: msgFields["username"].getStr(),
|
||||
payload: msgFields["text"].getStr().toBytes(),
|
||||
).encode()
|
||||
|
||||
WakuMessage(payload: chat2pb.buffer, contentTopic: cmb.contentTopic, version: 0)
|
||||
|
||||
proc toChat2(cmb: Chat2MatterBridge, jsonNode: JsonNode) {.async.} =
|
||||
let msg = cmb.toWakuMessage(jsonNode)
|
||||
|
||||
if cmb.seen.containsOrAdd(msg.payload.hash()):
|
||||
# This is a duplicate message. Return.
|
||||
chat2_mb_dropped.inc(labelValues = ["duplicate"])
|
||||
return
|
||||
|
||||
trace "Post Matterbridge message to chat2"
|
||||
|
||||
chat2_mb_transfers.inc(labelValues = ["mb_to_chat2"])
|
||||
|
||||
(await cmb.nodev2.publish(some(DefaultPubsubTopic), msg)).isOkOr:
|
||||
error "failed to publish message", error = error
|
||||
|
||||
proc toMatterbridge(
|
||||
cmb: Chat2MatterBridge, msg: WakuMessage
|
||||
) {.gcsafe, raises: [Exception].} =
|
||||
if cmb.seen.containsOrAdd(msg.payload.hash()):
|
||||
# This is a duplicate message. Return.
|
||||
chat2_mb_dropped.inc(labelValues = ["duplicate"])
|
||||
return
|
||||
|
||||
if msg.contentTopic != cmb.contentTopic:
|
||||
# Only bridge messages on the configured content topic
|
||||
chat2_mb_dropped.inc(labelValues = ["filtered"])
|
||||
return
|
||||
|
||||
trace "Post chat2 message to Matterbridge"
|
||||
|
||||
chat2_mb_transfers.inc(labelValues = ["chat2_to_mb"])
|
||||
|
||||
let chat2Msg = Chat2Message.init(msg.payload)
|
||||
|
||||
assert chat2Msg.isOk
|
||||
|
||||
let postRes = cmb.mbClient.postMessage(
|
||||
text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick
|
||||
)
|
||||
|
||||
if postRes.isErr() or (postRes[] == false):
|
||||
chat2_mb_dropped.inc(labelValues = ["duplicate"])
|
||||
error "Matterbridge host unreachable. Dropping message."
|
||||
|
||||
proc pollMatterbridge(cmb: Chat2MatterBridge, handler: MbMessageHandler) {.async.} =
|
||||
while cmb.running:
|
||||
let getRes = cmb.mbClient.getMessages()
|
||||
|
||||
if getRes.isOk():
|
||||
for jsonNode in getRes[]:
|
||||
await handler(jsonNode)
|
||||
else:
|
||||
error "Matterbridge host unreachable. Sleeping before retrying."
|
||||
await sleepAsync(chronos.seconds(10))
|
||||
|
||||
await sleepAsync(cmb.pollPeriod)
|
||||
|
||||
##############
|
||||
# Public API #
|
||||
##############
|
||||
proc new*(
|
||||
T: type Chat2MatterBridge,
|
||||
# Matterbridge initialisation
|
||||
mbHostUri: string,
|
||||
mbGateway: string,
|
||||
# NodeV2 initialisation
|
||||
nodev2Key: crypto.PrivateKey,
|
||||
nodev2BindIp: IpAddress,
|
||||
nodev2BindPort: Port,
|
||||
nodev2ExtIp = none[IpAddress](),
|
||||
nodev2ExtPort = none[Port](),
|
||||
contentTopic: string,
|
||||
): T {.
|
||||
raises: [Defect, ValueError, KeyError, TLSStreamProtocolError, IOError, LPError]
|
||||
.} =
|
||||
# Setup Matterbridge
|
||||
let mbClient = MatterbridgeClient.new(mbHostUri, mbGateway)
|
||||
|
||||
# Let's verify the Matterbridge configuration before continuing
|
||||
let clientHealth = mbClient.isHealthy()
|
||||
|
||||
if clientHealth.isOk() and clientHealth[]:
|
||||
info "Reached Matterbridge host", host = mbClient.host
|
||||
else:
|
||||
raise newException(ValueError, "Matterbridge client not reachable/healthy")
|
||||
|
||||
# Setup Waku v2 node
|
||||
let nodev2 = block:
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodev2Key)
|
||||
|
||||
builder
|
||||
.withNetworkConfigurationDetails(
|
||||
nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort
|
||||
)
|
||||
.tryGet()
|
||||
builder.build().tryGet()
|
||||
|
||||
return Chat2MatterBridge(
|
||||
mbClient: mbClient,
|
||||
nodev2: nodev2,
|
||||
running: false,
|
||||
pollPeriod: chronos.seconds(1),
|
||||
contentTopic: contentTopic,
|
||||
)
|
||||
|
||||
proc start*(cmb: Chat2MatterBridge) {.async.} =
|
||||
info "Starting Chat2MatterBridge"
|
||||
|
||||
cmb.running = true
|
||||
|
||||
debug "Start polling Matterbridge"
|
||||
|
||||
# Start Matterbridge polling (@TODO: use streaming interface)
|
||||
proc mbHandler(jsonNode: JsonNode) {.async.} =
|
||||
trace "Bridging message from Matterbridge to chat2", jsonNode = jsonNode
|
||||
waitFor cmb.toChat2(jsonNode)
|
||||
|
||||
asyncSpawn cmb.pollMatterbridge(mbHandler)
|
||||
|
||||
# Start Waku v2 node
|
||||
debug "Start listening on Waku v2"
|
||||
await cmb.nodev2.start()
|
||||
|
||||
# Always mount relay for bridge
|
||||
# `triggerSelf` is false on a `bridge` to avoid duplicates
|
||||
(await cmb.nodev2.mountRelay()).isOkOr:
|
||||
error "failed to mount relay", error = error
|
||||
return
|
||||
|
||||
cmb.nodev2.wakuRelay.triggerSelf = false
|
||||
|
||||
# Bridging
|
||||
# Handle messages on Waku v2 and bridge to Matterbridge
|
||||
proc relayHandler(
|
||||
pubsubTopic: PubsubTopic, msg: WakuMessage
|
||||
): Future[void] {.async.} =
|
||||
trace "Bridging message from Chat2 to Matterbridge", msg = msg
|
||||
try:
|
||||
cmb.toMatterbridge(msg)
|
||||
except:
|
||||
error "exception in relayHandler: " & getCurrentExceptionMsg()
|
||||
|
||||
cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr:
|
||||
error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error
|
||||
return
|
||||
|
||||
proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} =
|
||||
info "Stopping Chat2MatterBridge"
|
||||
|
||||
cmb.running = false
|
||||
|
||||
await cmb.nodev2.stop()
|
||||
|
||||
{.pop.}
|
||||
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
|
||||
when isMainModule:
|
||||
import waku/common/utils/nat, waku/waku_api/message_cache
|
||||
|
||||
let
|
||||
rng = newRng()
|
||||
conf = Chat2MatterbridgeConf.load()
|
||||
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
|
||||
let natRes = setupNat(
|
||||
conf.nat,
|
||||
clientId,
|
||||
Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
|
||||
Port(uint16(conf.udpPort) + conf.portsShift),
|
||||
)
|
||||
if natRes.isErr():
|
||||
error "Error in setupNat", error = natRes.error
|
||||
|
||||
# Load address configuration
|
||||
let
|
||||
(nodev2ExtIp, nodev2ExtPort, _) = natRes.get()
|
||||
## The following heuristic assumes that, in absence of manual
|
||||
## config, the external port is the same as the bind port.
|
||||
extPort =
|
||||
if nodev2ExtIp.isSome() and nodev2ExtPort.isNone():
|
||||
some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift))
|
||||
else:
|
||||
nodev2ExtPort
|
||||
|
||||
let bridge = Chat2Matterbridge.new(
|
||||
mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)),
|
||||
mbGateway = conf.mbGateway,
|
||||
nodev2Key = conf.nodekey,
|
||||
nodev2BindIp = conf.listenAddress,
|
||||
nodev2BindPort = Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
|
||||
nodev2ExtIp = nodev2ExtIp,
|
||||
nodev2ExtPort = extPort,
|
||||
contentTopic = conf.contentTopic,
|
||||
)
|
||||
|
||||
waitFor bridge.start()
|
||||
|
||||
# Now load rest of config
|
||||
# Mount configured Waku v2 protocols
|
||||
waitFor mountLibp2pPing(bridge.nodev2)
|
||||
|
||||
if conf.store:
|
||||
waitFor mountStore(bridge.nodev2)
|
||||
|
||||
if conf.filter:
|
||||
waitFor mountFilter(bridge.nodev2)
|
||||
|
||||
if conf.staticnodes.len > 0:
|
||||
waitFor connectToNodes(bridge.nodev2, conf.staticnodes)
|
||||
|
||||
if conf.storenode != "":
|
||||
let storePeer = parsePeerInfo(conf.storenode)
|
||||
if storePeer.isOk():
|
||||
bridge.nodev2.peerManager.addServicePeer(storePeer.value, WakuStoreCodec)
|
||||
else:
|
||||
error "Error parsing conf.storenode", error = storePeer.error
|
||||
|
||||
if conf.filternode != "":
|
||||
let filterPeer = parsePeerInfo(conf.filternode)
|
||||
if filterPeer.isOk():
|
||||
bridge.nodev2.peerManager.addServicePeer(
|
||||
filterPeer.value, WakuFilterSubscribeCodec
|
||||
)
|
||||
else:
|
||||
error "Error parsing conf.filternode", error = filterPeer.error
|
||||
|
||||
if conf.metricsServer:
|
||||
let
|
||||
address = conf.metricsServerAddress
|
||||
port = conf.metricsServerPort + conf.portsShift
|
||||
info "Starting metrics HTTP server", address, port
|
||||
startMetricsHttpServer($address, Port(port))
|
||||
|
||||
runForever()
|
||||
148
third-party/nwaku/apps/chat2bridge/config_chat2bridge.nim
vendored
Normal file
148
third-party/nwaku/apps/chat2bridge/config_chat2bridge.nim
vendored
Normal file
@ -0,0 +1,148 @@
|
||||
import
|
||||
confutils,
|
||||
confutils/defs,
|
||||
confutils/std/net,
|
||||
chronicles,
|
||||
chronos,
|
||||
libp2p/crypto/[crypto, secp],
|
||||
eth/keys
|
||||
|
||||
type Chat2MatterbridgeConf* = object
|
||||
logLevel* {.
|
||||
desc: "Sets the log level", defaultValue: LogLevel.INFO, name: "log-level"
|
||||
.}: LogLevel
|
||||
|
||||
listenAddress* {.
|
||||
defaultValue: defaultListenAddress(config),
|
||||
desc: "Listening address for the LibP2P traffic",
|
||||
name: "listen-address"
|
||||
.}: IpAddress
|
||||
|
||||
libp2pTcpPort* {.
|
||||
desc: "Libp2p TCP listening port (for Waku v2)",
|
||||
defaultValue: 9000,
|
||||
name: "libp2p-tcp-port"
|
||||
.}: uint16
|
||||
|
||||
udpPort* {.desc: "UDP listening port", defaultValue: 9000, name: "udp-port".}: uint16
|
||||
|
||||
portsShift* {.
|
||||
desc: "Add a shift to all default port numbers",
|
||||
defaultValue: 0,
|
||||
name: "ports-shift"
|
||||
.}: uint16
|
||||
|
||||
nat* {.
|
||||
desc:
|
||||
"Specify method to use for determining public address. " &
|
||||
"Must be one of: any, none, upnp, pmp, extip:<IP>",
|
||||
defaultValue: "any"
|
||||
.}: string
|
||||
|
||||
metricsServer* {.
|
||||
desc: "Enable the metrics server", defaultValue: false, name: "metrics-server"
|
||||
.}: bool
|
||||
|
||||
metricsServerAddress* {.
|
||||
desc: "Listening address of the metrics server",
|
||||
defaultValue: parseIpAddress("127.0.0.1"),
|
||||
name: "metrics-server-address"
|
||||
.}: IpAddress
|
||||
|
||||
metricsServerPort* {.
|
||||
desc: "Listening HTTP port of the metrics server",
|
||||
defaultValue: 8008,
|
||||
name: "metrics-server-port"
|
||||
.}: uint16
|
||||
|
||||
### Waku v2 options
|
||||
staticnodes* {.
|
||||
desc: "Multiaddr of peer to directly connect with. Argument may be repeated",
|
||||
name: "staticnode"
|
||||
.}: seq[string]
|
||||
|
||||
nodekey* {.
|
||||
desc: "P2P node private key as hex",
|
||||
defaultValue: crypto.PrivateKey.random(Secp256k1, newRng()[]).tryGet(),
|
||||
name: "nodekey"
|
||||
.}: crypto.PrivateKey
|
||||
|
||||
store* {.
|
||||
desc: "Flag whether to start store protocol", defaultValue: true, name: "store"
|
||||
.}: bool
|
||||
|
||||
filter* {.
|
||||
desc: "Flag whether to start filter protocol", defaultValue: false, name: "filter"
|
||||
.}: bool
|
||||
|
||||
relay* {.
|
||||
desc: "Flag whether to start relay protocol", defaultValue: true, name: "relay"
|
||||
.}: bool
|
||||
|
||||
storenode* {.
|
||||
desc: "Multiaddr of peer to connect with for waku store protocol",
|
||||
defaultValue: "",
|
||||
name: "storenode"
|
||||
.}: string
|
||||
|
||||
filternode* {.
|
||||
desc: "Multiaddr of peer to connect with for waku filter protocol",
|
||||
defaultValue: "",
|
||||
name: "filternode"
|
||||
.}: string
|
||||
|
||||
# Matterbridge options
|
||||
mbHostAddress* {.
|
||||
desc: "Listening address of the Matterbridge host",
|
||||
defaultValue: parseIpAddress("127.0.0.1"),
|
||||
name: "mb-host-address"
|
||||
.}: IpAddress
|
||||
|
||||
mbHostPort* {.
|
||||
desc: "Listening port of the Matterbridge host",
|
||||
defaultValue: 4242,
|
||||
name: "mb-host-port"
|
||||
.}: uint16
|
||||
|
||||
mbGateway* {.
|
||||
desc: "Matterbridge gateway", defaultValue: "gateway1", name: "mb-gateway"
|
||||
.}: string
|
||||
|
||||
## Chat2 options
|
||||
contentTopic* {.
|
||||
desc: "Content topic to bridge chat messages to.",
|
||||
defaultValue: "/toy-chat/2/huilong/proto",
|
||||
name: "content-topic"
|
||||
.}: string
|
||||
|
||||
proc parseCmdArg*(T: type keys.KeyPair, p: string): T =
|
||||
try:
|
||||
let privkey = keys.PrivateKey.fromHex(string(p)).tryGet()
|
||||
result = privkey.toKeyPair()
|
||||
except CatchableError:
|
||||
raise newException(ValueError, "Invalid private key")
|
||||
|
||||
proc completeCmdArg*(T: type keys.KeyPair, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
|
||||
let key = SkPrivateKey.init(p)
|
||||
if key.isOk():
|
||||
crypto.PrivateKey(scheme: Secp256k1, skkey: key.get())
|
||||
else:
|
||||
raise newException(ValueError, "Invalid private key")
|
||||
|
||||
proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type IpAddress, p: string): T =
|
||||
try:
|
||||
result = parseIpAddress(p)
|
||||
except CatchableError:
|
||||
raise newException(ValueError, "Invalid IP address")
|
||||
|
||||
proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
func defaultListenAddress*(conf: Chat2MatterbridgeConf): IpAddress =
|
||||
(parseIpAddress("0.0.0.0"))
|
||||
4
third-party/nwaku/apps/chat2bridge/nim.cfg
vendored
Normal file
4
third-party/nwaku/apps/chat2bridge/nim.cfg
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
-d:chronicles_line_numbers
|
||||
-d:chronicles_runtime_filtering:on
|
||||
-d:discv5_protocol_id:d5waku
|
||||
path = "../.."
|
||||
703
third-party/nwaku/apps/chat2mix/chat2mix.nim
vendored
Normal file
703
third-party/nwaku/apps/chat2mix/chat2mix.nim
vendored
Normal file
@ -0,0 +1,703 @@
|
||||
## chat2 is an example of usage of Waku v2. For suggested usage options, please
|
||||
## see dingpu tutorial in docs folder.
|
||||
|
||||
when not (compileOption("threads")):
|
||||
{.fatal: "Please, compile this program with the --threads:on option!".}
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[strformat, strutils, times, options, random, sequtils]
|
||||
import
|
||||
confutils,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/keys,
|
||||
bearssl,
|
||||
results,
|
||||
stew/[byteutils],
|
||||
metrics,
|
||||
metrics/chronos_httpserver
|
||||
import
|
||||
libp2p/[
|
||||
switch, # manage transports, a single entry point for dialing and listening
|
||||
crypto/crypto, # cryptographic functions
|
||||
stream/connection, # create and close stream read / write connections
|
||||
multiaddress,
|
||||
# encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP
|
||||
peerinfo,
|
||||
# manage the information of a peer, such as peer ID and public / private key
|
||||
peerid, # Implement how peers interact
|
||||
protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs
|
||||
nameresolving/dnsresolver,
|
||||
] # define DNS resolution
|
||||
import mix/curve25519
|
||||
import
|
||||
waku/[
|
||||
waku_core,
|
||||
waku_lightpush/common,
|
||||
waku_lightpush/rpc,
|
||||
waku_enr,
|
||||
discovery/waku_dnsdisc,
|
||||
waku_node,
|
||||
node/waku_metrics,
|
||||
node/peer_manager,
|
||||
factory/builder,
|
||||
common/utils/nat,
|
||||
waku_store/common,
|
||||
waku_filter_v2/client,
|
||||
common/logging,
|
||||
],
|
||||
./config_chat2mix
|
||||
|
||||
import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub
|
||||
import ../../waku/waku_rln_relay
|
||||
|
||||
logScope:
|
||||
topics = "chat2 mix"
|
||||
|
||||
const Help =
|
||||
"""
|
||||
Commands: /[?|help|connect|nick|exit]
|
||||
help: Prints this help
|
||||
connect: dials a remote peer
|
||||
nick: change nickname for current chat session
|
||||
exit: exits chat session
|
||||
"""
|
||||
|
||||
# XXX Connected is a bit annoying, because incoming connections don't trigger state change
|
||||
# Could poll connection pool or something here, I suppose
|
||||
# TODO Ensure connected turns true on incoming connections, or get rid of it
|
||||
type Chat = ref object
|
||||
node: WakuNode # waku node for publishing, subscribing, etc
|
||||
transp: StreamTransport # transport streams between read & write file descriptor
|
||||
subscribed: bool # indicates if a node is subscribed or not to a topic
|
||||
connected: bool # if the node is connected to another peer
|
||||
started: bool # if the node has started
|
||||
nick: string # nickname for this chat session
|
||||
prompt: bool # chat prompt is showing
|
||||
contentTopic: string # default content topic for chat messages
|
||||
conf: Chat2Conf # configuration for chat2
|
||||
|
||||
type
|
||||
PrivateKey* = crypto.PrivateKey
|
||||
Topic* = waku_core.PubsubTopic
|
||||
|
||||
#####################
|
||||
## chat2 protobufs ##
|
||||
#####################
|
||||
|
||||
type
|
||||
SelectResult*[T] = Result[T, string]
|
||||
|
||||
Chat2Message* = object
|
||||
timestamp*: int64
|
||||
nick*: string
|
||||
payload*: seq[byte]
|
||||
|
||||
proc getPubsubTopic*(
|
||||
conf: Chat2Conf, node: WakuNode, contentTopic: string
|
||||
): PubsubTopic =
|
||||
let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
|
||||
echo "Could not parse content topic: " & error
|
||||
return "" #TODO: fix this.
|
||||
return $RelayShard(clusterId: conf.clusterId, shardId: shard.shardId)
|
||||
|
||||
proc init*(T: type Chat2Message, buffer: seq[byte]): ProtoResult[T] =
|
||||
var msg = Chat2Message()
|
||||
let pb = initProtoBuffer(buffer)
|
||||
|
||||
var timestamp: uint64
|
||||
discard ?pb.getField(1, timestamp)
|
||||
msg.timestamp = int64(timestamp)
|
||||
|
||||
discard ?pb.getField(2, msg.nick)
|
||||
discard ?pb.getField(3, msg.payload)
|
||||
|
||||
ok(msg)
|
||||
|
||||
proc encode*(message: Chat2Message): ProtoBuffer =
|
||||
var serialised = initProtoBuffer()
|
||||
|
||||
serialised.write(1, uint64(message.timestamp))
|
||||
serialised.write(2, message.nick)
|
||||
serialised.write(3, message.payload)
|
||||
|
||||
return serialised
|
||||
|
||||
proc toString*(message: Chat2Message): string =
|
||||
# Get message date and timestamp in local time
|
||||
let time = message.timestamp.fromUnix().local().format("'<'MMM' 'dd,' 'HH:mm'>'")
|
||||
|
||||
return time & " " & message.nick & ": " & string.fromBytes(message.payload)
|
||||
|
||||
#####################
|
||||
|
||||
proc connectToNodes(c: Chat, nodes: seq[string]) {.async.} =
|
||||
echo "Connecting to nodes"
|
||||
await c.node.connectToNodes(nodes)
|
||||
c.connected = true
|
||||
|
||||
proc showChatPrompt(c: Chat) =
|
||||
if not c.prompt:
|
||||
try:
|
||||
stdout.write(">> ")
|
||||
stdout.flushFile()
|
||||
c.prompt = true
|
||||
except IOError:
|
||||
discard
|
||||
|
||||
proc getChatLine(c: Chat, msg: WakuMessage): Result[string, string] =
|
||||
# No payload encoding/encryption from Waku
|
||||
let
|
||||
pb = Chat2Message.init(msg.payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(msg.payload)
|
||||
return ok(chatline)
|
||||
|
||||
proc printReceivedMessage(c: Chat, msg: WakuMessage) =
|
||||
let
|
||||
pb = Chat2Message.init(msg.payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(msg.payload)
|
||||
try:
|
||||
echo &"{chatLine}"
|
||||
except ValueError:
|
||||
# Formatting fail. Print chat line in any case.
|
||||
echo chatLine
|
||||
|
||||
c.prompt = false
|
||||
showChatPrompt(c)
|
||||
trace "Printing message", chatLine, contentTopic = msg.contentTopic
|
||||
|
||||
proc readNick(transp: StreamTransport): Future[string] {.async.} =
|
||||
# Chat prompt
|
||||
stdout.write("Choose a nickname >> ")
|
||||
stdout.flushFile()
|
||||
return await transp.readLine()
|
||||
|
||||
proc startMetricsServer(
|
||||
serverIp: IpAddress, serverPort: Port
|
||||
): Result[MetricsHttpServerRef, string] =
|
||||
info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort
|
||||
|
||||
let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort)
|
||||
if metricsServerRes.isErr():
|
||||
return err("metrics HTTP server start failed: " & $metricsServerRes.error)
|
||||
|
||||
let server = metricsServerRes.value
|
||||
try:
|
||||
waitFor server.start()
|
||||
except CatchableError:
|
||||
return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())
|
||||
|
||||
info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort
|
||||
ok(metricsServerRes.value)
|
||||
|
||||
proc publish(c: Chat, line: string) {.async.} =
|
||||
# First create a Chat2Message protobuf with this line of text
|
||||
let time = getTime().toUnix()
|
||||
let chat2pb =
|
||||
Chat2Message(timestamp: time, nick: c.nick, payload: line.toBytes()).encode()
|
||||
|
||||
## @TODO: error handling on failure
|
||||
proc handler(response: LightPushResponse) {.gcsafe, closure.} =
|
||||
trace "lightpush response received", response = response
|
||||
|
||||
var message = WakuMessage(
|
||||
payload: chat2pb.buffer,
|
||||
contentTopic: c.contentTopic,
|
||||
version: 0,
|
||||
timestamp: getNanosecondTime(time),
|
||||
)
|
||||
|
||||
try:
|
||||
if not c.node.wakuLightpushClient.isNil():
|
||||
# Attempt lightpush with mix
|
||||
|
||||
(
|
||||
waitFor c.node.lightpushPublish(
|
||||
some(c.conf.getPubsubTopic(c.node, c.contentTopic)),
|
||||
message,
|
||||
none(RemotePeerInfo),
|
||||
true,
|
||||
)
|
||||
).isOkOr:
|
||||
error "failed to publish lightpush message", error = error
|
||||
else:
|
||||
error "failed to publish message as lightpush client is not initialized"
|
||||
except CatchableError:
|
||||
error "caught error publishing message: ", error = getCurrentExceptionMsg()
|
||||
|
||||
# TODO This should read or be subscribe handler subscribe
|
||||
proc readAndPrint(c: Chat) {.async.} =
|
||||
while true:
|
||||
# while p.connected:
|
||||
# # TODO: echo &"{p.id} -> "
|
||||
#
|
||||
# echo cast[string](await p.conn.readLp(1024))
|
||||
#echo "readAndPrint subscribe NYI"
|
||||
await sleepAsync(100)
|
||||
|
||||
# TODO Implement
|
||||
proc writeAndPrint(c: Chat) {.async.} =
|
||||
while true:
|
||||
# Connect state not updated on incoming WakuRelay connections
|
||||
# if not c.connected:
|
||||
# echo "type an address or wait for a connection:"
|
||||
# echo "type /[help|?] for help"
|
||||
|
||||
# Chat prompt
|
||||
showChatPrompt(c)
|
||||
|
||||
let line = await c.transp.readLine()
|
||||
if line.startsWith("/help") or line.startsWith("/?") or not c.started:
|
||||
echo Help
|
||||
continue
|
||||
|
||||
# if line.startsWith("/disconnect"):
|
||||
# echo "Ending current session"
|
||||
# if p.connected and p.conn.closed.not:
|
||||
# await p.conn.close()
|
||||
# p.connected = false
|
||||
elif line.startsWith("/connect"):
|
||||
# TODO Should be able to connect to multiple peers for Waku chat
|
||||
if c.connected:
|
||||
echo "already connected to at least one peer"
|
||||
continue
|
||||
|
||||
echo "enter address of remote peer"
|
||||
let address = await c.transp.readLine()
|
||||
if address.len > 0:
|
||||
await c.connectToNodes(@[address])
|
||||
elif line.startsWith("/nick"):
|
||||
# Set a new nickname
|
||||
c.nick = await readNick(c.transp)
|
||||
echo "You are now known as " & c.nick
|
||||
elif line.startsWith("/exit"):
|
||||
echo "quitting..."
|
||||
|
||||
try:
|
||||
await c.node.stop()
|
||||
except:
|
||||
echo "exception happened when stopping: " & getCurrentExceptionMsg()
|
||||
|
||||
quit(QuitSuccess)
|
||||
else:
|
||||
# XXX connected state problematic
|
||||
if c.started:
|
||||
echo "publishing message: " & line
|
||||
await c.publish(line)
|
||||
# TODO Connect to peer logic?
|
||||
else:
|
||||
try:
|
||||
if line.startsWith("/") and "p2p" in line:
|
||||
await c.connectToNodes(@[line])
|
||||
except:
|
||||
echo &"unable to dial remote peer {line}"
|
||||
echo getCurrentExceptionMsg()
|
||||
|
||||
proc readWriteLoop(c: Chat) {.async.} =
|
||||
asyncSpawn c.writeAndPrint() # execute the async function but does not block
|
||||
asyncSpawn c.readAndPrint()
|
||||
|
||||
proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} =
|
||||
## This procedure performs reading from `stdin` and sends data over
|
||||
## pipe to main thread.
|
||||
let transp = fromPipe(wfd)
|
||||
|
||||
while true:
|
||||
let line = stdin.readLine()
|
||||
discard waitFor transp.write(line & "\r\n")
|
||||
|
||||
var alreadyUsedServicePeers {.threadvar.}: seq[RemotePeerInfo]
|
||||
|
||||
proc selectRandomServicePeer*(
|
||||
pm: PeerManager, actualPeer: Option[RemotePeerInfo], codec: string
|
||||
): Result[RemotePeerInfo, void] =
|
||||
if actualPeer.isSome():
|
||||
alreadyUsedServicePeers.add(actualPeer.get())
|
||||
|
||||
let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt(
|
||||
it notin alreadyUsedServicePeers
|
||||
)
|
||||
if supportivePeers.len == 0:
|
||||
return err()
|
||||
|
||||
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
|
||||
return ok(supportivePeers[rndPeerIndex])
|
||||
|
||||
proc maintainSubscription(
|
||||
wakuNode: WakuNode,
|
||||
filterPubsubTopic: PubsubTopic,
|
||||
filterContentTopic: ContentTopic,
|
||||
filterPeer: RemotePeerInfo,
|
||||
preventPeerSwitch: bool,
|
||||
) {.async.} =
|
||||
var actualFilterPeer = filterPeer
|
||||
const maxFailedSubscribes = 3
|
||||
const maxFailedServiceNodeSwitches = 10
|
||||
var noFailedSubscribes = 0
|
||||
var noFailedServiceNodeSwitches = 0
|
||||
while true:
|
||||
info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer)
|
||||
# First use filter-ping to check if we have an active subscription
|
||||
let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer)
|
||||
if pingRes.isErr():
|
||||
# No subscription found. Let's subscribe.
|
||||
error "ping failed.", err = pingRes.error
|
||||
trace "no subscription found. Sending subscribe request"
|
||||
|
||||
let subscribeRes = await wakuNode.filterSubscribe(
|
||||
some(filterPubsubTopic), filterContentTopic, actualFilterPeer
|
||||
)
|
||||
|
||||
if subscribeRes.isErr():
|
||||
noFailedSubscribes += 1
|
||||
error "Subscribe request failed.",
|
||||
err = subscribeRes.error,
|
||||
peer = actualFilterPeer,
|
||||
failCount = noFailedSubscribes
|
||||
|
||||
# TODO: disconnet from failed actualFilterPeer
|
||||
# asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
|
||||
# wakunode.peerManager.peerStore.delete(actualFilterPeer)
|
||||
|
||||
if noFailedSubscribes < maxFailedSubscribes:
|
||||
await sleepAsync(2000) # Wait a bit before retrying
|
||||
continue
|
||||
elif not preventPeerSwitch:
|
||||
let peerOpt = selectRandomServicePeer(
|
||||
wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
|
||||
)
|
||||
if peerOpt.isOk():
|
||||
actualFilterPeer = peerOpt.get()
|
||||
|
||||
info "Found new peer for codec",
|
||||
codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
|
||||
|
||||
noFailedSubscribes = 0
|
||||
continue # try again with new peer without delay
|
||||
else:
|
||||
error "Failed to find new service peer. Exiting."
|
||||
noFailedServiceNodeSwitches += 1
|
||||
break
|
||||
else:
|
||||
if noFailedSubscribes > 0:
|
||||
noFailedSubscribes -= 1
|
||||
|
||||
notice "subscribe request successful."
|
||||
else:
|
||||
info "subscription is live."
|
||||
|
||||
await sleepAsync(30000) # Subscription maintenance interval
|
||||
|
||||
proc processMixNodes(localnode: WakuNode, nodes: seq[string]) {.async.} =
|
||||
if nodes.len == 0:
|
||||
return
|
||||
|
||||
info "Processing mix nodes: ", nodes = $nodes
|
||||
for node in nodes:
|
||||
var enrRec: enr.Record
|
||||
if enrRec.fromURI(node):
|
||||
let peerInfo = enrRec.toRemotePeerInfo().valueOr:
|
||||
error "Failed to parse mix node", error = error
|
||||
continue
|
||||
localnode.peermanager.addPeer(peerInfo, Discv5)
|
||||
info "Added mix node", peer = peerInfo
|
||||
else:
|
||||
error "Failed to parse mix node ENR", node = node
|
||||
|
||||
{.pop.}
|
||||
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
|
||||
proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
let
|
||||
transp = fromPipe(rfd)
|
||||
conf = Chat2Conf.load()
|
||||
nodekey =
|
||||
if conf.nodekey.isSome():
|
||||
conf.nodekey.get()
|
||||
else:
|
||||
PrivateKey.random(Secp256k1, rng[]).tryGet()
|
||||
|
||||
# set log level
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
|
||||
let natRes = setupNat(
|
||||
conf.nat,
|
||||
clientId,
|
||||
Port(uint16(conf.tcpPort) + conf.portsShift),
|
||||
Port(uint16(conf.udpPort) + conf.portsShift),
|
||||
)
|
||||
|
||||
if natRes.isErr():
|
||||
raise newException(ValueError, "setupNat error " & natRes.error)
|
||||
|
||||
let (extIp, extTcpPort, extUdpPort) = natRes.get()
|
||||
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
enrBuilder.withWakuRelaySharding(
|
||||
RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
|
||||
).isOkOr:
|
||||
error "failed to add sharded topics to ENR", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
|
||||
let node = block:
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
builder.withRecord(record)
|
||||
|
||||
builder
|
||||
.withNetworkConfigurationDetails(
|
||||
conf.listenAddress,
|
||||
Port(uint16(conf.tcpPort) + conf.portsShift),
|
||||
extIp,
|
||||
extTcpPort,
|
||||
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
|
||||
wsEnabled = conf.websocketSupport,
|
||||
wssEnabled = conf.websocketSecureSupport,
|
||||
)
|
||||
.tryGet()
|
||||
builder.build().tryGet()
|
||||
|
||||
node.mountAutoSharding(conf.clusterId, conf.numShardsInNetwork).isOkOr:
|
||||
error "failed to mount waku sharding: ", error = error
|
||||
quit(QuitFailure)
|
||||
node.mountMetadata(conf.clusterId, conf.shards).isOkOr:
|
||||
error "failed to mount waku metadata protocol: ", err = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let (mixPrivKey, mixPubKey) = generateKeyPair().valueOr:
|
||||
error "failed to generate mix key pair", error = error
|
||||
return
|
||||
|
||||
(await node.mountMix(conf.clusterId, mixPrivKey)).isOkOr:
|
||||
error "failed to mount waku mix protocol: ", error = $error
|
||||
quit(QuitFailure)
|
||||
if conf.mixnodes.len > 0:
|
||||
await processMixNodes(node, conf.mixnodes)
|
||||
await node.start()
|
||||
|
||||
node.peerManager.start()
|
||||
|
||||
await node.mountLibp2pPing()
|
||||
await node.mountPeerExchangeClient()
|
||||
let pubsubTopic = conf.getPubsubTopic(node, conf.contentTopic)
|
||||
echo "pubsub topic is: " & pubsubTopic
|
||||
let nick = await readNick(transp)
|
||||
echo "Welcome, " & nick & "!"
|
||||
|
||||
var chat = Chat(
|
||||
node: node,
|
||||
transp: transp,
|
||||
subscribed: true,
|
||||
connected: false,
|
||||
started: true,
|
||||
nick: nick,
|
||||
prompt: false,
|
||||
contentTopic: conf.contentTopic,
|
||||
conf: conf,
|
||||
)
|
||||
|
||||
var dnsDiscoveryUrl = none(string)
|
||||
|
||||
if conf.fleet != Fleet.none:
|
||||
# Use DNS discovery to connect to selected fleet
|
||||
echo "Connecting to " & $conf.fleet & " fleet using DNS discovery..."
|
||||
|
||||
if conf.fleet == Fleet.test:
|
||||
dnsDiscoveryUrl = some(
|
||||
"enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im"
|
||||
)
|
||||
else:
|
||||
# Connect to sandbox by default
|
||||
dnsDiscoveryUrl = some(
|
||||
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
|
||||
)
|
||||
elif conf.dnsDiscoveryUrl != "":
|
||||
# No pre-selected fleet. Discover nodes via DNS using user config
|
||||
debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
|
||||
dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)
|
||||
|
||||
var discoveredNodes: seq[RemotePeerInfo]
|
||||
|
||||
if dnsDiscoveryUrl.isSome:
|
||||
var nameServers: seq[TransportAddress]
|
||||
for ip in conf.dnsDiscoveryNameServers:
|
||||
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
|
||||
|
||||
let dnsResolver = DnsResolver.new(nameServers)
|
||||
|
||||
proc resolver(domain: string): Future[string] {.async, gcsafe.} =
|
||||
trace "resolving", domain = domain
|
||||
let resolved = await dnsResolver.resolveTxt(domain)
|
||||
return resolved[0] # Use only first answer
|
||||
|
||||
var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver)
|
||||
if wakuDnsDiscovery.isOk:
|
||||
let discoveredPeers = await wakuDnsDiscovery.get().findPeers()
|
||||
if discoveredPeers.isOk:
|
||||
info "Connecting to discovered peers"
|
||||
discoveredNodes = discoveredPeers.get()
|
||||
echo "Discovered and connecting to " & $discoveredNodes
|
||||
waitFor chat.node.connectToNodes(discoveredNodes)
|
||||
else:
|
||||
warn "Failed to init Waku DNS discovery"
|
||||
|
||||
let peerInfo = node.switch.peerInfo
|
||||
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
|
||||
echo &"Listening on\n {listenStr}"
|
||||
|
||||
if (conf.storenode != "") or (conf.store == true):
|
||||
await node.mountStore()
|
||||
|
||||
var storenode: Option[RemotePeerInfo]
|
||||
|
||||
if conf.storenode != "":
|
||||
let peerInfo = parsePeerInfo(conf.storenode)
|
||||
if peerInfo.isOk():
|
||||
storenode = some(peerInfo.value)
|
||||
else:
|
||||
error "Incorrect conf.storenode", error = peerInfo.error
|
||||
elif discoveredNodes.len > 0:
|
||||
echo "Store enabled, but no store nodes configured. Choosing one at random from discovered peers"
|
||||
storenode = some(discoveredNodes[rand(0 .. len(discoveredNodes) - 1)])
|
||||
|
||||
if storenode.isSome():
|
||||
# We have a viable storenode. Let's query it for historical messages.
|
||||
echo "Connecting to storenode: " & $(storenode.get())
|
||||
|
||||
node.mountStoreClient()
|
||||
node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec)
|
||||
|
||||
proc storeHandler(response: StoreQueryResponse) {.gcsafe.} =
|
||||
for msg in response.messages:
|
||||
let payload =
|
||||
if msg.message.isSome():
|
||||
msg.message.get().payload
|
||||
else:
|
||||
newSeq[byte](0)
|
||||
|
||||
let
|
||||
pb = Chat2Message.init(payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(payload)
|
||||
echo &"{chatLine}"
|
||||
info "Hit store handler"
|
||||
|
||||
let queryRes = await node.query(
|
||||
StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get()
|
||||
)
|
||||
if queryRes.isOk():
|
||||
storeHandler(queryRes.value)
|
||||
|
||||
if conf.edgemode: #Mount light protocol clients
|
||||
node.mountLightPushClient()
|
||||
await node.mountFilterClient()
|
||||
let filterHandler = proc(
|
||||
pubsubTopic: PubsubTopic, msg: WakuMessage
|
||||
): Future[void] {.async, closure.} =
|
||||
trace "Hit filter handler", contentTopic = msg.contentTopic
|
||||
chat.printReceivedMessage(msg)
|
||||
|
||||
node.wakuFilterClient.registerPushHandler(filterHandler)
|
||||
var servicePeerInfo: RemotePeerInfo
|
||||
if conf.serviceNode != "":
|
||||
servicePeerInfo = parsePeerInfo(conf.serviceNode).valueOr:
|
||||
error "Couldn't parse conf.serviceNode", error = error
|
||||
RemotePeerInfo()
|
||||
if $servicePeerInfo.peerId == "":
|
||||
# Assuming that service node supports all services
|
||||
servicePeerInfo = selectRandomServicePeer(
|
||||
node.peerManager, none(RemotePeerInfo), WakuLightpushCodec
|
||||
).valueOr:
|
||||
error "Couldn't find any service peer"
|
||||
quit(QuitFailure)
|
||||
|
||||
#await mountLegacyLightPush(node)
|
||||
node.peerManager.addServicePeer(servicePeerInfo, WakuLightpushCodec)
|
||||
node.peerManager.addServicePeer(servicePeerInfo, WakuPeerExchangeCodec)
|
||||
|
||||
# Start maintaining subscription
|
||||
asyncSpawn maintainSubscription(
|
||||
node, pubsubTopic, conf.contentTopic, servicePeerInfo, false
|
||||
)
|
||||
echo "waiting for mix nodes to be discovered..."
|
||||
while true:
|
||||
if node.getMixNodePoolSize() >= 3:
|
||||
break
|
||||
discard await node.fetchPeerExchangePeers()
|
||||
await sleepAsync(1000)
|
||||
|
||||
while node.getMixNodePoolSize() < 3:
|
||||
info "waiting for mix nodes to be discovered",
|
||||
currentpoolSize = node.getMixNodePoolSize()
|
||||
await sleepAsync(1000)
|
||||
notice "ready to publish with mix node pool size ",
|
||||
currentpoolSize = node.getMixNodePoolSize()
|
||||
echo "ready to publish messages now"
|
||||
|
||||
# Once min mixnodes are discovered loop as per default setting
|
||||
node.startPeerExchangeLoop()
|
||||
|
||||
if conf.metricsLogging:
|
||||
startMetricsLog()
|
||||
|
||||
if conf.metricsServer:
|
||||
let metricsServer = startMetricsServer(
|
||||
conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift)
|
||||
)
|
||||
|
||||
await chat.readWriteLoop()
|
||||
|
||||
runForever()
|
||||
|
||||
proc main(rng: ref HmacDrbgContext) {.async.} =
|
||||
let (rfd, wfd) = createAsyncPipe()
|
||||
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
|
||||
raise newException(ValueError, "Could not initialize pipe!")
|
||||
|
||||
var thread: Thread[AsyncFD]
|
||||
thread.createThread(readInput, wfd)
|
||||
try:
|
||||
await processInput(rfd, rng)
|
||||
# Handle only ConfigurationError for now
|
||||
# TODO: Throw other errors from the mounting procedure
|
||||
except ConfigurationError as e:
|
||||
raise e
|
||||
|
||||
when isMainModule: # isMainModule = true when the module is compiled as the main file
|
||||
let rng = crypto.newRng()
|
||||
try:
|
||||
waitFor(main(rng))
|
||||
except CatchableError as e:
|
||||
raise e
|
||||
|
||||
## Dump of things that can be improved:
|
||||
##
|
||||
## - Incoming dialed peer does not change connected state (not relying on it for now)
|
||||
## - Unclear if staticnode argument works (can enter manually)
|
||||
## - Don't trigger self / double publish own messages
|
||||
## - Test/default to cluster node connection (diff protocol version)
|
||||
## - Redirect logs to separate file
|
||||
## - Expose basic publish/subscribe etc commands with /syntax
|
||||
## - Show part of peerid to know who sent message
|
||||
## - Deal with protobuf messages (e.g. other chat protocol, or encrypted)
|
||||
293
third-party/nwaku/apps/chat2mix/config_chat2mix.nim
vendored
Normal file
293
third-party/nwaku/apps/chat2mix/config_chat2mix.nim
vendored
Normal file
@ -0,0 +1,293 @@
|
||||
import chronicles, chronos, std/strutils, regex
|
||||
|
||||
import
|
||||
eth/keys,
|
||||
libp2p/crypto/crypto,
|
||||
libp2p/crypto/secp,
|
||||
nimcrypto/utils,
|
||||
confutils,
|
||||
confutils/defs,
|
||||
confutils/std/net
|
||||
|
||||
import waku/waku_core
|
||||
|
||||
type
|
||||
Fleet* = enum
|
||||
none
|
||||
sandbox
|
||||
test
|
||||
|
||||
EthRpcUrl* = distinct string
|
||||
|
||||
Chat2Conf* = object ## General node config
|
||||
edgemode* {.
|
||||
defaultValue: true, desc: "Run the app in edge mode", name: "edge-mode"
|
||||
.}: bool
|
||||
|
||||
logLevel* {.
|
||||
desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level"
|
||||
.}: LogLevel
|
||||
|
||||
nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}:
|
||||
Option[crypto.PrivateKey]
|
||||
|
||||
listenAddress* {.
|
||||
defaultValue: defaultListenAddress(config),
|
||||
desc: "Listening address for the LibP2P traffic.",
|
||||
name: "listen-address"
|
||||
.}: IpAddress
|
||||
|
||||
tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}:
|
||||
Port
|
||||
|
||||
udpPort* {.desc: "UDP listening port.", defaultValue: 60000, name: "udp-port".}:
|
||||
Port
|
||||
|
||||
portsShift* {.
|
||||
desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift"
|
||||
.}: uint16
|
||||
|
||||
nat* {.
|
||||
desc:
|
||||
"Specify method to use for determining public address. " &
|
||||
"Must be one of: any, none, upnp, pmp, extip:<IP>.",
|
||||
defaultValue: "any"
|
||||
.}: string
|
||||
|
||||
## Persistence config
|
||||
dbPath* {.
|
||||
desc: "The database path for peristent storage", defaultValue: "", name: "db-path"
|
||||
.}: string
|
||||
|
||||
persistPeers* {.
|
||||
desc: "Enable peer persistence: true|false",
|
||||
defaultValue: false,
|
||||
name: "persist-peers"
|
||||
.}: bool
|
||||
|
||||
persistMessages* {.
|
||||
desc: "Enable message persistence: true|false",
|
||||
defaultValue: false,
|
||||
name: "persist-messages"
|
||||
.}: bool
|
||||
|
||||
## Relay config
|
||||
relay* {.
|
||||
desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay"
|
||||
.}: bool
|
||||
|
||||
staticnodes* {.
|
||||
desc: "Peer multiaddr to directly connect with. Argument may be repeated.",
|
||||
name: "staticnode",
|
||||
defaultValue: @[]
|
||||
.}: seq[string]
|
||||
|
||||
mixnodes* {.
|
||||
desc: "Peer ENR to add as a mixnode. Argument may be repeated.", name: "mixnode"
|
||||
.}: seq[string]
|
||||
|
||||
keepAlive* {.
|
||||
desc: "Enable keep-alive for idle connections: true|false",
|
||||
defaultValue: false,
|
||||
name: "keep-alive"
|
||||
.}: bool
|
||||
|
||||
clusterId* {.
|
||||
desc:
|
||||
"Cluster id that the node is running in. Node in a different cluster id is disconnected.",
|
||||
defaultValue: 1,
|
||||
name: "cluster-id"
|
||||
.}: uint16
|
||||
|
||||
numShardsInNetwork* {.
|
||||
desc: "Number of shards in the network",
|
||||
defaultValue: 8,
|
||||
name: "num-shards-in-network"
|
||||
.}: uint32
|
||||
|
||||
shards* {.
|
||||
desc:
|
||||
"Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
|
||||
defaultValue:
|
||||
@[
|
||||
uint16(0),
|
||||
uint16(1),
|
||||
uint16(2),
|
||||
uint16(3),
|
||||
uint16(4),
|
||||
uint16(5),
|
||||
uint16(6),
|
||||
uint16(7),
|
||||
],
|
||||
name: "shard"
|
||||
.}: seq[uint16]
|
||||
|
||||
## Store config
|
||||
store* {.
|
||||
desc: "Enable store protocol: true|false", defaultValue: false, name: "store"
|
||||
.}: bool
|
||||
|
||||
storenode* {.
|
||||
desc: "Peer multiaddr to query for storage.", defaultValue: "", name: "storenode"
|
||||
.}: string
|
||||
|
||||
## Filter config
|
||||
filter* {.
|
||||
desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter"
|
||||
.}: bool
|
||||
|
||||
## Lightpush config
|
||||
lightpush* {.
|
||||
desc: "Enable lightpush protocol: true|false",
|
||||
defaultValue: false,
|
||||
name: "lightpush"
|
||||
.}: bool
|
||||
|
||||
servicenode* {.
|
||||
desc: "Peer multiaddr to request lightpush and filter services",
|
||||
defaultValue: "",
|
||||
name: "servicenode"
|
||||
.}: string
|
||||
|
||||
## Metrics config
|
||||
metricsServer* {.
|
||||
desc: "Enable the metrics server: true|false",
|
||||
defaultValue: false,
|
||||
name: "metrics-server"
|
||||
.}: bool
|
||||
|
||||
metricsServerAddress* {.
|
||||
desc: "Listening address of the metrics server.",
|
||||
defaultValue: parseIpAddress("127.0.0.1"),
|
||||
name: "metrics-server-address"
|
||||
.}: IpAddress
|
||||
|
||||
metricsServerPort* {.
|
||||
desc: "Listening HTTP port of the metrics server.",
|
||||
defaultValue: 8008,
|
||||
name: "metrics-server-port"
|
||||
.}: uint16
|
||||
|
||||
metricsLogging* {.
|
||||
desc: "Enable metrics logging: true|false",
|
||||
defaultValue: true,
|
||||
name: "metrics-logging"
|
||||
.}: bool
|
||||
|
||||
## DNS discovery config
|
||||
dnsDiscovery* {.
|
||||
desc:
|
||||
"Deprecated, please set dns-discovery-url instead. Enable discovering nodes via DNS",
|
||||
defaultValue: false,
|
||||
name: "dns-discovery"
|
||||
.}: bool
|
||||
|
||||
dnsDiscoveryUrl* {.
|
||||
desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
|
||||
defaultValue: "",
|
||||
name: "dns-discovery-url"
|
||||
.}: string
|
||||
|
||||
dnsDiscoveryNameServers* {.
|
||||
desc: "DNS name server IPs to query. Argument may be repeated.",
|
||||
defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
|
||||
name: "dns-discovery-name-server"
|
||||
.}: seq[IpAddress]
|
||||
|
||||
## Chat2 configuration
|
||||
fleet* {.
|
||||
desc:
|
||||
"Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.",
|
||||
defaultValue: Fleet.test,
|
||||
name: "fleet"
|
||||
.}: Fleet
|
||||
|
||||
contentTopic* {.
|
||||
desc: "Content topic for chat messages.",
|
||||
defaultValue: "/toy-chat-mix/2/huilong/proto",
|
||||
name: "content-topic"
|
||||
.}: string
|
||||
|
||||
## Websocket Configuration
|
||||
websocketSupport* {.
|
||||
desc: "Enable websocket: true|false",
|
||||
defaultValue: false,
|
||||
name: "websocket-support"
|
||||
.}: bool
|
||||
|
||||
websocketPort* {.
|
||||
desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port"
|
||||
.}: Port
|
||||
|
||||
websocketSecureSupport* {.
|
||||
desc: "WebSocket Secure Support.",
|
||||
defaultValue: false,
|
||||
name: "websocket-secure-support"
|
||||
.}: bool ## rln-relay configuration
|
||||
|
||||
# NOTE: Keys are different in nim-libp2p
|
||||
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
|
||||
try:
|
||||
let key = SkPrivateKey.init(utils.fromHex(p)).tryGet()
|
||||
# XXX: Here at the moment
|
||||
result = crypto.PrivateKey(scheme: Secp256k1, skkey: key)
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid private key")
|
||||
|
||||
proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type IpAddress, p: string): T =
|
||||
try:
|
||||
result = parseIpAddress(p)
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid IP address")
|
||||
|
||||
proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type Port, p: string): T =
|
||||
try:
|
||||
result = Port(parseInt(p))
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid Port number")
|
||||
|
||||
proc completeCmdArg*(T: type Port, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type Option[uint], p: string): T =
|
||||
try:
|
||||
some(parseUint(p))
|
||||
except CatchableError:
|
||||
raise newException(ValueError, "Invalid unsigned integer")
|
||||
|
||||
proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type EthRpcUrl, s: string): T =
|
||||
## allowed patterns:
|
||||
## http://url:port
|
||||
## https://url:port
|
||||
## http://url:port/path
|
||||
## https://url:port/path
|
||||
## http://url/with/path
|
||||
## http://url:port/path?query
|
||||
## https://url:port/path?query
|
||||
## disallowed patterns:
|
||||
## any valid/invalid ws or wss url
|
||||
var httpPattern =
|
||||
re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
|
||||
var wsPattern =
|
||||
re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
|
||||
if regex.match(s, wsPattern):
|
||||
raise newException(
|
||||
ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL"
|
||||
)
|
||||
if not regex.match(s, httpPattern):
|
||||
raise newException(ValueError, "Invalid HTTP RPC URL")
|
||||
return EthRpcUrl(s)
|
||||
|
||||
func defaultListenAddress*(conf: Chat2Conf): IpAddress =
|
||||
# TODO: How should we select between IPv4 and IPv6
|
||||
# Maybe there should be a config option for this.
|
||||
(static parseIpAddress("0.0.0.0"))
|
||||
4
third-party/nwaku/apps/chat2mix/nim.cfg
vendored
Normal file
4
third-party/nwaku/apps/chat2mix/nim.cfg
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
-d:chronicles_line_numbers
|
||||
-d:chronicles_runtime_filtering:on
|
||||
-d:discv5_protocol_id:d5waku
|
||||
path = "../.."
|
||||
27
third-party/nwaku/apps/liteprotocoltester/.env
vendored
Normal file
27
third-party/nwaku/apps/liteprotocoltester/.env
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
START_PUBLISHING_AFTER_SECS=45
|
||||
# can add some seconds delay before SENDER starts publishing
|
||||
|
||||
NUM_MESSAGES=0
|
||||
# 0 for infinite number of messages
|
||||
|
||||
MESSAGE_INTERVAL_MILLIS=8000
|
||||
# ms delay between messages
|
||||
|
||||
|
||||
MIN_MESSAGE_SIZE=15Kb
|
||||
MAX_MESSAGE_SIZE=145Kb
|
||||
|
||||
## for wakusim
|
||||
#SHARD=0
|
||||
#CONTENT_TOPIC=/tester/2/light-pubsub-test/wakusim
|
||||
#CLUSTER_ID=66
|
||||
|
||||
## for status.prod
|
||||
#SHARDS=32
|
||||
CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet
|
||||
CLUSTER_ID=16
|
||||
|
||||
## for TWN
|
||||
#SHARD=4
|
||||
#CONTENT_TOPIC=/tester/2/light-pubsub-test/twn
|
||||
#CLUSTER_ID=1
|
||||
37
third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester
vendored
Normal file
37
third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
# TESTING IMAGE --------------------------------------------------------------
|
||||
|
||||
## NOTICE: This is a short cut build file for ubuntu users who compiles nwaku in ubuntu distro.
|
||||
## This is used for faster turnaround time for testing the compiled binary.
|
||||
## Prerequisites: compiled liteprotocoltester binary in build/ directory
|
||||
|
||||
FROM ubuntu:noble AS prod
|
||||
|
||||
LABEL maintainer="zoltan@status.im"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Lite Protocol Tester: Waku light-client"
|
||||
LABEL commit="unknown"
|
||||
LABEL version="unknown"
|
||||
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libgcc1 \
|
||||
libpcre3 \
|
||||
libpq-dev \
|
||||
wget \
|
||||
iproute2 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
|
||||
COPY build/liteprotocoltester /usr/bin/
|
||||
COPY apps/liteprotocoltester/run_tester_node.sh /usr/bin/
|
||||
COPY apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/
|
||||
|
||||
ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"]
|
||||
|
||||
# # By default just show help if called without arguments
|
||||
CMD ["--help"]
|
||||
76
third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile
vendored
Normal file
76
third-party/nwaku/apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
# BUILD NIM APP ----------------------------------------------------------------
|
||||
FROM rust:1.77.1-alpine3.18 AS nim-build
|
||||
|
||||
ARG NIMFLAGS
|
||||
ARG MAKE_TARGET=liteprotocoltester
|
||||
ARG NIM_COMMIT
|
||||
ARG LOG_LEVEL=TRACE
|
||||
|
||||
# Get build tools and required header files
|
||||
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
|
||||
# workaround for alpine issue: https://github.com/alpinelinux/docker-alpine/issues/383
|
||||
RUN apk update && apk upgrade
|
||||
|
||||
# Ran separately from 'make' to avoid re-doing
|
||||
RUN git submodule update --init --recursive
|
||||
|
||||
# Slowest build step for the sake of caching layers
|
||||
RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT}
|
||||
|
||||
# Build the final node binary
|
||||
RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}"
|
||||
|
||||
|
||||
# REFERENCE IMAGE as BASE for specialized PRODUCTION IMAGES----------------------------------------
|
||||
FROM alpine:3.18 AS base_lpt
|
||||
|
||||
ARG MAKE_TARGET=liteprotocoltester
|
||||
|
||||
LABEL maintainer="zoltan@status.im"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Lite Protocol Tester: Waku light-client"
|
||||
LABEL commit="unknown"
|
||||
LABEL version="unknown"
|
||||
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apk add --no-cache libgcc libpq-dev \
|
||||
wget \
|
||||
iproute2 \
|
||||
python3
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
|
||||
COPY --from=nim-build /app/build/liteprotocoltester /usr/bin/
|
||||
RUN chmod +x /usr/bin/liteprotocoltester
|
||||
|
||||
# Standalone image to be used manually and in lpt-runner -------------------------------------------
|
||||
FROM base_lpt AS standalone_lpt
|
||||
|
||||
COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node.sh /usr/bin/
|
||||
COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/
|
||||
|
||||
RUN chmod +x /usr/bin/run_tester_node.sh
|
||||
|
||||
ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"]
|
||||
|
||||
# Image for infra deployment -------------------------------------------
|
||||
FROM base_lpt AS deployment_lpt
|
||||
|
||||
# let supervisor python script flush logs immediately
|
||||
ENV PYTHONUNBUFFERED="1"
|
||||
|
||||
COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node_at_infra.sh /usr/bin/
|
||||
COPY --from=nim-build /app/apps/liteprotocoltester/infra.env /usr/bin/
|
||||
COPY --from=nim-build /app/apps/liteprotocoltester/lpt_supervisor.py /usr/bin/
|
||||
RUN chmod +x /usr/bin/run_tester_node_at_infra.sh
|
||||
RUN chmod +x /usr/bin/lpt_supervisor.py
|
||||
|
||||
ENTRYPOINT ["/usr/bin/lpt_supervisor.py"]
|
||||
329
third-party/nwaku/apps/liteprotocoltester/README.md
vendored
Normal file
329
third-party/nwaku/apps/liteprotocoltester/README.md
vendored
Normal file
@ -0,0 +1,329 @@
|
||||
# Waku - Lite Protocol Tester
|
||||
|
||||
## Aim
|
||||
|
||||
Testing reliability of light client protocols in different scale.
|
||||
Measure message delivery reliability and latency between light push client(s) and a filter client(s) node(s).
|
||||
|
||||
## Concept of testing
|
||||
|
||||
A tester node is configured either 'publisher' or 'receiver' and connects to a certain service node.
|
||||
All service protocols are disabled except for lightpush client or filter client. This way we would like to simulate
|
||||
a light client application.
|
||||
Each publisher pumps messages to the network in a preconfigured way (number of messages, frequency) while on the receiver side
|
||||
we would like to track and measure message losses, mis-ordered receives, late arrived messages and latencies.
|
||||
Ideally the tester nodes will connect to different edges of the network where we can gather more results from multiple publishers
|
||||
and multiple receivers.
|
||||
|
||||
Publishers fill all message payloads with information about the test message and sender, helping the receiver side to calculate results.
|
||||
|
||||
## Usage
|
||||
|
||||
### Using lpt-runner
|
||||
|
||||
For ease of use, you can clone lpt-runner repository. That will utilize previously pushed liteprotocoltester docker image.
|
||||
It is recommended to use this method for fleet testing.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/waku-org/lpt-runner.git
|
||||
cd lpt-runner
|
||||
|
||||
# check README.md for more information
|
||||
# edit .env file to your needs
|
||||
|
||||
docker compose up -d
|
||||
|
||||
# navigate localhost:3033 to see the lite-protocol-tester dashboard
|
||||
```
|
||||
|
||||
> See more detailed examples below.
|
||||
|
||||
### Integration with waku-simulator!
|
||||
|
||||
- For convenience, integration is done in cooperation with waku-simulator repository, but nothing is tightly coupled.
|
||||
- waku-simulator must be started separately with its own configuration.
|
||||
- To enable waku-simulator working without RLN currently a separate branch is to be used.
|
||||
- When waku-simulator is configured and up and running, lite-protocol-tester composite docker setup can be started.
|
||||
|
||||
```bash
|
||||
|
||||
# Start waku-simulator
|
||||
|
||||
git clone https://github.com/waku-org/waku-simulator.git ../waku-simulator
|
||||
cd ../waku-simulator
|
||||
git checkout chore-integrate-liteprotocoltester
|
||||
|
||||
# optionally edit .env file
|
||||
|
||||
docker compose -f docker-compose-norln.yml up -d
|
||||
|
||||
# navigate localhost:30001 to see the waku-simulator dashboard
|
||||
|
||||
cd ../{your-repository}
|
||||
|
||||
make LOG_LEVEL=DEBUG liteprotocoltester
|
||||
|
||||
cd apps/liteprotocoltester
|
||||
|
||||
# optionally edit .env file
|
||||
|
||||
docker compose -f docker-compose-on-simularor.yml build
|
||||
docker compose -f docker-compose-on-simularor.yml up -d
|
||||
docker compose -f docker-compose-on-simularor.yml logs -f receivernode
|
||||
```
|
||||
#### Current setup
|
||||
|
||||
- waku-simulator is configured to run with 25 full node
|
||||
- liteprotocoltester is configured to run with 3 publisher and 1 receiver
|
||||
- liteprotocoltester is configured to run 1 lightpush service and a filter service node
|
||||
- light clients are connected accordingly
|
||||
- publishers will send 250 messages in every 200ms with size between 1KiB and 120KiB
|
||||
- Notice there is a configurable wait before start publishing messages as it is noticed time is needed for the service nodes to get connected to full nodes from simulator
|
||||
- light clients will print report on their and the connected service node's connectivity to the network in every 20 secs.
|
||||
|
||||
#### Test monitoring
|
||||
|
||||
Navigate to http://localhost:3033 to see the lite-protocol-tester dashboard.
|
||||
|
||||
### Run independently on a chosen waku fleet
|
||||
|
||||
This option is simple as is just to run the built liteprotocoltester binary with run_tester_node.sh script.
|
||||
|
||||
Syntax:
|
||||
`./run_tester_node.sh <path-to-liteprotocoltester-binary> <SENDER|RECEIVER> <service-node-address>`
|
||||
|
||||
How to run from you nwaku repository:
|
||||
```bash
|
||||
cd ../{your-repository}
|
||||
|
||||
make LOG_LEVEL=DEBUG liteprotocoltester
|
||||
|
||||
cd apps/liteprotocoltester
|
||||
|
||||
# optionally edit .env file
|
||||
|
||||
# run publisher side
|
||||
./run_tester_node.sh ../../build/liteprotocoltester SENDER [chosen service node address that support lightpush]
|
||||
|
||||
# or run receiver side
|
||||
./run_tester_node.sh ../../build/liteprotocoltester RECEIVER [chosen service node address that support filter service]
|
||||
```
|
||||
|
||||
#### Recommendations
|
||||
|
||||
In order to run on any kind of network, it is recommended to deploy the built `liteprotocoltester` binary with the `.env` file and the `run_tester_node.sh` script to the desired machine.
|
||||
|
||||
Select a lightpush service node and a filter service node from the targeted network, or you can run your own. Note down the selected peers' peer_ids.
|
||||
|
||||
Run a SENDER role liteprotocoltester and a RECEIVER role one on different terminals. Depending on the test aim, you may want to redirect the output to a file.
|
||||
|
||||
> RECEIVER side will periodically print statistics to standard output.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment variables for docker compose runs
|
||||
|
||||
| Variable | Description | Default |
|
||||
| ---: | :--- | :--- |
|
||||
| NUM_MESSAGES | Number of message to publish, 0 means infinite | 120 |
|
||||
| MESSAGE_INTERVAL_MILLIS | Frequency of messages in milliseconds | 1000 |
|
||||
| SHARD | Used shard for testing | 0 |
|
||||
| CONTENT_TOPIC | content_topic for testing | /tester/1/light-pubsub-example/proto |
|
||||
| CLUSTER_ID | cluster_id of the network | 16 |
|
||||
| START_PUBLISHING_AFTER_SECS | Delay in seconds before starting to publish to let service node connected | 5 |
|
||||
| MIN_MESSAGE_SIZE | Minimum message size in bytes | 1KiB |
|
||||
| MAX_MESSAGE_SIZE | Maximum message size in bytes | 120KiB |
|
||||
|
||||
|
||||
### Lite Protocol Tester application cli options
|
||||
|
||||
| Option | Description | Default |
|
||||
| :--- | :--- | :--- |
|
||||
| --test_func | separation of PUBLISHER or RECEIVER mode | RECEIVER |
|
||||
| --service-node| Address of the service node to use for lightpush and/or filter service | - |
|
||||
| --bootstrap-node| Address of the fleet's bootstrap node to use to determine a service peer randomly chosen from the network. `--service-node` switch has precedence over this | - |
|
||||
| --num-messages | Number of message to publish | 120 |
|
||||
| --message-interval | Frequency of messages in milliseconds | 1000 |
|
||||
| --min-message-size | Minimum message size in bytes | 1KiB |
|
||||
| --max-message-size | Maximum message size in bytes | 120KiB |
|
||||
| --start-publishing-after | Delay in seconds before starting to publish to let service node connected in seconds | 5 |
|
||||
| --pubsub-topic | Used pubsub_topic for testing | /waku/2/default-waku/proto |
|
||||
| --content_topic | content_topic for testing | /tester/1/light-pubsub-example/proto |
|
||||
| --cluster-id | Cluster id for the test | 0 |
|
||||
| --config-file | TOML configuration file to fine tune the light waku node<br>Note that some configurations (full node services) are not taken into account | - |
|
||||
| --nat |Same as wakunode "nat" configuration, appear here to ease test setup | any |
|
||||
| --rest-address | For convenience rest configuration can be done here | 127.0.0.1 |
|
||||
| --rest-port | For convenience rest configuration can be done here | 8654 |
|
||||
| --rest-allow-origin | For convenience rest configuration can be done here | * |
|
||||
| --log-level | Log level for the application | DEBUG |
|
||||
| --log-format | Logging output format (TEXT or JSON) | TEXT |
|
||||
| --metrics-port | Metrics scrape port | 8003 |
|
||||
|
||||
### Specifying peer addresses
|
||||
|
||||
Service node or bootstrap addresses can be specified in multiadress or ENR form.
|
||||
|
||||
### Using bootstrap nodes
|
||||
|
||||
There are multiple benefits of using bootstrap nodes. By using them liteprotocoltester will use Peer Exchange protocol to get possible peers from the network that are capable to serve as service peers for testing. Additionally it will test dial them to verify their connectivity - this will be reported in the logs and on dashboard metrics.
|
||||
Also, by using a bootstrap node and Peer Exchange discovery, liteprotocoltester is able to simulate a service peer switch in case of failures. There is a built-in threshold for service peer failures (3), after which the service peer will be switched during the test. There will be at most 10 attempts at switching peers before the test is declared failed and quits.
|
||||
These service peer failures are reported, thus extending network reliability measures.
|
||||
|
||||
### Building docker image
|
||||
|
||||
Easiest way to build the docker image is to use the provided Makefile target.
|
||||
|
||||
```bash
|
||||
cd <your-repository>
|
||||
make docker-liteprotocoltester
|
||||
```
|
||||
This will build liteprotocoltester from the ground up and create a docker image with the binary copied to it under image name and tag `wakuorg/liteprotocoltester:latest`.
|
||||
|
||||
#### Building public image
|
||||
|
||||
If you want to push the image to a public registry, you can use the jenkins job to do so.
|
||||
The job is available at https://ci.status.im/job/waku/job/liteprotocoltester/job/build-liteprotocoltester-image
|
||||
|
||||
#### Building and deployment for infra testing
|
||||
|
||||
For specific and continuous testing purposes we have a deployment of `liteprotocoltester` test suite to our infra appliances.
|
||||
This has its own configuration, constraints and requirements. To ease this job, image shall be built and pushed with `deploy` tag.
|
||||
This can be done by the jenkins job mentioned above.
|
||||
|
||||
or manually by:
|
||||
```bash
|
||||
cd <your-repository>
|
||||
make DOCKER_LPT_TAG=deploy docker-liteprotocoltester
|
||||
```
|
||||
|
||||
The image created with this method will be different from under any other tag. It prepared to run a preconfigured test suite continuously.
|
||||
It will also miss prometheus metrics scraping endpoint and grafana, thus it is not recommended to use it for general testing.
|
||||
|
||||
#### Manually building for docker compose runs on simulator or standalone
|
||||
Please note that currently to ease testing and development tester application docker image is based on ubuntu and uses the externally pre-built binary of 'liteprotocoltester'.
|
||||
This speeds up image creation. Another Docker build file is provided for a proper build of the bundled image.
|
||||
|
||||
> `Dockerfile.liteprotocoltester` will create an ubuntu based image with the binary copied from the build directory.
|
||||
|
||||
> `Dockerfile.liteprotocoltester.compile` will create an ubuntu based image completely compiled from source. This can be slow.
|
||||
|
||||
#### Creating standalone runner docker image
|
||||
|
||||
To ease the work with lite-protocol-tester, a docker image is possible to build.
|
||||
With that image it is easy to run the application in a container.
|
||||
|
||||
> `Dockerfile.liteprotocoltester` will create an ubuntu image with the binary copied from the build directory. You need to pre-build the application.
|
||||
|
||||
Here is how to build and run:
|
||||
```bash
|
||||
cd <your-repository>
|
||||
make liteprotocoltester
|
||||
|
||||
cd apps/liteprotocoltester
|
||||
docker build -t liteprotocoltester:latest -f Dockerfile.liteprotocoltester ../..
|
||||
|
||||
# alternatively you can push it to a registry
|
||||
|
||||
# edit and adjust .env file to your needs and for the network configuration
|
||||
|
||||
docker run --env-file .env liteprotocoltester:latest RECEIVER <service-node-peer-address>
|
||||
|
||||
docker run --env-file .env liteprotocoltester:latest SENDER <service-node-peer-address>
|
||||
```
|
||||
|
||||
#### Run test with auto service peer selection from a fleet using bootstrap node
|
||||
|
||||
```bash
|
||||
|
||||
docker run --env-file .env liteprotocoltester:latest RECEIVER <bootstrap-node-peer-address> BOOTSTRAP
|
||||
|
||||
docker run --env-file .env liteprotocoltester:latest SENDER <bootstrap-node-peer-address> BOOTSTRAP
|
||||
```
|
||||
|
||||
> Notice that official image is also available at harbor.status.im/wakuorg/liteprotocoltester:latest
|
||||
|
||||
## Examples
|
||||
|
||||
### Bootstrap or Service node selection
|
||||
|
||||
The easiest way to get the proper bootstrap nodes for the tests from https://fleets.status.im page.
|
||||
Adjust on which fleets you would like to run the tests.
|
||||
|
||||
> Please note that not all of them configured to support Peer Exchange protocol, those ones cannot be for bootstrap nodes for `liteprotocoltester`.
|
||||
|
||||
### Environment variables
|
||||
You do not necessarily need to use a .env file, although it can be more convenient.
|
||||
Anytime you can override all or part of the environment variables defined in the .env file.
|
||||
|
||||
### Run standalone
|
||||
|
||||
Example of running the liteprotocoltester in standalone mode on the status.staging network.
|
||||
Testing includes using bootstrap nodes to gather service peers from the network via Peer Exchange protocol.
|
||||
Both parties will test-dial all the peers retrieved with the corresponding protocol.
|
||||
Sender will start publishing messages after 60 seconds, sending 200 messages with 1 second delay between them.
|
||||
Message size will be between 15KiB and 145KiB.
|
||||
Cluster id and Pubsub-topic must be accurately set according to the network configuration.
|
||||
|
||||
The example shows that either multiaddress or ENR form accepted.
|
||||
|
||||
```bash
|
||||
export START_PUBLISHING_AFTER_SECS=60
|
||||
export NUM_MESSAGES=200
|
||||
export MESSAGE_INTERVAL_MILLIS=1000
|
||||
export MIN_MESSAGE_SIZE=15Kb
|
||||
export MAX_MESSAGE_SIZE=145Kb
|
||||
export SHARD=32
|
||||
export CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet
|
||||
export CLUSTER_ID=16
|
||||
|
||||
docker run harbor.status.im/wakuorg/liteprotocoltester:latest RECEIVER /dns4/boot-01.do-ams3.status.staging.status.im/tcp/30303/p2p/16Uiu2HAmQE7FXQc6iZHdBzYfw3qCSDa9dLc1wsBJKoP4aZvztq2d BOOTSTRAP
|
||||
|
||||
# in different terminal session, repeat the exports and run the other party of the test.
|
||||
docker run harbor.status.im/wakuorg/liteprotocoltester:latest SENDER enr:-QEiuECJPv2vL00Jp5sTEMAFyW7qXkK2cFgphlU_G8-FJuJqoW_D5aWIy3ylGdv2K8DkiG7PWgng4Ql_VI7Qc2RhBdwfAYJpZIJ2NIJpcIQvTKi6im11bHRpYWRkcnO4cgA2NjFib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc3RhdHVzLnN0YWdpbmcuc3RhdHVzLmltBnZfADg2MWJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zdGF0dXMuc3RhZ2luZy5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDkbgV7oqPNmFtX5FzSPi9WH8kkmrPB1R3n9xRXge91M-DdGNwgnZfg3VkcIIjKIV3YWt1Mg0 BOOTSTRAP
|
||||
|
||||
```
|
||||
|
||||
### Use of lpt-runner
|
||||
|
||||
Another method is to use [lpt-runner repository](https://github.com/waku-org/lpt-runner/tree/master).
|
||||
This extends testing with grafana dashboard and ease the test setup.
|
||||
Please read the corresponding [README](https://github.com/waku-org/lpt-runner/blob/master/README.md) there as well.
|
||||
|
||||
In this example we will run similar test as above but there will be 3 instances of publisher nodes and 1 receiver node.
|
||||
This test uses the waku.sandbox fleet which is connected to TWN. This implies lower message rates due to the RLN rate limitation.
|
||||
Also leave a gap of 120 seconds before starting to publish messages to let receiver side fully finish peer test-dialing.
|
||||
For TWN network it is always wise to use bootstrap nodes with Peer Exchange support.
|
||||
|
||||
> Theoretically we can use the same bootstrap nodes for both parties, but it is recommended to use different ones to simulate different network edges, thus getting more meaningful results.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/waku-org/lpt-runner.git
|
||||
cd lpt-runner
|
||||
|
||||
export NUM_PUBLISHER_NODES=3
|
||||
export NUM_RECEIVER_NODES=1
|
||||
export START_PUBLISHING_AFTER_SECS=120
|
||||
export NUM_MESSAGES=300
|
||||
export MESSAGE_INTERVAL_MILLIS=7000
|
||||
export MIN_MESSAGE_SIZE=15Kb
|
||||
export MAX_MESSAGE_SIZE=145Kb
|
||||
export SHARD=4
|
||||
export CONTENT_TOPIC=/tester/2/light-pubsub-test/twn
|
||||
export CLUSTER_ID=1
|
||||
|
||||
export FILTER_BOOTSTRAP=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmQYiojgZ8APsh9wqbWNyCstVhnp9gbeNrxSEQnLJchC92
|
||||
export LIGHTPUSH_BOOTSTRAP=/dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmNaeL4p3WEYzC9mgXBmBWSgWjPHRvatZTXnp8Jgv3iKsb
|
||||
|
||||
docker compose up -d
|
||||
|
||||
# we can check logs from one or all SENDER
|
||||
docker compose logs -f --index 1 publishernode
|
||||
|
||||
# for checking receiver side performance
|
||||
docker compose logs -f receivernode
|
||||
|
||||
# when test completed
|
||||
docker compose down
|
||||
```
|
||||
|
||||
For dashboard navigate to http://localhost:3033
|
||||
65
third-party/nwaku/apps/liteprotocoltester/diagnose_connections.nim
vendored
Normal file
65
third-party/nwaku/apps/liteprotocoltester/diagnose_connections.nim
vendored
Normal file
@ -0,0 +1,65 @@
|
||||
when (NimMajor, NimMinor) < (1, 4):
|
||||
{.push raises: [Defect].}
|
||||
else:
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[options, net, strformat],
|
||||
chronicles,
|
||||
chronos,
|
||||
metrics,
|
||||
libbacktrace,
|
||||
libp2p/crypto/crypto,
|
||||
confutils,
|
||||
libp2p/wire
|
||||
|
||||
import
|
||||
../../tools/confutils/cli_args,
|
||||
waku/[
|
||||
node/peer_manager,
|
||||
waku_lightpush/common,
|
||||
waku_relay,
|
||||
waku_filter_v2,
|
||||
waku_peer_exchange/protocol,
|
||||
waku_core/multiaddrstr,
|
||||
waku_enr/capabilities,
|
||||
]
|
||||
logScope:
|
||||
topics = "diagnose connections"
|
||||
|
||||
proc allPeers(pm: PeerManager): string =
|
||||
var allStr: string = ""
|
||||
for idx, peer in pm.switch.peerStore.peers():
|
||||
allStr.add(
|
||||
" " & $idx & ". | " & constructMultiaddrStr(peer) & " | agent: " &
|
||||
peer.getAgent() & " | protos: " & $peer.protocols & " | caps: " &
|
||||
$peer.enr.map(getCapabilities) & "\n"
|
||||
)
|
||||
return allStr
|
||||
|
||||
proc logSelfPeers*(pm: PeerManager) =
|
||||
let selfLighpushPeers = pm.switch.peerStore.getPeersByProtocol(WakuLightPushCodec)
|
||||
let selfRelayPeers = pm.switch.peerStore.getPeersByProtocol(WakuRelayCodec)
|
||||
let selfFilterPeers = pm.switch.peerStore.getPeersByProtocol(WakuFilterSubscribeCodec)
|
||||
let selfPxPeers = pm.switch.peerStore.getPeersByProtocol(WakuPeerExchangeCodec)
|
||||
|
||||
let printable = catch:
|
||||
"""*------------------------------------------------------------------------------------------*
|
||||
| Self ({constructMultiaddrStr(pm.switch.peerInfo)}) peers:
|
||||
*------------------------------------------------------------------------------------------*
|
||||
| Lightpush peers({selfLighpushPeers.len()}): ${selfLighpushPeers}
|
||||
*------------------------------------------------------------------------------------------*
|
||||
| Filter peers({selfFilterPeers.len()}): ${selfFilterPeers}
|
||||
*------------------------------------------------------------------------------------------*
|
||||
| Relay peers({selfRelayPeers.len()}): ${selfRelayPeers}
|
||||
*------------------------------------------------------------------------------------------*
|
||||
| PX peers({selfPxPeers.len()}): ${selfPxPeers}
|
||||
*------------------------------------------------------------------------------------------*
|
||||
| All peers with protocol support:
|
||||
{allPeers(pm)}
|
||||
*------------------------------------------------------------------------------------------*""".fmt()
|
||||
|
||||
if printable.isErr():
|
||||
echo "Error while printing statistics: " & printable.error().msg
|
||||
else:
|
||||
echo printable.get()
|
||||
227
third-party/nwaku/apps/liteprotocoltester/docker-compose-on-simularor.yml
vendored
Normal file
227
third-party/nwaku/apps/liteprotocoltester/docker-compose-on-simularor.yml
vendored
Normal file
@ -0,0 +1,227 @@
|
||||
version: "3.7"
|
||||
x-logging: &logging
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: 1000m
|
||||
|
||||
# Environment variable definitions
|
||||
x-eth-client-address: ð_client_address ${ETH_CLIENT_ADDRESS:-} # Add your ETH_CLIENT_ADDRESS after the "-"
|
||||
|
||||
x-rln-environment: &rln_env
|
||||
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4}
|
||||
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
|
||||
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
|
||||
|
||||
x-test-running-conditions: &test_running_conditions
|
||||
NUM_MESSAGES: ${NUM_MESSAGES:-120}
|
||||
MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}"
|
||||
SHARD: ${SHARD:-0}
|
||||
CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim}
|
||||
CLUSTER_ID: ${CLUSTER_ID:-66}
|
||||
MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb}
|
||||
MAX_MESSAGE_SIZE: ${MAX_MESSAGE_SIZE:-150Kb}
|
||||
START_PUBLISHING_AFTER_SECS: ${START_PUBLISHING_AFTER_SECS:-5} # seconds
|
||||
|
||||
|
||||
# Services definitions
|
||||
services:
|
||||
lightpush-service:
|
||||
image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:latest-release}
|
||||
# ports:
|
||||
# - 30304:30304/tcp
|
||||
# - 30304:30304/udp
|
||||
# - 9005:9005/udp
|
||||
# - 127.0.0.1:8003:8003
|
||||
# - 80:80 #Let's Encrypt
|
||||
# - 8000:8000/tcp #WSS
|
||||
# - 127.0.0.1:8645:8645
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
ETH_CLIENT_ADDRESS: *eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *test_running_conditions
|
||||
volumes:
|
||||
- ./run_service_node.sh:/opt/run_service_node.sh:Z
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /opt/run_service_node.sh
|
||||
- LIGHTPUSH
|
||||
networks:
|
||||
- waku-simulator_simulation
|
||||
|
||||
publishernode:
|
||||
image: waku.liteprotocoltester:latest
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester
|
||||
deploy:
|
||||
replicas: ${NUM_PUBLISHER_NODES:-3}
|
||||
# ports:
|
||||
# - 30304:30304/tcp
|
||||
# - 30304:30304/udp
|
||||
# - 9005:9005/udp
|
||||
# - 127.0.0.1:8003:8003
|
||||
# - 80:80 #Let's Encrypt
|
||||
# - 8000:8000/tcp #WSS
|
||||
# - 127.0.0.1:8646:8646
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
ETH_CLIENT_ADDRESS: *eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *test_running_conditions
|
||||
volumes:
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /usr/bin/run_tester_node.sh
|
||||
- /usr/bin/liteprotocoltester
|
||||
- SENDER
|
||||
- waku-sim
|
||||
depends_on:
|
||||
- lightpush-service
|
||||
configs:
|
||||
- source: cfg_tester_node.toml
|
||||
target: config.toml
|
||||
networks:
|
||||
- waku-simulator_simulation
|
||||
|
||||
filter-service:
|
||||
image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:latest-release}
|
||||
# ports:
|
||||
# - 30304:30305/tcp
|
||||
# - 30304:30305/udp
|
||||
# - 9005:9005/udp
|
||||
# - 127.0.0.1:8003:8003
|
||||
# - 80:80 #Let's Encrypt
|
||||
# - 8000:8000/tcp #WSS
|
||||
# - 127.0.0.1:8645:8645
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
ETH_CLIENT_ADDRESS: *eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *test_running_conditions
|
||||
volumes:
|
||||
- ./run_service_node.sh:/opt/run_service_node.sh:Z
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /opt/run_service_node.sh
|
||||
- FILTER
|
||||
networks:
|
||||
- waku-simulator_simulation
|
||||
|
||||
|
||||
receivernode:
|
||||
image: waku.liteprotocoltester:latest
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester
|
||||
deploy:
|
||||
replicas: ${NUM_RECEIVER_NODES:-1}
|
||||
# ports:
|
||||
# - 30304:30304/tcp
|
||||
# - 30304:30304/udp
|
||||
# - 9005:9005/udp
|
||||
# - 127.0.0.1:8003:8003
|
||||
# - 80:80 #Let's Encrypt
|
||||
# - 8000:8000/tcp #WSS
|
||||
# - 127.0.0.1:8647:8647
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
ETH_CLIENT_ADDRESS: *eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *test_running_conditions
|
||||
volumes:
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /usr/bin/run_tester_node.sh
|
||||
- /usr/bin/liteprotocoltester
|
||||
- RECEIVER
|
||||
- waku-sim
|
||||
depends_on:
|
||||
- filter-service
|
||||
- publishernode
|
||||
configs:
|
||||
- source: cfg_tester_node.toml
|
||||
target: config.toml
|
||||
networks:
|
||||
- waku-simulator_simulation
|
||||
|
||||
# We have prometheus and grafana defined in waku-simulator already
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:latest
|
||||
volumes:
|
||||
- ./monitoring/prometheus-config.yml:/etc/prometheus/prometheus.yml:Z
|
||||
command:
|
||||
- --config.file=/etc/prometheus/prometheus.yml
|
||||
- --web.listen-address=:9099
|
||||
# ports:
|
||||
# - 127.0.0.1:9090:9090
|
||||
restart: on-failure:5
|
||||
depends_on:
|
||||
- filter-service
|
||||
- lightpush-service
|
||||
- publishernode
|
||||
- receivernode
|
||||
networks:
|
||||
- waku-simulator_simulation
|
||||
|
||||
grafana:
|
||||
image: docker.io/grafana/grafana:latest
|
||||
env_file:
|
||||
- ./monitoring/configuration/grafana-plugins.env
|
||||
volumes:
|
||||
- ./monitoring/configuration/grafana.ini:/etc/grafana/grafana.ini:Z
|
||||
- ./monitoring/configuration/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:Z
|
||||
- ./monitoring/configuration/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:Z
|
||||
- ./monitoring/configuration/dashboards:/var/lib/grafana/dashboards/:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_icon.svg:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_typelogo.svg:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.png:/usr/share/grafana/public/img/fav32.png:Z
|
||||
ports:
|
||||
- 0.0.0.0:3033:3033
|
||||
restart: on-failure:5
|
||||
depends_on:
|
||||
- prometheus
|
||||
networks:
|
||||
- waku-simulator_simulation
|
||||
|
||||
configs:
|
||||
cfg_tester_node.toml:
|
||||
content: |
|
||||
max-connections = 100
|
||||
|
||||
networks:
|
||||
waku-simulator_simulation:
|
||||
external: true
|
||||
172
third-party/nwaku/apps/liteprotocoltester/docker-compose.yml
vendored
Normal file
172
third-party/nwaku/apps/liteprotocoltester/docker-compose.yml
vendored
Normal file
@ -0,0 +1,172 @@
|
||||
version: "3.7"
|
||||
x-logging: &logging
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: 1000m
|
||||
|
||||
# Environment variable definitions
|
||||
x-eth-client-address: ð_client_address ${ETH_CLIENT_ADDRESS:-} # Add your ETH_CLIENT_ADDRESS after the "-"
|
||||
|
||||
x-rln-environment: &rln_env
|
||||
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6}
|
||||
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
|
||||
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
|
||||
|
||||
x-test-running-conditions: &test_running_conditions
|
||||
NUM_MESSAGES: ${NUM_MESSAGES:-120}
|
||||
MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}"
|
||||
SHARD: ${SHARD:-0}
|
||||
CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim}
|
||||
CLUSTER_ID: ${CLUSTER_ID:-66}
|
||||
MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb}
|
||||
MAX_MESSAGE_SIZE: ${MAX_MESSAGE_SIZE:-150Kb}
|
||||
START_PUBLISHING_AFTER_SECS: ${START_PUBLISHING_AFTER_SECS:-5} # seconds
|
||||
STANDALONE: ${STANDALONE:-1}
|
||||
RECEIVER_METRICS_PORT: 8003
|
||||
PUBLISHER_METRICS_PORT: 8003
|
||||
|
||||
|
||||
# Services definitions
|
||||
services:
|
||||
servicenode:
|
||||
image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:latest-release}
|
||||
ports:
|
||||
- 30304:30304/tcp
|
||||
- 30304:30304/udp
|
||||
- 9005:9005/udp
|
||||
- 127.0.0.1:8003:8003
|
||||
- 80:80 #Let's Encrypt
|
||||
- 8000:8000/tcp #WSS
|
||||
- 127.0.0.1:8645:8645
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
ETH_CLIENT_ADDRESS: *eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *test_running_conditions
|
||||
volumes:
|
||||
- ./run_service_node.sh:/opt/run_service_node.sh:Z
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /opt/run_service_node.sh
|
||||
|
||||
publishernode:
|
||||
image: waku.liteprotocoltester:latest
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester
|
||||
ports:
|
||||
# - 30304:30304/tcp
|
||||
# - 30304:30304/udp
|
||||
# - 9005:9005/udp
|
||||
# - 127.0.0.1:8003:8003
|
||||
# - 80:80 #Let's Encrypt
|
||||
# - 8000:8000/tcp #WSS
|
||||
- 127.0.0.1:8646:8646
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
ETH_CLIENT_ADDRESS: *eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *test_running_conditions
|
||||
volumes:
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /usr/bin/run_tester_node.sh
|
||||
- /usr/bin/liteprotocoltester
|
||||
- SENDER
|
||||
- servicenode
|
||||
depends_on:
|
||||
- servicenode
|
||||
configs:
|
||||
- source: cfg_tester_node.toml
|
||||
target: config.toml
|
||||
|
||||
receivernode:
|
||||
image: waku.liteprotocoltester:latest
|
||||
build:
|
||||
context: ../..
|
||||
dockerfile: ./apps/liteprotocoltester/Dockerfile.liteprotocoltester
|
||||
ports:
|
||||
# - 30304:30304/tcp
|
||||
# - 30304:30304/udp
|
||||
# - 9005:9005/udp
|
||||
# - 127.0.0.1:8003:8003
|
||||
# - 80:80 #Let's Encrypt
|
||||
# - 8000:8000/tcp #WSS
|
||||
- 127.0.0.1:8647:8647
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
ETH_CLIENT_ADDRESS: *eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *test_running_conditions
|
||||
volumes:
|
||||
- ./run_tester_node.sh:/opt/run_tester_node.sh:Z
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /usr/bin/run_tester_node.sh
|
||||
- /usr/bin/liteprotocoltester
|
||||
- RECEIVER
|
||||
- servicenode
|
||||
depends_on:
|
||||
- servicenode
|
||||
- publishernode
|
||||
configs:
|
||||
- source: cfg_tester_node.toml
|
||||
target: config.toml
|
||||
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:latest
|
||||
volumes:
|
||||
- ./monitoring/prometheus-config.yml:/etc/prometheus/prometheus.yml:Z
|
||||
command:
|
||||
- --config.file=/etc/prometheus/prometheus.yml
|
||||
ports:
|
||||
- 127.0.0.1:9090:9090
|
||||
depends_on:
|
||||
- servicenode
|
||||
|
||||
grafana:
|
||||
image: docker.io/grafana/grafana:latest
|
||||
env_file:
|
||||
- ./monitoring/configuration/grafana-plugins.env
|
||||
volumes:
|
||||
- ./monitoring/configuration/grafana.ini:/etc/grafana/grafana.ini:Z
|
||||
- ./monitoring/configuration/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:Z
|
||||
- ./monitoring/configuration/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:Z
|
||||
- ./monitoring/configuration/dashboards:/var/lib/grafana/dashboards/:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_icon.svg:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_typelogo.svg:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.png:/usr/share/grafana/public/img/fav32.png:Z
|
||||
ports:
|
||||
- 0.0.0.0:3000:3000
|
||||
depends_on:
|
||||
- prometheus
|
||||
|
||||
configs:
|
||||
cfg_tester_node.toml:
|
||||
content: |
|
||||
max-connections = 100
|
||||
11
third-party/nwaku/apps/liteprotocoltester/infra.env
vendored
Normal file
11
third-party/nwaku/apps/liteprotocoltester/infra.env
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
TEST_INTERVAL_MINUTES=180
|
||||
START_PUBLISHING_AFTER_SECS=120
|
||||
NUM_MESSAGES=300
|
||||
MESSAGE_INTERVAL_MILLIS=1000
|
||||
MIN_MESSAGE_SIZE=15Kb
|
||||
MAX_MESSAGE_SIZE=145Kb
|
||||
SHARD=32
|
||||
CONTENT_TOPIC=/tester/2/light-pubsub-test-at-infra/status-prod
|
||||
CLUSTER_ID=16
|
||||
LIGHTPUSH_BOOTSTRAP=enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0
|
||||
FILTER_BOOTSTRAP=enr:-QEcuED7ww5vo2rKc1pyBp7fubBUH-8STHEZHo7InjVjLblEVyDGkjdTI9VdqmYQOn95vuQH-Htku17WSTzEufx-Wg4mAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0
|
||||
24
third-party/nwaku/apps/liteprotocoltester/legacy_publisher.nim
vendored
Normal file
24
third-party/nwaku/apps/liteprotocoltester/legacy_publisher.nim
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
import chronos, results, options
|
||||
import waku/[waku_node, waku_core]
|
||||
import publisher_base
|
||||
|
||||
type LegacyPublisher* = ref object of PublisherBase
|
||||
|
||||
proc new*(T: type LegacyPublisher, wakuNode: WakuNode): T =
|
||||
if isNil(wakuNode.wakuLegacyLightpushClient):
|
||||
wakuNode.mountLegacyLightPushClient()
|
||||
|
||||
return LegacyPublisher(wakuNode: wakuNode)
|
||||
|
||||
method send*(
|
||||
self: LegacyPublisher,
|
||||
topic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
servicePeer: RemotePeerInfo,
|
||||
): Future[Result[void, string]] {.async.} =
|
||||
# when error it must return original error desc due the text is used for distinction between error types in metrics.
|
||||
discard (
|
||||
await self.wakuNode.legacyLightpushPublish(some(topic), message, servicePeer)
|
||||
).valueOr:
|
||||
return err(error)
|
||||
return ok()
|
||||
217
third-party/nwaku/apps/liteprotocoltester/liteprotocoltester.nim
vendored
Normal file
217
third-party/nwaku/apps/liteprotocoltester/liteprotocoltester.nim
vendored
Normal file
@ -0,0 +1,217 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[options, strutils, os, sequtils, net],
|
||||
chronicles,
|
||||
chronos,
|
||||
metrics,
|
||||
libbacktrace,
|
||||
system/ansi_c,
|
||||
libp2p/crypto/crypto,
|
||||
confutils
|
||||
|
||||
import
|
||||
../../tools/confutils/cli_args,
|
||||
waku/[
|
||||
common/enr,
|
||||
common/logging,
|
||||
factory/waku as waku_factory,
|
||||
waku_node,
|
||||
node/waku_metrics,
|
||||
node/peer_manager,
|
||||
waku_lightpush/common,
|
||||
waku_filter_v2,
|
||||
waku_peer_exchange/protocol,
|
||||
waku_core/peers,
|
||||
waku_core/multiaddrstr,
|
||||
],
|
||||
./tester_config,
|
||||
./publisher,
|
||||
./receiver,
|
||||
./diagnose_connections,
|
||||
./service_peer_management
|
||||
|
||||
logScope:
|
||||
topics = "liteprotocoltester main"
|
||||
|
||||
proc logConfig(conf: LiteProtocolTesterConf) =
|
||||
info "Configuration: Lite protocol tester", conf = $conf
|
||||
|
||||
{.pop.}
|
||||
when isMainModule:
|
||||
## Node setup happens in 6 phases:
|
||||
## 1. Set up storage
|
||||
## 2. Initialize node
|
||||
## 3. Mount and initialize configured protocols
|
||||
## 4. Start node and mounted protocols
|
||||
## 5. Start monitoring tools and external interfaces
|
||||
## 6. Setup graceful shutdown hooks
|
||||
|
||||
const versionString = "version / git commit hash: " & waku_factory.git_version
|
||||
|
||||
let confRes = LiteProtocolTesterConf.load(version = versionString)
|
||||
if confRes.isErr():
|
||||
error "failure while loading the configuration", error = confRes.error
|
||||
quit(QuitFailure)
|
||||
|
||||
var conf = confRes.get()
|
||||
|
||||
## Logging setup
|
||||
logging.setupLog(conf.logLevel, conf.logFormat)
|
||||
|
||||
info "Running Lite Protocol Tester node", version = waku_factory.git_version
|
||||
logConfig(conf)
|
||||
|
||||
##Prepare Waku configuration
|
||||
## - load from config file
|
||||
## - override according to tester functionality
|
||||
##
|
||||
|
||||
var wakuNodeConf: WakuNodeConf
|
||||
|
||||
if conf.configFile.isSome():
|
||||
try:
|
||||
var configFile {.threadvar.}: InputFile
|
||||
configFile = conf.configFile.get()
|
||||
wakuNodeConf = WakuNodeConf.load(
|
||||
version = versionString,
|
||||
printUsage = false,
|
||||
secondarySources = proc(
|
||||
wnconf: WakuNodeConf, sources: auto
|
||||
) {.gcsafe, raises: [ConfigurationError].} =
|
||||
echo "Loading secondary configuration file into WakuNodeConf"
|
||||
sources.addConfigFile(Toml, configFile),
|
||||
)
|
||||
except CatchableError:
|
||||
error "Loading Waku configuration failed", error = getCurrentExceptionMsg()
|
||||
quit(QuitFailure)
|
||||
|
||||
wakuNodeConf.logLevel = conf.logLevel
|
||||
wakuNodeConf.logFormat = conf.logFormat
|
||||
wakuNodeConf.nat = conf.nat
|
||||
wakuNodeConf.maxConnections = 500
|
||||
wakuNodeConf.restAddress = conf.restAddress
|
||||
wakuNodeConf.restPort = conf.restPort
|
||||
wakuNodeConf.restAllowOrigin = conf.restAllowOrigin
|
||||
|
||||
wakuNodeConf.dnsAddrsNameServers =
|
||||
@[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
|
||||
|
||||
wakuNodeConf.shards = @[conf.shard]
|
||||
wakuNodeConf.contentTopics = conf.contentTopics
|
||||
wakuNodeConf.clusterId = conf.clusterId
|
||||
## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc...
|
||||
|
||||
wakuNodeConf.metricsServer = true
|
||||
wakuNodeConf.metricsServerAddress = parseIpAddress("0.0.0.0")
|
||||
wakuNodeConf.metricsServerPort = conf.metricsPort
|
||||
|
||||
# If bootstrap option is chosen we expect our clients will not mounted
|
||||
# so we will mount PeerExchange manually to gather possible service peers,
|
||||
# if got some we will mount the client protocols afterward.
|
||||
wakuNodeConf.peerExchange = false
|
||||
wakuNodeConf.relay = false
|
||||
wakuNodeConf.filter = false
|
||||
wakuNodeConf.lightpush = false
|
||||
wakuNodeConf.store = false
|
||||
|
||||
wakuNodeConf.rest = false
|
||||
wakuNodeConf.relayServiceRatio = "40:60"
|
||||
|
||||
let wakuConf = wakuNodeConf.toWakuConf().valueOr:
|
||||
error "Issue converting toWakuConf", error = $error
|
||||
quit(QuitFailure)
|
||||
|
||||
var waku = (waitFor Waku.new(wakuConf)).valueOr:
|
||||
error "Waku initialization failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
(waitFor startWaku(addr waku)).isOkOr:
|
||||
error "Starting waku failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
debug "Setting up shutdown hooks"
|
||||
|
||||
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
|
||||
await waku.stop()
|
||||
quit(QuitSuccess)
|
||||
|
||||
# Handle Ctrl-C SIGINT
|
||||
proc handleCtrlC() {.noconv.} =
|
||||
when defined(windows):
|
||||
# workaround for https://github.com/nim-lang/Nim/issues/4057
|
||||
setupForeignThreadGc()
|
||||
notice "Shutting down after receiving SIGINT"
|
||||
asyncSpawn asyncStopper(waku)
|
||||
|
||||
setControlCHook(handleCtrlC)
|
||||
|
||||
# Handle SIGTERM
|
||||
when defined(posix):
|
||||
proc handleSigterm(signal: cint) {.noconv.} =
|
||||
notice "Shutting down after receiving SIGTERM"
|
||||
asyncSpawn asyncStopper(waku)
|
||||
|
||||
c_signal(ansi_c.SIGTERM, handleSigterm)
|
||||
|
||||
# Handle SIGSEGV
|
||||
when defined(posix):
|
||||
proc handleSigsegv(signal: cint) {.noconv.} =
|
||||
# Require --debugger:native
|
||||
fatal "Shutting down after receiving SIGSEGV", stacktrace = getBacktrace()
|
||||
|
||||
# Not available in -d:release mode
|
||||
writeStackTrace()
|
||||
|
||||
waitFor waku.stop()
|
||||
quit(QuitFailure)
|
||||
|
||||
c_signal(ansi_c.SIGSEGV, handleSigsegv)
|
||||
|
||||
info "Node setup complete"
|
||||
|
||||
var codec = WakuLightPushCodec
|
||||
# mounting relevant client, for PX filter client must be mounted ahead
|
||||
if conf.testFunc == TesterFunctionality.SENDER:
|
||||
codec = WakuLightPushCodec
|
||||
else:
|
||||
codec = WakuFilterSubscribeCodec
|
||||
|
||||
var lookForServiceNode = false
|
||||
var serviceNodePeerInfo: RemotePeerInfo
|
||||
if conf.serviceNode.len == 0:
|
||||
if conf.bootstrapNode.len > 0:
|
||||
info "Bootstrapping with PeerExchange to gather random service node"
|
||||
let futForServiceNode = pxLookupServiceNode(waku.node, conf)
|
||||
if not (waitFor futForServiceNode.withTimeout(20.minutes)):
|
||||
error "Service node not found in time via PX"
|
||||
quit(QuitFailure)
|
||||
|
||||
if futForServiceNode.read().isErr():
|
||||
error "Service node for test not found via PX"
|
||||
quit(QuitFailure)
|
||||
|
||||
serviceNodePeerInfo = selectRandomServicePeer(
|
||||
waku.node.peerManager, none(RemotePeerInfo), codec
|
||||
).valueOr:
|
||||
error "Service node selection failed"
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
error "No service or bootstrap node provided"
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
# support for both ENR and URI formatted service node addresses
|
||||
serviceNodePeerInfo = translateToRemotePeerInfo(conf.serviceNode).valueOr:
|
||||
error "failed to parse service-node", node = conf.serviceNode
|
||||
quit(QuitFailure)
|
||||
|
||||
info "Service node to be used", serviceNode = $serviceNodePeerInfo
|
||||
|
||||
logSelfPeers(waku.node.peerManager)
|
||||
|
||||
if conf.testFunc == TesterFunctionality.SENDER:
|
||||
setupAndPublish(waku.node, conf, serviceNodePeerInfo)
|
||||
else:
|
||||
setupAndListen(waku.node, conf, serviceNodePeerInfo)
|
||||
|
||||
runForever()
|
||||
56
third-party/nwaku/apps/liteprotocoltester/lpt_metrics.nim
vendored
Normal file
56
third-party/nwaku/apps/liteprotocoltester/lpt_metrics.nim
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
## Example showing how a resource restricted client may
|
||||
## subscribe to messages without relay
|
||||
|
||||
import metrics
|
||||
|
||||
export metrics
|
||||
|
||||
declarePublicGauge lpt_receiver_sender_peer_count, "count of sender peers"
|
||||
|
||||
declarePublicCounter lpt_receiver_received_messages_count,
|
||||
"number of messages received per peer", ["peer"]
|
||||
|
||||
declarePublicCounter lpt_receiver_received_bytes,
|
||||
"number of received bytes per peer", ["peer"]
|
||||
|
||||
declarePublicGauge lpt_receiver_missing_messages_count,
|
||||
"number of missing messages per peer", ["peer"]
|
||||
|
||||
declarePublicCounter lpt_receiver_duplicate_messages_count,
|
||||
"number of duplicate messages per peer", ["peer"]
|
||||
|
||||
declarePublicGauge lpt_receiver_distinct_duplicate_messages_count,
|
||||
"number of distinct duplicate messages per peer", ["peer"]
|
||||
|
||||
declarePublicGauge lpt_receiver_latencies,
|
||||
"Message delivery latency per peer (min-avg-max)", ["peer", "latency"]
|
||||
|
||||
declarePublicCounter lpt_receiver_lost_subscription_count,
|
||||
"number of filter service peer failed PING requests - lost subscription"
|
||||
|
||||
declarePublicCounter lpt_publisher_sent_messages_count, "number of messages published"
|
||||
|
||||
declarePublicCounter lpt_publisher_failed_messages_count,
|
||||
"number of messages failed to publish per failure cause", ["cause"]
|
||||
|
||||
declarePublicCounter lpt_publisher_sent_bytes, "number of total bytes sent"
|
||||
|
||||
declarePublicCounter lpt_service_peer_failure_count,
|
||||
"number of failure during using service peer [publisher/receiever]", ["role", "agent"]
|
||||
|
||||
declarePublicCounter lpt_change_service_peer_count,
|
||||
"number of times [publisher/receiver] had to change service peer", ["role"]
|
||||
|
||||
declarePublicGauge lpt_px_peers,
|
||||
"Number of peers PeerExchange discovered and can be dialed"
|
||||
|
||||
declarePublicGauge lpt_dialed_peers, "Number of peers successfully dialed", ["agent"]
|
||||
|
||||
declarePublicGauge lpt_dial_failures, "Number of dial failures by cause", ["agent"]
|
||||
|
||||
declarePublicHistogram lpt_publish_duration_seconds,
|
||||
"duration to lightpush messages",
|
||||
buckets = [
|
||||
0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0,
|
||||
15.0, 20.0, 30.0, Inf,
|
||||
]
|
||||
54
third-party/nwaku/apps/liteprotocoltester/lpt_supervisor.py
vendored
Executable file
54
third-party/nwaku/apps/liteprotocoltester/lpt_supervisor.py
vendored
Executable file
@ -0,0 +1,54 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import os
|
||||
import time
|
||||
from subprocess import Popen
|
||||
import sys
|
||||
|
||||
def load_env(file_path):
|
||||
predefined_test_env = {}
|
||||
with open(file_path) as f:
|
||||
for line in f:
|
||||
if line.strip() and not line.startswith('#'):
|
||||
key, value = line.strip().split('=', 1)
|
||||
predefined_test_env[key] = value
|
||||
return predefined_test_env
|
||||
|
||||
def run_tester_node(predefined_test_env):
|
||||
role = sys.argv[1]
|
||||
# override incoming environment variables with the ones from the file to prefer predefined testing environment.
|
||||
for key, value in predefined_test_env.items():
|
||||
os.environ[key] = value
|
||||
|
||||
script_cmd = "/usr/bin/run_tester_node_at_infra.sh /usr/bin/liteprotocoltester {role}".format(role=role)
|
||||
return os.system(script_cmd)
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER", "SENDERV3"]:
|
||||
print("Error: First argument must be either 'RECEIVER' or 'SENDER' or 'SENDERV3'")
|
||||
sys.exit(1)
|
||||
|
||||
predefined_test_env_file = '/usr/bin/infra.env'
|
||||
predefined_test_env = load_env(predefined_test_env_file)
|
||||
|
||||
test_interval_minutes = int(predefined_test_env.get('TEST_INTERVAL_MINUTES', 60)) # Default to 60 minutes if not set
|
||||
print(f"supervisor: Start testing loop. Interval is {test_interval_minutes} minutes")
|
||||
counter = 0
|
||||
|
||||
while True:
|
||||
counter += 1
|
||||
start_time = time.time()
|
||||
print(f"supervisor: Run #{counter} started at {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_time))}")
|
||||
print(f"supervisor: with arguments: {predefined_test_env}")
|
||||
|
||||
exit_code = run_tester_node(predefined_test_env)
|
||||
|
||||
end_time = time.time()
|
||||
run_time = end_time - start_time
|
||||
sleep_time = max(5 * 60, (test_interval_minutes * 60) - run_time)
|
||||
|
||||
print(f"supervisor: Tester node finished at {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_time))}")
|
||||
print(f"supervisor: Runtime was {run_time:.2f} seconds")
|
||||
print(f"supervisor: Next run scheduled in {sleep_time // 60:.2f} minutes")
|
||||
|
||||
time.sleep(sleep_time)
|
||||
BIN
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.png
vendored
Normal file
BIN
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/customizations/custom-logo.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 11 KiB |
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 13 KiB |
9
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards.yaml
vendored
Normal file
9
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/dashboards.yaml
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: 'Prometheus'
|
||||
orgId: 1
|
||||
folder: ''
|
||||
type: file
|
||||
options:
|
||||
path: /var/lib/grafana/dashboards
|
||||
File diff suppressed because it is too large
Load Diff
11
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/datasources.yaml
vendored
Normal file
11
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/datasources.yaml
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
org_id: 1
|
||||
url: http://prometheus:9099
|
||||
is_default: true
|
||||
version: 1
|
||||
editable: true
|
||||
2
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana-plugins.env
vendored
Normal file
2
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana-plugins.env
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
#GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,digrich-bubblechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,jdbranham-diagram-panel,agenty-flowcharting-panel,citilogics-geoloop-panel,savantly-heatmap-panel,mtanda-histogram-panel,pierosavi-imageit-panel,michaeldmoore-multistat-panel,zuburqan-parity-report-panel,natel-plotly-panel,bessler-pictureit-panel,grafana-polystat-panel,corpglory-progresslist-panel,snuids-radar-panel,fzakaria-simple-config.config.annotations-datasource,vonage-status-panel,snuids-trafficlights-panel,pr0ps-trackmap-panel,alexandra-trackmap-panel,btplc-trend-box-panel
|
||||
GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,pierosavi-imageit-panel,bessler-pictureit-panel,vonage-status-panel
|
||||
53
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana.ini
vendored
Normal file
53
third-party/nwaku/apps/liteprotocoltester/monitoring/configuration/grafana.ini
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
instance_name = liteprotocoltester dashboard
|
||||
|
||||
;[dashboards.json]
|
||||
;enabled = true
|
||||
;path = /home/git/grafana/grafana-dashboards/dashboards
|
||||
|
||||
[server]
|
||||
http_port = 3033
|
||||
|
||||
#################################### Auth ##########################
|
||||
[auth]
|
||||
disable_login_form = false
|
||||
|
||||
#################################### Anonymous Auth ##########################
|
||||
[auth.anonymous]
|
||||
# enable anonymous access
|
||||
enabled = true
|
||||
|
||||
# specify organization name that should be used for unauthenticated users
|
||||
;org_name = Public
|
||||
|
||||
# specify role for unauthenticated users
|
||||
org_role = Admin
|
||||
; org_role = Viewer
|
||||
|
||||
;[security]
|
||||
;admin_user = ocr
|
||||
;admin_password = ocr
|
||||
|
||||
;[users]
|
||||
# disable user signup / registration
|
||||
;allow_sign_up = false
|
||||
|
||||
# Set to true to automatically assign new users to the default organization (id 1)
|
||||
;auto_assign_org = true
|
||||
|
||||
# Default role new users will be automatically assigned (if disabled above is set to true)
|
||||
;auto_assign_org_role = Viewer
|
||||
|
||||
#################################### SMTP / Emailing ##########################
|
||||
;[smtp]
|
||||
;enabled = false
|
||||
;host = localhost:25
|
||||
;user =
|
||||
;password =
|
||||
;cert_file =
|
||||
;key_file =
|
||||
;skip_verify = false
|
||||
;from_address = admin@grafana.localhost
|
||||
|
||||
;[emails]
|
||||
;welcome_email_on_sign_up = false
|
||||
|
||||
35
third-party/nwaku/apps/liteprotocoltester/monitoring/prometheus-config.yml
vendored
Normal file
35
third-party/nwaku/apps/liteprotocoltester/monitoring/prometheus-config.yml
vendored
Normal file
@ -0,0 +1,35 @@
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
evaluation_interval: 15s
|
||||
external_labels:
|
||||
monitor: "Monitoring"
|
||||
|
||||
scrape_configs:
|
||||
- job_name: "liteprotocoltester"
|
||||
static_configs:
|
||||
- targets: ["liteprotocoltester-publishernode-1:8003",
|
||||
"liteprotocoltester-publishernode-2:8003",
|
||||
"liteprotocoltester-publishernode-3:8003",
|
||||
"liteprotocoltester-publishernode-4:8003",
|
||||
"liteprotocoltester-publishernode-5:8003",
|
||||
"liteprotocoltester-publishernode-6:8003",
|
||||
"liteprotocoltester-receivernode-1:8003",
|
||||
"liteprotocoltester-receivernode-2:8003",
|
||||
"liteprotocoltester-receivernode-3:8003",
|
||||
"liteprotocoltester-receivernode-4:8003",
|
||||
"liteprotocoltester-receivernode-5:8003",
|
||||
"liteprotocoltester-receivernode-6:8003",
|
||||
"publishernode:8003",
|
||||
"publishernode-1:8003",
|
||||
"publishernode-2:8003",
|
||||
"publishernode-3:8003",
|
||||
"publishernode-4:8003",
|
||||
"publishernode-5:8003",
|
||||
"publishernode-6:8003",
|
||||
"receivernode:8003",
|
||||
"receivernode-1:8003",
|
||||
"receivernode-2:8003",
|
||||
"receivernode-3:8003",
|
||||
"receivernode-4:8003",
|
||||
"receivernode-5:8003",
|
||||
"receivernode-6:8003",]
|
||||
4
third-party/nwaku/apps/liteprotocoltester/nim.cfg
vendored
Normal file
4
third-party/nwaku/apps/liteprotocoltester/nim.cfg
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
-d:chronicles_line_numbers
|
||||
-d:chronicles_runtime_filtering:on
|
||||
-d:discv5_protocol_id:d5waku
|
||||
path = "../.."
|
||||
272
third-party/nwaku/apps/liteprotocoltester/publisher.nim
vendored
Normal file
272
third-party/nwaku/apps/liteprotocoltester/publisher.nim
vendored
Normal file
@ -0,0 +1,272 @@
|
||||
import
|
||||
std/[strformat, sysrand, random, strutils, sequtils],
|
||||
system/ansi_c,
|
||||
chronicles,
|
||||
chronos,
|
||||
chronos/timer as chtimer,
|
||||
stew/byteutils,
|
||||
results,
|
||||
json_serialization as js
|
||||
import
|
||||
waku/[
|
||||
common/logging,
|
||||
waku_node,
|
||||
node/peer_manager,
|
||||
waku_core,
|
||||
waku_lightpush/client,
|
||||
waku_lightpush/common,
|
||||
common/utils/parse_size_units,
|
||||
],
|
||||
./tester_config,
|
||||
./tester_message,
|
||||
./lpt_metrics,
|
||||
./diagnose_connections,
|
||||
./service_peer_management,
|
||||
./publisher_base,
|
||||
./legacy_publisher,
|
||||
./v3_publisher
|
||||
|
||||
randomize()
|
||||
|
||||
type SizeRange* = tuple[min: uint64, max: uint64]
|
||||
|
||||
var RANDOM_PAYLOAD {.threadvar.}: seq[byte]
|
||||
RANDOM_PAYLOAD = urandom(1024 * 1024)
|
||||
# 1MiB of random payload to be used to extend message
|
||||
|
||||
proc prepareMessage(
|
||||
sender: string,
|
||||
messageIndex, numMessages: uint32,
|
||||
startedAt: TimeStamp,
|
||||
prevMessageAt: var Timestamp,
|
||||
contentTopic: ContentTopic,
|
||||
size: SizeRange,
|
||||
): (WakuMessage, uint64) =
|
||||
var renderSize = rand(size.min .. size.max)
|
||||
let current = getNowInNanosecondTime()
|
||||
let payload = ProtocolTesterMessage(
|
||||
sender: sender,
|
||||
index: messageIndex,
|
||||
count: numMessages,
|
||||
startedAt: startedAt,
|
||||
sinceStart: current - startedAt,
|
||||
sincePrev: current - prevMessageAt,
|
||||
size: renderSize,
|
||||
)
|
||||
|
||||
prevMessageAt = current
|
||||
|
||||
let text = js.Json.encode(payload)
|
||||
let contentPayload = toBytes(text & " \0")
|
||||
|
||||
if renderSize < len(contentPayload).uint64:
|
||||
renderSize = len(contentPayload).uint64
|
||||
|
||||
let finalPayload =
|
||||
concat(contentPayload, RANDOM_PAYLOAD[0 .. renderSize - len(contentPayload).uint64])
|
||||
let message = WakuMessage(
|
||||
payload: finalPayload, # content of the message
|
||||
contentTopic: contentTopic, # content topic to publish to
|
||||
ephemeral: true, # tell store nodes to not store it
|
||||
timestamp: current, # current timestamp
|
||||
)
|
||||
|
||||
return (message, renderSize)
|
||||
|
||||
var sentMessages {.threadvar.}: OrderedTable[uint32, tuple[hash: string, relayed: bool]]
|
||||
var failedToSendCause {.threadvar.}: Table[string, uint32]
|
||||
var failedToSendCount {.threadvar.}: uint32
|
||||
var numMessagesToSend {.threadvar.}: uint32
|
||||
var messagesSent {.threadvar.}: uint32
|
||||
var noOfServicePeerSwitches {.threadvar.}: uint32
|
||||
|
||||
proc reportSentMessages() =
|
||||
let report = catch:
|
||||
"""*----------------------------------------*
|
||||
| Service Peer Switches: {noOfServicePeerSwitches:>15} |
|
||||
*----------------------------------------*
|
||||
| Expected | Sent | Failed |
|
||||
|{numMessagesToSend+failedToSendCount:>11} |{messagesSent:>11} |{failedToSendCount:>11} |
|
||||
*----------------------------------------*""".fmt()
|
||||
|
||||
if report.isErr:
|
||||
echo "Error while printing statistics"
|
||||
else:
|
||||
echo report.get()
|
||||
|
||||
echo "*--------------------------------------------------------------------------------------------------*"
|
||||
echo "| Failure cause | count |"
|
||||
for (cause, count) in failedToSendCause.pairs:
|
||||
echo fmt"|{cause:<87}|{count:>10}|"
|
||||
echo "*--------------------------------------------------------------------------------------------------*"
|
||||
|
||||
echo "*--------------------------------------------------------------------------------------------------*"
|
||||
echo "| Index | Relayed | Hash |"
|
||||
for (index, info) in sentMessages.pairs:
|
||||
echo fmt"|{index+1:>10}|{info.relayed:<9}| {info.hash:<76}|"
|
||||
echo "*--------------------------------------------------------------------------------------------------*"
|
||||
# evere sent message hash should logged once
|
||||
sentMessages.clear()
|
||||
|
||||
proc publishMessages(
|
||||
wakuNode: WakuNode,
|
||||
publisher: PublisherBase,
|
||||
servicePeer: RemotePeerInfo,
|
||||
lightpushPubsubTopic: PubsubTopic,
|
||||
lightpushContentTopic: ContentTopic,
|
||||
numMessages: uint32,
|
||||
messageSizeRange: SizeRange,
|
||||
messageInterval: Duration,
|
||||
preventPeerSwitch: bool,
|
||||
) {.async.} =
|
||||
var actualServicePeer = servicePeer
|
||||
let startedAt = getNowInNanosecondTime()
|
||||
var prevMessageAt = startedAt
|
||||
var renderMsgSize = messageSizeRange
|
||||
# sets some default of min max message size to avoid conflict with meaningful payload size
|
||||
renderMsgSize.min = max(1024.uint64, renderMsgSize.min) # do not use less than 1KB
|
||||
renderMsgSize.max = max(2048.uint64, renderMsgSize.max) # minimum of max is 2KB
|
||||
renderMsgSize.min = min(renderMsgSize.min, renderMsgSize.max)
|
||||
renderMsgSize.max = max(renderMsgSize.min, renderMsgSize.max)
|
||||
|
||||
const maxFailedPush = 3
|
||||
var noFailedPush = 0
|
||||
var noFailedServiceNodeSwitches = 0
|
||||
|
||||
let selfPeerId = $wakuNode.switch.peerInfo.peerId
|
||||
failedToSendCount = 0
|
||||
numMessagesToSend = if numMessages == 0: uint32.high else: numMessages
|
||||
messagesSent = 0
|
||||
|
||||
while messagesSent < numMessagesToSend:
|
||||
let (message, msgSize) = prepareMessage(
|
||||
selfPeerId,
|
||||
messagesSent + 1,
|
||||
numMessagesToSend,
|
||||
startedAt,
|
||||
prevMessageAt,
|
||||
lightpushContentTopic,
|
||||
renderMsgSize,
|
||||
)
|
||||
|
||||
let publishStartTime = Moment.now()
|
||||
|
||||
let wlpRes = await publisher.send(lightpushPubsubTopic, message, actualServicePeer)
|
||||
|
||||
let publishDuration = Moment.now() - publishStartTime
|
||||
|
||||
let msgHash = computeMessageHash(lightpushPubsubTopic, message).to0xHex
|
||||
|
||||
if wlpRes.isOk():
|
||||
lpt_publish_duration_seconds.observe(publishDuration.milliseconds.float / 1000)
|
||||
|
||||
sentMessages[messagesSent] = (hash: msgHash, relayed: true)
|
||||
notice "published message using lightpush",
|
||||
index = messagesSent + 1,
|
||||
count = numMessagesToSend,
|
||||
size = msgSize,
|
||||
pubsubTopic = lightpushPubsubTopic,
|
||||
hash = msgHash
|
||||
inc(messagesSent)
|
||||
lpt_publisher_sent_messages_count.inc()
|
||||
lpt_publisher_sent_bytes.inc(amount = msgSize.int64)
|
||||
if noFailedPush > 0:
|
||||
noFailedPush -= 1
|
||||
else:
|
||||
sentMessages[messagesSent] = (hash: msgHash, relayed: false)
|
||||
failedToSendCause.mgetOrPut(wlpRes.error, 1).inc()
|
||||
error "failed to publish message using lightpush",
|
||||
err = wlpRes.error, hash = msgHash
|
||||
inc(failedToSendCount)
|
||||
lpt_publisher_failed_messages_count.inc(labelValues = [wlpRes.error])
|
||||
if not wlpRes.error.toLower().contains("dial"):
|
||||
# retry sending after shorter wait
|
||||
await sleepAsync(2.seconds)
|
||||
continue
|
||||
else:
|
||||
noFailedPush += 1
|
||||
lpt_service_peer_failure_count.inc(
|
||||
labelValues = ["publisher", actualServicePeer.getAgent()]
|
||||
)
|
||||
if not preventPeerSwitch and noFailedPush > maxFailedPush:
|
||||
info "Max push failure limit reached, Try switching peer."
|
||||
let peerOpt = selectRandomServicePeer(
|
||||
wakuNode.peerManager, some(actualServicePeer), WakuLightPushCodec
|
||||
)
|
||||
if peerOpt.isOk():
|
||||
actualServicePeer = peerOpt.get()
|
||||
|
||||
info "New service peer in use",
|
||||
codec = lightpushPubsubTopic,
|
||||
peer = constructMultiaddrStr(actualServicePeer)
|
||||
|
||||
noFailedPush = 0
|
||||
noOfServicePeerSwitches += 1
|
||||
lpt_change_service_peer_count.inc(labelValues = ["publisher"])
|
||||
continue # try again with new peer without delay
|
||||
else:
|
||||
error "Failed to find new service peer. Exiting."
|
||||
noFailedServiceNodeSwitches += 1
|
||||
break
|
||||
|
||||
await sleepAsync(messageInterval)
|
||||
|
||||
proc setupAndPublish*(
|
||||
wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
|
||||
) =
|
||||
var publisher: PublisherBase
|
||||
if conf.lightpushVersion == LightpushVersion.LEGACY:
|
||||
info "Using legacy lightpush protocol for publishing messages"
|
||||
publisher = LegacyPublisher.new(wakuNode)
|
||||
else:
|
||||
info "Using lightpush v3 protocol for publishing messages"
|
||||
publisher = V3Publisher.new(wakuNode)
|
||||
|
||||
# give some time to receiver side to set up
|
||||
let waitTillStartTesting = conf.startPublishingAfter.seconds
|
||||
|
||||
let parsedMinMsgSize = parseMsgSize(conf.minTestMessageSize).valueOr:
|
||||
error "failed to parse 'min-test-msg-size' param: ", error = error
|
||||
return
|
||||
|
||||
let parsedMaxMsgSize = parseMsgSize(conf.maxTestMessageSize).valueOr:
|
||||
error "failed to parse 'max-test-msg-size' param: ", error = error
|
||||
return
|
||||
|
||||
info "Sending test messages in", wait = waitTillStartTesting
|
||||
waitFor sleepAsync(waitTillStartTesting)
|
||||
|
||||
info "Start sending messages to service node using lightpush"
|
||||
|
||||
sentMessages.sort(system.cmp)
|
||||
|
||||
let interval = secs(60)
|
||||
var printStats: CallbackFunc
|
||||
|
||||
printStats = CallbackFunc(
|
||||
proc(udata: pointer) {.gcsafe.} =
|
||||
reportSentMessages()
|
||||
|
||||
if messagesSent >= numMessagesToSend:
|
||||
info "All messages are sent. Exiting."
|
||||
|
||||
## for gracefull shutdown through signal hooks
|
||||
discard c_raise(ansi_c.SIGTERM)
|
||||
else:
|
||||
discard setTimer(Moment.fromNow(interval), printStats)
|
||||
)
|
||||
|
||||
discard setTimer(Moment.fromNow(interval), printStats)
|
||||
|
||||
# Start maintaining subscription
|
||||
asyncSpawn publishMessages(
|
||||
wakuNode,
|
||||
publisher,
|
||||
servicePeer,
|
||||
conf.getPubsubTopic(),
|
||||
conf.contentTopics[0],
|
||||
conf.numMessages,
|
||||
(min: parsedMinMsgSize, max: parsedMaxMsgSize),
|
||||
conf.messageInterval.milliseconds,
|
||||
conf.fixedServicePeer,
|
||||
)
|
||||
14
third-party/nwaku/apps/liteprotocoltester/publisher_base.nim
vendored
Normal file
14
third-party/nwaku/apps/liteprotocoltester/publisher_base.nim
vendored
Normal file
@ -0,0 +1,14 @@
|
||||
import chronos, results
|
||||
import waku/[waku_node, waku_core]
|
||||
|
||||
type PublisherBase* = ref object of RootObj
|
||||
wakuNode*: WakuNode
|
||||
|
||||
method send*(
|
||||
self: PublisherBase,
|
||||
topic: PubsubTopic,
|
||||
message: WakuMessage,
|
||||
servicePeer: RemotePeerInfo,
|
||||
): Future[Result[void, string]] {.base, async.} =
|
||||
discard
|
||||
# when error it must return original error desc due the text is used for distinction between error types in metrics.
|
||||
182
third-party/nwaku/apps/liteprotocoltester/receiver.nim
vendored
Normal file
182
third-party/nwaku/apps/liteprotocoltester/receiver.nim
vendored
Normal file
@ -0,0 +1,182 @@
|
||||
## Example showing how a resource restricted client may
|
||||
## subscribe to messages without relay
|
||||
|
||||
import
|
||||
std/options,
|
||||
system/ansi_c,
|
||||
chronicles,
|
||||
chronos,
|
||||
chronos/timer as chtimer,
|
||||
stew/byteutils,
|
||||
results,
|
||||
serialization,
|
||||
json_serialization as js
|
||||
|
||||
import
|
||||
waku/[
|
||||
common/logging,
|
||||
node/peer_manager,
|
||||
waku_node,
|
||||
waku_core,
|
||||
waku_filter_v2/client,
|
||||
waku_filter_v2/common,
|
||||
waku_core/multiaddrstr,
|
||||
],
|
||||
./tester_config,
|
||||
./tester_message,
|
||||
./statistics,
|
||||
./diagnose_connections,
|
||||
./service_peer_management,
|
||||
./lpt_metrics
|
||||
|
||||
var actualFilterPeer {.threadvar.}: RemotePeerInfo
|
||||
|
||||
proc unsubscribe(
|
||||
wakuNode: WakuNode, filterPubsubTopic: PubsubTopic, filterContentTopic: ContentTopic
|
||||
) {.async.} =
|
||||
notice "unsubscribing from filter"
|
||||
let unsubscribeRes = await wakuNode.wakuFilterClient.unsubscribe(
|
||||
actualFilterPeer, filterPubsubTopic, @[filterContentTopic]
|
||||
)
|
||||
if unsubscribeRes.isErr:
|
||||
notice "unsubscribe request failed", err = unsubscribeRes.error
|
||||
else:
|
||||
notice "unsubscribe request successful"
|
||||
|
||||
proc maintainSubscription(
|
||||
wakuNode: WakuNode,
|
||||
filterPubsubTopic: PubsubTopic,
|
||||
filterContentTopic: ContentTopic,
|
||||
preventPeerSwitch: bool,
|
||||
) {.async.} =
|
||||
const maxFailedSubscribes = 3
|
||||
const maxFailedServiceNodeSwitches = 10
|
||||
var noFailedSubscribes = 0
|
||||
var noFailedServiceNodeSwitches = 0
|
||||
var isFirstPingOnNewPeer = true
|
||||
while true:
|
||||
info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer)
|
||||
# First use filter-ping to check if we have an active subscription
|
||||
let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer)
|
||||
if pingRes.isErr():
|
||||
if isFirstPingOnNewPeer == false:
|
||||
# Very first ping expected to fail as we have not yet subscribed at all
|
||||
lpt_receiver_lost_subscription_count.inc()
|
||||
isFirstPingOnNewPeer = false
|
||||
# No subscription found. Let's subscribe.
|
||||
error "ping failed.", err = pingRes.error
|
||||
trace "no subscription found. Sending subscribe request"
|
||||
|
||||
let subscribeRes = await wakuNode.filterSubscribe(
|
||||
some(filterPubsubTopic), filterContentTopic, actualFilterPeer
|
||||
)
|
||||
|
||||
if subscribeRes.isErr():
|
||||
noFailedSubscribes += 1
|
||||
lpt_service_peer_failure_count.inc(
|
||||
labelValues = ["receiver", actualFilterPeer.getAgent()]
|
||||
)
|
||||
error "Subscribe request failed.",
|
||||
err = subscribeRes.error,
|
||||
peer = actualFilterPeer,
|
||||
failCount = noFailedSubscribes
|
||||
|
||||
# TODO: disconnet from failed actualFilterPeer
|
||||
# asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
|
||||
# wakunode.peerManager.peerStore.delete(actualFilterPeer)
|
||||
|
||||
if noFailedSubscribes < maxFailedSubscribes:
|
||||
await sleepAsync(2.seconds) # Wait a bit before retrying
|
||||
continue
|
||||
elif not preventPeerSwitch:
|
||||
let peerOpt = selectRandomServicePeer(
|
||||
wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
|
||||
)
|
||||
if peerOpt.isOk():
|
||||
actualFilterPeer = peerOpt.get()
|
||||
|
||||
info "Found new peer for codec",
|
||||
codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
|
||||
|
||||
noFailedSubscribes = 0
|
||||
lpt_change_service_peer_count.inc(labelValues = ["receiver"])
|
||||
isFirstPingOnNewPeer = true
|
||||
continue # try again with new peer without delay
|
||||
else:
|
||||
error "Failed to find new service peer. Exiting."
|
||||
noFailedServiceNodeSwitches += 1
|
||||
break
|
||||
else:
|
||||
if noFailedSubscribes > 0:
|
||||
noFailedSubscribes -= 1
|
||||
|
||||
notice "subscribe request successful."
|
||||
else:
|
||||
info "subscription is live."
|
||||
|
||||
await sleepAsync(30.seconds) # Subscription maintenance interval
|
||||
|
||||
proc setupAndListen*(
|
||||
wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
|
||||
) =
|
||||
if isNil(wakuNode.wakuFilterClient):
|
||||
# if we have not yet initialized lightpush client, then do it as the only way we can get here is
|
||||
# by having a service peer discovered.
|
||||
waitFor wakuNode.mountFilterClient()
|
||||
|
||||
info "Start receiving messages to service node using filter",
|
||||
servicePeer = servicePeer
|
||||
|
||||
var stats: PerPeerStatistics
|
||||
actualFilterPeer = servicePeer
|
||||
|
||||
let pushHandler = proc(
|
||||
pubsubTopic: PubsubTopic, message: WakuMessage
|
||||
): Future[void] {.async, closure.} =
|
||||
let payloadStr = string.fromBytes(message.payload)
|
||||
let testerMessage = js.Json.decode(payloadStr, ProtocolTesterMessage)
|
||||
let msgHash = computeMessageHash(pubsubTopic, message).to0xHex
|
||||
|
||||
stats.addMessage(testerMessage.sender, testerMessage, msgHash)
|
||||
|
||||
notice "message received",
|
||||
index = testerMessage.index,
|
||||
count = testerMessage.count,
|
||||
startedAt = $testerMessage.startedAt,
|
||||
sinceStart = $testerMessage.sinceStart,
|
||||
sincePrev = $testerMessage.sincePrev,
|
||||
size = $testerMessage.size,
|
||||
pubsubTopic = pubsubTopic,
|
||||
hash = msgHash
|
||||
|
||||
wakuNode.wakuFilterClient.registerPushHandler(pushHandler)
|
||||
|
||||
let interval = millis(20000)
|
||||
var printStats: CallbackFunc
|
||||
|
||||
# calculate max wait after the last known message arrived before exiting
|
||||
# 20% of expected messages times the expected interval but capped to 10min
|
||||
let maxWaitForLastMessage: Duration =
|
||||
min(conf.messageInterval.milliseconds * (conf.numMessages div 5), 10.minutes)
|
||||
|
||||
printStats = CallbackFunc(
|
||||
proc(udata: pointer) {.gcsafe.} =
|
||||
stats.echoStats()
|
||||
|
||||
if conf.numMessages > 0 and
|
||||
waitFor stats.checkIfAllMessagesReceived(maxWaitForLastMessage):
|
||||
waitFor unsubscribe(wakuNode, conf.getPubsubTopic(), conf.contentTopics[0])
|
||||
info "All messages received. Exiting."
|
||||
|
||||
## for gracefull shutdown through signal hooks
|
||||
discard c_raise(ansi_c.SIGTERM)
|
||||
else:
|
||||
discard setTimer(Moment.fromNow(interval), printStats)
|
||||
)
|
||||
|
||||
discard setTimer(Moment.fromNow(interval), printStats)
|
||||
|
||||
# Start maintaining subscription
|
||||
asyncSpawn maintainSubscription(
|
||||
wakuNode, conf.getPubsubTopic(), conf.contentTopics[0], conf.fixedServicePeer
|
||||
)
|
||||
63
third-party/nwaku/apps/liteprotocoltester/run_service_node.sh
vendored
Executable file
63
third-party/nwaku/apps/liteprotocoltester/run_service_node.sh
vendored
Executable file
@ -0,0 +1,63 @@
|
||||
#!/bin/sh
|
||||
|
||||
echo "I am a service node"
|
||||
IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/')
|
||||
|
||||
echo "Service node IP: ${IP}"
|
||||
|
||||
if [ -n "${SHARD}" ]; then
|
||||
SHARD=--shard="${SHARD}"
|
||||
else
|
||||
SHARD=--shard="0"
|
||||
fi
|
||||
|
||||
if [ -n "${CLUSTER_ID}" ]; then
|
||||
CLUSTER_ID=--cluster-id="${CLUSTER_ID}"
|
||||
fi
|
||||
|
||||
echo "STANDALONE: ${STANDALONE}"
|
||||
|
||||
if [ -z "${STANDALONE}" ]; then
|
||||
|
||||
RETRIES=${RETRIES:=20}
|
||||
|
||||
while [ -z "${BOOTSTRAP_ENR}" ] && [ ${RETRIES} -ge 0 ]; do
|
||||
BOOTSTRAP_ENR=$(wget -qO- http://bootstrap:8645/debug/v1/info --header='Content-Type:application/json' 2> /dev/null | sed 's/.*"enrUri":"\([^"]*\)".*/\1/');
|
||||
echo "Bootstrap node not ready, retrying (retries left: ${RETRIES})"
|
||||
sleep 3
|
||||
RETRIES=$(( $RETRIES - 1 ))
|
||||
done
|
||||
|
||||
if [ -z "${BOOTSTRAP_ENR}" ]; then
|
||||
echo "Could not get BOOTSTRAP_ENR and none provided. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Using bootstrap node: ${BOOTSTRAP_ENR}"
|
||||
|
||||
fi
|
||||
|
||||
|
||||
exec /usr/bin/wakunode\
|
||||
--relay=true\
|
||||
--filter=true\
|
||||
--lightpush=true\
|
||||
--store=false\
|
||||
--rest=true\
|
||||
--rest-admin=true\
|
||||
--rest-private=true\
|
||||
--rest-address=0.0.0.0\
|
||||
--rest-allow-origin="*"\
|
||||
--keep-alive=true\
|
||||
--max-connections=300\
|
||||
--dns-discovery=true\
|
||||
--discv5-discovery=true\
|
||||
--discv5-enr-auto-update=True\
|
||||
--discv5-bootstrap-node=${BOOTSTRAP_ENR}\
|
||||
--log-level=INFO\
|
||||
--metrics-server=True\
|
||||
--metrics-server-port=8003\
|
||||
--metrics-server-address=0.0.0.0\
|
||||
--nat=extip:${IP}\
|
||||
${SHARD}\
|
||||
${CLUSTER_ID}
|
||||
161
third-party/nwaku/apps/liteprotocoltester/run_tester_node.sh
vendored
Executable file
161
third-party/nwaku/apps/liteprotocoltester/run_tester_node.sh
vendored
Executable file
@ -0,0 +1,161 @@
|
||||
#!/bin/sh
|
||||
|
||||
#set -x
|
||||
|
||||
if test -f .env; then
|
||||
echo "Using .env file"
|
||||
. $(pwd)/.env
|
||||
fi
|
||||
|
||||
|
||||
echo "I am a lite-protocol-tester node"
|
||||
|
||||
BINARY_PATH=$1
|
||||
|
||||
if [ ! -x "${BINARY_PATH}" ]; then
|
||||
echo "Invalid binary path '${BINARY_PATH}'. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${2}" = "--help" ]; then
|
||||
echo "You might want to check nwaku/apps/liteprotocoltester/README.md"
|
||||
exec "${BINARY_PATH}" --help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
FUNCTION=$2
|
||||
if [ "${FUNCTION}" = "SENDER" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
|
||||
SERVICENAME=lightpush-service
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "SENDERV3" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=V3"
|
||||
SERVICENAME=lightpush-service
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "RECEIVER" ]; then
|
||||
FUNCTION=--test-func=RECEIVER
|
||||
SERVICENAME=filter-service
|
||||
fi
|
||||
|
||||
SERIVCE_NODE_ADDR=$3
|
||||
if [ -z "${SERIVCE_NODE_ADDR}" ]; then
|
||||
echo "Service node peer_id provided. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
SELECTOR=$4
|
||||
if [ -z "${SELECTOR}" ] || [ "${SELECTOR}" = "SERVICE" ]; then
|
||||
SERVICE_NODE_DIRECT=true
|
||||
elif [ "${SELECTOR}" = "BOOTSTRAP" ]; then
|
||||
SERVICE_NODE_DIRECT=false
|
||||
else
|
||||
echo "Invalid selector '${SELECTOR}'. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
DO_DETECT_SERVICENODE=0
|
||||
|
||||
if [ "${SERIVCE_NODE_ADDR}" = "servicenode" ]; then
|
||||
DO_DETECT_SERVICENODE=1
|
||||
SERIVCE_NODE_ADDR=""
|
||||
SERVICENAME=servicenode
|
||||
fi
|
||||
|
||||
if [ "${SERIVCE_NODE_ADDR}" = "waku-sim" ]; then
|
||||
DO_DETECT_SERVICENODE=1
|
||||
SERIVCE_NODE_ADDR=""
|
||||
MY_EXT_IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/')
|
||||
else
|
||||
MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org)
|
||||
fi
|
||||
|
||||
|
||||
if [ $DO_DETECT_SERVICENODE -eq 1 ]; then
|
||||
RETRIES=${RETRIES:=20}
|
||||
|
||||
while [ -z "${SERIVCE_NODE_ADDR}" ] && [ ${RETRIES} -ge 0 ]; do
|
||||
SERVICE_DEBUG_INFO=$(wget -qO- http://${SERVICENAME}:8645/debug/v1/info --header='Content-Type:application/json' 2> /dev/null);
|
||||
echo "SERVICE_DEBUG_INFO: ${SERVICE_DEBUG_INFO}"
|
||||
|
||||
SERIVCE_NODE_ADDR=$(wget -qO- http://${SERVICENAME}:8645/debug/v1/info --header='Content-Type:application/json' 2> /dev/null | sed 's/.*"listenAddresses":\["\([^"]*\)".*/\1/');
|
||||
echo "Service node not ready, retrying (retries left: ${RETRIES})"
|
||||
sleep 3
|
||||
RETRIES=$(( $RETRIES - 1 ))
|
||||
done
|
||||
|
||||
fi
|
||||
|
||||
if [ -z "${SERIVCE_NODE_ADDR}" ]; then
|
||||
echo "Could not get SERIVCE_NODE_ADDR and none provided. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if $SERVICE_NODE_DIRECT; then
|
||||
FULL_NODE=--service-node="${SERIVCE_NODE_ADDR} --fixed-service-peer"
|
||||
else
|
||||
FULL_NODE=--bootstrap-node="${SERIVCE_NODE_ADDR}"
|
||||
fi
|
||||
|
||||
if [ -n "${SHARD}" ]; then
|
||||
SHARD=--shard="${SHARD}"
|
||||
else
|
||||
SHARD=--shard="0"
|
||||
fi
|
||||
|
||||
if [ -n "${CONTENT_TOPIC}" ]; then
|
||||
CONTENT_TOPIC=--content-topic="${CONTENT_TOPIC}"
|
||||
fi
|
||||
|
||||
if [ -n "${CLUSTER_ID}" ]; then
|
||||
CLUSTER_ID=--cluster-id="${CLUSTER_ID}"
|
||||
fi
|
||||
|
||||
if [ -n "${START_PUBLISHING_AFTER_SECS}" ]; then
|
||||
START_PUBLISHING_AFTER_SECS=--start-publishing-after="${START_PUBLISHING_AFTER_SECS}"
|
||||
fi
|
||||
|
||||
if [ -n "${MIN_MESSAGE_SIZE}" ]; then
|
||||
MIN_MESSAGE_SIZE=--min-test-msg-size="${MIN_MESSAGE_SIZE}"
|
||||
fi
|
||||
|
||||
if [ -n "${MAX_MESSAGE_SIZE}" ]; then
|
||||
MAX_MESSAGE_SIZE=--max-test-msg-size="${MAX_MESSAGE_SIZE}"
|
||||
fi
|
||||
|
||||
|
||||
if [ -n "${NUM_MESSAGES}" ]; then
|
||||
NUM_MESSAGES=--num-messages="${NUM_MESSAGES}"
|
||||
fi
|
||||
|
||||
if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
|
||||
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
|
||||
fi
|
||||
|
||||
if [ -n "${LOG_LEVEL}" ]; then
|
||||
LOG_LEVEL=--log-level=${LOG_LEVEL}
|
||||
else
|
||||
LOG_LEVEL=--log-level=INFO
|
||||
fi
|
||||
|
||||
echo "Running binary: ${BINARY_PATH}"
|
||||
echo "Tester node: ${FUNCTION}"
|
||||
echo "Using service node: ${SERIVCE_NODE_ADDR}"
|
||||
echo "My external IP: ${MY_EXT_IP}"
|
||||
|
||||
exec "${BINARY_PATH}"\
|
||||
--nat=extip:${MY_EXT_IP}\
|
||||
--test-peers\
|
||||
${LOG_LEVEL}\
|
||||
${FULL_NODE}\
|
||||
${MESSAGE_INTERVAL_MILLIS}\
|
||||
${NUM_MESSAGES}\
|
||||
${SHARD}\
|
||||
${CONTENT_TOPIC}\
|
||||
${CLUSTER_ID}\
|
||||
${FUNCTION}\
|
||||
${START_PUBLISHING_AFTER_SECS}\
|
||||
${MIN_MESSAGE_SIZE}\
|
||||
${MAX_MESSAGE_SIZE}
|
||||
# --config-file=config.toml\
|
||||
119
third-party/nwaku/apps/liteprotocoltester/run_tester_node_at_infra.sh
vendored
Normal file
119
third-party/nwaku/apps/liteprotocoltester/run_tester_node_at_infra.sh
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
#!/bin/sh
|
||||
|
||||
#set -x
|
||||
#echo "$@"
|
||||
|
||||
if test -f .env; then
|
||||
echo "Using .env file"
|
||||
. $(pwd)/.env
|
||||
fi
|
||||
|
||||
|
||||
echo "I am a lite-protocol-tester node"
|
||||
|
||||
BINARY_PATH=$1
|
||||
|
||||
if [ ! -x "${BINARY_PATH}" ]; then
|
||||
echo "Invalid binary path '${BINARY_PATH}'. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${2}" = "--help" ]; then
|
||||
echo "You might want to check nwaku/apps/liteprotocoltester/README.md"
|
||||
exec "${BINARY_PATH}" --help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
FUNCTION=$2
|
||||
if [ "${FUNCTION}" = "SENDER" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "SENDERV3" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=V3"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "RECEIVER" ]; then
|
||||
FUNCTION=--test-func=RECEIVER
|
||||
SERIVCE_NODE_ADDR=${FILTER_SERVICE_PEER:-${FILTER_BOOTSTRAP:-}}
|
||||
NODE_ARG=${FILTER_SERVICE_PEER:+--service-node="${FILTER_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${FILTER_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${RECEIVER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ -z "${SERIVCE_NODE_ADDR}" ]; then
|
||||
echo "Service/Bootsrap node peer_id or enr is not provided. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org)
|
||||
|
||||
if [ -n "${SHARD}" ]; then
|
||||
SHARD=--shard="${SHARD}"
|
||||
else
|
||||
SHARD=--shard="0"
|
||||
fi
|
||||
|
||||
if [ -n "${CONTENT_TOPIC}" ]; then
|
||||
CONTENT_TOPIC=--content-topic="${CONTENT_TOPIC}"
|
||||
fi
|
||||
|
||||
if [ -n "${CLUSTER_ID}" ]; then
|
||||
CLUSTER_ID=--cluster-id="${CLUSTER_ID}"
|
||||
fi
|
||||
|
||||
if [ -n "${START_PUBLISHING_AFTER_SECS}" ]; then
|
||||
START_PUBLISHING_AFTER_SECS=--start-publishing-after="${START_PUBLISHING_AFTER_SECS}"
|
||||
fi
|
||||
|
||||
if [ -n "${MIN_MESSAGE_SIZE}" ]; then
|
||||
MIN_MESSAGE_SIZE=--min-test-msg-size="${MIN_MESSAGE_SIZE}"
|
||||
fi
|
||||
|
||||
if [ -n "${MAX_MESSAGE_SIZE}" ]; then
|
||||
MAX_MESSAGE_SIZE=--max-test-msg-size="${MAX_MESSAGE_SIZE}"
|
||||
fi
|
||||
|
||||
|
||||
if [ -n "${NUM_MESSAGES}" ]; then
|
||||
NUM_MESSAGES=--num-messages="${NUM_MESSAGES}"
|
||||
fi
|
||||
|
||||
if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
|
||||
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
|
||||
fi
|
||||
|
||||
if [ -n "${LOG_LEVEL}" ]; then
|
||||
LOG_LEVEL=--log-level=${LOG_LEVEL}
|
||||
else
|
||||
LOG_LEVEL=--log-level=INFO
|
||||
fi
|
||||
|
||||
echo "Running binary: ${BINARY_PATH}"
|
||||
echo "Node function is: ${FUNCTION}"
|
||||
echo "Using service/bootstrap node as: ${NODE_ARG}"
|
||||
echo "My external IP: ${MY_EXT_IP}"
|
||||
|
||||
exec "${BINARY_PATH}"\
|
||||
--nat=extip:${MY_EXT_IP}\
|
||||
--test-peers\
|
||||
${LOG_LEVEL}\
|
||||
${NODE_ARG}\
|
||||
${MESSAGE_INTERVAL_MILLIS}\
|
||||
${NUM_MESSAGES}\
|
||||
${SHARD}\
|
||||
${CONTENT_TOPIC}\
|
||||
${CLUSTER_ID}\
|
||||
${FUNCTION}\
|
||||
${START_PUBLISHING_AFTER_SECS}\
|
||||
${MIN_MESSAGE_SIZE}\
|
||||
${MAX_MESSAGE_SIZE}\
|
||||
${METRICS_PORT}
|
||||
118
third-party/nwaku/apps/liteprotocoltester/run_tester_node_on_fleet.sh
vendored
Normal file
118
third-party/nwaku/apps/liteprotocoltester/run_tester_node_on_fleet.sh
vendored
Normal file
@ -0,0 +1,118 @@
|
||||
#!/bin/sh
|
||||
|
||||
#set -x
|
||||
#echo "$@"
|
||||
|
||||
if test -f .env; then
|
||||
echo "Using .env file"
|
||||
. $(pwd)/.env
|
||||
fi
|
||||
|
||||
|
||||
echo "I am a lite-protocol-tester node"
|
||||
|
||||
BINARY_PATH=$1
|
||||
|
||||
if [ ! -x "${BINARY_PATH}" ]; then
|
||||
echo "Invalid binary path '${BINARY_PATH}'. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${2}" = "--help" ]; then
|
||||
echo "You might want to check nwaku/apps/liteprotocoltester/README.md"
|
||||
exec "${BINARY_PATH}" --help
|
||||
exit 0
|
||||
fi
|
||||
|
||||
FUNCTION=$2
|
||||
if [ "${FUNCTION}" = "SENDER" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "SENDERV3" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=V3"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "RECEIVER" ]; then
|
||||
FUNCTION=--test-func=RECEIVER
|
||||
SERIVCE_NODE_ADDR=${FILTER_SERVICE_PEER:-${FILTER_BOOTSTRAP:-}}
|
||||
NODE_ARG=${FILTER_SERVICE_PEER:+--service-node="${FILTER_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${FILTER_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${RECEIVER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ -z "${SERIVCE_NODE_ADDR}" ]; then
|
||||
echo "Service/Bootsrap node peer_id or enr is not provided. Failing"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org)
|
||||
|
||||
if [ -n "${SHARD}" ]; then
|
||||
SHARD=--shard=${SHARD}
|
||||
else
|
||||
SHARD=--shard=0
|
||||
fi
|
||||
|
||||
if [ -n "${CONTENT_TOPIC}" ]; then
|
||||
CONTENT_TOPIC=--content-topic="${CONTENT_TOPIC}"
|
||||
fi
|
||||
|
||||
if [ -n "${CLUSTER_ID}" ]; then
|
||||
CLUSTER_ID=--cluster-id="${CLUSTER_ID}"
|
||||
fi
|
||||
|
||||
if [ -n "${START_PUBLISHING_AFTER}" ]; then
|
||||
START_PUBLISHING_AFTER=--start-publishing-after="${START_PUBLISHING_AFTER}"
|
||||
fi
|
||||
|
||||
if [ -n "${MIN_MESSAGE_SIZE}" ]; then
|
||||
MIN_MESSAGE_SIZE=--min-test-msg-size="${MIN_MESSAGE_SIZE}"
|
||||
fi
|
||||
|
||||
if [ -n "${MAX_MESSAGE_SIZE}" ]; then
|
||||
MAX_MESSAGE_SIZE=--max-test-msg-size="${MAX_MESSAGE_SIZE}"
|
||||
fi
|
||||
|
||||
|
||||
if [ -n "${NUM_MESSAGES}" ]; then
|
||||
NUM_MESSAGES=--num-messages="${NUM_MESSAGES}"
|
||||
fi
|
||||
|
||||
if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
|
||||
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
|
||||
fi
|
||||
|
||||
if [ -n "${LOG_LEVEL}" ]; then
|
||||
LOG_LEVEL=--log-level=${LOG_LEVEL}
|
||||
else
|
||||
LOG_LEVEL=--log-level=INFO
|
||||
fi
|
||||
|
||||
echo "Running binary: ${BINARY_PATH}"
|
||||
echo "Node function is: ${FUNCTION}"
|
||||
echo "Using service/bootstrap node as: ${NODE_ARG}"
|
||||
echo "My external IP: ${MY_EXT_IP}"
|
||||
|
||||
exec "${BINARY_PATH}"\
|
||||
--nat=extip:${MY_EXT_IP}\
|
||||
${LOG_LEVEL}\
|
||||
${NODE_ARG}\
|
||||
${MESSAGE_INTERVAL_MILLIS}\
|
||||
${NUM_MESSAGES}\
|
||||
${SHARD}\
|
||||
${CONTENT_TOPIC}\
|
||||
${CLUSTER_ID}\
|
||||
${FUNCTION}\
|
||||
${START_PUBLISHING_AFTER}\
|
||||
${MIN_MESSAGE_SIZE}\
|
||||
${MAX_MESSAGE_SIZE}\
|
||||
${METRICS_PORT}
|
||||
223
third-party/nwaku/apps/liteprotocoltester/service_peer_management.nim
vendored
Normal file
223
third-party/nwaku/apps/liteprotocoltester/service_peer_management.nim
vendored
Normal file
@ -0,0 +1,223 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[options, net, sysrand, random, strformat, strutils, sequtils],
|
||||
chronicles,
|
||||
chronos,
|
||||
metrics,
|
||||
libbacktrace,
|
||||
libp2p/crypto/crypto,
|
||||
confutils,
|
||||
libp2p/wire
|
||||
|
||||
import
|
||||
../wakunode2/cli_args,
|
||||
waku/[
|
||||
common/enr,
|
||||
waku_node,
|
||||
node/peer_manager,
|
||||
waku_lightpush/common,
|
||||
waku_relay,
|
||||
waku_filter_v2,
|
||||
waku_peer_exchange/protocol,
|
||||
waku_core/multiaddrstr,
|
||||
waku_core/topics/pubsub_topic,
|
||||
waku_enr/capabilities,
|
||||
waku_enr/sharding,
|
||||
],
|
||||
./tester_config,
|
||||
./diagnose_connections,
|
||||
./lpt_metrics
|
||||
|
||||
logScope:
|
||||
topics = "service peer mgmt"
|
||||
|
||||
randomize()
|
||||
|
||||
proc translateToRemotePeerInfo*(peerAddress: string): Result[RemotePeerInfo, void] =
|
||||
var peerInfo: RemotePeerInfo
|
||||
var enrRec: enr.Record
|
||||
if enrRec.fromURI(peerAddress):
|
||||
trace "Parsed ENR", enrRec = $enrRec
|
||||
peerInfo = enrRec.toRemotePeerInfo().valueOr:
|
||||
error "failed to convert ENR to RemotePeerInfo", error = error
|
||||
return err()
|
||||
else:
|
||||
peerInfo = parsePeerInfo(peerAddress).valueOr:
|
||||
error "failed to parse node waku peer-exchange peerId", error = error
|
||||
return err()
|
||||
|
||||
return ok(peerInfo)
|
||||
|
||||
## To retrieve peers from PeerExchange partner and return one randomly selected one
|
||||
## among the ones successfully dialed
|
||||
## Note: This is kept for future use.
|
||||
proc selectRandomCapablePeer*(
|
||||
pm: PeerManager, codec: string, pubsubTopic: PubsubTopic
|
||||
): Future[Option[RemotePeerInfo]] {.async.} =
|
||||
var cap = Capabilities.Filter
|
||||
if codec.contains("lightpush"):
|
||||
cap = Capabilities.Lightpush
|
||||
elif codec.contains("filter"):
|
||||
cap = Capabilities.Filter
|
||||
|
||||
var supportivePeers = pm.switch.peerStore.getPeersByCapability(cap)
|
||||
|
||||
trace "Found supportive peers count", count = supportivePeers.len()
|
||||
trace "Found supportive peers", supportivePeers = $supportivePeers
|
||||
if supportivePeers.len == 0:
|
||||
return none(RemotePeerInfo)
|
||||
|
||||
var found = none(RemotePeerInfo)
|
||||
while found.isNone() and supportivePeers.len > 0:
|
||||
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
|
||||
let randomPeer = supportivePeers[rndPeerIndex]
|
||||
|
||||
debug "Dialing random peer",
|
||||
idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)
|
||||
|
||||
supportivePeers.delete(rndPeerIndex .. rndPeerIndex)
|
||||
|
||||
let connOpt = pm.dialPeer(randomPeer, codec)
|
||||
if (await connOpt.withTimeout(10.seconds)):
|
||||
if connOpt.value().isSome():
|
||||
found = some(randomPeer)
|
||||
debug "Dialing successful",
|
||||
peer = constructMultiaddrStr(randomPeer), codec = codec
|
||||
else:
|
||||
debug "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec
|
||||
else:
|
||||
debug "Timeout dialing service peer",
|
||||
peer = constructMultiaddrStr(randomPeer), codec = codec
|
||||
|
||||
return found
|
||||
|
||||
# Debugging PX gathered peers connectivity
|
||||
proc tryCallAllPxPeers*(
|
||||
pm: PeerManager, codec: string, pubsubTopic: PubsubTopic
|
||||
): Future[Option[seq[RemotePeerInfo]]] {.async.} =
|
||||
var capability = Capabilities.Filter
|
||||
if codec.contains("lightpush"):
|
||||
capability = Capabilities.Lightpush
|
||||
elif codec.contains("filter"):
|
||||
capability = Capabilities.Filter
|
||||
|
||||
var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability)
|
||||
|
||||
lpt_px_peers.set(supportivePeers.len)
|
||||
debug "Found supportive peers count", count = supportivePeers.len()
|
||||
debug "Found supportive peers", supportivePeers = $supportivePeers
|
||||
if supportivePeers.len == 0:
|
||||
return none(seq[RemotePeerInfo])
|
||||
|
||||
var okPeers: seq[RemotePeerInfo] = @[]
|
||||
|
||||
while supportivePeers.len > 0:
|
||||
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
|
||||
let randomPeer = supportivePeers[rndPeerIndex]
|
||||
|
||||
debug "Dialing random peer",
|
||||
idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)
|
||||
|
||||
supportivePeers.delete(rndPeerIndex, rndPeerIndex)
|
||||
|
||||
let connOpt = pm.dialPeer(randomPeer, codec)
|
||||
if (await connOpt.withTimeout(10.seconds)):
|
||||
if connOpt.value().isSome():
|
||||
okPeers.add(randomPeer)
|
||||
info "Dialing successful",
|
||||
peer = constructMultiaddrStr(randomPeer),
|
||||
agent = randomPeer.getAgent(),
|
||||
codec = codec
|
||||
lpt_dialed_peers.inc(labelValues = [randomPeer.getAgent()])
|
||||
else:
|
||||
lpt_dial_failures.inc(labelValues = [randomPeer.getAgent()])
|
||||
error "Dialing failed",
|
||||
peer = constructMultiaddrStr(randomPeer),
|
||||
agent = randomPeer.getAgent(),
|
||||
codec = codec
|
||||
else:
|
||||
lpt_dial_failures.inc(labelValues = [randomPeer.getAgent()])
|
||||
error "Timeout dialing service peer",
|
||||
peer = constructMultiaddrStr(randomPeer),
|
||||
agent = randomPeer.getAgent(),
|
||||
codec = codec
|
||||
|
||||
var okPeersStr: string = ""
|
||||
for idx, peer in okPeers:
|
||||
okPeersStr.add(
|
||||
" " & $idx & ". | " & constructMultiaddrStr(peer) & " | agent: " &
|
||||
peer.getAgent() & " | protos: " & $peer.protocols & " | caps: " &
|
||||
$peer.enr.map(getCapabilities) & "\n"
|
||||
)
|
||||
echo "PX returned peers found callable for " & codec & " / " & $capability & ":\n"
|
||||
echo okPeersStr
|
||||
|
||||
return some(okPeers)
|
||||
|
||||
proc pxLookupServiceNode*(
|
||||
node: WakuNode, conf: LiteProtocolTesterConf
|
||||
): Future[Result[bool, void]] {.async.} =
|
||||
let codec: string = conf.getCodec()
|
||||
|
||||
if node.wakuPeerExchange.isNil():
|
||||
let peerExchangeNode = translateToRemotePeerInfo(conf.bootstrapNode).valueOr:
|
||||
error "Failed to parse bootstrap node - cannot use PeerExchange.",
|
||||
node = conf.bootstrapNode
|
||||
return err()
|
||||
info "PeerExchange node", peer = constructMultiaddrStr(peerExchangeNode)
|
||||
node.peerManager.addServicePeer(peerExchangeNode, WakuPeerExchangeCodec)
|
||||
|
||||
try:
|
||||
await node.mountPeerExchange(some(conf.clusterId))
|
||||
except CatchableError:
|
||||
error "failed to mount waku peer-exchange protocol",
|
||||
error = getCurrentExceptionMsg()
|
||||
return err()
|
||||
|
||||
var trialCount = 5
|
||||
while trialCount > 0:
|
||||
let futPeers = node.fetchPeerExchangePeers(conf.reqPxPeers)
|
||||
if not await futPeers.withTimeout(30.seconds):
|
||||
notice "Cannot get peers from PX", round = 5 - trialCount
|
||||
else:
|
||||
if futPeers.value().isErr():
|
||||
info "PeerExchange reported error", error = futPeers.read().error
|
||||
return err()
|
||||
|
||||
if conf.testPeers:
|
||||
let peersOpt =
|
||||
await tryCallAllPxPeers(node.peerManager, codec, conf.getPubsubTopic())
|
||||
if peersOpt.isSome():
|
||||
info "Found service peers for codec",
|
||||
codec = codec, peer_count = peersOpt.get().len()
|
||||
return ok(peersOpt.get().len > 0)
|
||||
else:
|
||||
let peerOpt =
|
||||
await selectRandomCapablePeer(node.peerManager, codec, conf.getPubsubTopic())
|
||||
if peerOpt.isSome():
|
||||
info "Found service peer for codec", codec = codec, peer = peerOpt.get()
|
||||
return ok(true)
|
||||
|
||||
await sleepAsync(5.seconds)
|
||||
trialCount -= 1
|
||||
|
||||
return err()
|
||||
|
||||
var alreadyUsedServicePeers {.threadvar.}: seq[RemotePeerInfo]
|
||||
|
||||
## Select service peers by codec from peer store randomly.
|
||||
proc selectRandomServicePeer*(
|
||||
pm: PeerManager, actualPeer: Option[RemotePeerInfo], codec: string
|
||||
): Result[RemotePeerInfo, void] =
|
||||
if actualPeer.isSome():
|
||||
alreadyUsedServicePeers.add(actualPeer.get())
|
||||
|
||||
let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt(
|
||||
it notin alreadyUsedServicePeers
|
||||
)
|
||||
if supportivePeers.len == 0:
|
||||
return err()
|
||||
|
||||
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
|
||||
return ok(supportivePeers[rndPeerIndex])
|
||||
336
third-party/nwaku/apps/liteprotocoltester/statistics.nim
vendored
Normal file
336
third-party/nwaku/apps/liteprotocoltester/statistics.nim
vendored
Normal file
@ -0,0 +1,336 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sets, tables, sequtils, options, strformat],
|
||||
chronos/timer as chtimer,
|
||||
chronicles,
|
||||
chronos,
|
||||
results,
|
||||
libp2p/peerid
|
||||
|
||||
import ./tester_message, ./lpt_metrics
|
||||
|
||||
type
|
||||
ArrivalInfo = object
|
||||
arrivedAt: Moment
|
||||
prevArrivedAt: Moment
|
||||
prevIndex: uint32
|
||||
|
||||
MessageInfo = tuple[msg: ProtocolTesterMessage, info: ArrivalInfo]
|
||||
DupStat = tuple[hash: string, dupCount: int, size: uint64]
|
||||
|
||||
StatHelper = object
|
||||
prevIndex: uint32
|
||||
prevArrivedAt: Moment
|
||||
lostIndices: HashSet[uint32]
|
||||
seenIndices: HashSet[uint32]
|
||||
maxIndex: uint32
|
||||
duplicates: OrderedTable[uint32, DupStat]
|
||||
|
||||
Statistics* = object
|
||||
received: Table[uint32, MessageInfo]
|
||||
firstReceivedIdx*: uint32
|
||||
allMessageCount*: uint32
|
||||
receivedMessages*: uint32
|
||||
misorderCount*: uint32
|
||||
lateCount*: uint32
|
||||
duplicateCount*: uint32
|
||||
helper: StatHelper
|
||||
|
||||
PerPeerStatistics* = Table[string, Statistics]
|
||||
|
||||
func `$`*(a: Duration): string {.inline.} =
|
||||
## Original stringify implementation from chronos/timer.nim is not capable of printing 0ns
|
||||
## Returns string representation of Duration ``a`` as nanoseconds value.
|
||||
|
||||
if a.isZero:
|
||||
return "0ns"
|
||||
|
||||
return chtimer.`$`(a)
|
||||
|
||||
proc init*(T: type Statistics, expectedMessageCount: int = 1000): T =
|
||||
result.helper.prevIndex = 0
|
||||
result.helper.maxIndex = 0
|
||||
result.helper.seenIndices.init(expectedMessageCount)
|
||||
result.received = initTable[uint32, MessageInfo](expectedMessageCount)
|
||||
return result
|
||||
|
||||
proc addMessage*(
    self: var Statistics, sender: string, msg: ProtocolTesterMessage, msgHash: string
) =
  ## Record one received tester message for this sender: stores arrival
  ## metadata and updates duplicate / misorder / late counters plus the
  ## corresponding prometheus metrics.
  # the first message fixes the announced total and the starting index
  if self.allMessageCount == 0:
    self.allMessageCount = msg.count
    self.firstReceivedIdx = msg.index
  elif self.allMessageCount != msg.count:
    error "Message count mismatch at message",
      index = msg.index, expected = self.allMessageCount, got = msg.count

  let arrivalRecord: MessageInfo = (
    msg: msg,
    info: ArrivalInfo(
      arrivedAt: Moment.now(),
      prevArrivedAt: self.helper.prevArrivedAt,
      prevIndex: self.helper.prevIndex,
    ),
  )
  lpt_receiver_received_bytes.inc(labelValues = [sender], amount = msg.size.int64)

  # hasKeyOrPut returns true when the index was already present => duplicate
  if self.received.hasKeyOrPut(msg.index, arrivalRecord):
    inc(self.duplicateCount)
    self.helper.duplicates.mgetOrPut(msg.index, (msgHash, 0, msg.size)).dupCount.inc()
    warn "Duplicate message",
      index = msg.index,
      hash = msgHash,
      times_duplicated = self.helper.duplicates[msg.index].dupCount
    lpt_receiver_duplicate_messages_count.inc(labelValues = [sender])
    lpt_receiver_distinct_duplicate_messages_count.set(
      labelValues = [sender], value = self.helper.duplicates.len()
    )
    return

  ## detect out-of-order arrival and possible lost messages
  let expectedIndex = self.helper.prevIndex + 1
  if expectedIndex < msg.index:
    inc(self.misorderCount)
    warn "Misordered message arrival", index = msg.index, expected = expectedIndex
  elif self.helper.prevIndex > msg.index:
    inc(self.lateCount)
    warn "Late message arrival", index = msg.index, expected = expectedIndex

  self.helper.maxIndex = max(self.helper.maxIndex, msg.index)
  self.helper.prevIndex = msg.index
  self.helper.prevArrivedAt = arrivalRecord.info.arrivedAt
  inc(self.receivedMessages)
  lpt_receiver_received_messages_count.inc(labelValues = [sender])
  lpt_receiver_missing_messages_count.set(
    labelValues = [sender], value = (self.helper.maxIndex - self.receivedMessages).int64
  )
|
||||
|
||||
proc addMessage*(
    self: var PerPeerStatistics,
    peerId: string,
    msg: ProtocolTesterMessage,
    msgHash: string,
) =
  ## Route the message to the Statistics entry of its service peer,
  ## creating that entry on first use, and refresh the peer-count gauge.
  if peerId notin self:
    self[peerId] = Statistics.init()

  # prefer the short, human-readable form of the sender id when it parses
  let shortSenderId = block:
    let parsed = PeerId.init(msg.sender)
    if parsed.isOk():
      parsed.get().shortLog()
    else:
      msg.sender

  discard catch:
    self[peerId].addMessage(shortSenderId, msg, msgHash)

  lpt_receiver_sender_peer_count.set(value = self.len)
|
||||
|
||||
proc lastMessageArrivedAt*(self: Statistics): Option[Moment] =
  ## Arrival time of the most recent accepted message, or none if no
  ## message has been received yet.
  if self.receivedMessages == 0:
    return none(Moment)
  return some(self.helper.prevArrivedAt)
|
||||
|
||||
proc lossCount*(self: Statistics): uint32 =
  ## Number of indices below the highest observed index that never arrived.
  return self.helper.maxIndex - self.receivedMessages
|
||||
|
||||
proc calcLatency*(self: Statistics): tuple[min, max, avg: Duration] =
  ## Compute min/max/average latency. The latency of a message is how much
  ## later it arrived than the gap announced by the sender (`sincePrev`);
  ## arrivals faster than announced count as zero.
  var
    lowest = nanos(0)
    highest = nanos(0)
    total = nanos(0)

  # need more than two messages: the first two carry no usable gap
  if self.receivedMessages > 2:
    try:
      var prevArrivedAt = self.received[self.firstReceivedIdx].info.arrivedAt

      for index, (message, arrival) in self.received.pairs:
        if index <= 1:
          continue
        let announcedGap = nanos(message.sincePrev)

        ## latency will be 0 if the message arrived sooner than announced
        var lag = arrival.arrivedAt - arrival.prevArrivedAt - announcedGap

        ## zero latencies are skipped: unlikely, but if one happened it would
        ## distort the minimum, which we want as the feasible lower bound
        if lag > nanos(0):
          lowest =
            if lowest == nanos(0):
              lag
            else:
              min(lowest, lag)

          highest = max(highest, lag)
          total += lag

      total = total div (self.receivedMessages - 1)
    except KeyError:
      error "Error while calculating latency: " & getCurrentExceptionMsg()

  return (lowest, highest, total)
|
||||
|
||||
proc missingIndices*(self: Statistics): seq[uint32] =
  ## List every index in 1..maxIndex for which no message was recorded.
  for candidate in 1 .. self.helper.maxIndex:
    if candidate notin self.received:
      result.add(candidate)
|
||||
|
||||
proc distinctDupCount(self: Statistics): int {.inline.} =
  ## How many different message indices arrived more than once.
  self.helper.duplicates.len()
|
||||
|
||||
proc allDuplicates(self: Statistics): int {.inline.} =
  ## Total number of duplicate receptions summed over all indices.
  for dup in self.helper.duplicates.values:
    result += dup.dupCount
|
||||
|
||||
proc dupMsgs(self: Statistics): string =
  ## Render one descriptive line per duplicated message index.
  for idx, (hash, dupCount, size) in self.helper.duplicates.pairs:
    result.add(
      " index: " & $idx & " | hash: " & hash & " | count: " & $dupCount & " | size: " &
        $size & "\n"
    )
|
||||
|
||||
proc echoStat*(self: Statistics, peerId: string) =
  ## Print a boxed, human-readable statistics report for one peer and
  ## export its min/avg/max latency gauges.
  let (minL, maxL, avgL) = self.calcLatency()
  lpt_receiver_latencies.set(labelValues = [peerId, "min"], value = minL.nanos())
  lpt_receiver_latencies.set(labelValues = [peerId, "avg"], value = avgL.nanos())
  lpt_receiver_latencies.set(labelValues = [peerId, "max"], value = maxL.nanos())

  # fmt() can raise on interpolation failure, hence the catch wrapper.
  # NOTE(review): in-literal column spacing may have been collapsed by the
  # diff view this was recovered from — verify the rendered alignment.
  let printable = catch:
    """*------------------------------------------------------------------------------------------*
| Expected | Received | Target | Loss | Misorder | Late | |
|{self.helper.maxIndex:>11} |{self.receivedMessages:>11} |{self.allMessageCount:>11} |{self.lossCount():>11} |{self.misorderCount:>11} |{self.lateCount:>11} | |
*------------------------------------------------------------------------------------------*
| Latency stat: |
| min latency: {$minL:<73}|
| avg latency: {$avgL:<73}|
| max latency: {$maxL:<73}|
*------------------------------------------------------------------------------------------*
| Duplicate stat: |
| distinct duplicate messages: {$self.distinctDupCount():<57}|
| sum duplicates : {$self.allDuplicates():<57}|
Duplicated messages:
{self.dupMsgs()}
*------------------------------------------------------------------------------------------*
| Lost indices: |
| {self.missingIndices()} |
*------------------------------------------------------------------------------------------*""".fmt()

  if printable.isErr():
    echo "Error while printing statistics: " & printable.error().msg
  else:
    echo printable.get()
|
||||
|
||||
proc jsonStat*(self: Statistics): string =
  ## Render this peer's statistics as a JSON object string; on formatting
  ## failure a JSON error object is returned instead of raising.
  # BUG FIX: the original `let minL, maxL, avgL = self.calcLatency()` binds
  # the ENTIRE (min, max, avg) tuple to each of the three names (Nim
  # `let a, b, c = expr` semantics), so the "latency" fields serialized a
  # stringified tuple rather than a Duration. Destructure instead, matching
  # echoStat above.
  let (minL, maxL, avgL) = self.calcLatency()

  let json = catch:
    """{{"expected":{self.helper.maxIndex},
"received": {self.receivedMessages},
"target": {self.allMessageCount},
"loss": {self.lossCount()},
"misorder": {self.misorderCount},
"late": {self.lateCount},
"duplicate": {self.duplicateCount},
"latency":
{{"avg": "{avgL}",
"min": "{minL}",
"max": "{maxL}"
}},
"lostIndices": {self.missingIndices()}
}}""".fmt()
  if json.isErr:
    return "{\"result:\": \"" & json.error.msg & "\"}"

  return json.get()
|
||||
|
||||
proc echoStats*(self: var PerPeerStatistics) =
  ## Print a human-readable report for every tracked sender peer.
  for peerId, stats in self.pairs:
    let header = catch:
      "Receiver statistics from peer {peerId}".fmt()
    if header.isOk:
      echo header.get()
    else:
      echo "Error while printing statistics"
    stats.echoStat(peerId)
|
||||
|
||||
proc jsonStats*(self: PerPeerStatistics): string =
  ## Aggregate every peer's jsonStat into one JSON document.
  try:
    #!fmt: off
    var output = "{\"statistics\": ["
    var needSeparator = false
    for peerId, stats in self.pairs:
      if needSeparator:
        output.add(", ")
      needSeparator = true
      output.add("{{\"sender\": \"{peerId}\", \"stat\":".fmt())
      output.add(stats.jsonStat())
      output.add("}")
    output.add("]}")
    return output
    #!fmt: on
  except CatchableError:
    return
      "{\"result:\": \"Error while generating json stats: " & getCurrentExceptionMsg() &
      "\"}"
|
||||
|
||||
proc lastMessageArrivedAt*(self: PerPeerStatistics): Option[Moment] =
  ## Latest message arrival time across all peers, or none if no peer
  ## has delivered anything yet.
  let epoch = Moment.init(0, Millisecond)
  var newest = epoch
  for stat in self.values:
    let arrivedAt = stat.lastMessageArrivedAt().valueOr:
      continue

    if arrivedAt > newest:
      newest = arrivedAt

  if newest == epoch:
    return none(Moment)

  return some(newest)
|
||||
|
||||
proc checkIfAllMessagesReceived*(
    self: PerPeerStatistics, maxWaitForLastMessage: Duration
): Future[bool] {.async.} =
  ## Decide whether the test run can be considered finished.
  ## Returns false while messages are still expected and the most recent
  ## one arrived recently enough to justify further waiting.
  # if no peers have sent messages yet, assume we just started
  if self.len == 0:
    return false

  # numerically complete: each peer produced at least one message and
  # its highest seen index reached the announced target
  var everyPeerComplete = true
  for stat in self.values:
    if (stat.allMessageCount == 0 and stat.receivedMessages == 0) or
        stat.helper.maxIndex < stat.allMessageCount:
      everyPeerComplete = false
      break

  if not everyPeerComplete:
    # not all messages received: keep waiting only while the last message
    # is recent, to avoid waiting forever once publishers have quit
    let lastMessageAt = self.lastMessageArrivedAt()
    if lastMessageAt.isNone():
      return false

    if Moment.now() - lastMessageAt.get() < maxWaitForLastMessage:
      return false
    info "No message since max wait time", maxWait = $maxWaitForLastMessage

  ## All peers appear done (or went silent); if counts still fall short,
  ## grant the system one extra 20s grace period to deliver stragglers.
  var graceNeeded = false
  for stat in self.values:
    if stat.receivedMessages < stat.allMessageCount:
      graceNeeded = true

  if graceNeeded:
    await sleepAsync(20.seconds)

  return true
|
||||
208
third-party/nwaku/apps/liteprotocoltester/tester_config.nim
vendored
Normal file
208
third-party/nwaku/apps/liteprotocoltester/tester_config.nim
vendored
Normal file
@ -0,0 +1,208 @@
|
||||
import
|
||||
results,
|
||||
chronos,
|
||||
confutils,
|
||||
confutils/defs,
|
||||
confutils/std/net,
|
||||
confutils/toml/defs as confTomlDefs,
|
||||
confutils/toml/std/net as confTomlNet,
|
||||
libp2p/crypto/crypto,
|
||||
libp2p/crypto/secp,
|
||||
libp2p/multiaddress,
|
||||
secp256k1
|
||||
|
||||
import
|
||||
../../tools/confutils/
|
||||
[cli_args, envvar as confEnvvarDefs, envvar_net as confEnvvarNet],
|
||||
waku/[common/logging, waku_core, waku_core/topics/pubsub_topic]
|
||||
|
||||
export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet
|
||||
|
||||
const
|
||||
LitePubsubTopic* = PubsubTopic("/waku/2/rs/66/0")
|
||||
LiteContentTopic* = ContentTopic("/tester/1/light-pubsub-example/proto")
|
||||
DefaultMinTestMessageSizeStr* = "1KiB"
|
||||
DefaultMaxTestMessageSizeStr* = "150KiB"
|
||||
|
||||
type TesterFunctionality* = enum
|
||||
SENDER # pumps messages to the network
|
||||
RECEIVER # gather and analyze messages from the network
|
||||
|
||||
type LightpushVersion* = enum
|
||||
LEGACY # legacy lightpush protocol
|
||||
V3 # lightpush v3 protocol
|
||||
|
||||
type LiteProtocolTesterConf* = object
|
||||
configFile* {.
|
||||
desc:
|
||||
"Loads configuration from a TOML file (cmd-line parameters take precedence) for the light waku node",
|
||||
name: "config-file"
|
||||
.}: Option[InputFile]
|
||||
|
||||
## Log configuration
|
||||
logLevel* {.
|
||||
desc:
|
||||
"Sets the log level for process. Supported levels: TRACE, DEBUG, INFO, NOTICE, WARN, ERROR or FATAL",
|
||||
defaultValue: logging.LogLevel.DEBUG,
|
||||
name: "log-level"
|
||||
.}: logging.LogLevel
|
||||
|
||||
logFormat* {.
|
||||
desc:
|
||||
"Specifies what kind of logs should be written to stdout. Supported formats: TEXT, JSON",
|
||||
defaultValue: logging.LogFormat.TEXT,
|
||||
name: "log-format"
|
||||
.}: logging.LogFormat
|
||||
|
||||
## Test configuration
|
||||
serviceNode* {.
|
||||
desc: "Peer multiaddr of the service node.", defaultValue: "", name: "service-node"
|
||||
.}: string
|
||||
|
||||
bootstrapNode* {.
|
||||
desc:
|
||||
"Peer multiaddr of the bootstrap node. If `service-node` not set, it is used to retrieve potential service nodes of the network.",
|
||||
defaultValue: "",
|
||||
name: "bootstrap-node"
|
||||
.}: string
|
||||
|
||||
nat* {.
|
||||
desc:
|
||||
"Specify method to use for determining public address. " &
|
||||
"Must be one of: any, none, upnp, pmp, extip:<IP>.",
|
||||
defaultValue: "any"
|
||||
.}: string
|
||||
|
||||
testFunc* {.
|
||||
desc: "Specifies the lite protocol tester side. Supported values: sender, receiver.",
|
||||
defaultValue: TesterFunctionality.RECEIVER,
|
||||
name: "test-func"
|
||||
.}: TesterFunctionality
|
||||
|
||||
lightpushVersion* {.
|
||||
desc: "Version of the sender to use. Supported values: legacy, v3.",
|
||||
defaultValue: LightpushVersion.LEGACY,
|
||||
name: "lightpush-version"
|
||||
.}: LightpushVersion
|
||||
|
||||
numMessages* {.
|
||||
desc: "Number of messages to send.", defaultValue: 120, name: "num-messages"
|
||||
.}: uint32
|
||||
|
||||
startPublishingAfter* {.
|
||||
desc: "Wait number of seconds before start publishing messages.",
|
||||
defaultValue: 5,
|
||||
name: "start-publishing-after"
|
||||
.}: uint32
|
||||
|
||||
messageInterval* {.
|
||||
desc: "Delay between messages in milliseconds.",
|
||||
defaultValue: 1000,
|
||||
name: "message-interval"
|
||||
.}: uint32
|
||||
|
||||
shard* {.desc: "Shards index to subscribe to. ", defaultValue: 0, name: "shard".}:
|
||||
uint16
|
||||
|
||||
contentTopics* {.
|
||||
desc: "Default content topic to subscribe to. Argument may be repeated.",
|
||||
defaultValue: @[LiteContentTopic],
|
||||
name: "content-topic"
|
||||
.}: seq[ContentTopic]
|
||||
|
||||
clusterId* {.
|
||||
desc:
|
||||
"Cluster id that the node is running in. Node in a different cluster id is disconnected.",
|
||||
defaultValue: 0,
|
||||
name: "cluster-id"
|
||||
.}: uint16
|
||||
|
||||
minTestMessageSize* {.
|
||||
desc:
|
||||
"Minimum message size. Accepted units: KiB, KB, and B. e.g. 1024KiB; 1500 B; etc.",
|
||||
defaultValue: DefaultMinTestMessageSizeStr,
|
||||
name: "min-test-msg-size"
|
||||
.}: string
|
||||
|
||||
maxTestMessageSize* {.
|
||||
desc:
|
||||
"Maximum message size. Accepted units: KiB, KB, and B. e.g. 1024KiB; 1500 B; etc.",
|
||||
defaultValue: DefaultMaxTestMessageSizeStr,
|
||||
name: "max-test-msg-size"
|
||||
.}: string
|
||||
## Tester REST service configuration
|
||||
restAddress* {.
|
||||
desc: "Listening address of the REST HTTP server.",
|
||||
defaultValue: parseIpAddress("127.0.0.1"),
|
||||
name: "rest-address"
|
||||
.}: IpAddress
|
||||
|
||||
testPeers* {.
|
||||
desc: "Run dial test on gathered PeerExchange peers.",
|
||||
defaultValue: false,
|
||||
name: "test-peers"
|
||||
.}: bool
|
||||
|
||||
reqPxPeers* {.
|
||||
desc: "Number of peers to request on PeerExchange.",
|
||||
defaultValue: 100,
|
||||
name: "req-px-peers"
|
||||
.}: uint16
|
||||
|
||||
restPort* {.
|
||||
desc: "Listening port of the REST HTTP server.",
|
||||
defaultValue: 8654,
|
||||
name: "rest-port"
|
||||
.}: uint16
|
||||
|
||||
fixedServicePeer* {.
|
||||
desc:
|
||||
"Prevent changing the service peer in case of failures, the full test will stict to the first service peer in use.",
|
||||
defaultValue: false,
|
||||
name: "fixed-service-peer"
|
||||
.}: bool
|
||||
|
||||
restAllowOrigin* {.
|
||||
desc:
|
||||
"Allow cross-origin requests from the specified origin." &
|
||||
"Argument may be repeated." & "Wildcards: * or ? allowed." &
|
||||
"Ex.: \"localhost:*\" or \"127.0.0.1:8080\"",
|
||||
defaultValue: @["*"],
|
||||
name: "rest-allow-origin"
|
||||
.}: seq[string]
|
||||
|
||||
metricsPort* {.
|
||||
desc: "Listening port of the Metrics HTTP server.",
|
||||
defaultValue: 8003,
|
||||
name: "metrics-port"
|
||||
.}: uint16
|
||||
|
||||
{.push warning[ProveInit]: off.}
|
||||
|
||||
proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] =
  ## Load configuration from CLI arguments plus an environment-variable
  ## secondary source (prefix "liteprotocoltester"). Any confutils parsing
  ## failure is converted into an err result instead of raising.
  try:
    let conf = LiteProtocolTesterConf.load(
      version = version,
      secondarySources = proc(
          conf: LiteProtocolTesterConf, sources: auto
      ) {.gcsafe, raises: [ConfigurationError].} =
        # env vars are read as a config-file-like source under this prefix
        sources.addConfigFile(Envvar, InputFile("liteprotocoltester")),
    )
    ok(conf)
  except CatchableError:
    err(getCurrentExceptionMsg())
|
||||
|
||||
proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic =
  ## Derive the pubsub topic string from the configured cluster and shard.
  $RelayShard(clusterId: conf.clusterId, shardId: conf.shard)
|
||||
|
||||
proc getCodec*(conf: LiteProtocolTesterConf): string =
  ## Protocol codec matching the tester role: filter-subscribe for
  ## receivers; legacy or v3 lightpush for senders.
  if conf.testFunc == TesterFunctionality.RECEIVER:
    return WakuFilterSubscribeCodec
  if conf.lightpushVersion == LightpushVersion.LEGACY:
    return WakuLegacyLightPushCodec
  return WakuLightPushCodec
|
||||
|
||||
{.pop.}
|
||||
121
third-party/nwaku/apps/liteprotocoltester/tester_message.nim
vendored
Normal file
121
third-party/nwaku/apps/liteprotocoltester/tester_message.nim
vendored
Normal file
@ -0,0 +1,121 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
json_serialization,
|
||||
json_serialization/std/options,
|
||||
json_serialization/lexer
|
||||
|
||||
import ../../waku/waku_api/rest/serdes
|
||||
|
||||
type ProtocolTesterMessage* = object
|
||||
sender*: string
|
||||
index*: uint32
|
||||
count*: uint32
|
||||
startedAt*: int64
|
||||
sinceStart*: int64
|
||||
sincePrev*: int64
|
||||
size*: uint64
|
||||
|
||||
proc writeValue*(
|
||||
writer: var JsonWriter[RestJson], value: ProtocolTesterMessage
|
||||
) {.raises: [IOError].} =
|
||||
writer.beginRecord()
|
||||
writer.writeField("sender", value.sender)
|
||||
writer.writeField("index", value.index)
|
||||
writer.writeField("count", value.count)
|
||||
writer.writeField("startedAt", value.startedAt)
|
||||
writer.writeField("sinceStart", value.sinceStart)
|
||||
writer.writeField("sincePrev", value.sincePrev)
|
||||
writer.writeField("size", value.size)
|
||||
writer.endRecord()
|
||||
|
||||
proc readValue*(
|
||||
reader: var JsonReader[RestJson], value: var ProtocolTesterMessage
|
||||
) {.gcsafe, raises: [SerializationError, IOError].} =
|
||||
var
|
||||
sender: Option[string]
|
||||
index: Option[uint32]
|
||||
count: Option[uint32]
|
||||
startedAt: Option[int64]
|
||||
sinceStart: Option[int64]
|
||||
sincePrev: Option[int64]
|
||||
size: Option[uint64]
|
||||
|
||||
for fieldName in readObjectFields(reader):
|
||||
case fieldName
|
||||
of "sender":
|
||||
if sender.isSome():
|
||||
reader.raiseUnexpectedField(
|
||||
"Multiple `sender` fields found", "ProtocolTesterMessage"
|
||||
)
|
||||
sender = some(reader.readValue(string))
|
||||
of "index":
|
||||
if index.isSome():
|
||||
reader.raiseUnexpectedField(
|
||||
"Multiple `index` fields found", "ProtocolTesterMessage"
|
||||
)
|
||||
index = some(reader.readValue(uint32))
|
||||
of "count":
|
||||
if count.isSome():
|
||||
reader.raiseUnexpectedField(
|
||||
"Multiple `count` fields found", "ProtocolTesterMessage"
|
||||
)
|
||||
count = some(reader.readValue(uint32))
|
||||
of "startedAt":
|
||||
if startedAt.isSome():
|
||||
reader.raiseUnexpectedField(
|
||||
"Multiple `startedAt` fields found", "ProtocolTesterMessage"
|
||||
)
|
||||
startedAt = some(reader.readValue(int64))
|
||||
of "sinceStart":
|
||||
if sinceStart.isSome():
|
||||
reader.raiseUnexpectedField(
|
||||
"Multiple `sinceStart` fields found", "ProtocolTesterMessage"
|
||||
)
|
||||
sinceStart = some(reader.readValue(int64))
|
||||
of "sincePrev":
|
||||
if sincePrev.isSome():
|
||||
reader.raiseUnexpectedField(
|
||||
"Multiple `sincePrev` fields found", "ProtocolTesterMessage"
|
||||
)
|
||||
sincePrev = some(reader.readValue(int64))
|
||||
of "size":
|
||||
if size.isSome():
|
||||
reader.raiseUnexpectedField(
|
||||
"Multiple `size` fields found", "ProtocolTesterMessage"
|
||||
)
|
||||
size = some(reader.readValue(uint64))
|
||||
else:
|
||||
unrecognizedFieldWarning(value)
|
||||
|
||||
if sender.isNone():
|
||||
reader.raiseUnexpectedValue("Field `sender` is missing")
|
||||
|
||||
if index.isNone():
|
||||
reader.raiseUnexpectedValue("Field `index` is missing")
|
||||
|
||||
if count.isNone():
|
||||
reader.raiseUnexpectedValue("Field `count` is missing")
|
||||
|
||||
if startedAt.isNone():
|
||||
reader.raiseUnexpectedValue("Field `startedAt` is missing")
|
||||
|
||||
if sinceStart.isNone():
|
||||
reader.raiseUnexpectedValue("Field `sinceStart` is missing")
|
||||
|
||||
if sincePrev.isNone():
|
||||
reader.raiseUnexpectedValue("Field `sincePrev` is missing")
|
||||
|
||||
if size.isNone():
|
||||
reader.raiseUnexpectedValue("Field `size` is missing")
|
||||
|
||||
value = ProtocolTesterMessage(
|
||||
sender: sender.get(),
|
||||
index: index.get(),
|
||||
count: count.get(),
|
||||
startedAt: startedAt.get(),
|
||||
sinceStart: sinceStart.get(),
|
||||
sincePrev: sincePrev.get(),
|
||||
size: size.get(),
|
||||
)
|
||||
29
third-party/nwaku/apps/liteprotocoltester/v3_publisher.nim
vendored
Normal file
29
third-party/nwaku/apps/liteprotocoltester/v3_publisher.nim
vendored
Normal file
@ -0,0 +1,29 @@
|
||||
import results, options, chronos
|
||||
import waku/[waku_node, waku_core, waku_lightpush, waku_lightpush/common]
|
||||
import publisher_base
|
||||
|
||||
type V3Publisher* = ref object of PublisherBase
|
||||
|
||||
proc new*(T: type V3Publisher, wakuNode: WakuNode): T =
  ## Construct a V3Publisher, mounting the lightpush client on the node
  ## first if it is not mounted yet.
  if wakuNode.wakuLightpushClient.isNil():
    wakuNode.mountLightPushClient()

  T(wakuNode: wakuNode)
|
||||
|
||||
method send*(
    self: V3Publisher,
    topic: PubsubTopic,
    message: WakuMessage,
    servicePeer: RemotePeerInfo,
): Future[Result[void, string]] {.async.} =
  ## Publish one message via lightpush v3 toward the given service peer.
  # On failure the returned text must stay distinguishable per error type,
  # because metrics use it as a label.
  let pushResult =
    await self.wakuNode.lightpushPublish(some(topic), message, some(servicePeer))
  if pushResult.isErr():
    let failure = pushResult.error
    # TODO: We need better separation of errors happening on the client side or the server side.-
    if failure.code == LightPushErrorCode.NO_PEERS_TO_RELAY and
        failure.desc != some("No peers for topic, skipping publish"):
      return err("dial_failure")
    return err($failure.code)
  return ok()
|
||||
84
third-party/nwaku/apps/networkmonitor/README.md
vendored
Normal file
84
third-party/nwaku/apps/networkmonitor/README.md
vendored
Normal file
@ -0,0 +1,84 @@
|
||||
# networkmonitor
|
||||
|
||||
Monitoring tool to run in an existing `waku` network with the following features:
|
||||
|
||||
* Keeps discovering new peers using `discv5`
|
||||
* Tracks advertised capabilities of each node as per stored in the ENR `waku` field
|
||||
* Attempts to connect to all nodes, tracking which protocols each node supports
|
||||
* Presents grafana-ready metrics showing the state of the network in terms of locations, IPs, number of discovered peers, number of peers we could connect to, the user-agent each peer reports, content topics, and the amount of received messages on each one.
|
||||
* Metrics are exposed through prometheus metrics but also with a custom rest api, presenting detailed information about each peer. These metrics are exposed via a rest api.
|
||||
|
||||
## Usage
|
||||
|
||||
```console
|
||||
./build/networkmonitor --help
|
||||
Usage:
|
||||
|
||||
networkmonitor [OPTIONS]...
|
||||
|
||||
The following options are available:
|
||||
|
||||
-l, --log-level Sets the log level [=LogLevel.INFO].
|
||||
-t, --timeout Timeout to consider that the connection failed [=chronos.seconds(10)].
|
||||
-b, --bootstrap-node Bootstrap ENR node. Argument may be repeated. [=@[""]].
|
||||
--dns-discovery-url URL for DNS node list in format 'enrtree://<key>@<fqdn>'.
|
||||
--pubsub-topic Default pubsub topic to subscribe to. Argument may be repeated..
|
||||
-r, --refresh-interval How often new peers are discovered and connected to (in seconds) [=5].
|
||||
--cluster-id Cluster id that the node is running in. Node in a different cluster id is
|
||||
disconnected. [=1].
|
||||
--rln-relay Enable spam protection through rln-relay: true|false [=true].
|
||||
--rln-relay-dynamic Enable waku-rln-relay with on-chain dynamic group management: true|false
|
||||
[=true].
|
||||
--rln-relay-eth-client-address HTTP address of an Ethereum testnet client e.g., http://localhost:8540/
|
||||
[=http://localhost:8540/].
|
||||
--rln-relay-eth-contract-address Address of membership contract on an Ethereum testnet.
|
||||
--rln-relay-epoch-sec Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.
|
||||
[=1].
|
||||
--rln-relay-user-message-limit Set a user message limit for the rln membership registration. Must be a positive
|
||||
integer. Default is 1. [=1].
|
||||
--metrics-server Enable the metrics server: true|false [=true].
|
||||
--metrics-server-address Listening address of the metrics server. [=parseIpAddress("127.0.0.1")].
|
||||
--metrics-server-port Listening HTTP port of the metrics server. [=8008].
|
||||
--metrics-rest-address Listening address of the metrics rest server. [=127.0.0.1].
|
||||
--metrics-rest-port Listening HTTP port of the metrics rest server. [=8009].
|
||||
```
|
||||
|
||||
## Example
|
||||
|
||||
Connect to the network through a given bootstrap node, with default parameters. See metrics section for the data that it exposes.
|
||||
|
||||
```console
|
||||
./build/networkmonitor --log-level=INFO --b="enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw"
|
||||
```
|
||||
|
||||
```console
|
||||
./build/networkmonitor --log-level=INFO --dns-discovery-url=enrtree://AL65EKLJAUXKKPG43HVTML5EFFWEZ7L4LOKTLZCLJASG4DSESQZEC@prod.status.nodes.status.im
|
||||
```
|
||||
|
||||
## Metrics
|
||||
|
||||
Metrics are divided into two categories:
|
||||
|
||||
* Prometheus metrics, exposed as i.e. gauges.
|
||||
* Custom metrics, used for unconstrained labels such as peer information or content topics.
|
||||
- These metrics are not exposed through prometheus because since they are unconstrained, they can end up breaking the backend, as a new datapoint is generated for each one and it can reach up a point where is too much to handle.
|
||||
|
||||
### Prometheus Metrics
|
||||
|
||||
The following metrics are available. See `http://localhost:8008/metrics`
|
||||
|
||||
* `peer_type_as_per_enr`: Number of peers supporting each capability according to the ENR (Relay, Store, Lightpush, Filter)
|
||||
* `peer_type_as_per_protocol`: Number of peers supporting each protocol, after a successful connection)
|
||||
* `peer_user_agents`: List of useragents found in the network and their count
|
||||
|
||||
Other relevant metrics reused from `nim-eth`:
|
||||
|
||||
* `routing_table_nodes`: Inherited from nim-eth, number of nodes in the routing table
|
||||
* `discovery_message_requests_outgoing_total`: Inherited from nim-eth, number of outgoing discovery requests, useful to know if the node is actively looking for new peers
|
||||
|
||||
### Custom Metrics
|
||||
|
||||
The following endpoints are available:
|
||||
|
||||
* `http://localhost:8009/allpeersinfo`: json list of all peers with extra information such as ip, location, supported protocols and last connection time.
|
||||
* `http://localhost:8009/contenttopics`: content topic messages and its message count.
|
||||
34
third-party/nwaku/apps/networkmonitor/docker-compose.yml
vendored
Normal file
34
third-party/nwaku/apps/networkmonitor/docker-compose.yml
vendored
Normal file
@ -0,0 +1,34 @@
|
||||
version: '3.8'
|
||||
networks:
|
||||
monitoring:
|
||||
driver: bridge
|
||||
|
||||
volumes:
|
||||
prometheus-data:
|
||||
driver: local
|
||||
grafana-data:
|
||||
driver: local
|
||||
|
||||
# Services definitions
|
||||
services:
|
||||
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:latest
|
||||
container_name: prometheus
|
||||
ports:
|
||||
- 9090:9090
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yaml'
|
||||
volumes:
|
||||
- ./prometheus.yaml:/etc/prometheus/prometheus.yaml:ro
|
||||
- ./data:/prometheus
|
||||
restart: unless-stopped
|
||||
|
||||
grafana:
|
||||
image: grafana/grafana-oss:latest
|
||||
container_name: grafana
|
||||
ports:
|
||||
- '3000:3000'
|
||||
volumes:
|
||||
- grafana-data:/var/lib/grafana
|
||||
restart: unless-stopped
|
||||
668
third-party/nwaku/apps/networkmonitor/networkmonitor.nim
vendored
Normal file
668
third-party/nwaku/apps/networkmonitor/networkmonitor.nim
vendored
Normal file
@ -0,0 +1,668 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[net, tables, strutils, times, sequtils, random],
|
||||
results,
|
||||
chronicles,
|
||||
chronicles/topics_registry,
|
||||
chronos,
|
||||
chronos/timer as ctime,
|
||||
confutils,
|
||||
eth/keys,
|
||||
eth/p2p/discoveryv5/enr,
|
||||
libp2p/crypto/crypto,
|
||||
libp2p/nameresolving/dnsresolver,
|
||||
libp2p/protocols/ping,
|
||||
metrics,
|
||||
metrics/chronos_httpserver,
|
||||
presto/[route, server, client]
|
||||
import
|
||||
waku/[
|
||||
waku_core,
|
||||
node/peer_manager,
|
||||
waku_node,
|
||||
waku_enr,
|
||||
discovery/waku_discv5,
|
||||
discovery/waku_dnsdisc,
|
||||
waku_relay,
|
||||
waku_rln_relay,
|
||||
factory/builder,
|
||||
factory/networks_config,
|
||||
],
|
||||
./networkmonitor_metrics,
|
||||
./networkmonitor_config,
|
||||
./networkmonitor_utils
|
||||
|
||||
logScope:
|
||||
topics = "networkmonitor"
|
||||
|
||||
const ReconnectTime = 60
|
||||
const MaxConnectionRetries = 5
|
||||
const ResetRetriesAfter = 1200
|
||||
const PingSmoothing = 0.3
|
||||
const MaxConnectedPeers = 150
|
||||
|
||||
const git_version* {.strdefine.} = "n/a"
|
||||
|
||||
proc setDiscoveredPeersCapabilities(routingTableNodes: seq[waku_enr.Record]) =
  ## Count discovered peers per advertised ENR capability and export the
  ## per-capability gauge.
  for capability in @[Relay, Store, Filter, Lightpush]:
    var supporting = 0
    for record in routingTableNodes:
      if record.supportsCapability(capability):
        inc supporting
    info "capabilities as per ENR waku flag",
      capability = capability, amount = supporting
    networkmonitor_peer_type_as_per_enr.set(
      int64(supporting), labelValues = [$capability]
    )
|
||||
|
||||
proc setDiscoveredPeersCluster(routingTableNodes: seq[Node]) =
  ## Tally discovered peers per cluster id (cluster 0 collects nodes whose
  ## ENR cannot be parsed or carries no relay sharding) and publish one
  ## gauge sample per cluster.
  var perCluster: CountTable[uint16]

  for discovered in routingTableNodes:
    let typedRecord = discovered.record.toTyped().valueOr:
      perCluster.inc(0)
      continue

    let shardInfo = typedRecord.relaySharding().valueOr:
      perCluster.inc(0)
      continue

    perCluster.inc(shardInfo.clusterId)

  for (clusterId, nodeCount) in perCluster.pairs:
    networkmonitor_peer_cluster_as_per_enr.set(int64(nodeCount), labelValues = [$clusterId])
|
||||
|
||||
proc analyzePeer(
    customPeerInfo: CustomPeerInfoRef,
    peerInfo: RemotePeerInfo,
    node: WakuNode,
    timeout: chronos.Duration,
): Future[Result[string, string]] {.async.} =
  ## Dial and libp2p-ping one peer, updating `customPeerInfo` with the
  ## measured and smoothed ping durations. Returns the peer id on success
  ## or the connection-error text on failure.
  var pingDelay: chronos.Duration

  proc ping(): Future[Result[void, string]] {.async, gcsafe.} =
    ## Dial the ping protocol and store the measured delay in `pingDelay`
    ## (closure capture); records the error text on failure.
    try:
      let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
      pingDelay = await node.libp2pPing.ping(conn)
      return ok()
    except CatchableError:
      var msg = getCurrentExceptionMsg()
      # chronos reports our withTimeout cancellation with this exact message
      if msg == "Future operation cancelled!":
        msg = "timedout"
      warn "failed to ping the peer", peer = peerInfo, err = msg

      customPeerInfo.connError = msg
      return err("could not ping peer: " & msg)

  let timedOut = not await ping().withTimeout(timeout)
  # need this check for pingDelay == 0 because there may be a conn error before timeout
  if timedOut or pingDelay == 0.millis:
    customPeerInfo.retries += 1
    return err(customPeerInfo.connError)

  customPeerInfo.connError = ""
  info "successfully pinged peer", peer = peerInfo, duration = pingDelay.millis
  networkmonitor_peer_ping.observe(pingDelay.millis)

  # We are using a smoothed (exponential) moving average of ping durations;
  # the very first sample seeds the average directly.
  customPeerInfo.avgPingDuration =
    if customPeerInfo.avgPingDuration.millis == 0:
      pingDelay
    else:
      let newAvg =
        (float64(pingDelay.millis) * PingSmoothing) +
        float64(customPeerInfo.avgPingDuration.millis) * (1.0 - PingSmoothing)

      int64(newAvg).millis

  customPeerInfo.lastPingDuration = pingDelay

  return ok(customPeerInfo.peerId)
|
||||
|
||||
proc shouldReconnect(customPeerInfo: CustomPeerInfoRef): bool =
  ## Decide whether a known peer should be dialed again: enough time has
  ## passed since its last connection AND it still has retry budget left.
  ## The retry budget is replenished once ResetRetriesAfter seconds have
  ## elapsed since the last successful connection.
  let intervalElapsed =
    getTime().toUnix() >= customPeerInfo.lastTimeConnected + ReconnectTime

  var retriesLeft = customPeerInfo.retries < MaxConnectionRetries
  if not retriesLeft and
      getTime().toUnix() >= customPeerInfo.lastTimeConnected + ResetRetriesAfter:
    # the peer has been quiet long enough: give it a fresh chance
    customPeerInfo.retries = 0
    retriesLeft = true
    info "resetting retries counter", peerId = customPeerInfo.peerId

  intervalElapsed and retriesLeft
|
||||
|
||||
# TODO: Split in discover, connect
|
||||
# TODO: Split in discover, connect
proc setConnectedPeersMetrics(
    discoveredNodes: seq[waku_enr.Record],
    node: WakuNode,
    timeout: chronos.Duration,
    restClient: RestClientRef,
    allPeers: CustomPeersTableRef,
) {.async.} =
  ## One discover-and-connect pass: registers newly discovered peers in
  ## `allPeers`, refreshes their ENR-derived info (addresses, capabilities,
  ## ip), prunes half of the outbound relay connections when at/over half of
  ## MaxConnectedPeers, pings reconnect candidates concurrently, and stores
  ## supported protocols / user agent for peers that answered.
  ## NOTE(review): `restClient` is not used inside this proc — candidate for
  ## removal from the signature; confirm with callers.
  let currentTime = getTime().toUnix()

  var newPeers = 0
  var successfulConnections = 0

  var analyzeFuts: seq[Future[Result[string, string]]]

  var (inConns, outConns) = node.peer_manager.connectedPeers(WakuRelayCodec)
  info "connected peers", inConns = inConns.len, outConns = outConns.len

  # randomize which outbound peers get pruned below
  shuffle(outConns)

  if outConns.len >= toInt(MaxConnectedPeers / 2):
    # disconnect half of the (shuffled) outbound relay peers to make room
    for p in outConns[0 ..< toInt(outConns.len / 2)]:
      trace "Pruning Peer", Peer = $p
      asyncSpawn(node.switch.disconnect(p))

  # iterate all newly discovered nodes
  for discNode in discoveredNodes:
    let peerRes = toRemotePeerInfo(discNode)

    let peerInfo = peerRes.valueOr:
      warn "error converting record to remote peer info", record = discNode
      continue

    # create new entry if new peerId found
    let peerId = $peerInfo.peerId

    if not allPeers.hasKey(peerId):
      allPeers[peerId] = CustomPeerInfoRef(peerId: peerId)
      newPeers += 1
    else:
      info "already seen", peerId = peerId

    let customPeerInfo = allPeers[peerId]

    # refresh discovery-derived fields on every sighting
    customPeerInfo.lastTimeDiscovered = currentTime
    customPeerInfo.enr = discNode.toURI()
    customPeerInfo.enrCapabilities = discNode.getCapabilities().mapIt($it)
    customPeerInfo.discovered += 1

    # accumulate multiaddrs without duplicates
    for maddr in peerInfo.addrs:
      if $maddr notin customPeerInfo.maddrs:
        customPeerInfo.maddrs.add $maddr
    let typedRecord = discNode.toTypedRecord()
    if not typedRecord.isOk():
      warn "could not convert record to typed record", record = discNode
      continue
    if not typedRecord.get().ip.isSome():
      warn "ip field is not set", record = typedRecord.get()
      continue

    # join the 4 ENR ip bytes into dotted-quad form
    let ip = $typedRecord.get().ip.get().join(".")
    customPeerInfo.ip = ip

    # try to ping the peer
    if shouldReconnect(customPeerInfo):
      if customPeerInfo.retries > 0:
        warn "trying to dial failed peer again",
          peerId = peerId, retry = customPeerInfo.retries
      analyzeFuts.add(analyzePeer(customPeerInfo, peerInfo, node, timeout))

  # Wait for all connection attempts to finish
  let analyzedPeers = await allFinished(analyzeFuts)

  for peerIdFut in analyzedPeers:
    let peerIdRes = await peerIdFut
    let peerIdStr = peerIdRes.valueOr:
      # failed ping: analyzePeer already recorded the error
      continue

    successfulConnections += 1
    let peerId = PeerId.init(peerIdStr).valueOr:
      warn "failed to parse peerId", peerId = peerIdStr
      continue
    var customPeerInfo = allPeers[peerIdStr]

    debug "connected to peer", peer = customPeerInfo[]

    # after connection, get supported protocols
    let lp2pPeerStore = node.switch.peerStore
    let nodeProtocols = lp2pPeerStore[ProtoBook][peerId]
    customPeerInfo.supportedProtocols = nodeProtocols
    customPeerInfo.lastTimeConnected = currentTime

    # after connection, get user-agent
    let nodeUserAgent = lp2pPeerStore[AgentBook][peerId]
    customPeerInfo.userAgent = nodeUserAgent

  info "number of newly discovered peers", amount = newPeers
  # inform the total connections that we did in this round
  info "number of successful connections", amount = successfulConnections
|
||||
|
||||
proc updateMetrics(allPeersRef: CustomPeersTableRef) {.gcsafe.} =
  ## Recompute aggregate gauges from the peer table: connected/failed peer
  ## counts plus per-protocol, per-user-agent and per-country breakdowns.
  ## A peer counts as "connected" when its last attempt left `connError` empty.
  var allProtocols: Table[string, int]
  var allAgentStrings: Table[string, int]
  var countries: Table[string, int]
  var connectedPeers = 0
  var failedPeers = 0

  for peerInfo in allPeersRef.values:
    if peerInfo.connError == "":
      for protocol in peerInfo.supportedProtocols:
        inc allProtocols.mgetOrPut(protocol, 0)

      # store available user-agents in the network
      inc allAgentStrings.mgetOrPut(peerInfo.userAgent, 0)

      if peerInfo.country != "":
        inc countries.mgetOrPut(peerInfo.country, 0)

      connectedPeers += 1
    else:
      failedPeers += 1

  networkmonitor_peer_count.set(int64(connectedPeers), labelValues = ["true"])
  networkmonitor_peer_count.set(int64(failedPeers), labelValues = ["false"])

  # update count on each protocol
  # (iterate pairs instead of the previous keys() + mgetOrPut, which invoked a
  # mutating accessor on the table while iterating it)
  for protocol, protocolCount in allProtocols.pairs():
    networkmonitor_peer_type_as_per_protocol.set(
      int64(protocolCount), labelValues = [protocol]
    )
    info "supported protocols in the network",
      protocol = protocol, count = protocolCount

  # update count on each user-agent
  for userAgent, agentCount in allAgentStrings.pairs():
    networkmonitor_peer_user_agents.set(
      int64(agentCount), labelValues = [userAgent]
    )
    info "user agents participating in the network",
      userAgent = userAgent, count = agentCount

  # update count on each country
  for country, peerCount in countries.pairs():
    networkmonitor_peer_country_count.set(int64(peerCount), labelValues = [country])
    info "number of peers per country", country = country, count = peerCount
|
||||
|
||||
proc populateInfoFromIp(
    allPeersRef: CustomPeersTableRef, restClient: RestClientRef
) {.async.} =
  ## Fill in country/city for peers that have an ip but no geolocation yet,
  ## querying ip-api.com through `restClient`. Failed lookups are logged and
  ## skipped; successful ones are cached on the peer entry and never refreshed.
  for peer in allPeersRef.keys():
    if allPeersRef[peer].country != "" and allPeersRef[peer].city != "":
      # already resolved — geolocation is fetched at most once per peer
      continue
    # TODO: Update also if last update > x
    if allPeersRef[peer].ip == "":
      continue
    # get more info the peers from its ip address
    var location: NodeLocation
    try:
      # IP-API endpoints are now limited to 45 HTTP requests per minute
      await sleepAsync(1400.millis)
      let response = await restClient.ipToLocation(allPeersRef[peer].ip)
      location = response.data
    except CatchableError:
      # best effort: leave the peer without location and move on
      warn "could not get location", ip = allPeersRef[peer].ip
      continue
    allPeersRef[peer].country = location.country
    allPeersRef[peer].city = location.city
|
||||
|
||||
# TODO: Split in discovery, connections, and ip2location
|
||||
# crawls the network discovering peers and trying to connect to them
|
||||
# metrics are processed and exposed
|
||||
# TODO: Split in discovery, connections, and ip2location
# crawls the network discovering peers and trying to connect to them
# metrics are processed and exposed
proc crawlNetwork(
    node: WakuNode,
    wakuDiscv5: WakuDiscoveryV5,
    restClient: RestClientRef,
    conf: NetworkMonitorConf,
    allPeersRef: CustomPeersTableRef,
) {.async.} =
  ## Main crawl loop: every `conf.refreshInterval` seconds discover random
  ## peers, refresh ENR-derived metrics, attempt connections, recompute
  ## aggregate gauges and resolve geolocation. Never returns.
  let crawlInterval = conf.refreshInterval * 1000 # milliseconds
  while true:
    let startTime = Moment.now()
    # discover new random nodes
    let discoveredNodes = await wakuDiscv5.findRandomPeers()

    # nodes are nested into bucket, flat it
    let flatNodes = wakuDiscv5.protocol.routingTable.buckets.mapIt(it.nodes).flatten()

    # populate metrics related to capabilities as advertised by the ENR (see waku field)
    setDiscoveredPeersCapabilities(discoveredNodes)

    # populate cluster metrics as advertised by the ENR
    setDiscoveredPeersCluster(flatNodes)

    # tries to connect to all newly discovered nodes
    # and populates metrics related to peers we could connect
    # note random discovered nodes can be already known
    await setConnectedPeersMetrics(
      discoveredNodes, node, conf.timeout, restClient, allPeersRef
    )

    updateMetrics(allPeersRef)

    # populate info from ip addresses
    await populateInfoFromIp(allPeersRef, restClient)

    let totalNodes = discoveredNodes.len
    #let seenNodes = totalNodes

    info "discovered nodes: ", total = totalNodes #, seen = seenNodes

    # Notes:
    # we dont run ipMajorityLoop
    # we dont run revalidateLoop
    let endTime = Moment.now()
    # NOTE(review): `.nanos` here constructs a Duration from the subtraction
    # result — verify this yields the intended elapsed time (vs. `nanoseconds`
    # accessor) against chronos' Duration API.
    let elapsed = (endTime - startTime).nanos

    info "crawl duration", time = elapsed.millis

    # NOTE(review): if a crawl takes longer than crawlInterval this argument
    # goes negative — confirm sleepAsync handles that as "no sleep".
    await sleepAsync(crawlInterval.millis - elapsed.millis)
|
||||
|
||||
proc retrieveDynamicBootstrapNodes(
    dnsDiscoveryUrl: string, dnsAddrsNameServers: seq[IpAddress]
): Future[Result[seq[RemotePeerInfo], string]] {.async.} =
  ## Retrieve dynamic bootstrap nodes (DNS discovery)
  ## Resolves the `enrtree://` list at `dnsDiscoveryUrl` via the supplied
  ## name servers; returns an empty seq when no URL is configured.

  if dnsDiscoveryUrl != "":
    # DNS discovery
    debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl

    var nameServers: seq[TransportAddress]
    for ip in dnsAddrsNameServers:
      nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53

    let dnsResolver = DnsResolver.new(nameServers)

    # TXT-record resolver callback handed to WakuDnsDiscovery
    proc resolver(domain: string): Future[string] {.async, gcsafe.} =
      trace "resolving", domain = domain
      let resolved = await dnsResolver.resolveTxt(domain)
      if resolved.len > 0:
        return resolved[0] # Use only first answer

    var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver)
    if wakuDnsDiscovery.isOk():
      # map the cstring error of findPeers into our string-based Result
      return (await wakuDnsDiscovery.get().findPeers()).mapErr(
        proc(e: cstring): string =
          $e
      )
    else:
      warn "Failed to init Waku DNS discovery"

  debug "No method for retrieving dynamic bootstrap nodes specified."
  ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default
|
||||
|
||||
proc getBootstrapFromDiscDns(
    conf: NetworkMonitorConf
): Future[Result[seq[enr.Record], string]] {.async.} =
  ## Resolve bootstrap ENRs via DNS discovery and keep only records usable
  ## by discv5, i.e. those advertising a UDP (or UDP6) port.
  ## Returns err on resolution failure instead of silently falling through.
  try:
    # hardcoded Cloudflare resolvers
    let dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
    let dynamicBootstrapNodesRes =
      await retrieveDynamicBootstrapNodes(conf.dnsDiscoveryUrl, dnsNameServers)
    if not dynamicBootstrapNodesRes.isOk():
      # previously this only logged and then called .get(), which raised on
      # error; report the failure to the caller instead
      error("failed discovering peers from DNS")
      return err("failed discovering peers from DNS")
    let dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()

    # select dynamic bootstrap nodes that have an ENR containing a udp port.
    # Discv5 only supports UDP https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md)
    var discv5BootstrapEnrs: seq[enr.Record]
    for n in dynamicBootstrapNodes:
      if n.enr.isSome():
        let
          enr = n.enr.get()
          tenrRes = enr.toTypedRecord()
        if tenrRes.isOk() and
            (tenrRes.get().udp.isSome() or tenrRes.get().udp6.isSome()):
          discv5BootstrapEnrs.add(enr)
    return ok(discv5BootstrapEnrs)
  except CatchableError:
    # previously this branch fell through, returning a default-initialized
    # Result; return an explicit error instead
    error("failed discovering peers from DNS")
    return err("failed discovering peers from DNS: " & getCurrentExceptionMsg())
|
||||
|
||||
proc initAndStartApp(
    conf: NetworkMonitorConf
): Future[Result[(WakuNode, WakuDiscoveryV5), string]] {.async.} =
  ## Build the monitor's ENR and WakuNode, gather discv5 bootstrap records
  ## (DNS discovery + configured ENR URIs) and open the discv5 transport.
  ## Returns the node together with its (not-yet-started) discv5 instance.
  let bindIp =
    try:
      parseIpAddress("0.0.0.0")
    except CatchableError:
      return err("could not start node: " & getCurrentExceptionMsg())

  let extIp =
    try:
      parseIpAddress("127.0.0.1")
    except CatchableError:
      return err("could not start node: " & getCurrentExceptionMsg())

  let
    # some hardcoded parameters
    rng = keys.newRng()
    key = crypto.PrivateKey.random(Secp256k1, rng[])[]
    nodeTcpPort = Port(60000)
    nodeUdpPort = Port(9000)
    flags = CapabilitiesBitfield.init(
      lightpush = false, filter = false, store = false, relay = true
    )

  var builder = EnrBuilder.init(key)

  builder.withIpAddressAndPorts(
    ipAddr = some(extIp), tcpPort = some(nodeTcpPort), udpPort = some(nodeUdpPort)
  )
  builder.withWakuCapabilities(flags)

  builder.withWakuRelaySharding(
    RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
  ).isOkOr:
    error "failed to add sharded topics to ENR", error = error
    return err("failed to add sharded topics to ENR: " & $error)

  let recordRes = builder.build()
  let record =
    if recordRes.isErr():
      return err("cannot build record: " & $recordRes.error)
    else:
      recordRes.get()

  var nodeBuilder = WakuNodeBuilder.init()

  nodeBuilder.withNodeKey(key)
  nodeBuilder.withRecord(record)
  nodeBuilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))

  nodeBuilder.withPeerManagerConfig(
    maxConnections = MaxConnectedPeers,
    relayServiceRatio = "13.33:86.67",
    shardAware = true,
  )
  let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort)
  if res.isErr():
    return err("node building error" & $res.error)

  let nodeRes = nodeBuilder.build()
  let node =
    if nodeRes.isErr():
      # fixed: previously reported $res.error (the network-config result)
      # instead of the build failure itself
      return err("node building error" & $nodeRes.error)
    else:
      nodeRes.get()

  # degrade gracefully on DNS discovery failure: continue with only the
  # statically configured bootstrap nodes (previously .get() was called even
  # when the result was an error, raising a defect)
  var discv5BootstrapEnrs: seq[enr.Record]
  let discv5BootstrapEnrsRes = await getBootstrapFromDiscDns(conf)
  if discv5BootstrapEnrsRes.isErr():
    error("failed discovering peers from DNS")
  else:
    discv5BootstrapEnrs = discv5BootstrapEnrsRes.get()

  # parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
  for enrUri in conf.bootstrapNodes:
    addBootstrapNode(enrUri, discv5BootstrapEnrs)

  # discv5
  let discv5Conf = WakuDiscoveryV5Config(
    discv5Config: none(DiscoveryConfig),
    address: bindIp,
    port: nodeUdpPort,
    privateKey: keys.PrivateKey(key.skkey),
    bootstrapRecords: discv5BootstrapEnrs,
    autoupdateRecord: false,
  )

  let wakuDiscv5 = WakuDiscoveryV5.new(node.rng, discv5Conf, some(record))

  try:
    wakuDiscv5.protocol.open()
  except CatchableError:
    return err("could not start node: " & getCurrentExceptionMsg())

  ok((node, wakuDiscv5))
|
||||
|
||||
proc startRestApiServer(
    conf: NetworkMonitorConf,
    allPeersInfo: CustomPeersTableRef,
    numMessagesPerContentTopic: ContentTopicMessageTableRef,
): Result[void, string] =
  ## Start the custom-metrics REST server (peers info + content-topic counts)
  ## on conf.metricsRestAddress:conf.metricsRestPort.
  ## Returns err when the server could not be created or started.
  try:
    let serverAddress =
      initTAddress(conf.metricsRestAddress & ":" & $conf.metricsRestPort)

    # route-pattern validator: only "{param}" style segments are accepted
    proc validate(pattern: string, value: string): int =
      if pattern.startsWith("{") and pattern.endsWith("}"): 0 else: 1

    var router = RestRouter.init(validate)
    router.installHandler(allPeersInfo, numMessagesPerContentTopic)

    let sres = RestServerRef.new(router, serverAddress)
    if sres.isErr():
      # previously an unchecked .get() raised here and the failure was
      # swallowed by the except branch below
      return err("could not create rest api server: " & $sres.error)
    sres.get().start()
  except CatchableError:
    # previously this branch fell through to ok(), hiding startup failures
    error "could not start rest api server", err = getCurrentExceptionMsg()
    return err("could not start rest api server: " & getCurrentExceptionMsg())
  ok()
|
||||
|
||||
# handles rx of messages over a topic (see subscribe)
|
||||
# counts the number of messages per content topic
|
||||
# handles rx of messages over a topic (see subscribe)
# counts the number of messages per content topic
proc subscribeAndHandleMessages(
    node: WakuNode,
    pubsubTopic: PubsubTopic,
    msgPerContentTopic: ContentTopicMessageTableRef,
) =
  ## Subscribe `node` to `pubsubTopic` and count received messages per
  ## content topic in `msgPerContentTopic`. Quits the process if the
  ## subscription cannot be established.
  # handle function
  proc handler(
      pubsubTopic: PubsubTopic, msg: WakuMessage
  ): Future[void] {.async, gcsafe.} =
    trace "rx message", pubsubTopic = pubsubTopic, contentTopic = msg.contentTopic

    # If we reach a table limit size, remove c topics with the least messages.
    let tableSize = 100
    if msgPerContentTopic.len > (tableSize - 1):
      # evict the content topic with the smallest message count
      let minIndex = toSeq(msgPerContentTopic.values()).minIndex()
      msgPerContentTopic.del(toSeq(msgPerContentTopic.keys())[minIndex])

    # TODO: Will overflow at some point
    # +1 if content topic existed, init to 1 otherwise
    if msgPerContentTopic.hasKey(msg.contentTopic):
      msgPerContentTopic[msg.contentTopic] += 1
    else:
      msgPerContentTopic[msg.contentTopic] = 1

  node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr:
    error "failed to subscribe to pubsub topic", pubsubTopic, error
    quit(1)
|
||||
|
||||
when isMainModule:
  ## Entry point: load configuration, start metrics/REST servers, boot the
  ## Waku node with discv5, mount relay (+optional RLN), subscribe to the
  ## configured shards and run the crawl loop forever.
  # known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
  {.pop.}
  let confRes = NetworkMonitorConf.loadConfig()
  if confRes.isErr():
    error "could not load cli variables", err = confRes.error
    quit(1)

  var conf = confRes.get()
  info "cli flags", conf = conf

  # cluster 1 is The Waku Network: apply its canned configuration
  if conf.clusterId == 1:
    let twnNetworkConf = NetworkConf.TheWakuNetworkConf()

    conf.bootstrapNodes = twnNetworkConf.discv5BootstrapNodes
    conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
    conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
    conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
    conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
    conf.numShardsInNetwork = twnNetworkConf.shardingConf.numShardsInCluster

    if conf.shards.len == 0:
      # no shards given: monitor every shard of the network
      conf.shards =
        toSeq(uint16(0) .. uint16(twnNetworkConf.shardingConf.numShardsInCluster - 1))

  if conf.logLevel != LogLevel.NONE:
    setLogLevel(conf.logLevel)

  # list of peers that we have discovered/connected
  var allPeersInfo = CustomPeersTableRef()

  # content topic and the number of messages that were received
  var msgPerContentTopic = ContentTopicMessageTableRef()

  # start metrics server
  if conf.metricsServer:
    let res =
      startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort))
    if res.isErr():
      error "could not start metrics server", err = res.error
      quit(1)

  # start rest server for custom metrics
  let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic)
  if res.isErr():
    error "could not start rest api server", err = res.error
    quit(1)

  # create a rest client
  let clientRest =
    RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2))
  if clientRest.isErr():
    # fixed: previously logged `res.error` — `res` is the (ok) REST *server*
    # result, and reading .error from an ok Result raises a defect
    error "could not start rest api client", err = clientRest.error
    quit(1)
  let restClient = clientRest.get()

  # start waku node
  let nodeRes = waitFor initAndStartApp(conf)
  if nodeRes.isErr():
    error "could not start node"
    quit 1

  let (node, discv5) = nodeRes.get()

  (waitFor node.mountRelay()).isOkOr:
    error "failed to mount waku relay protocol: ", err = error
    quit 1

  waitFor node.mountLibp2pPing()

  var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
    ## Action to be taken when an internal error occurs during the node run.
    ## e.g. the connection with the database is lost and not recovered.
    error "Unrecoverable error occurred", error = msg
    quit(QuitFailure)

  if conf.rlnRelay and conf.rlnRelayEthContractAddress != "":
    let rlnConf = WakuRlnConfig(
      dynamic: conf.rlnRelayDynamic,
      credIndex: some(uint(0)),
      ethContractAddress: conf.rlnRelayEthContractAddress,
      ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
      epochSizeSec: conf.rlnEpochSizeSec,
      creds: none(RlnRelayCreds),
      onFatalErrorAction: onFatalErrorAction,
    )

    try:
      waitFor node.mountRlnRelay(rlnConf)
    except CatchableError:
      error "failed to setup RLN", err = getCurrentExceptionMsg()
      quit 1

  node.mountMetadata(conf.clusterId, conf.shards).isOkOr:
    error "failed to mount waku metadata protocol: ", err = error
    quit 1

  for shard in conf.shards:
    # Subscribe the node to the shards, to count messages
    subscribeAndHandleMessages(
      node, $RelayShard(shardId: shard, clusterId: conf.clusterId), msgPerContentTopic
    )

  # spawn the routine that crawls the network
  # TODO: split into 3 routines (discovery, connections, ip2location)
  asyncSpawn crawlNetwork(node, discv5, restClient, conf, allPeersInfo)

  runForever()
|
||||
190
third-party/nwaku/apps/networkmonitor/networkmonitor_config.nim
vendored
Normal file
190
third-party/nwaku/apps/networkmonitor/networkmonitor_config.nim
vendored
Normal file
@ -0,0 +1,190 @@
|
||||
import
|
||||
chronicles,
|
||||
chronicles/topics_registry,
|
||||
confutils,
|
||||
chronos,
|
||||
std/strutils,
|
||||
results,
|
||||
regex
|
||||
|
||||
# Git revision injected at build time via -d:git_version=...; "n/a" if unset.
const git_version* {.strdefine.} = "n/a"

# Distinct wrapper so Ethereum RPC endpoints can't be mixed up with plain strings.
type EthRpcUrl* = distinct string

proc `$`*(u: EthRpcUrl): string =
  ## Unwrap the URL back to its underlying string form.
  result = string(u)
|
||||
|
||||
# Command-line configuration for the network monitor. Each field's pragma
# supplies the confutils CLI metadata (flag name, help text, default).
type NetworkMonitorConf* = object
  logLevel* {.
    desc: "Sets the log level",
    defaultValue: LogLevel.INFO,
    name: "log-level",
    abbr: "l"
  .}: LogLevel

  timeout* {.
    desc: "Timeout to consider that the connection failed",
    defaultValue: chronos.seconds(10),
    name: "timeout",
    abbr: "t"
  .}: chronos.Duration

  bootstrapNodes* {.
    desc: "Bootstrap ENR node. Argument may be repeated.",
    defaultValue: @[""],
    name: "bootstrap-node",
    abbr: "b"
  .}: seq[string]

  dnsDiscoveryUrl* {.
    desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
    defaultValue: "",
    name: "dns-discovery-url"
  .}: string

  shards* {.
    desc:
      "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
    name: "shard"
  .}: seq[uint16]

  numShardsInNetwork* {.
    desc: "Number of shards in the network",
    name: "num-shards-in-network",
    defaultValue: 8
  .}: uint32

  refreshInterval* {.
    desc: "How often new peers are discovered and connected to (in seconds)",
    defaultValue: 5,
    name: "refresh-interval",
    abbr: "r"
  .}: int

  clusterId* {.
    desc:
      "Cluster id that the node is running in. Node in a different cluster id is disconnected.",
    defaultValue: 1,
    name: "cluster-id"
  .}: uint16

  ## RLN relay (spam protection) config
  rlnRelay* {.
    desc: "Enable spam protection through rln-relay: true|false",
    defaultValue: true,
    name: "rln-relay"
  .}: bool

  rlnRelayDynamic* {.
    desc: "Enable waku-rln-relay with on-chain dynamic group management: true|false",
    defaultValue: true,
    name: "rln-relay-dynamic"
  .}: bool

  ethClientUrls* {.
    desc:
      "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
    defaultValue: newSeq[EthRpcUrl](0),
    name: "rln-relay-eth-client-address"
  .}: seq[EthRpcUrl]

  rlnRelayEthContractAddress* {.
    desc: "Address of membership contract on an Ethereum testnet",
    defaultValue: "",
    name: "rln-relay-eth-contract-address"
  .}: string

  rlnEpochSizeSec* {.
    desc:
      "Epoch size in seconds used to rate limit RLN memberships. Default is 1 second.",
    defaultValue: 1,
    name: "rln-relay-epoch-sec"
  .}: uint64

  rlnRelayUserMessageLimit* {.
    desc:
      "Set a user message limit for the rln membership registration. Must be a positive integer. Default is 1.",
    defaultValue: 1,
    name: "rln-relay-user-message-limit"
  .}: uint64

  ## Prometheus metrics config
  metricsServer* {.
    desc: "Enable the metrics server: true|false",
    defaultValue: true,
    name: "metrics-server"
  .}: bool

  metricsServerAddress* {.
    desc: "Listening address of the metrics server.",
    defaultValue: parseIpAddress("127.0.0.1"),
    name: "metrics-server-address"
  .}: IpAddress

  metricsServerPort* {.
    desc: "Listening HTTP port of the metrics server.",
    defaultValue: 8008,
    name: "metrics-server-port"
  .}: uint16

  ## Custom metrics rest server
  metricsRestAddress* {.
    desc: "Listening address of the metrics rest server.",
    defaultValue: "127.0.0.1",
    name: "metrics-rest-address"
  .}: string
  metricsRestPort* {.
    desc: "Listening HTTP port of the metrics rest server.",
    defaultValue: 8009,
    name: "metrics-rest-port"
  .}: uint16
|
||||
|
||||
proc parseCmdArg*(T: type IpAddress, p: string): T =
  ## confutils hook: parse a CLI value into an IpAddress, mapping any
  ## parsing failure to ValueError as confutils expects.
  try:
    parseIpAddress(p)
  except CatchableError:
    raise newException(ValueError, "Invalid IP address")
|
||||
|
||||
proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
  ## confutils hook: no shell completions are offered for IP addresses.
  @[]
|
||||
|
||||
proc parseCmdArg*(T: type chronos.Duration, p: string): T =
  ## confutils hook: interpret the argument as a whole number of seconds,
  ## mapping any parsing failure to ValueError as confutils expects.
  try:
    chronos.seconds(parseInt(p))
  except CatchableError:
    raise newException(ValueError, "Invalid duration value")
|
||||
|
||||
proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] =
  ## confutils hook: durations offer no shell completions.
  @[]
|
||||
|
||||
proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] =
  ## confutils hook: RPC URLs offer no shell completions.
  @[]
|
||||
|
||||
proc parseCmdArg*(T: type EthRpcUrl, s: string): T =
  ## confutils hook: validate an Ethereum RPC endpoint given on the CLI.
  ## allowed patterns:
  ## http://url:port
  ## https://url:port
  ## http://url:port/path
  ## https://url:port/path
  ## http://url/with/path
  ## http://url:port/path?query
  ## https://url:port/path?query
  ## disallowed patterns:
  ## any valid/invalid ws or wss url
  let httpUrlRe =
    re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
  let wsUrlRe =
    re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"

  # reject websocket endpoints explicitly before the generic HTTP check
  if regex.match(s, wsUrlRe):
    raise newException(
      ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL"
    )
  if not regex.match(s, httpUrlRe):
    raise newException(ValueError, "Invalid HTTP RPC URL")
  EthRpcUrl(s)
|
||||
|
||||
proc loadConfig*(T: type NetworkMonitorConf): Result[T, string] =
  ## Parse command-line options into a NetworkMonitorConf, returning
  ## err with the failure message instead of raising.
  try:
    ok(NetworkMonitorConf.load(version = git_version))
  except CatchableError:
    err(getCurrentExceptionMsg())
|
||||
107
third-party/nwaku/apps/networkmonitor/networkmonitor_metrics.nim
vendored
Normal file
107
third-party/nwaku/apps/networkmonitor/networkmonitor_metrics.nim
vendored
Normal file
@ -0,0 +1,107 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[net, json, tables, sequtils],
|
||||
chronicles,
|
||||
chronicles/topics_registry,
|
||||
chronos,
|
||||
json_serialization,
|
||||
metrics,
|
||||
metrics/chronos_httpserver,
|
||||
presto/route,
|
||||
presto/server,
|
||||
results
|
||||
|
||||
logScope:
  topics = "networkmonitor_metrics"

# On top of our custom metrics, the following are reused from nim-eth
#routing_table_nodes{state=""}
#routing_table_nodes{state="seen"}
#discovery_message_requests_outgoing_total{response=""}
#discovery_message_requests_outgoing_total{response="no_response"}

# Peers per advertised capability (Relay/Store/Filter/Lightpush), from ENR.
declarePublicGauge networkmonitor_peer_type_as_per_enr,
  "Number of peers supporting each capability according to the ENR",
  labels = ["capability"]

# Peers per cluster id, from the ENR relay-sharding field.
declarePublicGauge networkmonitor_peer_cluster_as_per_enr,
  "Number of peers on each cluster according to the ENR", labels = ["cluster"]

# Peers per libp2p protocol actually observed after connecting.
declarePublicGauge networkmonitor_peer_type_as_per_protocol,
  "Number of peers supporting each protocol, after a successful connection) ",
  labels = ["protocols"]

# Peers per user-agent string observed after connecting.
declarePublicGauge networkmonitor_peer_user_agents,
  "Number of peers with each user agent", labels = ["user_agent"]

# Ping latency distribution (milliseconds buckets).
declarePublicHistogram networkmonitor_peer_ping,
  "Histogram tracking ping durations for discovered peers",
  buckets = [10.0, 20.0, 50.0, 100.0, 200.0, 300.0, 500.0, 800.0, 1000.0, 2000.0, Inf]

# Discovered peers split by whether the last connection attempt succeeded.
declarePublicGauge networkmonitor_peer_count,
  "Number of discovered peers", labels = ["connected"]

# Peers per country, resolved via ip-api.com geolocation.
declarePublicGauge networkmonitor_peer_country_count,
  "Number of peers per country", labels = ["country"]
|
||||
|
||||
type
  CustomPeerInfo* = object # populated after discovery
    # discovery-derived fields, refreshed on every sighting
    lastTimeDiscovered*: int64
    discovered*: int64
    peerId*: string
    enr*: string
    ip*: string
    enrCapabilities*: seq[string]
    country*: string
    city*: string
    maddrs*: seq[string]

    # only after ok connection
    lastTimeConnected*: int64
    retries*: int64
    supportedProtocols*: seq[string]
    userAgent*: string
    lastPingDuration*: Duration
    avgPingDuration*: Duration

    # only after a ok/nok connection
    # empty string means the last attempt succeeded
    connError*: string

  # shared, mutable handle to a peer entry
  CustomPeerInfoRef* = ref CustomPeerInfo

  # Stores information about all discovered/connected peers, keyed by peerId
  CustomPeersTableRef* = TableRef[string, CustomPeerInfoRef]

  # stores the content topic and the count of rx messages
  ContentTopicMessageTableRef* = TableRef[string, int]
|
||||
|
||||
proc installHandler*(
    router: var RestRouter,
    allPeers: CustomPeersTableRef,
    numMessagesPerContentTopic: ContentTopicMessageTableRef,
) =
  ## Register the monitor's two REST endpoints on `router`:
  ## GET /allpeersinfo  -> JSON array of all known peer entries
  ## GET /contenttopics -> JSON object of per-content-topic message counts
  router.api(MethodGet, "/allpeersinfo") do() -> RestApiResponse:
    let values = toSeq(allPeers.values())
    return RestApiResponse.response(values.toJson(), contentType = "application/json")
  router.api(MethodGet, "/contenttopics") do() -> RestApiResponse:
    # TODO: toJson() includes the hash
    return RestApiResponse.response(
      $(%numMessagesPerContentTopic), contentType = "application/json"
    )
|
||||
|
||||
proc startMetricsServer*(serverIp: IpAddress, serverPort: Port): Result[void, string] =
  ## Starts the Prometheus metrics HTTP server on `serverIp`:`serverPort`.
  ## Returns err() when the server fails to start instead of falling through.
  info "Starting metrics HTTP server", serverIp, serverPort

  try:
    startMetricsHttpServer($serverIp, serverPort)
  except Exception as e:
    error(
      "Failed to start metrics HTTP server",
      serverIp = serverIp,
      serverPort = serverPort,
      msg = e.msg,
    )
    # Bug fix: previously execution fell through after the error log and the
    # proc still logged "started" and returned ok() on failure.
    return err("metrics HTTP server startup failed: " & e.msg)

  info "Metrics HTTP server started", serverIp, serverPort
  ok()
|
||||
53
third-party/nwaku/apps/networkmonitor/networkmonitor_utils.nim
vendored
Normal file
53
third-party/nwaku/apps/networkmonitor/networkmonitor_utils.nim
vendored
Normal file
@ -0,0 +1,53 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/json,
|
||||
results,
|
||||
chronicles,
|
||||
chronicles/topics_registry,
|
||||
chronos,
|
||||
presto/[client, common]
|
||||
|
||||
type NodeLocation* = object
  ## Geolocation info for a node, as decoded from the `json/{ip}` endpoint
  ## response (see `decodeBytes`/`ipToLocation` in this module).
  country*: string
  city*: string
  lat*: string # latitude, rendered as a string from the JSON float
  long*: string # longitude, rendered as a string from the JSON float
  isp*: string
|
||||
|
||||
proc flatten*[T](a: seq[seq[T]]): seq[T] =
  ## Concatenates all inner sequences of `a` into one flat sequence,
  ## preserving element order.
  result = newSeq[T](0)
  for inner in a:
    result &= inner
|
||||
|
||||
proc decodeBytes*(
    t: typedesc[NodeLocation], value: openArray[byte], contentType: Opt[ContentTypeData]
): RestResult[NodeLocation] =
  ## Decodes a raw REST response body into a `NodeLocation`.
  ## Expects the ip-api-style JSON schema: `status`, `country`, `city`,
  ## `lat`/`lon` (floats) and `isp`. Returns err() when the query status is
  ## not "success" or when the payload cannot be parsed.
  var res: string
  if len(value) > 0:
    res = newString(len(value))
    copyMem(addr res[0], unsafeAddr value[0], len(value))
  try:
    let jsonContent = parseJson(res)
    if jsonContent["status"].getStr() != "success":
      error "query failed", result = $jsonContent
      return err("query failed")
    return ok(
      NodeLocation(
        country: jsonContent["country"].getStr(),
        city: jsonContent["city"].getStr(),
        lat: $jsonContent["lat"].getFloat(),
        long: $jsonContent["lon"].getFloat(),
        isp: jsonContent["isp"].getStr(),
      )
    )
  except CatchableError:
    # Bug fix: was `except Exception`, which also swallows Defects (programmer
    # errors) inside a `{.push raises: [].}` module; catch recoverable errors only.
    return err("failed to get the location: " & getCurrentExceptionMsg())
|
||||
|
||||
proc encodeString*(value: string): RestResult[string] =
  ## Identity encoder required by presto's REST client for string path params.
  ok(value)
|
||||
|
||||
# REST client stub (presto `rest` macro): performs GET json/{ip} against the
# RestClient's base URL and decodes the body via `decodeBytes` above.
proc ipToLocation*(
  ip: string
): RestResponse[NodeLocation] {.rest, endpoint: "json/{ip}", meth: MethodGet.}
|
||||
4
third-party/nwaku/apps/networkmonitor/nim.cfg
vendored
Normal file
4
third-party/nwaku/apps/networkmonitor/nim.cfg
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
-d:chronicles_line_numbers
|
||||
-d:chronicles_runtime_filtering:on
|
||||
-d:discv5_protocol_id:d5waku
|
||||
path = "../.."
|
||||
9
third-party/nwaku/apps/networkmonitor/prometheus.yaml
vendored
Normal file
9
third-party/nwaku/apps/networkmonitor/prometheus.yaml
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'prometheus'
|
||||
scrape_interval: 5s
|
||||
static_configs:
|
||||
- targets: ['host.docker.internal:8008']
|
||||
metrics_path: '/metrics'
|
||||
44
third-party/nwaku/apps/sonda/.env.example
vendored
Normal file
44
third-party/nwaku/apps/sonda/.env.example
vendored
Normal file
@ -0,0 +1,44 @@
|
||||
# RPC URL for accessing testnet via HTTP.
|
||||
# e.g. https://linea-sepolia.infura.io/v3/123aa110320f4aec179150fba1e1b1b1
|
||||
RLN_RELAY_ETH_CLIENT_ADDRESS=
|
||||
|
||||
# Account of testnet where you have Linea Sepolia ETH that would be staked into RLN contract.
|
||||
ETH_TESTNET_ACCOUNT=
|
||||
|
||||
# Private key of testnet where you have Linea Sepolia ETH that would be staked into RLN contract.
|
||||
# Note: make sure you don't use the '0x' prefix.
|
||||
# e.g. 0116196e9a8abed42dd1a22eb63fa2a5a17b0c27d716b87ded2c54f1bf192a0b
|
||||
ETH_TESTNET_KEY=
|
||||
|
||||
# Address of the RLN contract on Linea Sepolia.
|
||||
RLN_RELAY_CONTRACT_ADDRESS=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6
|
||||
# Address of the RLN Membership Token contract on Linea Sepolia used to pay for membership.
|
||||
TOKEN_CONTRACT_ADDRESS=0x185A0015aC462a0aECb81beCc0497b649a64B9ea
|
||||
|
||||
# Password you would like to use to protect your RLN membership.
|
||||
RLN_RELAY_CRED_PASSWORD=
|
||||
|
||||
# Advanced. Can be left empty in normal use cases.
|
||||
NWAKU_IMAGE=
|
||||
NODEKEY=
|
||||
DOMAIN=
|
||||
EXTRA_ARGS=
|
||||
STORAGE_SIZE=
|
||||
|
||||
|
||||
# -------------------- SONDA CONFIG ------------------
|
||||
METRICS_PORT=8004
|
||||
NODE_REST_ADDRESS="http://nwaku:8645"
|
||||
CLUSTER_ID=16
|
||||
SHARD=32
|
||||
# Comma separated list of store nodes to poll
|
||||
STORE_NODES="/dns4/store-01.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmAUdrQ3uwzuE4Gy4D56hX6uLKEeerJAnhKEHZ3DxF1EfT,
|
||||
/dns4/store-02.do-ams3.shards.test.status.im/tcp/30303/p2p/16Uiu2HAm9aDJPkhGxc2SFcEACTFdZ91Q5TJjp76qZEhq9iF59x7R,
|
||||
/dns4/store-01.gc-us-central1-a.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmMELCo218hncCtTvC2Dwbej3rbyHQcR8erXNnKGei7WPZ,
|
||||
/dns4/store-02.gc-us-central1-a.shards.test.status.im/tcp/30303/p2p/16Uiu2HAmJnVR7ZzFaYvciPVafUXuYGLHPzSUigqAmeNw9nJUVGeM,
|
||||
/dns4/store-01.ac-cn-hongkong-c.shards.test.status.im/tcp/30303/p2p/16Uiu2HAm2M7xs7cLPc3jamawkEqbr7cUJX11uvY7LxQ6WFUdUKUT,
|
||||
/dns4/store-02.ac-cn-hongkong-c.shards.test.status.im/tcp/30303/p2p/16Uiu2HAm9CQhsuwPR54q27kNj9iaQVfyRzTGKrhFmr94oD8ujU6P"
|
||||
# Wait time in seconds between two consecutive queries
|
||||
QUERY_DELAY=60
|
||||
# Consecutive successful store requests to consider a store node healthy
|
||||
HEALTH_THRESHOLD=5
|
||||
4
third-party/nwaku/apps/sonda/.gitignore
vendored
Normal file
4
third-party/nwaku/apps/sonda/.gitignore
vendored
Normal file
@ -0,0 +1,4 @@
|
||||
.env
|
||||
keystore
|
||||
rln_tree
|
||||
.env
|
||||
23
third-party/nwaku/apps/sonda/Dockerfile.sonda
vendored
Normal file
23
third-party/nwaku/apps/sonda/Dockerfile.sonda
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
FROM python:3.9.18-alpine3.18
|
||||
|
||||
ENV METRICS_PORT=8004
|
||||
ENV NODE_REST_ADDRESS="http://nwaku:8645"
|
||||
ENV QUERY_DELAY=60
|
||||
ENV STORE_NODES=""
|
||||
ENV CLUSTER_ID=1
|
||||
ENV SHARD=1
|
||||
ENV HEALTH_THRESHOLD=5
|
||||
|
||||
WORKDIR /opt
|
||||
|
||||
COPY sonda.py /opt/sonda.py
|
||||
|
||||
RUN pip install requests argparse prometheus_client
|
||||
|
||||
CMD python -u /opt/sonda.py \
|
||||
--metrics-port=$METRICS_PORT \
|
||||
--node-rest-address="${NODE_REST_ADDRESS}" \
|
||||
--delay-seconds=$QUERY_DELAY \
|
||||
--pubsub-topic="/waku/2/rs/${CLUSTER_ID}/${SHARD}" \
|
||||
--store-nodes="${STORE_NODES}" \
|
||||
--health-threshold=$HEALTH_THRESHOLD
|
||||
52
third-party/nwaku/apps/sonda/README.md
vendored
Normal file
52
third-party/nwaku/apps/sonda/README.md
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
# Sonda
|
||||
|
||||
Sonda is a tool to monitor store nodes and measure their performance.
|
||||
|
||||
It works by running a `nwaku` node, publishing a message from it every fixed interval and performing a store query to all the store nodes we want to monitor to check they respond with the last message we published.
|
||||
|
||||
## Instructions
|
||||
|
||||
1. Create an `.env` file which will contain the configuration parameters.
|
||||
You can start by copying `.env.example` and adapting it for your use case
|
||||
|
||||
```
|
||||
cp .env.example .env
|
||||
${EDITOR} .env
|
||||
```
|
||||
|
||||
The variables that have to be filled for Sonda are
|
||||
|
||||
```
|
||||
CLUSTER_ID=
|
||||
SHARD=
|
||||
# Comma separated list of store nodes to poll
|
||||
STORE_NODES=
|
||||
# Wait time in seconds between two consecutive queries
|
||||
QUERY_DELAY=
|
||||
# Consecutive successful store requests to consider a store node healthy
|
||||
HEALTH_THRESHOLD=
|
||||
```
|
||||
|
||||
2. If you want to query nodes in `cluster-id` 1, then you have to follow the steps of registering an RLN membership. Otherwise, you can skip this step.
|
||||
|
||||
For it, you need:
|
||||
* Ethereum Linea Sepolia RPC endpoint, accessed via HTTP (the `RLN_RELAY_ETH_CLIENT_ADDRESS` value in `.env.example`). Get one free from [Infura](https://linea-sepolia.infura.io/).
|
||||
* Ethereum Linea Sepolia account with minimum 0.01ETH. Get some [here](https://docs.metamask.io/developer-tools/faucet/).
|
||||
* A password to protect your rln membership.
|
||||
|
||||
Fill the `RLN_RELAY_ETH_CLIENT_ADDRESS`, `ETH_TESTNET_KEY` and `RLN_RELAY_CRED_PASSWORD` env variables and run
|
||||
|
||||
```
|
||||
./register_rln.sh
|
||||
```
|
||||
|
||||
3. Start Sonda by running
|
||||
|
||||
```
|
||||
docker-compose up -d
|
||||
```
|
||||
|
||||
4. Browse to http://localhost:3000/dashboards and monitor the performance
|
||||
|
||||
There are two Grafana dashboards: `nwaku-monitoring` to track the stats of your node that is publishing messages and performing queries, and `sonda-monitoring` to monitor the responses of the store nodes.
|
||||
|
||||
114
third-party/nwaku/apps/sonda/docker-compose.yml
vendored
Normal file
114
third-party/nwaku/apps/sonda/docker-compose.yml
vendored
Normal file
@ -0,0 +1,114 @@
|
||||
|
||||
x-logging: &logging
|
||||
logging:
|
||||
driver: json-file
|
||||
options:
|
||||
max-size: 1000m
|
||||
|
||||
# Environment variable definitions
|
||||
x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-"
|
||||
|
||||
x-rln-environment: &rln_env
|
||||
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6}
|
||||
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
|
||||
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
|
||||
|
||||
x-sonda-env: &sonda_env
|
||||
METRICS_PORT: ${METRICS_PORT:-8004}
|
||||
NODE_REST_ADDRESS: ${NODE_REST_ADDRESS:-"http://nwaku:8645"}
|
||||
CLUSTER_ID: ${CLUSTER_ID:-1}
|
||||
SHARD: ${SHARD:-0}
|
||||
STORE_NODES: ${STORE_NODES:-}
|
||||
QUERY_DELAY: ${QUERY_DELAY-60}
|
||||
HEALTH_THRESHOLD: ${HEALTH_THRESHOLD-5}
|
||||
|
||||
# Services definitions
|
||||
services:
|
||||
nwaku:
|
||||
image: ${NWAKU_IMAGE:-harbor.status.im/wakuorg/nwaku:deploy-status-prod}
|
||||
container_name: nwaku
|
||||
restart: on-failure
|
||||
ports:
|
||||
- 30304:30304/tcp
|
||||
- 30304:30304/udp
|
||||
- 9005:9005/udp
|
||||
- 127.0.0.1:8003:8003
|
||||
- 80:80 #Let's Encrypt
|
||||
- 8000:8000/tcp #WSS
|
||||
- 127.0.0.1:8645:8645
|
||||
<<:
|
||||
- *logging
|
||||
environment:
|
||||
DOMAIN: ${DOMAIN}
|
||||
NODEKEY: ${NODEKEY}
|
||||
RLN_RELAY_CRED_PASSWORD: "${RLN_RELAY_CRED_PASSWORD}"
|
||||
RLN_RELAY_ETH_CLIENT_ADDRESS: *rln_relay_eth_client_address
|
||||
EXTRA_ARGS: ${EXTRA_ARGS}
|
||||
STORAGE_SIZE: ${STORAGE_SIZE}
|
||||
<<:
|
||||
- *rln_env
|
||||
- *sonda_env
|
||||
volumes:
|
||||
- ./run_node.sh:/opt/run_node.sh:Z
|
||||
- ${CERTS_DIR:-./certs}:/etc/letsencrypt/:Z
|
||||
- ./rln_tree:/etc/rln_tree/:Z
|
||||
- ./keystore:/keystore:Z
|
||||
entrypoint: sh
|
||||
command:
|
||||
- /opt/run_node.sh
|
||||
networks:
|
||||
- nwaku-sonda
|
||||
|
||||
sonda:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.sonda
|
||||
container_name: sonda
|
||||
ports:
|
||||
- 127.0.0.1:${METRICS_PORT}:${METRICS_PORT}
|
||||
environment:
|
||||
<<:
|
||||
- *sonda_env
|
||||
depends_on:
|
||||
- nwaku
|
||||
networks:
|
||||
- nwaku-sonda
|
||||
|
||||
prometheus:
|
||||
image: docker.io/prom/prometheus:latest
|
||||
container_name: prometheus
|
||||
volumes:
|
||||
- ./monitoring/prometheus-config.yml:/etc/prometheus/prometheus.yml:Z
|
||||
command:
|
||||
- --config.file=/etc/prometheus/prometheus.yml
|
||||
# ports:
|
||||
# - 127.0.0.1:9090:9090
|
||||
restart: on-failure:5
|
||||
depends_on:
|
||||
- nwaku
|
||||
networks:
|
||||
- nwaku-sonda
|
||||
|
||||
grafana:
|
||||
image: docker.io/grafana/grafana:latest
|
||||
container_name: grafana
|
||||
env_file:
|
||||
- ./monitoring/configuration/grafana-plugins.env
|
||||
volumes:
|
||||
- ./monitoring/configuration/grafana.ini:/etc/grafana/grafana.ini:Z
|
||||
- ./monitoring/configuration/dashboards.yaml:/etc/grafana/provisioning/dashboards/dashboards.yaml:Z
|
||||
- ./monitoring/configuration/datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml:Z
|
||||
- ./monitoring/configuration/dashboards:/var/lib/grafana/dashboards/:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_icon.svg:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.svg:/usr/share/grafana/public/img/grafana_typelogo.svg:Z
|
||||
- ./monitoring/configuration/customizations/custom-logo.png:/usr/share/grafana/public/img/fav32.png:Z
|
||||
ports:
|
||||
- 0.0.0.0:3000:3000
|
||||
restart: on-failure:5
|
||||
depends_on:
|
||||
- prometheus
|
||||
networks:
|
||||
- nwaku-sonda
|
||||
|
||||
networks:
|
||||
nwaku-sonda:
|
||||
BIN
third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.png
vendored
Normal file
BIN
third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.png
vendored
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 11 KiB |
3
third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.svg
vendored
Normal file
3
third-party/nwaku/apps/sonda/monitoring/configuration/customizations/custom-logo.svg
vendored
Normal file
File diff suppressed because one or more lines are too long
|
After Width: | Height: | Size: 13 KiB |
9
third-party/nwaku/apps/sonda/monitoring/configuration/dashboards.yaml
vendored
Normal file
9
third-party/nwaku/apps/sonda/monitoring/configuration/dashboards.yaml
vendored
Normal file
@ -0,0 +1,9 @@
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: 'Prometheus'
|
||||
orgId: 1
|
||||
folder: ''
|
||||
type: file
|
||||
options:
|
||||
path: /var/lib/grafana/dashboards
|
||||
5303
third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/nwaku-monitoring.json
vendored
Normal file
5303
third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/nwaku-monitoring.json
vendored
Normal file
File diff suppressed because it is too large
Load Diff
1571
third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/sonda-monitoring.json
vendored
Normal file
1571
third-party/nwaku/apps/sonda/monitoring/configuration/dashboards/sonda-monitoring.json
vendored
Normal file
File diff suppressed because it is too large
Load Diff
11
third-party/nwaku/apps/sonda/monitoring/configuration/datasources.yaml
vendored
Normal file
11
third-party/nwaku/apps/sonda/monitoring/configuration/datasources.yaml
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
access: proxy
|
||||
org_id: 1
|
||||
url: http://prometheus:9090
|
||||
is_default: true
|
||||
version: 1
|
||||
editable: true
|
||||
2
third-party/nwaku/apps/sonda/monitoring/configuration/grafana-plugins.env
vendored
Normal file
2
third-party/nwaku/apps/sonda/monitoring/configuration/grafana-plugins.env
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
#GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,digrich-bubblechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,jdbranham-diagram-panel,agenty-flowcharting-panel,citilogics-geoloop-panel,savantly-heatmap-panel,mtanda-histogram-panel,pierosavi-imageit-panel,michaeldmoore-multistat-panel,zuburqan-parity-report-panel,natel-plotly-panel,bessler-pictureit-panel,grafana-polystat-panel,corpglory-progresslist-panel,snuids-radar-panel,fzakaria-simple-config.config.annotations-datasource,vonage-status-panel,snuids-trafficlights-panel,pr0ps-trackmap-panel,alexandra-trackmap-panel,btplc-trend-box-panel
|
||||
GF_INSTALL_PLUGINS=grafana-worldmap-panel,grafana-piechart-panel,yesoreyeram-boomtheme-panel,briangann-gauge-panel,pierosavi-imageit-panel,bessler-pictureit-panel,vonage-status-panel
|
||||
51
third-party/nwaku/apps/sonda/monitoring/configuration/grafana.ini
vendored
Normal file
51
third-party/nwaku/apps/sonda/monitoring/configuration/grafana.ini
vendored
Normal file
@ -0,0 +1,51 @@
|
||||
instance_name = nwaku dashboard
|
||||
|
||||
;[dashboards.json]
|
||||
;enabled = true
|
||||
;path = /home/git/grafana/grafana-dashboards/dashboards
|
||||
|
||||
|
||||
#################################### Auth ##########################
|
||||
[auth]
|
||||
disable_login_form = false
|
||||
|
||||
#################################### Anonymous Auth ##########################
|
||||
[auth.anonymous]
|
||||
# enable anonymous access
|
||||
enabled = true
|
||||
|
||||
# specify organization name that should be used for unauthenticated users
|
||||
;org_name = Public
|
||||
|
||||
# specify role for unauthenticated users
|
||||
org_role = Admin
|
||||
; org_role = Viewer
|
||||
|
||||
;[security]
|
||||
;admin_user = ocr
|
||||
;admin_password = ocr
|
||||
|
||||
;[users]
|
||||
# disable user signup / registration
|
||||
;allow_sign_up = false
|
||||
|
||||
# Set to true to automatically assign new users to the default organization (id 1)
|
||||
;auto_assign_org = true
|
||||
|
||||
# Default role new users will be automatically assigned (if disabled above is set to true)
|
||||
;auto_assign_org_role = Viewer
|
||||
|
||||
#################################### SMTP / Emailing ##########################
|
||||
;[smtp]
|
||||
;enabled = false
|
||||
;host = localhost:25
|
||||
;user =
|
||||
;password =
|
||||
;cert_file =
|
||||
;key_file =
|
||||
;skip_verify = false
|
||||
;from_address = admin@grafana.localhost
|
||||
|
||||
;[emails]
|
||||
;welcome_email_on_sign_up = false
|
||||
|
||||
10
third-party/nwaku/apps/sonda/monitoring/prometheus-config.yml
vendored
Normal file
10
third-party/nwaku/apps/sonda/monitoring/prometheus-config.yml
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
global:
|
||||
scrape_interval: 15s
|
||||
evaluation_interval: 15s
|
||||
external_labels:
|
||||
monitor: "Monitoring"
|
||||
|
||||
scrape_configs:
|
||||
- job_name: "nwaku"
|
||||
static_configs:
|
||||
- targets: ["nwaku:8003", "sonda:8004"]
|
||||
31
third-party/nwaku/apps/sonda/register_rln.sh
vendored
Executable file
31
third-party/nwaku/apps/sonda/register_rln.sh
vendored
Executable file
@ -0,0 +1,31 @@
|
||||
#!/bin/sh
|
||||
|
||||
|
||||
if test -f ./keystore/keystore.json; then
|
||||
echo "keystore/keystore.json already exists. Use it instead of creating a new one."
|
||||
echo "Exiting"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
|
||||
if test -f .env; then
|
||||
echo "Using .env file"
|
||||
. $(pwd)/.env
|
||||
fi
|
||||
|
||||
# TODO: Set nwaku release when ready instead of quay
|
||||
|
||||
if test -n "${ETH_CLIENT_ADDRESS}"; then
|
||||
echo "ETH_CLIENT_ADDRESS variable was renamed to RLN_RELAY_ETH_CLIENT_ADDRESS"
|
||||
echo "Please update your .env file"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \
|
||||
--rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \
|
||||
--rln-relay-eth-private-key=${ETH_TESTNET_KEY} \
|
||||
--rln-relay-eth-contract-address=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \
|
||||
--rln-relay-cred-path=/keystore/keystore.json \
|
||||
--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \
|
||||
--rln-relay-user-message-limit=20 \
|
||||
--execute
|
||||
110
third-party/nwaku/apps/sonda/run_node.sh
vendored
Normal file
110
third-party/nwaku/apps/sonda/run_node.sh
vendored
Normal file
@ -0,0 +1,110 @@
|
||||
#!/bin/sh
|
||||
|
||||
echo "I am a nwaku node"
|
||||
|
||||
# Guard against the old env var name (renamed to RLN_RELAY_ETH_CLIENT_ADDRESS).
# Bug fix: the condition previously read `test -n "..." -o ;` — a dangling `-o`
# binary operator with no right-hand operand, which makes `test` fail with a
# syntax error at runtime (register_rln.sh has the correct form).
if test -n "${ETH_CLIENT_ADDRESS}"; then
  echo "ETH_CLIENT_ADDRESS variable was renamed to RLN_RELAY_ETH_CLIENT_ADDRESS"
  echo "Please update your .env file"
  exit 1
fi
|
||||
|
||||
if [ -z "${RLN_RELAY_ETH_CLIENT_ADDRESS}" ] && [ "${CLUSTER_ID}" -eq 1 ]; then
|
||||
echo "Missing Eth client address, please refer to README.md for detailed instructions"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ "${CLUSTER_ID}" -ne 1 ]; then
|
||||
echo "CLUSTER_ID is not equal to 1, clearing RLN configurations"
|
||||
RLN_RELAY_CRED_PATH=""
|
||||
RLN_RELAY_ETH_CLIENT_ADDRESS=""
|
||||
RLN_RELAY_CRED_PASSWORD=""
|
||||
fi
|
||||
|
||||
MY_EXT_IP=$(wget -qO- https://api4.ipify.org)
|
||||
DNS_WSS_CMD=
|
||||
|
||||
if [ -n "${DOMAIN}" ]; then
|
||||
|
||||
LETSENCRYPT_PATH=/etc/letsencrypt/live/${DOMAIN}
|
||||
|
||||
if ! [ -d "${LETSENCRYPT_PATH}" ]; then
|
||||
apk add --no-cache certbot
|
||||
|
||||
certbot certonly\
|
||||
--non-interactive\
|
||||
--agree-tos\
|
||||
--no-eff-email\
|
||||
--no-redirect\
|
||||
--email admin@${DOMAIN}\
|
||||
-d ${DOMAIN}\
|
||||
--standalone
|
||||
fi
|
||||
|
||||
if ! [ -e "${LETSENCRYPT_PATH}/privkey.pem" ]; then
|
||||
echo "The certificate does not exist"
|
||||
sleep 60
|
||||
exit 1
|
||||
fi
|
||||
|
||||
WS_SUPPORT="--websocket-support=true"
|
||||
WSS_SUPPORT="--websocket-secure-support=true"
|
||||
WSS_KEY="--websocket-secure-key-path=${LETSENCRYPT_PATH}/privkey.pem"
|
||||
WSS_CERT="--websocket-secure-cert-path=${LETSENCRYPT_PATH}/cert.pem"
|
||||
DNS4_DOMAIN="--dns4-domain-name=${DOMAIN}"
|
||||
|
||||
DNS_WSS_CMD="${WS_SUPPORT} ${WSS_SUPPORT} ${WSS_CERT} ${WSS_KEY} ${DNS4_DOMAIN}"
|
||||
fi
|
||||
|
||||
if [ -n "${NODEKEY}" ]; then
|
||||
NODEKEY=--nodekey=${NODEKEY}
|
||||
fi
|
||||
|
||||
if [ "${CLUSTER_ID}" -eq 1 ]; then
|
||||
RLN_RELAY_CRED_PATH=--rln-relay-cred-path=${RLN_RELAY_CRED_PATH:-/keystore/keystore.json}
|
||||
fi
|
||||
|
||||
if [ -n "${RLN_RELAY_CRED_PASSWORD}" ]; then
|
||||
RLN_RELAY_CRED_PASSWORD=--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}"
|
||||
fi
|
||||
|
||||
if [ -n "${RLN_RELAY_ETH_CLIENT_ADDRESS}" ]; then
|
||||
RLN_RELAY_ETH_CLIENT_ADDRESS=--rln-relay-eth-client-address="${RLN_RELAY_ETH_CLIENT_ADDRESS}"
|
||||
fi
|
||||
|
||||
# TO DO: configure bootstrap nodes in env
|
||||
|
||||
exec /usr/bin/wakunode\
|
||||
--relay=true\
|
||||
--filter=false\
|
||||
--lightpush=false\
|
||||
--keep-alive=true\
|
||||
--max-connections=150\
|
||||
--cluster-id="${CLUSTER_ID}"\
|
||||
--discv5-discovery=true\
|
||||
--discv5-udp-port=9005\
|
||||
--discv5-enr-auto-update=True\
|
||||
--log-level=DEBUG\
|
||||
--tcp-port=30304\
|
||||
--metrics-server=True\
|
||||
--metrics-server-port=8003\
|
||||
--metrics-server-address=0.0.0.0\
|
||||
--rest=true\
|
||||
--rest-admin=true\
|
||||
--rest-address=0.0.0.0\
|
||||
--rest-port=8645\
|
||||
--rest-allow-origin="waku-org.github.io"\
|
||||
--rest-allow-origin="localhost:*"\
|
||||
--nat=extip:"${MY_EXT_IP}"\
|
||||
--store=false\
|
||||
--pubsub-topic="/waku/2/rs/${CLUSTER_ID}/${SHARD}"\
|
||||
--discv5-bootstrap-node="enr:-QEKuECA0zhRJej2eaOoOPddNcYr7-5NdRwuoLCe2EE4wfEYkAZhFotg6Kkr8K15pMAGyUyt0smHkZCjLeld0BUzogNtAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc2hhcmRzLnRlc3Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zaGFyZHMudGVzdC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0"\
|
||||
--discv5-bootstrap-node="enr:-QEcuEAgXDqrYd_TrpUWtn3zmxZ9XPm7O3GS6lV7aMJJOTsbOAAeQwSd_eoHcCXqVzTUtwTyB4855qtbd8DARnExyqHPAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc2hhcmRzLnRlc3Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zaGFyZHMudGVzdC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0"\
|
||||
--discv5-bootstrap-node="enr:-QEcuEAX6Qk-vVAoJLxR4A_4UVogGhvQrqKW4DFKlf8MA1PmCjgowL-LBtSC9BLjXbb8gf42FdDHGtSjEvvWKD10erxqAYJpZIJ2NIJpcIQI2hdMim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc2hhcmRzLnRlc3Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zaGFyZHMudGVzdC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDP7CbRk-YKJwOFFM4Z9ney0GPc7WPJaCwGkpNRyla7mCDdGNwgnZfg3VkcIIjKIV3YWt1Mg0"\
|
||||
${RLN_RELAY_CRED_PATH}\
|
||||
${RLN_RELAY_CRED_PASSWORD}\
|
||||
${RLN_RELAY_TREE_PATH}\
|
||||
${RLN_RELAY_ETH_CLIENT_ADDRESS}\
|
||||
${DNS_WSS_CMD}\
|
||||
${NODEKEY}\
|
||||
${EXTRA_ARGS}
|
||||
|
||||
207
third-party/nwaku/apps/sonda/sonda.py
vendored
Normal file
207
third-party/nwaku/apps/sonda/sonda.py
vendored
Normal file
@ -0,0 +1,207 @@
|
||||
import requests
import time
import json
import os
import base64
import sys
import urllib.parse
import argparse
from datetime import datetime
from prometheus_client import Counter, Gauge, start_http_server

# Content topic where Sonda messages are going to be sent
# (fixed "Sona" typo; removed a duplicate `import requests` above)
SONDA_CONTENT_TOPIC = '/sonda/2/polls/proto'

# Prometheus metrics
successful_sonda_msgs = Counter('successful_sonda_msgs', 'Number of successful Sonda messages sent')
failed_sonda_msgs = Counter('failed_sonda_msgs', 'Number of failed Sonda messages attempts')
successful_store_queries = Counter('successful_store_queries', 'Number of successful store queries', ['node'])
failed_store_queries = Counter('failed_store_queries', 'Number of failed store queries', ['node', 'error'])
empty_store_responses = Counter('empty_store_responses', "Number of store responses without the latest Sonda message", ['node'])
store_query_latency = Gauge('store_query_latency', 'Latency of the last store query in seconds', ['node'])
consecutive_successful_responses = Gauge('consecutive_successful_responses', 'Consecutive successful store responses', ['node'])
node_health = Gauge('node_health', "Binary indicator of a node's health. 1 is healthy, 0 is not", ['node'])


# Argparser configuration
parser = argparse.ArgumentParser(description='')
parser.add_argument('-m', '--metrics-port', type=int, default=8004, help='Port to expose prometheus metrics.')
parser.add_argument('-a', '--node-rest-address', type=str, default="http://nwaku:8645", help='Address of the waku node to send messages to.')
parser.add_argument('-p', '--pubsub-topic', type=str, default='/waku/2/rs/1/0', help='PubSub topic.')
parser.add_argument('-d', '--delay-seconds', type=int, default=60, help='Delay in seconds between messages.')
parser.add_argument('-n', '--store-nodes', type=str, required=True, help='Comma separated list of store nodes to query.')
parser.add_argument('-t', '--health-threshold', type=int, default=5, help='Consecutive successful store requests to consider a store node healthy.')
args = parser.parse_args()
|
||||
|
||||
|
||||
# Logs message including current UTC time
|
||||
# Logs message including current UTC time
def log_with_utc(message):
    """Print `message` prefixed with the current UTC time (YYYY-MM-DD HH:MM:SS UTC)."""
    # Local import so the module-level import block stays untouched.
    from datetime import timezone
    # Fix: datetime.utcnow() is deprecated (Python 3.12+); an aware UTC "now"
    # produces the identical strftime output.
    utc_time = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{utc_time} UTC] {message}")
|
||||
|
||||
|
||||
# Sends Sonda message. Returns True if successful, False otherwise
|
||||
# Sends Sonda message. Returns True if successful, False otherwise
def send_sonda_msg(rest_address, pubsub_topic, content_topic, timestamp):
    """Publish a Sonda message via the node's relay REST API.

    Returns True when the node accepts the message (HTTP 200), False on a
    request error; increments the matching Prometheus counters either way.
    """
    payload_b64 = base64.b64encode(b"Hi, I'm Sonda").decode('ascii')
    body = {
        'payload': payload_b64,
        'contentTopic': content_topic,
        'version': 1,
        'timestamp': timestamp
    }

    topic_quoted = urllib.parse.quote(pubsub_topic, safe='')
    url = f'{rest_address}/relay/v1/messages/{topic_quoted}'
    headers = {'content-type': 'application/json'}

    log_with_utc(f'Sending Sonda message via REST: {url} PubSubTopic: {pubsub_topic}, ContentTopic: {content_topic}, timestamp: {timestamp}')

    try:
        started = time.time()
        response = requests.post(url, json=body, headers=headers, timeout=10)
        elapsed_seconds = time.time() - started

        log_with_utc(f'Response from {rest_address}: status:{response.status_code} content:{response.text} [{elapsed_seconds:.4f} s.]')

        if response.status_code == 200:
            successful_sonda_msgs.inc()
            return True
        # Non-200: raise so the failure path below records it.
        response.raise_for_status()
    except requests.RequestException as e:
        log_with_utc(f'Error sending request: {e}')

        failed_sonda_msgs.inc()
        return False
|
||||
|
||||
|
||||
# We return true if both our node and the queried Store node returned a 200
|
||||
# If our message isn't found but we did get a store 200 response, this function still returns true
|
||||
# We return true if both our node and the queried Store node returned a 200
# If our message isn't found but we did get a store 200 response, this function still returns true
def check_store_response(json_response, store_node, timestamp):
    """Inspect a parsed store response and update the Prometheus metrics.

    Returns False only when the store node itself reported a non-200 status;
    a 200 response without our message still counts as True (but increments
    empty_store_responses and resets the consecutive-success gauge).
    """
    status_code = json_response.get('statusCode')

    # Store-level failure: the queried node answered with an error status.
    if status_code != 200:
        error = f"{status_code} {json_response.get('statusDesc')}"
        log_with_utc(f'Failed performing store query {error}')
        failed_store_queries.labels(node=store_node, error=error).inc()
        consecutive_successful_responses.labels(node=store_node).set(0)

        return False

    messages = json_response.get('messages')
    if not messages:
        # 200 with no messages: the query worked but our message is missing.
        log_with_utc("No messages in store response")
        empty_store_responses.labels(node=store_node).inc()
        consecutive_successful_responses.labels(node=store_node).set(0)
        return True

    # Look for our Sonda message (matched by publish timestamp).
    for entry in messages:
        inner = entry.get("message")
        if not inner:
            log_with_utc("Could not retrieve message")
            continue

        if inner.get('timestamp') == timestamp:
            log_with_utc(f'Found Sonda message in store response node={store_node}')
            successful_store_queries.labels(node=store_node).inc()
            consecutive_successful_responses.labels(node=store_node).inc()
            return True

    # Message not found among the returned ones: count as empty but still "ok".
    empty_store_responses.labels(node=store_node).inc()
    consecutive_successful_responses.labels(node=store_node).set(0)
    return True
|
||||
|
||||
|
||||
def send_store_query(rest_address, store_node, encoded_pubsub_topic, encoded_content_topic, timestamp):
    """Query one store node (through our node's REST API) for the Sonda message.

    Sends GET /store/v3/messages, parses the JSON body, and delegates the
    content check to check_store_response(). Returns True only when the
    request, the JSON parsing, and the store-side status all succeed;
    updates failure/latency metrics on every path. `timestamp` (ns) is
    used as the query's startTime.
    """
    url = f'{rest_address}/store/v3/messages'
    params = {
        # NOTE(review): store_node is percent-encoded here and `requests` encodes
        # query params again — possible double-encoding of peerAddr; confirm
        # against the nwaku REST API before changing.
        'peerAddr': urllib.parse.quote(store_node, safe=''),
        'pubsubTopic': encoded_pubsub_topic,
        'contentTopics': encoded_content_topic,
        'includeData': 'true',
        'startTime': timestamp
    }

    s_time = time.time()

    try:
        log_with_utc(f'Sending store request to {store_node}')
        # NOTE(review): no timeout set — a hung store node blocks the main loop.
        response = requests.get(url, params=params)
    except Exception as e:
        log_with_utc(f'Error sending request: {e}')
        failed_store_queries.labels(node=store_node, error=str(e)).inc()
        consecutive_successful_responses.labels(node=store_node).set(0)
        return False

    elapsed_seconds = time.time() - s_time
    log_with_utc(f'Response from {rest_address}: status:{response.status_code} [{elapsed_seconds:.4f} s.]')

    if response.status_code != 200:
        failed_store_queries.labels(node=store_node, error=f'{response.status_code} {response.content}').inc()
        consecutive_successful_responses.labels(node=store_node).set(0)
        return False

    # Parse REST response into JSON
    try:
        json_response = response.json()
    except Exception as e:
        log_with_utc(f'Error parsing response JSON: {e}')
        failed_store_queries.labels(node=store_node, error="JSON parse error").inc()
        consecutive_successful_responses.labels(node=store_node).set(0)
        return False

    # Analyze Store response. Return false if response is incorrect or has an error status
    if not check_store_response(json_response, store_node, timestamp):
        return False

    # Latency is recorded only for fully successful queries.
    store_query_latency.labels(node=store_node).set(elapsed_seconds)
    return True
|
||||
|
||||
|
||||
def send_store_queries(rest_address, store_nodes, pubsub_topic, content_topic, timestamp):
    """Fire one store query per configured node, sequentially.

    The topics are URL-encoded exactly once here so every per-node query
    reuses the same encoded values. Failures of individual queries are
    handled (and recorded as metrics) inside ``send_store_query``; this
    loop therefore never aborts early.
    """
    log_with_utc(f'Sending store queries. nodes = {store_nodes} timestamp = {timestamp}')
    quoted_pubsub = urllib.parse.quote(pubsub_topic, safe='')
    quoted_content = urllib.parse.quote(content_topic, safe='')

    for store_node in store_nodes:
        send_store_query(rest_address, store_node, quoted_pubsub, quoted_content, timestamp)
|
||||
|
||||
|
||||
def main():
    """Sonda entry point.

    Forever: publish a probe message through the local node, wait
    ``delay_seconds``, query every configured store node for that message,
    and refresh the per-node Prometheus health gauges.
    """
    log_with_utc(f'Running Sonda with args={args}')

    if args.store_nodes is None:
        store_nodes = []
    else:
        store_nodes = [s.strip() for s in args.store_nodes.split(",")]
    log_with_utc(f'Store nodes to query: {store_nodes}')

    # Start Prometheus HTTP server at port set by the CLI(default 8004)
    start_http_server(args.metrics_port)

    while True:
        timestamp = time.time_ns()

        # Publish the probe (Sonda) message through the local node.
        published = send_sonda_msg(args.node_rest_address, args.pubsub_topic, SONDA_CONTENT_TOPIC, timestamp)

        log_with_utc(f'sleeping: {args.delay_seconds} seconds')
        time.sleep(args.delay_seconds)

        # Query the store nodes only when the publish succeeded.
        if published:
            send_store_queries(args.node_rest_address, store_nodes, args.pubsub_topic, SONDA_CONTENT_TOPIC, timestamp)

        # A node is considered healthy once it has answered at least
        # `health_threshold` consecutive queries successfully.
        # NOTE(review): `_value.get()` reaches into prometheus_client
        # internals -- confirm it still works when upgrading that library.
        for store_node in store_nodes:
            streak = consecutive_successful_responses.labels(node=store_node)._value.get()
            node_health.labels(node=store_node).set(1 if streak >= args.health_threshold else 0)


main()
|
||||
58
third-party/nwaku/apps/wakucanary/README.md
vendored
Normal file
58
third-party/nwaku/apps/wakucanary/README.md
vendored
Normal file
@ -0,0 +1,58 @@
|
||||
# waku canary tool
|
||||
|
||||
Attempts to dial a peer and asserts it supports a given set of protocols.
|
||||
|
||||
```console
|
||||
./build/wakucanary --help
|
||||
Usage:
|
||||
|
||||
wakucanary [OPTIONS]...
|
||||
|
||||
The following options are available:
|
||||
|
||||
-a, --address Multiaddress of the peer node to attempt to dial.
|
||||
-t, --timeout Timeout to consider that the connection failed [=chronos.seconds(10)].
|
||||
-p, --protocol Protocol required to be supported: store,relay,lightpush,filter (can be used
|
||||
multiple times).
|
||||
-l, --log-level Sets the log level [=LogLevel.DEBUG].
|
||||
-np, --node-port Listening port for waku node [=60000].
|
||||
--websocket-secure-key-path Secure websocket key path: '/path/to/key.txt' .
|
||||
--websocket-secure-cert-path Secure websocket Certificate path: '/path/to/cert.txt' .
|
||||
-c, --cluster-id Cluster ID of the fleet node to check status [Default=1]
|
||||
-s, --shard Shards index to subscribe to topics [ Argument may be repeated ]
|
||||
|
||||
```
|
||||
|
||||
The tool can be built as:
|
||||
|
||||
```console
|
||||
$ make wakucanary
|
||||
```
|
||||
|
||||
And used as follows. A reachable node that supports both `store` and `filter` protocols.
|
||||
|
||||
```console
|
||||
$ ./build/wakucanary --address=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV --protocol=store --protocol=filter
|
||||
$ echo $?
|
||||
0
|
||||
```
|
||||
|
||||
A node that can't be reached.
|
||||
```console
|
||||
$ ./build/wakucanary --address=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/1000/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV --protocol=store --protocol=filter
|
||||
$ echo $?
|
||||
1
|
||||
```
|
||||
|
||||
Note that a domain name can also be used.
|
||||
```console
|
||||
$ ./build/wakucanary --address=/dns4/node-01.do-ams3.status.test.status.im/tcp/30303/p2p/16Uiu2HAkukebeXjTQ9QDBeNDWuGfbaSg79wkkhK4vPocLgR6QFDf --protocol=store --protocol=filter
|
||||
$ echo $?
|
||||
0
|
||||
```
|
||||
|
||||
Websockets are also supported. The websocket port opened by waku canary is calculated as `$(--node-port) + 1000` (e.g. when you set `-np 60000`, the WS port will be `61000`)
|
||||
```console
|
||||
$ ./build/wakucanary --address=/ip4/127.0.0.1/tcp/7777/ws/p2p/16Uiu2HAm4ng2DaLPniRoZtMQbLdjYYWnXjrrJkGoXWCoBWAdn1tu --protocol=store --protocol=filter
|
||||
$ ./build/wakucanary --address=/ip4/127.0.0.1/tcp/7777/wss/p2p/16Uiu2HAmB6JQpewXScGoQ2syqmimbe4GviLxRwfsR8dCpwaGBPSE --protocol=store --websocket-secure-key-path=MyKey.key --websocket-secure-cert-path=MyCertificate.crt
|
||||
```
|
||||
37
third-party/nwaku/apps/wakucanary/certsgenerator.nim
vendored
Normal file
37
third-party/nwaku/apps/wakucanary/certsgenerator.nim
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
import osproc, os, httpclient, strutils
|
||||
|
||||
proc getPublicIP(): string =
  ## Returns this host's public IP address by querying api.ipify.org,
  ## or "127.0.0.1" when the lookup fails (no connectivity, DNS error, ...).
  let client = newHttpClient()
  # Always release the client's connection, on success and failure alike
  # (the original leaked the socket).
  defer: client.close()
  try:
    let response = client.get("http://api.ipify.org")
    return response.body
  except CatchableError as e:
    # Catch CatchableError rather than Exception so programming errors
    # (Defects) are not silently swallowed.
    echo "Could not fetch public IP: " & e.msg
    return "127.0.0.1"
|
||||
|
||||
# Function to generate a self-signed certificate
|
||||
proc generateSelfSignedCertificate*(certPath: string, keyPath: string): int =
  ## Generates a self-signed RSA-4096 certificate valid for 10 years whose
  ## CN is this host's public IP, writing the private key to `keyPath` and
  ## the certificate to `certPath`.
  ## Returns 0 on success; 1 when openssl is missing, otherwise openssl's
  ## non-zero exit code.
  # Ensure that OpenSSL is installed
  if findExe("openssl") == "":
    echo "OpenSSL is not installed or not in the PATH."
    return 1

  let publicIP = getPublicIP()

  if publicIP != "127.0.0.1":
    echo "Your public IP address is: ", publicIP

  # Build the openssl invocation. Paths and the subject are shell-quoted so
  # file names containing spaces or shell metacharacters can neither break
  # nor inject into the command (the original concatenated them raw).
  let
    cmd =
      "openssl req -x509 -newkey rsa:4096 -keyout " & quoteShell(keyPath) &
      " -out " & quoteShell(certPath) & " -sha256 -days 3650 -nodes -subj " &
      quoteShell("/C=XX/ST=StateName/L=CityName/O=CompanyName/OU=CompanySectionName/CN=" &
        publicIP)
    res = execCmd(cmd)

  if res == 0:
    echo "Successfully generated self-signed certificate and key."
  else:
    echo "Failed to generate certificate and key."

  return res
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user