mirror of
https://github.com/logos-messaging/logos-messaging-nim.git
synced 2026-01-02 14:03:06 +00:00
Compare commits
125 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dafdee9f5f | ||
|
|
96196ab8bc | ||
|
|
e3dd6203ae | ||
|
|
834eea945d | ||
|
|
2d40cb9d62 | ||
|
|
7c24a15459 | ||
|
|
bc5059083e | ||
|
|
3323325526 | ||
|
|
2477c4980f | ||
|
|
10dc3d3eb4 | ||
|
|
9e2b3830e9 | ||
|
|
7d1c6abaac | ||
|
|
868d43164e | ||
|
|
12952d070f | ||
|
|
7920368a36 | ||
|
|
2cf4fe559a | ||
|
|
a8590a0a7d | ||
|
|
8c30a8e1bb | ||
|
|
54f4ad8fa2 | ||
|
|
ae74b9018a | ||
|
|
7eb1fdb0ac | ||
|
|
c6cf34df06 | ||
|
|
1e73213a36 | ||
|
|
c0a7debfd1 | ||
|
|
454b098ac5 | ||
|
|
088e3108c8 | ||
|
|
b0cd75f4cb | ||
|
|
31e1a81552 | ||
|
|
e54851d9d6 | ||
|
|
adeb1a928e | ||
|
|
cd5909fafe | ||
|
|
1762548741 | ||
|
|
262d33e394 | ||
|
|
7b580dbf39 | ||
| 36bc01ac0d | |||
|
|
8be45180aa | ||
|
|
9808e205af | ||
|
|
7a009c8b27 | ||
|
|
deebee45d7 | ||
|
|
7e5041d5e1 | ||
|
|
7e3617cd48 | ||
|
|
a6710b4995 | ||
|
|
62be30da19 | ||
|
|
a87b787c4e | ||
|
|
4d68e2abd5 | ||
|
|
4b0bb29aa9 | ||
|
|
797370ec80 | ||
|
|
63f3234876 | ||
|
|
682c76c714 | ||
|
|
74b3770f6c | ||
|
|
5b5ff4cbe7 | ||
|
|
6958eac6f1 | ||
|
|
d94cb7c736 | ||
|
|
7819a6e09a | ||
|
|
bc8acf7611 | ||
|
|
08d14fb082 | ||
|
|
3c9b355879 | ||
|
|
04fdf0a8c1 | ||
|
|
cc7a6406f5 | ||
|
|
794c3a850d | ||
|
|
2691dcb325 | ||
|
|
b1616e55fc | ||
|
|
3d0c6279e3 | ||
|
|
9327da5a7b | ||
|
|
a1bbb61f47 | ||
|
|
7df526f8e3 | ||
|
|
028bf297af | ||
|
|
eb7a3d137a | ||
|
|
9bba8b0f9c | ||
|
|
5fc8c59f54 | ||
|
|
a36601ab0d | ||
|
|
82926f9dd6 | ||
|
|
cc7db99982 | ||
|
|
6cf3644097 | ||
|
|
228e637c9f | ||
|
|
4db4f830f5 | ||
|
|
84cfdba010 | ||
|
|
4d7f857c42 | ||
|
|
09a407ee40 | ||
|
|
cb54db6c2f | ||
|
|
2936ba838d | ||
|
|
4379f9ec50 | ||
|
|
e4358c9718 | ||
|
|
393e3cce1f | ||
|
|
f68d79996e | ||
|
|
a27eec90d1 | ||
|
|
029022d201 | ||
|
|
89a3f735ef | ||
|
|
c3da29fd63 | ||
|
|
5640232085 | ||
|
|
b6855e85ab | ||
|
|
184cc4a694 | ||
|
|
c2934de79d | ||
|
|
aabd98120b | ||
|
|
2cff70d158 | ||
|
|
61171ed551 | ||
|
|
827aada89d | ||
|
|
b7f8728f23 | ||
|
|
5d1d538b45 | ||
|
|
d05469fd6d | ||
|
|
0830898530 | ||
|
|
8fd862b52e | ||
|
|
a4f8b2bedd | ||
|
|
7123c5532c | ||
|
|
012d719722 | ||
|
|
3133aaaf71 | ||
|
|
4e527ee045 | ||
|
|
b713b6e5f4 | ||
|
|
dde023eacf | ||
|
|
994d485b49 | ||
|
|
0ed3fc8079 | ||
|
|
4b186a4b28 | ||
|
|
ac094eae38 | ||
|
|
5f9625f332 | ||
|
|
cc30666016 | ||
|
|
bed5c9ab52 | ||
|
|
7181d9ca63 | ||
|
|
d820976eaf | ||
|
|
edf416f9e0 | ||
|
|
671a4f0ae2 | ||
|
|
26c2b96cfe | ||
|
|
5c38a53f7c | ||
|
|
15025fe6cc | ||
|
|
d7a3a85db9 | ||
|
|
5f5e0893e0 |
56
.github/ISSUE_TEMPLATE/prepare_beta_release.md
vendored
Normal file
56
.github/ISSUE_TEMPLATE/prepare_beta_release.md
vendored
Normal file
@ -0,0 +1,56 @@
|
||||
---
|
||||
name: Prepare Beta Release
|
||||
about: Execute tasks for the creation and publishing of a new beta release
|
||||
title: 'Prepare beta release 0.0.0'
|
||||
labels: beta-release
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
Add appropriate release number to title!
|
||||
|
||||
For detailed info on the release process refer to https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md
|
||||
-->
|
||||
|
||||
### Items to complete
|
||||
|
||||
All items below are to be completed by the owner of the given release.
|
||||
|
||||
- [ ] Create release branch with major and minor only ( e.g. release/v0.X ) if it doesn't exist.
|
||||
- [ ] Assign release candidate tag to the release branch HEAD (e.g. `v0.X.0-beta-rc.0`, `v0.X.0-beta-rc.1`, ... `v0.X.0-beta-rc.N`).
|
||||
- [ ] Generate and edit release notes in CHANGELOG.md.
|
||||
|
||||
- [ ] **Waku test and fleets validation**
|
||||
- [ ] Ensure all the unit tests (specifically js-waku tests) are green against the release candidate.
|
||||
- [ ] Deploy the release candidate to `waku.test` only through [deploy-waku-test job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it).
|
||||
- After completion, disable [deployment job](https://ci.infra.status.im/job/nim-waku/) so that its version is not updated on every merge to master.
|
||||
- Verify the deployed version at https://fleets.waku.org/.
|
||||
- Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
|
||||
- [ ] Analyze Kibana logs from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`.
|
||||
- Most relevant logs are `(fleet: "waku.test" AND message: "SIGSEGV")`.
|
||||
- [ ] Enable again the `waku.test` fleet to resume auto-deployment of the latest `master` commit.
|
||||
|
||||
- [ ] **Proceed with release**
|
||||
|
||||
- [ ] Assign a final release tag (`v0.X.0-beta`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0-beta-rc.N`) and submit a PR from the release branch to `master`.
|
||||
- [ ] Update [nwaku-compose](https://github.com/logos-messaging/nwaku-compose) and [waku-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release.
|
||||
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/logos-messaging/waku-rust-bindings) and make sure all examples and tests work.
|
||||
- [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/logos-messaging/waku-go-bindings) and make sure all tests work.
|
||||
- [ ] Create GitHub release (https://github.com/logos-messaging/nwaku/releases).
|
||||
- [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available.
|
||||
|
||||
- [ ] **Promote release to fleets**
|
||||
- [ ] Ask the PM lead to announce the release.
|
||||
- [ ] Update infra config with any deprecated arguments or changed options.
|
||||
- [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
|
||||
|
||||
### Links
|
||||
|
||||
- [Release process](https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md)
|
||||
- [Release notes](https://github.com/logos-messaging/nwaku/blob/master/CHANGELOG.md)
|
||||
- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64)
|
||||
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
|
||||
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
|
||||
- [Fleets](https://fleets.waku.org/)
|
||||
- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab)
|
||||
76
.github/ISSUE_TEMPLATE/prepare_full_release.md
vendored
Normal file
76
.github/ISSUE_TEMPLATE/prepare_full_release.md
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
---
|
||||
name: Prepare Full Release
|
||||
about: Execute tasks for the creation and publishing of a new full release
|
||||
title: 'Prepare full release 0.0.0'
|
||||
labels: full-release
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
Add appropriate release number to title!
|
||||
|
||||
For detailed info on the release process refer to https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md
|
||||
-->
|
||||
|
||||
### Items to complete
|
||||
|
||||
All items below are to be completed by the owner of the given release.
|
||||
|
||||
- [ ] Create release branch with major and minor only ( e.g. release/v0.X ) if it doesn't exist.
|
||||
- [ ] Assign release candidate tag to the release branch HEAD (e.g. `v0.X.0-rc.0`, `v0.X.0-rc.1`, ... `v0.X.0-rc.N`).
|
||||
- [ ] Generate and edit release notes in CHANGELOG.md.
|
||||
|
||||
- [ ] **Validation of release candidate**
|
||||
|
||||
- [ ] **Automated testing**
|
||||
- [ ] Ensure all the unit tests (specifically js-waku tests) are green against the release candidate.
|
||||
- [ ] Ask Vac-QA and Vac-DST to perform the available tests against the release candidate.
|
||||
- [ ] Vac-DST (an additional report is needed; see [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f))
|
||||
|
||||
- [ ] **Waku fleet testing**
|
||||
- [ ] Deploy the release candidate to `waku.test` and `waku.sandbox` fleets.
|
||||
- Start the [deployment job](https://ci.infra.status.im/job/nim-waku/) for both fleets and wait for it to finish (Jenkins access required; ask the infra team if you don't have it).
|
||||
- After completion, disable [deployment job](https://ci.infra.status.im/job/nim-waku/) so that its version is not updated on every merge to `master`.
|
||||
- Verify the deployed version at https://fleets.waku.org/.
|
||||
- Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
|
||||
- [ ] Search _Kibana_ logs from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test` and `waku.sandbox`.
|
||||
- Most relevant logs are `(fleet: "waku.test" AND message: "SIGSEGV")` OR `(fleet: "waku.sandbox" AND message: "SIGSEGV")`.
|
||||
- [ ] Enable again the `waku.test` fleet to resume auto-deployment of the latest `master` commit.
|
||||
|
||||
- [ ] **Status fleet testing**
|
||||
- [ ] Deploy release candidate to `status.staging`
|
||||
- [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue.
|
||||
- [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client.
|
||||
- 1:1 Chats with each other
|
||||
- Send and receive messages in a community
|
||||
- Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store
|
||||
- [ ] Perform checks based on _end user impact_
|
||||
- [ ] Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues on their Discord server or in the [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (this is not a blocking point.)
|
||||
- [ ] Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested
|
||||
- [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`
|
||||
- [ ] Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC.
|
||||
- [ ] **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities.
|
||||
|
||||
- [ ] **Proceed with release**
|
||||
|
||||
- [ ] Assign a final release tag (`v0.X.0`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0`).
|
||||
- [ ] Update [nwaku-compose](https://github.com/logos-messaging/nwaku-compose) and [waku-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release.
|
||||
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/logos-messaging/waku-rust-bindings) and make sure all examples and tests work.
|
||||
- [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/logos-messaging/waku-go-bindings) and make sure all tests work.
|
||||
- [ ] Create GitHub release (https://github.com/logos-messaging/nwaku/releases).
|
||||
- [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available.
|
||||
|
||||
- [ ] **Promote release to fleets**
|
||||
- [ ] Ask the PM lead to announce the release.
|
||||
- [ ] Update infra config with any deprecated arguments or changed options.
|
||||
|
||||
### Links
|
||||
|
||||
- [Release process](https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md)
|
||||
- [Release notes](https://github.com/logos-messaging/nwaku/blob/master/CHANGELOG.md)
|
||||
- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64)
|
||||
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
|
||||
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
|
||||
- [Fleets](https://fleets.waku.org/)
|
||||
- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab)
|
||||
72
.github/ISSUE_TEMPLATE/prepare_release.md
vendored
72
.github/ISSUE_TEMPLATE/prepare_release.md
vendored
@ -1,72 +0,0 @@
|
||||
---
|
||||
name: Prepare release
|
||||
about: Execute tasks for the creation and publishing of a new release
|
||||
title: 'Prepare release 0.0.0'
|
||||
labels: release
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
<!--
|
||||
Add appropriate release number to title!
|
||||
|
||||
For detailed info on the release process refer to https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md
|
||||
-->
|
||||
|
||||
### Items to complete
|
||||
|
||||
All items below are to be completed by the owner of the given release.
|
||||
|
||||
- [ ] Create release branch
|
||||
- [ ] Assign release candidate tag to the release branch HEAD. e.g. v0.30.0-rc.0
|
||||
- [ ] Generate and edit releases notes in CHANGELOG.md
|
||||
- [ ] Review possible update of [config-options](https://github.com/waku-org/docs.waku.org/blob/develop/docs/guides/nwaku/config-options.md)
|
||||
- [ ] _End user impact_: Summarize impact of changes on Status end users (can be a comment in this issue).
|
||||
- [ ] **Validate release candidate**
|
||||
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/waku-org/waku-rust-bindings) and make sure all examples and tests work
|
||||
|
||||
- [ ] Automated testing
|
||||
- [ ] Ensures js-waku tests are green against release candidate
|
||||
- [ ] Ask Vac-QA and Vac-DST to perform available tests against release candidate
|
||||
- [ ] Vac-QA
|
||||
- [ ] Vac-DST (we need additional report. see [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f))
|
||||
|
||||
- [ ] **On Waku fleets**
|
||||
- [ ] Lock `waku.test` fleet to release candidate version
|
||||
- [ ] Continuously stress `waku.test` fleet for a week (e.g. from `wakudev`)
|
||||
- [ ] Search _Kibana_ logs from the previous month (since last release was deployed), for possible crashes or errors in `waku.test` and `waku.sandbox`.
|
||||
- Most relevant logs are `(fleet: "waku.test" OR fleet: "waku.sandbox") AND message: "SIGSEGV"`
|
||||
- [ ] Run release candidate with `waku-simulator`, ensure that nodes connected to each other
|
||||
- [ ] Unlock `waku.test` to resume auto-deployment of latest `master` commit
|
||||
|
||||
- [ ] **On Status fleet**
|
||||
- [ ] Deploy release candidate to `status.staging`
|
||||
- [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue.
|
||||
- [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client.
|
||||
- [ ] 1:1 Chats with each other
|
||||
- [ ] Send and receive messages in a community
|
||||
- [ ] Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store
|
||||
- [ ] Perform checks based _end user impact_
|
||||
- [ ] Inform other (Waku and Status) CCs to point their instance to `status.staging` for a few days. Ping Status colleagues from their Discord server or [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (not blocking point.)
|
||||
- [ ] Ask Status-QA to perform sanity checks (as described above) + checks based on _end user impact_; do specify the version being tested
|
||||
- [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`
|
||||
- [ ] Get other CCs sign-off: they comment on this PR "used app for a week, no problem", or problem reported, resolved and new RC
|
||||
- [ ] **Get Status-QA sign-off**. Ensuring that `status.test` update will not disturb ongoing activities.
|
||||
|
||||
- [ ] **Proceed with release**
|
||||
|
||||
- [ ] Assign a release tag to the same commit that contains the validated release-candidate tag
|
||||
- [ ] Create GitHub release
|
||||
- [ ] Deploy the release to DockerHub
|
||||
- [ ] Announce the release
|
||||
|
||||
- [ ] **Promote release to fleets**.
|
||||
- [ ] Update infra config with any deprecated arguments or changed options
|
||||
- [ ] [Deploy final release to `waku.sandbox` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox)
|
||||
- [ ] [Deploy final release to `status.staging` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-staging/)
|
||||
- [ ] [Deploy final release to `status.prod` fleet](https://ci.infra.status.im/job/nim-waku/job/deploy-shards-test/)
|
||||
|
||||
- [ ] **Post release**
|
||||
- [ ] Submit a PR from the release branch to master. Important to commit the PR with "create a merge commit" option.
|
||||
- [ ] Update waku-org/nwaku-compose with the new release version.
|
||||
- [ ] Update version in js-waku repo. [update only this](https://github.com/waku-org/js-waku/blob/7c0ce7b2eca31cab837da0251e1e4255151be2f7/.github/workflows/ci.yml#L135) by submitting a PR.
|
||||
29
.github/workflows/ci.yml
vendored
29
.github/workflows/ci.yml
vendored
@ -54,9 +54,9 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
os: [ubuntu-22.04, macos-15]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
timeout-minutes: 45
|
||||
|
||||
name: build-${{ matrix.os }}
|
||||
steps:
|
||||
@ -76,10 +76,15 @@ jobs:
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Make update
|
||||
run: make update
|
||||
|
||||
- name: Build binaries
|
||||
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
|
||||
|
||||
|
||||
build-windows:
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
|
||||
uses: ./.github/workflows/windows-build.yml
|
||||
with:
|
||||
branch: ${{ github.ref }}
|
||||
@ -90,9 +95,9 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
os: [ubuntu-22.04, macos-15]
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
timeout-minutes: 45
|
||||
|
||||
name: test-${{ matrix.os }}
|
||||
steps:
|
||||
@ -112,6 +117,9 @@ jobs:
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: Make update
|
||||
run: make update
|
||||
|
||||
- name: Run tests
|
||||
run: |
|
||||
postgres_enabled=0
|
||||
@ -124,17 +132,18 @@ jobs:
|
||||
export NIMFLAGS="--colors:off -d:chronicles_colors:none"
|
||||
export USE_LIBBACKTRACE=0
|
||||
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled testwakunode2
|
||||
|
||||
build-docker-image:
|
||||
needs: changes
|
||||
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }}
|
||||
uses: waku-org/nwaku/.github/workflows/container-image.yml@master
|
||||
uses: logos-messaging/logos-messaging-nim/.github/workflows/container-image.yml@10dc3d3eb4b6a3d4313f7b2cc4a85a925e9ce039
|
||||
secrets: inherit
|
||||
|
||||
nwaku-nwaku-interop-tests:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/waku-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1
|
||||
uses: logos-messaging/logos-messaging-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_STABLE
|
||||
with:
|
||||
node_nwaku: ${{ needs.build-docker-image.outputs.image }}
|
||||
|
||||
@ -142,14 +151,14 @@ jobs:
|
||||
|
||||
js-waku-node:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node
|
||||
|
||||
js-waku-node-optional:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node-optional
|
||||
|
||||
3
.github/workflows/container-image.yml
vendored
3
.github/workflows/container-image.yml
vendored
@ -41,7 +41,7 @@ jobs:
|
||||
env:
|
||||
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
|
||||
QUAY_USER: ${{ secrets.QUAY_USER }}
|
||||
|
||||
|
||||
- name: Checkout code
|
||||
if: ${{ steps.secrets.outcome == 'success' }}
|
||||
uses: actions/checkout@v4
|
||||
@ -65,6 +65,7 @@ jobs:
|
||||
id: build
|
||||
if: ${{ steps.secrets.outcome == 'success' }}
|
||||
run: |
|
||||
make update
|
||||
|
||||
make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" wakunode2
|
||||
|
||||
|
||||
14
.github/workflows/pre-release.yml
vendored
14
.github/workflows/pre-release.yml
vendored
@ -34,10 +34,10 @@ jobs:
|
||||
needs: tag-name
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
os: [ubuntu-22.04, macos-15]
|
||||
arch: [amd64]
|
||||
include:
|
||||
- os: macos-13
|
||||
- os: macos-15
|
||||
arch: arm64
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
@ -47,7 +47,7 @@ jobs:
|
||||
- name: prep variables
|
||||
id: vars
|
||||
run: |
|
||||
ARCH=${{matrix.arch}}
|
||||
ARCH=${{matrix.arch}}
|
||||
|
||||
echo "arch=${ARCH}" >> $GITHUB_OUTPUT
|
||||
|
||||
@ -91,14 +91,14 @@ jobs:
|
||||
|
||||
build-docker-image:
|
||||
needs: tag-name
|
||||
uses: waku-org/nwaku/.github/workflows/container-image.yml@master
|
||||
uses: logos-messaging/nwaku/.github/workflows/container-image.yml@master
|
||||
with:
|
||||
image_tag: ${{ needs.tag-name.outputs.tag }}
|
||||
secrets: inherit
|
||||
|
||||
js-waku-node:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node
|
||||
@ -106,7 +106,7 @@ jobs:
|
||||
|
||||
js-waku-node-optional:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node-optional
|
||||
@ -150,7 +150,7 @@ jobs:
|
||||
-u $(id -u) \
|
||||
docker.io/wakuorg/sv4git:latest \
|
||||
release-notes ${RELEASE_NOTES_TAG} --previous $(git tag -l --sort -creatordate | grep -e "^v[0-9]*\.[0-9]*\.[0-9]*$") |\
|
||||
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' > release_notes.md
|
||||
sed -E 's@#([0-9]+)@[#\1](https://github.com/logos-messaging/nwaku/issues/\1)@g' > release_notes.md
|
||||
|
||||
sed -i "s/^## .*/Generated at $(date)/" release_notes.md
|
||||
|
||||
|
||||
79
.github/workflows/release-assets.yml
vendored
79
.github/workflows/release-assets.yml
vendored
@ -14,10 +14,10 @@ jobs:
|
||||
build-and-upload:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-22.04, macos-13]
|
||||
os: [ubuntu-22.04, macos-15]
|
||||
arch: [amd64]
|
||||
include:
|
||||
- os: macos-13
|
||||
- os: macos-15
|
||||
arch: arm64
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
@ -41,25 +41,84 @@ jobs:
|
||||
.git/modules
|
||||
key: ${{ runner.os }}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }}
|
||||
|
||||
- name: prep variables
|
||||
- name: Get tag
|
||||
id: version
|
||||
run: |
|
||||
# Use full tag, e.g., v0.37.0
|
||||
echo "version=${GITHUB_REF_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Prep variables
|
||||
id: vars
|
||||
run: |
|
||||
NWAKU_ARTIFACT_NAME=$(echo "nwaku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
|
||||
VERSION=${{ steps.version.outputs.version }}
|
||||
|
||||
echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
NWAKU_ARTIFACT_NAME=$(echo "waku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
|
||||
echo "waku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install dependencies
|
||||
if [[ "${{ runner.os }}" == "Linux" ]]; then
|
||||
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-${{runner.os}}-linux.deb" | tr "[:upper:]" "[:lower:]")
|
||||
fi
|
||||
|
||||
if [[ "${{ runner.os }}" == "macOS" ]]; then
|
||||
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-macos.tar.gz" | tr "[:upper:]" "[:lower:]")
|
||||
fi
|
||||
|
||||
echo "libwaku=${LIBWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Install build dependencies
|
||||
run: |
|
||||
if [[ "${{ runner.os }}" == "Linux" ]]; then
|
||||
sudo apt-get update && sudo apt-get install -y build-essential dpkg-dev
|
||||
fi
|
||||
|
||||
- name: Build Waku artifacts
|
||||
run: |
|
||||
OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux")
|
||||
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2
|
||||
tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/
|
||||
tar -cvzf ${{steps.vars.outputs.waku}} ./build/
|
||||
|
||||
- name: Upload asset
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false libwaku
|
||||
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false STATIC=1 libwaku
|
||||
|
||||
- name: Create distributable libwaku package
|
||||
run: |
|
||||
VERSION=${{ steps.version.outputs.version }}
|
||||
|
||||
if [[ "${{ runner.os }}" == "Linux" ]]; then
|
||||
rm -rf pkg
|
||||
mkdir -p pkg/DEBIAN pkg/usr/local/lib pkg/usr/local/include
|
||||
cp build/libwaku.so pkg/usr/local/lib/
|
||||
cp build/libwaku.a pkg/usr/local/lib/
|
||||
cp library/libwaku.h pkg/usr/local/include/
|
||||
|
||||
echo "Package: waku" >> pkg/DEBIAN/control
|
||||
echo "Version: ${VERSION}" >> pkg/DEBIAN/control
|
||||
echo "Priority: optional" >> pkg/DEBIAN/control
|
||||
echo "Section: libs" >> pkg/DEBIAN/control
|
||||
echo "Architecture: ${{matrix.arch}}" >> pkg/DEBIAN/control
|
||||
echo "Maintainer: Waku Team <ivansete@status.im>" >> pkg/DEBIAN/control
|
||||
echo "Description: Waku library" >> pkg/DEBIAN/control
|
||||
|
||||
dpkg-deb --build pkg ${{steps.vars.outputs.libwaku}}
|
||||
fi
|
||||
|
||||
if [[ "${{ runner.os }}" == "macOS" ]]; then
|
||||
tar -cvzf ${{steps.vars.outputs.libwaku}} ./build/libwaku.dylib ./build/libwaku.a ./library/libwaku.h
|
||||
fi
|
||||
|
||||
- name: Upload waku artifact
|
||||
uses: actions/upload-artifact@v4.4.0
|
||||
with:
|
||||
name: ${{steps.vars.outputs.nwaku}}
|
||||
path: ${{steps.vars.outputs.nwaku}}
|
||||
name: waku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
|
||||
path: ${{ steps.vars.outputs.waku }}
|
||||
if-no-files-found: error
|
||||
|
||||
- name: Upload libwaku artifact
|
||||
uses: actions/upload-artifact@v4.4.0
|
||||
with:
|
||||
name: libwaku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
|
||||
path: ${{ steps.vars.outputs.libwaku }}
|
||||
if-no-files-found: error
|
||||
|
||||
13
.github/workflows/windows-build.yml
vendored
13
.github/workflows/windows-build.yml
vendored
@ -71,7 +71,6 @@ jobs:
|
||||
- name: Building miniupnpc
|
||||
run: |
|
||||
cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc
|
||||
git checkout little_chore_windows_support
|
||||
make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1
|
||||
cd ../../../../..
|
||||
|
||||
@ -81,9 +80,13 @@ jobs:
|
||||
make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1
|
||||
cd ../../../../
|
||||
|
||||
- name: Building wakunode2
|
||||
- name: Building wakunode2.exe
|
||||
run: |
|
||||
make wakunode2 LOG_LEVEL=DEBUG V=3 -j8
|
||||
|
||||
- name: Building libwaku.dll
|
||||
run: |
|
||||
make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j
|
||||
|
||||
- name: Check Executable
|
||||
run: |
|
||||
@ -93,3 +96,9 @@ jobs:
|
||||
echo "Build failed: wakunode2.exe not found"
|
||||
exit 1
|
||||
fi
|
||||
if [ -f "./build/libwaku.dll" ]; then
|
||||
echo "libwaku.dll build successful"
|
||||
else
|
||||
echo "Build failed: libwaku.dll not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@ -59,6 +59,10 @@ nimbus-build-system.paths
|
||||
/examples/nodejs/build/
|
||||
/examples/rust/target/
|
||||
|
||||
# Xcode user data
|
||||
xcuserdata/
|
||||
*.xcuserstate
|
||||
|
||||
|
||||
# Coverage
|
||||
coverage_html_report/
|
||||
@ -79,3 +83,9 @@ waku_handler.moc.cpp
|
||||
|
||||
# Nix build result
|
||||
result
|
||||
|
||||
# llms
|
||||
AGENTS.md
|
||||
nimble.develop
|
||||
nimble.paths
|
||||
nimbledeps
|
||||
|
||||
7
.gitmodules
vendored
7
.gitmodules
vendored
@ -181,6 +181,11 @@
|
||||
branch = master
|
||||
[submodule "vendor/waku-rlnv2-contract"]
|
||||
path = vendor/waku-rlnv2-contract
|
||||
url = https://github.com/waku-org/waku-rlnv2-contract.git
|
||||
url = https://github.com/logos-messaging/waku-rlnv2-contract.git
|
||||
ignore = untracked
|
||||
branch = master
|
||||
[submodule "vendor/nim-ffi"]
|
||||
path = vendor/nim-ffi
|
||||
url = https://github.com/logos-messaging/nim-ffi/
|
||||
ignore = untracked
|
||||
branch = master
|
||||
|
||||
509
AGENTS.md
Normal file
509
AGENTS.md
Normal file
@ -0,0 +1,509 @@
|
||||
# AGENTS.md - AI Coding Context
|
||||
|
||||
This file provides essential context for LLMs assisting with Logos Messaging development.
|
||||
|
||||
## Project Identity
|
||||
|
||||
Logos Messaging is designed as a shared public network for generalized messaging, not application-specific infrastructure.
|
||||
|
||||
This project is a Nim implementation of a libp2p protocol suite for private, censorship-resistant P2P messaging. It targets resource-restricted devices and privacy-preserving communication.
|
||||
|
||||
Logos Messaging was formerly known as Waku. Waku-related terminology remains within the codebase for historical reasons.
|
||||
|
||||
### Design Philosophy
|
||||
|
||||
Key architectural decisions:
|
||||
|
||||
Resource-restricted first: Protocols differentiate between full nodes (relay) and light clients (filter, lightpush, store). Light clients can participate without maintaining full message history or relay capabilities. This explains the client/server split in protocol implementations.
|
||||
|
||||
Privacy through unlinkability: RLN (Rate Limiting Nullifier) provides DoS protection while preserving sender anonymity. Messages are routed through pubsub topics with automatic sharding across 8 shards. Code prioritizes metadata privacy alongside content encryption.
|
||||
|
||||
Scalability via sharding: The network uses automatic content-topic-based sharding to distribute traffic. This is why you'll see sharding logic throughout the codebase and why pubsub topic selection is protocol-level, not application-level.
|
||||
|
||||
See [documentation](https://docs.waku.org/learn/) for architectural details.
|
||||
|
||||
### Core Protocols
|
||||
- Relay: Pub/sub message routing using GossipSub
|
||||
- Store: Historical message retrieval and persistence
|
||||
- Filter: Lightweight message filtering for resource-restricted clients
|
||||
- Lightpush: Lightweight message publishing for clients
|
||||
- Peer Exchange: Peer discovery mechanism
|
||||
- RLN Relay: Rate limiting nullifier for spam protection
|
||||
- Metadata: Cluster and shard metadata exchange between peers
|
||||
- Mix: Mixnet protocol for enhanced privacy through onion routing
|
||||
- Rendezvous: Alternative peer discovery mechanism
|
||||
|
||||
### Key Terminology
|
||||
- ENR (Ethereum Node Record): Node identity and capability advertisement
|
||||
- Multiaddr: libp2p addressing format (e.g., `/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2...`)
|
||||
- PubsubTopic: Gossipsub topic for message routing (e.g., `/waku/2/default-waku/proto`)
|
||||
- ContentTopic: Application-level message categorization (e.g., `/my-app/1/chat/proto`)
|
||||
- Sharding: Partitioning network traffic across topics (static or auto-sharding)
|
||||
- RLN (Rate Limiting Nullifier): Zero-knowledge proof system for spam prevention
|
||||
|
||||
### Specifications
|
||||
All specs are at [rfc.vac.dev/waku](https://rfc.vac.dev/waku). RFCs use `WAKU2-XXX` format (not legacy `WAKU-XXX`).
|
||||
|
||||
## Architecture
|
||||
|
||||
### Protocol Module Pattern
|
||||
Each protocol typically follows this structure:
|
||||
```
|
||||
waku_<protocol>/
|
||||
├── protocol.nim # Main protocol type and handler logic
|
||||
├── client.nim # Client-side API
|
||||
├── rpc.nim # RPC message types
|
||||
├── rpc_codec.nim # Protobuf encoding/decoding
|
||||
├── common.nim # Shared types and constants
|
||||
└── protocol_metrics.nim # Prometheus metrics
|
||||
```
|
||||
|
||||
### WakuNode Architecture
|
||||
- WakuNode (`waku/node/waku_node.nim`) is the central orchestrator
|
||||
- Protocols are "mounted" onto the node's switch (libp2p component)
|
||||
- PeerManager handles peer selection and connection management
|
||||
- Switch provides libp2p transport, security, and multiplexing
|
||||
|
||||
Example protocol type definition:
|
||||
```nim
|
||||
type WakuFilter* = ref object of LPProtocol
|
||||
subscriptions*: FilterSubscriptions
|
||||
peerManager: PeerManager
|
||||
messageCache: TimedCache[string]
|
||||
```
|
||||
|
||||
## Development Essentials
|
||||
|
||||
### Build Requirements
|
||||
- Nim 2.x (check `waku.nimble` for minimum version)
|
||||
- Rust toolchain (required for RLN dependencies)
|
||||
- Build system: Make with nimbus-build-system
|
||||
|
||||
### Build System
|
||||
The project uses Makefile with nimbus-build-system (Status's Nim build framework):
|
||||
```bash
|
||||
# Initial build (updates submodules)
|
||||
make wakunode2
|
||||
|
||||
# After git pull, update submodules
|
||||
make update
|
||||
|
||||
# Build with custom flags
|
||||
make wakunode2 NIMFLAGS="-d:chronicles_log_level=DEBUG"
|
||||
```
|
||||
|
||||
Note: The build system uses `--mm:refc` memory management (automatically enforced). Only relevant if compiling outside the standard build system.
|
||||
|
||||
### Common Make Targets
|
||||
```bash
|
||||
make wakunode2 # Build main node binary
|
||||
make test # Run all tests
|
||||
make testcommon # Run common tests only
|
||||
make libwakuStatic # Build static C library
|
||||
make chat2 # Build chat example
|
||||
make install-nph # Install git hook for auto-formatting
|
||||
```
|
||||
|
||||
### Testing
|
||||
```bash
|
||||
# Run all tests
|
||||
make test
|
||||
|
||||
# Run specific test file
|
||||
make test tests/test_waku_enr.nim
|
||||
|
||||
# Run specific test case from file
|
||||
make test tests/test_waku_enr.nim "check capabilities support"
|
||||
|
||||
# Build and run test separately (for development iteration)
|
||||
make test tests/test_waku_enr.nim
|
||||
```
|
||||
|
||||
Test structure uses `testutils/unittests`:
|
||||
```nim
|
||||
import testutils/unittests
|
||||
|
||||
suite "Waku ENR - Capabilities":
|
||||
test "check capabilities support":
|
||||
## Given
|
||||
let bitfield: CapabilitiesBitfield = 0b0000_1101u8
|
||||
|
||||
## Then
|
||||
check:
|
||||
bitfield.supportsCapability(Capabilities.Relay)
|
||||
not bitfield.supportsCapability(Capabilities.Store)
|
||||
```
|
||||
|
||||
### Code Formatting
|
||||
Mandatory: All code must be formatted with `nph` (vendored in `vendor/nph`)
|
||||
```bash
|
||||
# Format specific file
|
||||
make nph/waku/waku_core.nim
|
||||
|
||||
# Install git pre-commit hook (auto-formats on commit)
|
||||
make install-nph
|
||||
```
|
||||
The nph formatter handles all formatting details automatically, especially with the pre-commit hook installed. Focus on semantic correctness.
|
||||
|
||||
### Logging
|
||||
Uses `chronicles` library with compile-time configuration:
|
||||
```nim
|
||||
import chronicles
|
||||
|
||||
logScope:
|
||||
topics = "waku lightpush"
|
||||
|
||||
info "handling request", peerId = peerId, topic = pubsubTopic
|
||||
error "request failed", error = msg
|
||||
```
|
||||
|
||||
Compile with log level:
|
||||
```bash
|
||||
nim c -d:chronicles_log_level=TRACE myfile.nim
|
||||
```
|
||||
|
||||
|
||||
## Code Conventions
|
||||
|
||||
Common pitfalls:
|
||||
- Always handle Result types explicitly
|
||||
- Avoid global mutable state: Pass state through parameters
|
||||
- Keep functions focused: Under 50 lines when possible
|
||||
- Prefer compile-time checks (`static assert`) over runtime checks
|
||||
|
||||
### Naming
|
||||
- Files/Directories: `snake_case` (e.g., `waku_lightpush`, `peer_manager`)
|
||||
- Procedures: `camelCase` (e.g., `handleRequest`, `pushMessage`)
|
||||
- Types: `PascalCase` (e.g., `WakuFilter`, `PubsubTopic`)
|
||||
- Constants: `PascalCase` (e.g., `MaxContentTopicsPerRequest`)
|
||||
- Constructors: `func init(T: type Xxx, params): T`
|
||||
- For ref types: `func new(T: type Xxx, params): ref T`
|
||||
- Exceptions: `XxxError` for CatchableError, `XxxDefect` for Defect
|
||||
- ref object types: `XxxRef` suffix
|
||||
|
||||
### Imports Organization
|
||||
Group imports: stdlib, external libs, internal modules:
|
||||
```nim
|
||||
import
|
||||
std/[options, sequtils], # stdlib
|
||||
results, chronicles, chronos, # external
|
||||
libp2p/peerid
|
||||
import
|
||||
../node/peer_manager, # internal (separate import block)
|
||||
../waku_core,
|
||||
./common
|
||||
```
|
||||
|
||||
### Async Programming
|
||||
Uses chronos, not stdlib `asyncdispatch`:
|
||||
```nim
|
||||
proc handleRequest(
|
||||
wl: WakuLightPush, peerId: PeerId
|
||||
): Future[WakuLightPushResult] {.async.} =
|
||||
let res = await wl.pushHandler(peerId, pubsubTopic, message)
|
||||
return res
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
The project uses both Result types and exceptions:
|
||||
|
||||
Result types from nim-results are used for protocol and API-level errors:
|
||||
```nim
|
||||
proc subscribe(
|
||||
wf: WakuFilter, peerId: PeerID
|
||||
): Future[FilterSubscribeResult] {.async.} =
|
||||
if contentTopics.len > MaxContentTopicsPerRequest:
|
||||
return err(FilterSubscribeError.badRequest("exceeds maximum"))
|
||||
|
||||
# Handle Result with isOkOr
|
||||
(await wf.subscriptions.addSubscription(peerId, criteria)).isOkOr:
|
||||
return err(FilterSubscribeError.serviceUnavailable(error))
|
||||
|
||||
ok()
|
||||
```
|
||||
|
||||
Exceptions still used for:
|
||||
- chronos async failures (CancelledError, etc.)
|
||||
- Database/system errors
|
||||
- Library interop
|
||||
|
||||
Most files start with `{.push raises: [].}` to disable exception tracking, then use try/catch blocks where needed.
|
||||
|
||||
### Pragma Usage
|
||||
```nim
|
||||
{.push raises: [].} # Disable default exception tracking (at file top)
|
||||
|
||||
proc myProc(): Result[T, E] {.async.} = # Async proc
|
||||
```
|
||||
|
||||
### Protocol Inheritance
|
||||
Protocols inherit from libp2p's `LPProtocol`:
|
||||
```nim
|
||||
type WakuLightPush* = ref object of LPProtocol
|
||||
rng*: ref rand.HmacDrbgContext
|
||||
peerManager*: PeerManager
|
||||
pushHandler*: PushMessageHandler
|
||||
```
|
||||
|
||||
### Type Visibility
|
||||
- Public exports use `*` suffix: `type WakuFilter* = ...`
|
||||
- Fields without `*` are module-private
|
||||
|
||||
## Style Guide Essentials
|
||||
|
||||
This section summarizes key Nim style guidelines relevant to this project. Full guide: https://status-im.github.io/nim-style-guide/
|
||||
|
||||
### Language Features
|
||||
|
||||
Import and Export
|
||||
- Use explicit import paths with std/ prefix for stdlib
|
||||
- Group imports: stdlib, external, internal (separate blocks)
|
||||
- Export modules whose types appear in public API
|
||||
- Avoid include
|
||||
|
||||
Macros and Templates
|
||||
- Avoid macros and templates - prefer simple constructs
|
||||
- Avoid generating public API with macros
|
||||
- Put logic in templates, use macros only for glue code
|
||||
|
||||
Object Construction
|
||||
- Prefer Type(field: value) syntax
|
||||
- Use Type.init(params) convention for constructors
|
||||
- Default zero-initialization should be valid state
|
||||
- Avoid using result variable for construction
|
||||
|
||||
ref object Types
|
||||
- Avoid ref object unless needed for:
|
||||
- Resource handles requiring reference semantics
|
||||
- Shared ownership
|
||||
- Reference-based data structures (trees, lists)
|
||||
- Stable pointer for FFI
|
||||
- Use explicit ref MyType where possible
|
||||
- Name ref object types with Ref suffix: XxxRef
|
||||
|
||||
Memory Management
|
||||
- Prefer stack-based and statically sized types in core code
|
||||
- Use heap allocation in glue layers
|
||||
- Avoid alloca
|
||||
- For FFI: use create/dealloc or createShared/deallocShared
|
||||
|
||||
Variable Usage
|
||||
- Use most restrictive of const, let, var (prefer const over let over var)
|
||||
- Prefer expressions for initialization over var then assignment
|
||||
- Avoid result variable - use explicit return or expression-based returns
|
||||
|
||||
Functions
|
||||
- Prefer func over proc
|
||||
- Avoid public (*) symbols not part of intended API
|
||||
- Prefer openArray over seq for function parameters
|
||||
|
||||
Methods (runtime polymorphism)
|
||||
- Avoid method keyword for dynamic dispatch
|
||||
- Prefer manual vtable with proc closures for polymorphism
|
||||
- Methods lack support for generics
|
||||
|
||||
Miscellaneous
|
||||
- Annotate callback proc types with {.raises: [], gcsafe.}
|
||||
- Avoid explicit {.inline.} pragma
|
||||
- Avoid converters
|
||||
- Avoid finalizers
|
||||
|
||||
Type Guidelines
|
||||
|
||||
Binary Data
|
||||
- Use byte for binary data
|
||||
- Use seq[byte] for dynamic arrays
|
||||
- Convert string to seq[byte] early if stdlib returns binary as string
|
||||
|
||||
Integers
|
||||
- Prefer signed (int, int64) for counting, lengths, indexing
|
||||
- Use unsigned with explicit size (uint8, uint64) for binary data, bit ops
|
||||
- Avoid Natural
|
||||
- Check ranges before converting to int
|
||||
- Avoid casting pointers to int
|
||||
- Avoid range types
|
||||
|
||||
Strings
|
||||
- Use string for text
|
||||
- Use seq[byte] for binary data instead of string
|
||||
|
||||
### Error Handling
|
||||
|
||||
Philosophy
|
||||
- Prefer Result, Opt for explicit error handling
|
||||
- Use Exceptions only for legacy code compatibility
|
||||
|
||||
Result Types
|
||||
- Use Result[T, E] for operations that can fail
|
||||
- Use cstring for simple error messages: Result[T, cstring]
|
||||
- Use enum for errors needing differentiation: Result[T, SomeErrorEnum]
|
||||
- Use Opt[T] for simple optional values
|
||||
- Annotate all modules: {.push raises: [].} at top
|
||||
|
||||
Exceptions (when unavoidable)
|
||||
- Inherit from CatchableError, name XxxError
|
||||
- Use Defect for panics/logic errors, name XxxDefect
|
||||
- Annotate functions explicitly: {.raises: [SpecificError].}
|
||||
- Catch specific error types, avoid catching CatchableError
|
||||
- Use expression-based try blocks
|
||||
- Isolate legacy exception code with try/except, convert to Result
|
||||
|
||||
Common Defect Sources
|
||||
- Overflow in signed arithmetic
|
||||
- Array/seq indexing with []
|
||||
- Implicit range type conversions
|
||||
|
||||
Status Codes
|
||||
- Avoid status code pattern
|
||||
- Use Result instead
|
||||
|
||||
### Library Usage
|
||||
|
||||
Standard Library
|
||||
- Use judiciously, prefer focused packages
|
||||
- Prefer these replacements:
|
||||
- async: chronos
|
||||
- bitops: stew/bitops2
|
||||
- endians: stew/endians2
|
||||
- exceptions: results
|
||||
- io: stew/io2
|
||||
|
||||
Results Library
|
||||
- Use cstring errors for diagnostics without differentiation
|
||||
- Use enum errors when caller needs to act on specific errors
|
||||
- Use complex types when additional error context needed
|
||||
- Use isOkOr pattern for chaining
|
||||
|
||||
Wrappers (C/FFI)
|
||||
- Prefer native Nim when available
|
||||
- For C libraries: use {.compile.} to build from source
|
||||
- Create xxx_abi.nim for raw ABI wrapper
|
||||
- Avoid C++ libraries
|
||||
|
||||
Miscellaneous
|
||||
- Print hex output in lowercase, accept both cases
|
||||
|
||||
### Common Pitfalls
|
||||
|
||||
- Defects lack tracking by {.raises.}
|
||||
- nil ref causes runtime crashes
|
||||
- result variable disables branch checking
|
||||
- Exception hierarchy unclear between Nim versions
|
||||
- Range types have compiler bugs
|
||||
- Finalizers infect all instances of type
|
||||
|
||||
## Common Workflows
|
||||
|
||||
### Adding a New Protocol
|
||||
1. Create directory: `waku/waku_myprotocol/`
|
||||
2. Define core files:
|
||||
- `rpc.nim` - Message types
|
||||
- `rpc_codec.nim` - Protobuf encoding
|
||||
- `protocol.nim` - Protocol handler
|
||||
- `client.nim` - Client API
|
||||
- `common.nim` - Shared types
|
||||
3. Define protocol type in `protocol.nim`:
|
||||
```nim
|
||||
type WakuMyProtocol* = ref object of LPProtocol
|
||||
peerManager: PeerManager
|
||||
# ... fields
|
||||
```
|
||||
4. Implement request handler
|
||||
5. Mount in WakuNode (`waku/node/waku_node.nim`)
|
||||
6. Add tests in `tests/waku_myprotocol/`
|
||||
7. Export module via `waku/waku_myprotocol.nim`
|
||||
|
||||
### Adding a REST API Endpoint
|
||||
1. Define handler in `waku/rest_api/endpoint/myprotocol/`
|
||||
2. Implement endpoint following pattern:
|
||||
```nim
|
||||
proc installMyProtocolApiHandlers*(
|
||||
router: var RestRouter, node: WakuNode
|
||||
) =
|
||||
router.api(MethodGet, "/waku/v2/myprotocol/endpoint") do () -> RestApiResponse:
|
||||
# Implementation
|
||||
return RestApiResponse.jsonResponse(data, status = Http200)
|
||||
```
|
||||
3. Register in `waku/rest_api/handlers.nim`
|
||||
|
||||
### Adding Database Migration
|
||||
For message_store (SQLite):
|
||||
1. Create `migrations/message_store/NNNNN_description.up.sql`
|
||||
2. Create corresponding `.down.sql` for rollback
|
||||
3. Increment version number sequentially
|
||||
4. Test migration locally before committing
|
||||
|
||||
For PostgreSQL: add in `migrations/message_store_postgres/`
|
||||
|
||||
### Running Single Test During Development
|
||||
```bash
|
||||
# Build test binary
|
||||
make test tests/waku_filter_v2/test_waku_client.nim
|
||||
|
||||
# Binary location
|
||||
./build/tests/waku_filter_v2/test_waku_client.nim.bin
|
||||
|
||||
# Or combine
|
||||
make test tests/waku_filter_v2/test_waku_client.nim "specific test name"
|
||||
```
|
||||
|
||||
### Debugging with Chronicles
|
||||
Set log level and filter topics:
|
||||
```bash
|
||||
nim c -r \
|
||||
-d:chronicles_log_level=TRACE \
|
||||
-d:chronicles_disabled_topics="eth,dnsdisc" \
|
||||
tests/mytest.nim
|
||||
```
|
||||
|
||||
## Key Constraints
|
||||
|
||||
### Vendor Directory
|
||||
- Never edit files directly in vendor - it is auto-generated from git submodules
|
||||
- Always run `make update` after pulling changes
|
||||
- Managed by `nimbus-build-system`
|
||||
|
||||
### Chronicles Performance
|
||||
- Log levels are configured at compile time for performance
|
||||
- Runtime filtering is available but should be used sparingly: `-d:chronicles_runtime_filtering=on`
|
||||
- Default sinks are optimized for production
|
||||
|
||||
### Memory Management
|
||||
- Uses `refc` (reference counting with cycle collection)
|
||||
- Automatically enforced by the build system (hardcoded in `waku.nimble`)
|
||||
- Do not override unless absolutely necessary, as it breaks compatibility
|
||||
|
||||
### RLN Dependencies
|
||||
- RLN code requires a Rust toolchain, which explains Rust imports in some modules
|
||||
- Pre-built `librln` libraries are checked into the repository
|
||||
|
||||
## Quick Reference
|
||||
|
||||
Language: Nim 2.x | License: MIT or Apache 2.0
|
||||
|
||||
### Important Files
|
||||
- `Makefile` - Primary build interface
|
||||
- `waku.nimble` - Package definition and build tasks (called via nimbus-build-system)
|
||||
- `vendor/nimbus-build-system/` - Status's build framework
|
||||
- `waku/node/waku_node.nim` - Core node implementation
|
||||
- `apps/wakunode2/wakunode2.nim` - Main CLI application
|
||||
- `waku/factory/waku_conf.nim` - Configuration types
|
||||
- `library/libwaku.nim` - C bindings entry point
|
||||
|
||||
### Testing Entry Points
|
||||
- `tests/all_tests_waku.nim` - All Waku protocol tests
|
||||
- `tests/all_tests_wakunode2.nim` - Node application tests
|
||||
- `tests/all_tests_common.nim` - Common utilities tests
|
||||
|
||||
### Key Dependencies
|
||||
- `chronos` - Async framework
|
||||
- `nim-results` - Result type for error handling
|
||||
- `chronicles` - Logging
|
||||
- `libp2p` - P2P networking
|
||||
- `confutils` - CLI argument parsing
|
||||
- `presto` - REST server
|
||||
- `nimcrypto` - Cryptographic primitives
|
||||
|
||||
Note: For specific version requirements, check `waku.nimble`.
|
||||
|
||||
|
||||
71
CHANGELOG.md
71
CHANGELOG.md
@ -1,7 +1,76 @@
|
||||
## v0.36.0 (2025-06-20)
|
||||
## v0.37.1-beta (2025-12-10)
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Remove ENR cache from peer exchange ([#3652](https://github.com/logos-messaging/logos-messaging-nim/pull/3652)) ([7920368a](https://github.com/logos-messaging/logos-messaging-nim/commit/7920368a36687cd5f12afa52d59866792d8457ca))
|
||||
|
||||
## v0.37.0-beta (2025-10-01)
|
||||
|
||||
### Notes
|
||||
|
||||
- Deprecated parameters:
|
||||
- `tree_path` and `rlnDB` (RLN-related storage paths)
|
||||
- `--dns-discovery` (fully removed, including dns-discovery-name-server)
|
||||
- `keepAlive` (deprecated, config updated accordingly)
|
||||
- Legacy `store` protocol is no longer supported by default.
|
||||
- Improved sharding configuration: now explicit and shard-specific metrics added.
|
||||
- Mix nodes are limited to IPv4 addresses only.
|
||||
- [lightpush legacy](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) is being deprecated. Use [lightpush v3](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) instead.
|
||||
|
||||
### Features
|
||||
|
||||
- Waku API: create node via API ([#3580](https://github.com/waku-org/nwaku/pull/3580)) ([bc8acf76](https://github.com/waku-org/nwaku/commit/bc8acf76))
|
||||
- Waku Sync: full topic support ([#3275](https://github.com/waku-org/nwaku/pull/3275)) ([9327da5a](https://github.com/waku-org/nwaku/commit/9327da5a))
|
||||
- Mix PoC implementation ([#3284](https://github.com/waku-org/nwaku/pull/3284)) ([eb7a3d13](https://github.com/waku-org/nwaku/commit/eb7a3d13))
|
||||
- Rendezvous: add request interval option ([#3569](https://github.com/waku-org/nwaku/pull/3569)) ([cc7a6406](https://github.com/waku-org/nwaku/commit/cc7a6406))
|
||||
- Shard-specific metrics tracking ([#3520](https://github.com/waku-org/nwaku/pull/3520)) ([c3da29fd](https://github.com/waku-org/nwaku/commit/c3da29fd))
|
||||
- Libwaku: build Windows DLL for Status-go ([#3460](https://github.com/waku-org/nwaku/pull/3460)) ([5c38a53f](https://github.com/waku-org/nwaku/commit/5c38a53f))
|
||||
- RLN: add Stateless RLN support ([#3621](https://github.com/waku-org/nwaku/pull/3621))
|
||||
- LOG: Reduce log level of messages from debug to info for better visibility ([#3622](https://github.com/waku-org/nwaku/pull/3622))
|
||||
|
||||
### Bug Fixes
|
||||
|
||||
- Prevent invalid pubsub topic subscription via Relay REST API ([#3559](https://github.com/waku-org/nwaku/pull/3559)) ([a36601ab](https://github.com/waku-org/nwaku/commit/a36601ab))
|
||||
- Fixed node crash when RLN is unregistered ([#3573](https://github.com/waku-org/nwaku/pull/3573)) ([3d0c6279](https://github.com/waku-org/nwaku/commit/3d0c6279))
|
||||
- REST: fixed sync protocol issues ([#3503](https://github.com/waku-org/nwaku/pull/3503)) ([393e3cce](https://github.com/waku-org/nwaku/commit/393e3cce))
|
||||
- Regex pattern fix for `username:password@` in URLs ([#3517](https://github.com/waku-org/nwaku/pull/3517)) ([89a3f735](https://github.com/waku-org/nwaku/commit/89a3f735))
|
||||
- Sharding: applied modulus fix ([#3530](https://github.com/waku-org/nwaku/pull/3530)) ([f68d7999](https://github.com/waku-org/nwaku/commit/f68d7999))
|
||||
- Metrics: switched to counter instead of gauge ([#3355](https://github.com/waku-org/nwaku/pull/3355)) ([a27eec90](https://github.com/waku-org/nwaku/commit/a27eec90))
|
||||
- Fixed lightpush metrics and diagnostics ([#3486](https://github.com/waku-org/nwaku/pull/3486)) ([0ed3fc80](https://github.com/waku-org/nwaku/commit/0ed3fc80))
|
||||
- Misc sync, dashboard, and CI fixes ([#3434](https://github.com/waku-org/nwaku/pull/3434), [#3508](https://github.com/waku-org/nwaku/pull/3508), [#3464](https://github.com/waku-org/nwaku/pull/3464))
|
||||
- Raise log level of numerous operational messages from debug to info for better visibility ([#3622](https://github.com/waku-org/nwaku/pull/3622))
|
||||
|
||||
### Changes
|
||||
|
||||
- Enable peer-exchange by default ([#3557](https://github.com/waku-org/nwaku/pull/3557)) ([7df526f8](https://github.com/waku-org/nwaku/commit/7df526f8))
|
||||
- Refactor peer-exchange client and service implementations ([#3523](https://github.com/waku-org/nwaku/pull/3523)) ([4379f9ec](https://github.com/waku-org/nwaku/commit/4379f9ec))
|
||||
- Updated rendezvous to use callback-based shard/capability updates ([#3558](https://github.com/waku-org/nwaku/pull/3558)) ([028bf297](https://github.com/waku-org/nwaku/commit/028bf297))
|
||||
- Config updates and explicit sharding setup ([#3468](https://github.com/waku-org/nwaku/pull/3468)) ([994d485b](https://github.com/waku-org/nwaku/commit/994d485b))
|
||||
- Bumped libp2p to v1.13.0 ([#3574](https://github.com/waku-org/nwaku/pull/3574)) ([b1616e55](https://github.com/waku-org/nwaku/commit/b1616e55))
|
||||
- Removed legacy dependencies (e.g., libpcre in Docker builds) ([#3552](https://github.com/waku-org/nwaku/pull/3552)) ([4db4f830](https://github.com/waku-org/nwaku/commit/4db4f830))
|
||||
- Benchmarks for RLN proof generation & verification ([#3567](https://github.com/waku-org/nwaku/pull/3567)) ([794c3a85](https://github.com/waku-org/nwaku/commit/794c3a85))
|
||||
- Various CI/CD & infra updates ([#3515](https://github.com/waku-org/nwaku/pull/3515), [#3505](https://github.com/waku-org/nwaku/pull/3505))
|
||||
|
||||
### This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/):
|
||||
|
||||
| Protocol | Spec status | Protocol id |
|
||||
| ---: | :---: | :--- |
|
||||
| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` |
|
||||
| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` <br />`/vac/waku/filter-subscribe/2.0.0-beta1` <br />`/vac/waku/filter-push/2.0.0-beta1` |
|
||||
| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` |
|
||||
| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` |
|
||||
| [`WAKU2-LIGHTPUSH v3`](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) | `draft` | `/vac/waku/lightpush/3.0.0` |
|
||||
| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` |
|
||||
| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/master/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` |
|
||||
|
||||
## v0.36.0 (2025-06-20)
|
||||
### Notes
|
||||
|
||||
- Extended REST API for better debugging
|
||||
- Extended `/health` report
|
||||
- Very detailed access to peers and actual status through [`/admin/v1/peers/...` endpoints](https://waku-org.github.io/waku-rest-api/#get-/admin/v1/peers/stats)
|
||||
- Dynamic log level change with[ `/admin/v1/log-level`](https://waku-org.github.io/waku-rest-api/#post-/admin/v1/log-level/-logLevel-)
|
||||
|
||||
- The `rln-relay-eth-client-address` parameter, from now on, should be passed as an array of RPC addresses.
|
||||
- new `preset` parameter. `preset=twn` is the RLN-protected Waku Network (cluster 1). Overrides other values.
|
||||
- Removed `dns-addrs` parameter as it was duplicated and unused.
|
||||
|
||||
14
Dockerfile
14
Dockerfile
@ -1,13 +1,14 @@
|
||||
# BUILD NIM APP ----------------------------------------------------------------
|
||||
FROM rust:1.81.0-alpine3.19 AS nim-build
|
||||
FROM rustlang/rust:nightly-alpine3.19 AS nim-build
|
||||
|
||||
ARG NIMFLAGS
|
||||
ARG MAKE_TARGET=wakunode2
|
||||
ARG NIM_COMMIT
|
||||
ARG LOG_LEVEL=TRACE
|
||||
ARG HEAPTRACK_BUILD=0
|
||||
|
||||
# Get build tools and required header files
|
||||
RUN apk add --no-cache bash git build-base openssl-dev pcre-dev linux-headers curl jq
|
||||
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
@ -18,6 +19,10 @@ RUN apk update && apk upgrade
|
||||
# Ran separately from 'make' to avoid re-doing
|
||||
RUN git submodule update --init --recursive
|
||||
|
||||
RUN if [ "$HEAPTRACK_BUILD" = "1" ]; then \
|
||||
git apply --directory=vendor/nimbus-build-system/vendor/Nim docs/tutorial/nim.2.2.4_heaptracker_addon.patch; \
|
||||
fi
|
||||
|
||||
# Slowest build step for the sake of caching layers
|
||||
RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT}
|
||||
|
||||
@ -41,10 +46,7 @@ LABEL version="unknown"
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apk add --no-cache libgcc pcre-dev libpq-dev bind-tools
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
RUN apk add --no-cache libgcc libpq-dev bind-tools
|
||||
|
||||
# Copy to separate location to accomodate different MAKE_TARGET values
|
||||
COPY --from=nim-build /app/build/$MAKE_TARGET /usr/local/bin/
|
||||
|
||||
56
Dockerfile.lightpushWithMix.compile
Normal file
56
Dockerfile.lightpushWithMix.compile
Normal file
@ -0,0 +1,56 @@
|
||||
# BUILD NIM APP ----------------------------------------------------------------
|
||||
FROM rustlang/rust:nightly-alpine3.19 AS nim-build
|
||||
|
||||
ARG NIMFLAGS
|
||||
ARG MAKE_TARGET=lightpushwithmix
|
||||
ARG NIM_COMMIT
|
||||
ARG LOG_LEVEL=TRACE
|
||||
|
||||
# Get build tools and required header files
|
||||
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
|
||||
# workaround for alpine issue: https://github.com/alpinelinux/docker-alpine/issues/383
|
||||
RUN apk update && apk upgrade
|
||||
|
||||
# Ran separately from 'make' to avoid re-doing
|
||||
RUN git submodule update --init --recursive
|
||||
|
||||
# Slowest build step for the sake of caching layers
|
||||
RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT}
|
||||
|
||||
# Build the final node binary
|
||||
RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}"
|
||||
|
||||
|
||||
# REFERENCE IMAGE as BASE for specialized PRODUCTION IMAGES----------------------------------------
|
||||
FROM alpine:3.18 AS base_lpt
|
||||
|
||||
ARG MAKE_TARGET=lightpushwithmix
|
||||
|
||||
LABEL maintainer="prem@waku.org"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Lite Push With Mix: Waku light-client"
|
||||
LABEL commit="unknown"
|
||||
LABEL version="unknown"
|
||||
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apk add --no-cache libgcc libpq-dev \
|
||||
wget \
|
||||
iproute2 \
|
||||
python3 \
|
||||
jq
|
||||
|
||||
|
||||
COPY --from=nim-build /app/build/lightpush_publisher_mix /usr/bin/
|
||||
RUN chmod +x /usr/bin/lightpush_publisher_mix
|
||||
|
||||
# Standalone image to be used manually and in lpt-runner -------------------------------------------
|
||||
FROM base_lpt AS standalone_lpt
|
||||
|
||||
ENTRYPOINT ["/usr/bin/lightpush_publisher_mix"]
|
||||
126
Makefile
126
Makefile
@ -41,8 +41,11 @@ ifeq ($(detected_OS),Windows)
|
||||
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc"
|
||||
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream"
|
||||
|
||||
LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq
|
||||
LIBS = -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq
|
||||
NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)")
|
||||
|
||||
export PATH := /c/msys64/usr/bin:/c/msys64/mingw64/bin:/c/msys64/usr/lib:/c/msys64/mingw64/lib:$(PATH)
|
||||
|
||||
endif
|
||||
|
||||
##########
|
||||
@ -53,7 +56,21 @@ endif
|
||||
# default target, because it's the first one that doesn't start with '.'
|
||||
all: | wakunode2 example2 chat2 chat2bridge libwaku
|
||||
|
||||
test: | testcommon testwaku
|
||||
test_file := $(word 2,$(MAKECMDGOALS))
|
||||
define test_name
|
||||
$(shell echo '$(MAKECMDGOALS)' | cut -d' ' -f3-)
|
||||
endef
|
||||
|
||||
test:
|
||||
ifeq ($(strip $(test_file)),)
|
||||
$(MAKE) testcommon
|
||||
$(MAKE) testwaku
|
||||
else
|
||||
$(MAKE) compile-test TEST_FILE="$(test_file)" TEST_NAME="$(call test_name)"
|
||||
endif
|
||||
# this prevents make from erroring on unknown targets like "Index"
|
||||
%:
|
||||
@true
|
||||
|
||||
waku.nims:
|
||||
ln -s waku.nimble $@
|
||||
@ -81,10 +98,8 @@ NIM_PARAMS := $(NIM_PARAMS) -d:git_version=\"$(GIT_VERSION)\"
|
||||
HEAPTRACKER ?= 0
|
||||
HEAPTRACKER_INJECT ?= 0
|
||||
ifeq ($(HEAPTRACKER), 1)
|
||||
# Needed to make nimbus-build-system use the Nim's 'heaptrack_support' branch
|
||||
DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support_v2.0.12
|
||||
# Assumes Nim's lib/system/alloc.nim is patched!
|
||||
TARGET := debug-with-heaptrack
|
||||
NIM_COMMIT := heaptrack_support_v2.0.12
|
||||
|
||||
ifeq ($(HEAPTRACKER_INJECT), 1)
|
||||
# the Nim compiler will load 'libheaptrack_inject.so'
|
||||
@ -104,6 +119,10 @@ endif
|
||||
##################
|
||||
.PHONY: deps libbacktrace
|
||||
|
||||
FOUNDRY_VERSION := 1.5.0
|
||||
PNPM_VERSION := 10.23.0
|
||||
|
||||
|
||||
rustup:
|
||||
ifeq (, $(shell which cargo))
|
||||
# Install Rustup if it's not installed
|
||||
@ -113,7 +132,7 @@ ifeq (, $(shell which cargo))
|
||||
endif
|
||||
|
||||
rln-deps: rustup
|
||||
./scripts/install_rln_tests_dependencies.sh
|
||||
./scripts/install_rln_tests_dependencies.sh $(FOUNDRY_VERSION) $(PNPM_VERSION)
|
||||
|
||||
deps: | deps-common nat-libs waku.nims
|
||||
|
||||
@ -131,6 +150,9 @@ ifeq ($(USE_LIBBACKTRACE), 0)
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:disable_libbacktrace
|
||||
endif
|
||||
|
||||
# enable experimental exit is dest feature in libp2p mix
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:libp2p_mix_experimental_exit_is_dest
|
||||
|
||||
libbacktrace:
|
||||
+ $(MAKE) -C vendor/nim-libbacktrace --no-print-directory BUILD_CXX_LIB=0
|
||||
|
||||
@ -165,7 +187,7 @@ nimbus-build-system-nimble-dir:
|
||||
.PHONY: librln
|
||||
|
||||
LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
|
||||
LIBRLN_VERSION := v0.7.0
|
||||
LIBRLN_VERSION := v0.9.0
|
||||
|
||||
ifeq ($(detected_OS),Windows)
|
||||
LIBRLN_FILE := rln.lib
|
||||
@ -228,6 +250,10 @@ chat2: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim chat2 $(NIM_PARAMS) waku.nims
|
||||
|
||||
chat2mix: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim chat2mix $(NIM_PARAMS) waku.nims
|
||||
|
||||
rln-db-inspector: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim rln_db_inspector $(NIM_PARAMS) waku.nims
|
||||
@ -240,13 +266,18 @@ liteprotocoltester: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim liteprotocoltester $(NIM_PARAMS) waku.nims
|
||||
|
||||
lightpushwithmix: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim lightpushwithmix $(NIM_PARAMS) waku.nims
|
||||
|
||||
build/%: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$*" && \
|
||||
$(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $*
|
||||
|
||||
test/%: | build deps librln
|
||||
echo -e $(BUILD_MSG) "test/$*" && \
|
||||
$(ENV_SCRIPT) nim testone $(NIM_PARAMS) waku.nims $*
|
||||
compile-test: | build deps librln
|
||||
echo -e $(BUILD_MSG) "$(TEST_FILE)" "\"$(TEST_NAME)\"" && \
|
||||
$(ENV_SCRIPT) nim buildTest $(NIM_PARAMS) waku.nims $(TEST_FILE) && \
|
||||
$(ENV_SCRIPT) nim execTest $(NIM_PARAMS) waku.nims $(TEST_FILE) "\"$(TEST_NAME)\""; \
|
||||
|
||||
################
|
||||
## Waku tools ##
|
||||
@ -336,6 +367,7 @@ docker-image:
|
||||
--build-arg="NIMFLAGS=$(DOCKER_IMAGE_NIMFLAGS)" \
|
||||
--build-arg="NIM_COMMIT=$(DOCKER_NIM_COMMIT)" \
|
||||
--build-arg="LOG_LEVEL=$(LOG_LEVEL)" \
|
||||
--build-arg="HEAPTRACK_BUILD=$(HEAPTRACKER)" \
|
||||
--label="commit=$(shell git rev-parse HEAD)" \
|
||||
--label="version=$(GIT_VERSION)" \
|
||||
--target $(TARGET) \
|
||||
@ -344,6 +376,7 @@ docker-image:
|
||||
docker-quick-image: MAKE_TARGET ?= wakunode2
|
||||
docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION)
|
||||
docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG)
|
||||
docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm
|
||||
docker-quick-image: | build deps librln wakunode2
|
||||
docker build \
|
||||
--build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
|
||||
@ -397,16 +430,27 @@ docker-liteprotocoltester-push:
|
||||
.PHONY: cbindings cwaku_example libwaku
|
||||
|
||||
STATIC ?= 0
|
||||
BUILD_COMMAND ?= libwakuDynamic
|
||||
|
||||
ifeq ($(detected_OS),Windows)
|
||||
LIB_EXT_DYNAMIC = dll
|
||||
LIB_EXT_STATIC = lib
|
||||
else ifeq ($(detected_OS),Darwin)
|
||||
LIB_EXT_DYNAMIC = dylib
|
||||
LIB_EXT_STATIC = a
|
||||
else ifeq ($(detected_OS),Linux)
|
||||
LIB_EXT_DYNAMIC = so
|
||||
LIB_EXT_STATIC = a
|
||||
endif
|
||||
|
||||
LIB_EXT := $(LIB_EXT_DYNAMIC)
|
||||
ifeq ($(STATIC), 1)
|
||||
LIB_EXT = $(LIB_EXT_STATIC)
|
||||
BUILD_COMMAND = libwakuStatic
|
||||
endif
|
||||
|
||||
libwaku: | build deps librln
|
||||
rm -f build/libwaku*
|
||||
ifeq ($(STATIC), 1)
|
||||
echo -e $(BUILD_MSG) "build/$@.a" && \
|
||||
$(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
|
||||
else
|
||||
echo -e $(BUILD_MSG) "build/$@.so" && \
|
||||
$(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
|
||||
endif
|
||||
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
|
||||
|
||||
#####################
|
||||
## Mobile Bindings ##
|
||||
@ -473,6 +517,51 @@ libwaku-android:
|
||||
# It's likely this architecture is not used so we might just not support it.
|
||||
# $(MAKE) libwaku-android-arm
|
||||
|
||||
#################
|
||||
## iOS Bindings #
|
||||
#################
|
||||
.PHONY: libwaku-ios-precheck \
|
||||
libwaku-ios-device \
|
||||
libwaku-ios-simulator \
|
||||
libwaku-ios
|
||||
|
||||
IOS_DEPLOYMENT_TARGET ?= 18.0
|
||||
|
||||
# Get SDK paths dynamically using xcrun
|
||||
define get_ios_sdk_path
|
||||
$(shell xcrun --sdk $(1) --show-sdk-path 2>/dev/null)
|
||||
endef
|
||||
|
||||
libwaku-ios-precheck:
|
||||
ifeq ($(detected_OS),Darwin)
|
||||
@command -v xcrun >/dev/null 2>&1 || { echo "Error: Xcode command line tools not installed"; exit 1; }
|
||||
else
|
||||
$(error iOS builds are only supported on macOS)
|
||||
endif
|
||||
|
||||
# Build for iOS architecture
|
||||
build-libwaku-for-ios-arch:
|
||||
IOS_SDK=$(IOS_SDK) IOS_ARCH=$(IOS_ARCH) IOS_SDK_PATH=$(IOS_SDK_PATH) $(ENV_SCRIPT) nim libWakuIOS $(NIM_PARAMS) waku.nims
|
||||
|
||||
# iOS device (arm64)
|
||||
libwaku-ios-device: IOS_ARCH=arm64
|
||||
libwaku-ios-device: IOS_SDK=iphoneos
|
||||
libwaku-ios-device: IOS_SDK_PATH=$(call get_ios_sdk_path,iphoneos)
|
||||
libwaku-ios-device: | libwaku-ios-precheck build deps
|
||||
$(MAKE) build-libwaku-for-ios-arch IOS_ARCH=$(IOS_ARCH) IOS_SDK=$(IOS_SDK) IOS_SDK_PATH=$(IOS_SDK_PATH)
|
||||
|
||||
# iOS simulator (arm64 - Apple Silicon Macs)
|
||||
libwaku-ios-simulator: IOS_ARCH=arm64
|
||||
libwaku-ios-simulator: IOS_SDK=iphonesimulator
|
||||
libwaku-ios-simulator: IOS_SDK_PATH=$(call get_ios_sdk_path,iphonesimulator)
|
||||
libwaku-ios-simulator: | libwaku-ios-precheck build deps
|
||||
$(MAKE) build-libwaku-for-ios-arch IOS_ARCH=$(IOS_ARCH) IOS_SDK=$(IOS_SDK) IOS_SDK_PATH=$(IOS_SDK_PATH)
|
||||
|
||||
# Build all iOS targets
|
||||
libwaku-ios:
|
||||
$(MAKE) libwaku-ios-device
|
||||
$(MAKE) libwaku-ios-simulator
|
||||
|
||||
cwaku_example: | build libwaku
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
cc -o "build/$@" \
|
||||
@ -518,4 +607,3 @@ release-notes:
|
||||
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g'
|
||||
# I could not get the tool to replace issue ids with links, so using sed for now,
|
||||
# asked here: https://github.com/bvieira/sv4git/discussions/101
|
||||
|
||||
|
||||
26
README.md
26
README.md
@ -1,19 +1,21 @@
|
||||
# Nwaku
|
||||
# Logos Messaging Nim
|
||||
|
||||
## Introduction
|
||||
|
||||
The nwaku repository implements Waku, and provides tools related to it.
|
||||
The logos-messaging-nim, a.k.a. lmn or nwaku, repository implements a set of libp2p protocols aimed to bring
|
||||
private communications.
|
||||
|
||||
- A Nim implementation of the [Waku (v2) protocol](https://specs.vac.dev/specs/waku/v2/waku-v2.html).
|
||||
- CLI application `wakunode2` that allows you to run a Waku node.
|
||||
- Examples of Waku usage.
|
||||
- Nim implementation of [these specs](https://github.com/vacp2p/rfc-index/tree/main/waku).
|
||||
- C library that exposes the implemented protocols.
|
||||
- CLI application that allows you to run an lmn node.
|
||||
- Examples.
|
||||
- Various tests of above.
|
||||
|
||||
For more details see the [source code](waku/README.md)
|
||||
|
||||
## How to Build & Run ( Linux, MacOS & WSL )
|
||||
|
||||
These instructions are generic. For more detailed instructions, see the Waku source code above.
|
||||
These instructions are generic. For more detailed instructions, see the source code above.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
@ -87,7 +89,7 @@ pacman -S --noconfirm --needed mingw-w64-x86_64-python
|
||||
#### 3. Build Wakunode
|
||||
- Open Git Bash as administrator
|
||||
- clone nwaku and cd nwaku
|
||||
- Execute: `./scripts/build_wakunode_windows.sh`
|
||||
- Execute: `./scripts/build_windows.sh`
|
||||
|
||||
#### 4. Troubleshooting
|
||||
If `wakunode2.exe` isn't generated:
|
||||
@ -110,11 +112,19 @@ source env.sh
|
||||
```
|
||||
If everything went well, you should see your prompt suffixed with `[Nimbus env]$`. Now you can run `nim` commands as usual.
|
||||
|
||||
### Waku Protocol Test Suite
|
||||
### Test Suite
|
||||
|
||||
```bash
|
||||
# Run all the Waku tests
|
||||
make test
|
||||
|
||||
# Run a specific test file
|
||||
make test <test_file_path>
|
||||
# e.g. : make test tests/wakunode2/test_all.nim
|
||||
|
||||
# Run a specific test name from a specific test file
|
||||
make test <test_file_path> <test_name>
|
||||
# e.g. : make test tests/wakunode2/test_all.nim "node setup is successful with default configuration"
|
||||
```
|
||||
|
||||
### Building single test files
|
||||
|
||||
@ -1,49 +1,73 @@
|
||||
import
|
||||
math,
|
||||
std/sequtils,
|
||||
results,
|
||||
options,
|
||||
std/[strutils, times, sequtils, osproc], math, results, options, testutils/unittests
|
||||
|
||||
import
|
||||
waku/[
|
||||
waku_rln_relay/protocol_types,
|
||||
waku_rln_relay/rln,
|
||||
waku_rln_relay,
|
||||
waku_rln_relay/conversion_utils,
|
||||
waku_rln_relay/group_manager/static/group_manager,
|
||||
]
|
||||
waku_rln_relay/group_manager/on_chain/group_manager,
|
||||
],
|
||||
tests/waku_rln_relay/utils_onchain
|
||||
|
||||
import std/[times, os]
|
||||
proc benchmark(
|
||||
manager: OnChainGroupManager, registerCount: int, messageLimit: int
|
||||
): Future[string] {.async, gcsafe.} =
|
||||
# Register a new member so that we can later generate proofs
|
||||
let idCredentials = generateCredentials(registerCount)
|
||||
|
||||
proc main(): Future[string] {.async, gcsafe.} =
|
||||
let rlnIns = createRLNInstance(20).get()
|
||||
let credentials = toSeq(0 .. 1000).mapIt(membershipKeyGen(rlnIns).get())
|
||||
var start_time = getTime()
|
||||
for i in 0 .. registerCount - 1:
|
||||
try:
|
||||
await manager.register(idCredentials[i], UserMessageLimit(messageLimit + 1))
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
let manager = StaticGroupManager(
|
||||
rlnInstance: rlnIns,
|
||||
groupSize: 1000,
|
||||
membershipIndex: some(MembershipIndex(900)),
|
||||
groupKeys: credentials,
|
||||
)
|
||||
info "registration finished",
|
||||
iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds
|
||||
|
||||
await manager.init()
|
||||
discard await manager.updateRoots()
|
||||
manager.merkleProofCache = (await manager.fetchMerkleProofElements()).valueOr:
|
||||
error "Failed to fetch Merkle proof", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let epoch = default(Epoch)
|
||||
info "epoch in bytes", epochHex = epoch.inHex()
|
||||
let data: seq[byte] = newSeq[byte](1024)
|
||||
|
||||
var proofGenTimes: seq[times.Duration] = @[]
|
||||
var proofVerTimes: seq[times.Duration] = @[]
|
||||
for i in 0 .. 50:
|
||||
var time = getTime()
|
||||
let proof = manager.generateProof(data, default(Epoch)).get()
|
||||
proofGenTimes.add(getTime() - time)
|
||||
|
||||
time = getTime()
|
||||
let res = manager.verifyProof(data, proof).get()
|
||||
proofVerTimes.add(getTime() - time)
|
||||
start_time = getTime()
|
||||
for i in 1 .. messageLimit:
|
||||
var generate_time = getTime()
|
||||
let proof = manager.generateProof(data, epoch, MessageId(i.uint8)).valueOr:
|
||||
raiseAssert $error
|
||||
proofGenTimes.add(getTime() - generate_time)
|
||||
|
||||
let verify_time = getTime()
|
||||
let ok = manager.verifyProof(data, proof).valueOr:
|
||||
raiseAssert $error
|
||||
proofVerTimes.add(getTime() - verify_time)
|
||||
info "iteration finished",
|
||||
iter = i, elapsed_ms = (getTime() - start_time).inMilliseconds
|
||||
|
||||
echo "Proof generation times: ", sum(proofGenTimes) div len(proofGenTimes)
|
||||
echo "Proof verification times: ", sum(proofVerTimes) div len(proofVerTimes)
|
||||
|
||||
proc main() =
|
||||
# Start a local Ethereum JSON-RPC (Anvil) so that the group-manager setup can connect.
|
||||
let anvilProc = runAnvil()
|
||||
defer:
|
||||
stopAnvil(anvilProc)
|
||||
|
||||
# Set up an On-chain group manager (includes contract deployment)
|
||||
let manager = waitFor setupOnchainGroupManager()
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
discard waitFor benchmark(manager, 200, 20)
|
||||
|
||||
when isMainModule:
|
||||
try:
|
||||
waitFor(main())
|
||||
except CatchableError as e:
|
||||
raise e
|
||||
main()
|
||||
|
||||
@ -132,25 +132,14 @@ proc showChatPrompt(c: Chat) =
|
||||
except IOError:
|
||||
discard
|
||||
|
||||
proc getChatLine(c: Chat, msg: WakuMessage): Result[string, string] =
|
||||
proc getChatLine(payload: seq[byte]): string =
|
||||
# No payload encoding/encryption from Waku
|
||||
let
|
||||
pb = Chat2Message.init(msg.payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(msg.payload)
|
||||
return ok(chatline)
|
||||
let pb = Chat2Message.init(payload).valueOr:
|
||||
return string.fromBytes(payload)
|
||||
return $pb
|
||||
|
||||
proc printReceivedMessage(c: Chat, msg: WakuMessage) =
|
||||
let
|
||||
pb = Chat2Message.init(msg.payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(msg.payload)
|
||||
let chatLine = getChatLine(msg.payload)
|
||||
try:
|
||||
echo &"{chatLine}"
|
||||
except ValueError:
|
||||
@ -173,18 +162,16 @@ proc startMetricsServer(
|
||||
): Result[MetricsHttpServerRef, string] =
|
||||
info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort
|
||||
|
||||
let metricsServerRes = MetricsHttpServerRef.new($serverIp, serverPort)
|
||||
if metricsServerRes.isErr():
|
||||
return err("metrics HTTP server start failed: " & $metricsServerRes.error)
|
||||
let server = MetricsHttpServerRef.new($serverIp, serverPort).valueOr:
|
||||
return err("metrics HTTP server start failed: " & $error)
|
||||
|
||||
let server = metricsServerRes.value
|
||||
try:
|
||||
waitFor server.start()
|
||||
except CatchableError:
|
||||
return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())
|
||||
|
||||
info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort
|
||||
ok(metricsServerRes.value)
|
||||
ok(server)
|
||||
|
||||
proc publish(c: Chat, line: string) =
|
||||
# First create a Chat2Message protobuf with this line of text
|
||||
@ -202,19 +189,17 @@ proc publish(c: Chat, line: string) =
|
||||
version: 0,
|
||||
timestamp: getNanosecondTime(time),
|
||||
)
|
||||
|
||||
if not isNil(c.node.wakuRlnRelay):
|
||||
# for future version when we support more than one rln protected content topic,
|
||||
# we should check the message content topic as well
|
||||
let appendRes = c.node.wakuRlnRelay.appendRLNProof(message, float64(time))
|
||||
if appendRes.isErr():
|
||||
debug "could not append rate limit proof to the message"
|
||||
if c.node.wakuRlnRelay.appendRLNProof(message, float64(time)).isErr():
|
||||
info "could not append rate limit proof to the message"
|
||||
else:
|
||||
debug "rate limit proof is appended to the message"
|
||||
let decodeRes = RateLimitProof.init(message.proof)
|
||||
if decodeRes.isErr():
|
||||
info "rate limit proof is appended to the message"
|
||||
let proof = RateLimitProof.init(message.proof).valueOr:
|
||||
error "could not decode the RLN proof"
|
||||
|
||||
let proof = decodeRes.get()
|
||||
return
|
||||
# TODO move it to log after dogfooding
|
||||
let msgEpoch = fromEpoch(proof.epoch)
|
||||
if fromEpoch(c.node.wakuRlnRelay.lastEpoch) == msgEpoch:
|
||||
@ -332,27 +317,19 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
|
||||
let natRes = setupNat(
|
||||
let (extIp, extTcpPort, extUdpPort) = setupNat(
|
||||
conf.nat,
|
||||
clientId,
|
||||
Port(uint16(conf.tcpPort) + conf.portsShift),
|
||||
Port(uint16(conf.udpPort) + conf.portsShift),
|
||||
)
|
||||
|
||||
if natRes.isErr():
|
||||
raise newException(ValueError, "setupNat error " & natRes.error)
|
||||
|
||||
let (extIp, extTcpPort, extUdpPort) = natRes.get()
|
||||
).valueOr:
|
||||
raise newException(ValueError, "setupNat error " & error)
|
||||
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let node = block:
|
||||
var builder = WakuNodeBuilder.init()
|
||||
@ -421,14 +398,14 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
)
|
||||
elif conf.dnsDiscoveryUrl != "":
|
||||
# No pre-selected fleet. Discover nodes via DNS using user config
|
||||
debug "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
|
||||
info "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
|
||||
dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)
|
||||
|
||||
var discoveredNodes: seq[RemotePeerInfo]
|
||||
|
||||
if dnsDiscoveryUrl.isSome:
|
||||
var nameServers: seq[TransportAddress]
|
||||
for ip in conf.dnsDiscoveryNameServers:
|
||||
for ip in conf.dnsAddrsNameServers:
|
||||
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
|
||||
|
||||
let dnsResolver = DnsResolver.new(nameServers)
|
||||
@ -438,7 +415,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
let resolved = await dnsResolver.resolveTxt(domain)
|
||||
return resolved[0] # Use only first answer
|
||||
|
||||
var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver)
|
||||
let wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver)
|
||||
if wakuDnsDiscovery.isOk:
|
||||
let discoveredPeers = await wakuDnsDiscovery.get().findPeers()
|
||||
if discoveredPeers.isOk:
|
||||
@ -446,8 +423,10 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
discoveredNodes = discoveredPeers.get()
|
||||
echo "Discovered and connecting to " & $discoveredNodes
|
||||
waitFor chat.node.connectToNodes(discoveredNodes)
|
||||
else:
|
||||
warn "Failed to find peers via DNS discovery", error = discoveredPeers.error
|
||||
else:
|
||||
warn "Failed to init Waku DNS discovery"
|
||||
warn "Failed to init Waku DNS discovery", error = wakuDnsDiscovery.error
|
||||
|
||||
let peerInfo = node.switch.peerInfo
|
||||
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
|
||||
@ -483,27 +462,27 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
else:
|
||||
newSeq[byte](0)
|
||||
|
||||
let
|
||||
pb = Chat2Message.init(payload)
|
||||
chatLine =
|
||||
if pb.isOk:
|
||||
pb[].toString()
|
||||
else:
|
||||
string.fromBytes(payload)
|
||||
let chatLine = getChatLine(payload)
|
||||
echo &"{chatLine}"
|
||||
info "Hit store handler"
|
||||
|
||||
let queryRes = await node.query(
|
||||
StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get()
|
||||
)
|
||||
if queryRes.isOk():
|
||||
storeHandler(queryRes.value)
|
||||
block storeQueryBlock:
|
||||
let queryRes = (
|
||||
await node.query(
|
||||
StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get()
|
||||
)
|
||||
).valueOr:
|
||||
error "Store query failed", error = error
|
||||
break storeQueryBlock
|
||||
storeHandler(queryRes)
|
||||
|
||||
# NOTE Must be mounted after relay
|
||||
if conf.lightpushnode != "":
|
||||
let peerInfo = parsePeerInfo(conf.lightpushnode)
|
||||
if peerInfo.isOk():
|
||||
await mountLegacyLightPush(node)
|
||||
(await node.mountLegacyLightPush()).isOkOr:
|
||||
error "failed to mount legacy lightpush", error = error
|
||||
quit(QuitFailure)
|
||||
node.mountLegacyLightPushClient()
|
||||
node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec)
|
||||
else:
|
||||
@ -511,8 +490,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
error = peerInfo.error
|
||||
|
||||
if conf.filternode != "":
|
||||
let peerInfo = parsePeerInfo(conf.filternode)
|
||||
if peerInfo.isOk():
|
||||
if (let peerInfo = parsePeerInfo(conf.filternode); peerInfo.isErr()):
|
||||
error "Filter not mounted. Couldn't parse conf.filternode", error = peerInfo.error
|
||||
else:
|
||||
await node.mountFilter()
|
||||
await node.mountFilterClient()
|
||||
|
||||
@ -523,8 +503,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
chat.printReceivedMessage(msg)
|
||||
|
||||
# TODO: Here to support FilterV2 relevant subscription.
|
||||
else:
|
||||
error "Filter not mounted. Couldn't parse conf.filternode", error = peerInfo.error
|
||||
|
||||
# Subscribe to a topic, if relay is mounted
|
||||
if conf.relay:
|
||||
@ -544,12 +522,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
info "WakuRLNRelay is enabled"
|
||||
|
||||
proc spamHandler(wakuMessage: WakuMessage) {.gcsafe, closure.} =
|
||||
debug "spam handler is called"
|
||||
let chatLineResult = chat.getChatLine(wakuMessage)
|
||||
if chatLineResult.isOk():
|
||||
echo "A spam message is found and discarded : ", chatLineResult.value
|
||||
else:
|
||||
echo "A spam message is found and discarded"
|
||||
info "spam handler is called"
|
||||
let chatLineResult = getChatLine(wakuMessage.payload)
|
||||
echo "spam message is found and discarded : " & chatLineResult
|
||||
chat.prompt = false
|
||||
showChatPrompt(chat)
|
||||
|
||||
@ -567,7 +542,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
),
|
||||
userMessageLimit: conf.rlnRelayUserMessageLimit,
|
||||
epochSizeSec: conf.rlnEpochSizeSec,
|
||||
treePath: conf.rlnRelayTreePath,
|
||||
)
|
||||
|
||||
waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))
|
||||
@ -590,9 +564,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
|
||||
await chat.readWriteLoop()
|
||||
|
||||
if conf.keepAlive:
|
||||
node.startKeepalive()
|
||||
|
||||
runForever()
|
||||
|
||||
proc main(rng: ref HmacDrbgContext) {.async.} =
|
||||
|
||||
@ -170,10 +170,11 @@ type
|
||||
name: "dns-discovery-url"
|
||||
.}: string
|
||||
|
||||
dnsDiscoveryNameServers* {.
|
||||
desc: "DNS name server IPs to query. Argument may be repeated.",
|
||||
dnsAddrsNameServers* {.
|
||||
desc:
|
||||
"DNS name server IPs to query for DNS multiaddrs resolution. Argument may be repeated.",
|
||||
defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
|
||||
name: "dns-discovery-name-server"
|
||||
name: "dns-addrs-name-server"
|
||||
.}: seq[IpAddress]
|
||||
|
||||
## Chat2 configuration
|
||||
@ -282,12 +283,6 @@ type
|
||||
name: "rln-relay-epoch-sec"
|
||||
.}: uint64
|
||||
|
||||
rlnRelayTreePath* {.
|
||||
desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
|
||||
defaultValue: "",
|
||||
name: "rln-relay-tree-path"
|
||||
.}: string
|
||||
|
||||
# NOTE: Keys are different in nim-libp2p
|
||||
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
|
||||
try:
|
||||
|
||||
@ -23,6 +23,7 @@ import
|
||||
waku_store,
|
||||
factory/builder,
|
||||
common/utils/matterbridge_client,
|
||||
common/rate_limit/setting,
|
||||
],
|
||||
# Chat 2 imports
|
||||
../chat2/chat2,
|
||||
@ -125,25 +126,20 @@ proc toMatterbridge(
|
||||
|
||||
assert chat2Msg.isOk
|
||||
|
||||
let postRes = cmb.mbClient.postMessage(
|
||||
text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick
|
||||
)
|
||||
|
||||
if postRes.isErr() or (postRes[] == false):
|
||||
if not cmb.mbClient
|
||||
.postMessage(text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick)
|
||||
.containsValue(true):
|
||||
chat2_mb_dropped.inc(labelValues = ["duplicate"])
|
||||
error "Matterbridge host unreachable. Dropping message."
|
||||
|
||||
proc pollMatterbridge(cmb: Chat2MatterBridge, handler: MbMessageHandler) {.async.} =
|
||||
while cmb.running:
|
||||
let getRes = cmb.mbClient.getMessages()
|
||||
|
||||
if getRes.isOk():
|
||||
for jsonNode in getRes[]:
|
||||
await handler(jsonNode)
|
||||
else:
|
||||
let msg = cmb.mbClient.getMessages().valueOr:
|
||||
error "Matterbridge host unreachable. Sleeping before retrying."
|
||||
await sleepAsync(chronos.seconds(10))
|
||||
|
||||
continue
|
||||
for jsonNode in msg:
|
||||
await handler(jsonNode)
|
||||
await sleepAsync(cmb.pollPeriod)
|
||||
|
||||
##############
|
||||
@ -168,9 +164,7 @@ proc new*(
|
||||
let mbClient = MatterbridgeClient.new(mbHostUri, mbGateway)
|
||||
|
||||
# Let's verify the Matterbridge configuration before continuing
|
||||
let clientHealth = mbClient.isHealthy()
|
||||
|
||||
if clientHealth.isOk() and clientHealth[]:
|
||||
if mbClient.isHealthy().valueOr(false):
|
||||
info "Reached Matterbridge host", host = mbClient.host
|
||||
else:
|
||||
raise newException(ValueError, "Matterbridge client not reachable/healthy")
|
||||
@ -200,7 +194,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
|
||||
|
||||
cmb.running = true
|
||||
|
||||
debug "Start polling Matterbridge"
|
||||
info "Start polling Matterbridge"
|
||||
|
||||
# Start Matterbridge polling (@TODO: use streaming interface)
|
||||
proc mbHandler(jsonNode: JsonNode) {.async.} =
|
||||
@ -210,7 +204,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
|
||||
asyncSpawn cmb.pollMatterbridge(mbHandler)
|
||||
|
||||
# Start Waku v2 node
|
||||
debug "Start listening on Waku v2"
|
||||
info "Start listening on Waku v2"
|
||||
await cmb.nodev2.start()
|
||||
|
||||
# Always mount relay for bridge
|
||||
@ -246,7 +240,7 @@ proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} =
|
||||
{.pop.}
|
||||
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
|
||||
when isMainModule:
|
||||
import waku/common/utils/nat, waku/waku_api/message_cache
|
||||
import waku/common/utils/nat, waku/rest_api/message_cache
|
||||
|
||||
let
|
||||
rng = newRng()
|
||||
@ -255,25 +249,21 @@ when isMainModule:
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
|
||||
let natRes = setupNat(
|
||||
let (nodev2ExtIp, nodev2ExtPort, _) = setupNat(
|
||||
conf.nat,
|
||||
clientId,
|
||||
Port(uint16(conf.libp2pTcpPort) + conf.portsShift),
|
||||
Port(uint16(conf.udpPort) + conf.portsShift),
|
||||
)
|
||||
if natRes.isErr():
|
||||
error "Error in setupNat", error = natRes.error
|
||||
).valueOr:
|
||||
raise newException(ValueError, "setupNat error " & error)
|
||||
|
||||
# Load address configuration
|
||||
let
|
||||
(nodev2ExtIp, nodev2ExtPort, _) = natRes.get()
|
||||
## The following heuristic assumes that, in absence of manual
|
||||
## config, the external port is the same as the bind port.
|
||||
extPort =
|
||||
if nodev2ExtIp.isSome() and nodev2ExtPort.isNone():
|
||||
some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift))
|
||||
else:
|
||||
nodev2ExtPort
|
||||
## The following heuristic assumes that, in absence of manual
|
||||
## config, the external port is the same as the bind port.
|
||||
let extPort =
|
||||
if nodev2ExtIp.isSome() and nodev2ExtPort.isNone():
|
||||
some(Port(uint16(conf.libp2pTcpPort) + conf.portsShift))
|
||||
else:
|
||||
nodev2ExtPort
|
||||
|
||||
let bridge = Chat2Matterbridge.new(
|
||||
mbHostUri = "http://" & $initTAddress(conf.mbHostAddress, Port(conf.mbHostPort)),
|
||||
|
||||
@ -91,7 +91,7 @@ type Chat2MatterbridgeConf* = object
|
||||
name: "filternode"
|
||||
.}: string
|
||||
|
||||
# Matterbridge options
|
||||
# Matterbridge options
|
||||
mbHostAddress* {.
|
||||
desc: "Listening address of the Matterbridge host",
|
||||
defaultValue: parseIpAddress("127.0.0.1"),
|
||||
@ -126,11 +126,9 @@ proc completeCmdArg*(T: type keys.KeyPair, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
|
||||
let key = SkPrivateKey.init(p)
|
||||
if key.isOk():
|
||||
crypto.PrivateKey(scheme: Secp256k1, skkey: key.get())
|
||||
else:
|
||||
let key = SkPrivateKey.init(p).valueOr:
|
||||
raise newException(ValueError, "Invalid private key")
|
||||
return crypto.PrivateKey(scheme: Secp256k1, skkey: key)
|
||||
|
||||
proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
663
apps/chat2mix/chat2mix.nim
Normal file
663
apps/chat2mix/chat2mix.nim
Normal file
@ -0,0 +1,663 @@
|
||||
## chat2 is an example of usage of Waku v2. For suggested usage options, please
|
||||
## see dingpu tutorial in docs folder.
|
||||
|
||||
when not (compileOption("threads")):
|
||||
{.fatal: "Please, compile this program with the --threads:on option!".}
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import std/[strformat, strutils, times, options, random, sequtils]
|
||||
import
|
||||
confutils,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/keys,
|
||||
bearssl,
|
||||
results,
|
||||
stew/[byteutils],
|
||||
metrics,
|
||||
metrics/chronos_httpserver
|
||||
import
|
||||
libp2p/[
|
||||
switch, # manage transports, a single entry point for dialing and listening
|
||||
crypto/crypto, # cryptographic functions
|
||||
stream/connection, # create and close stream read / write connections
|
||||
multiaddress,
|
||||
# encode different addressing schemes. For example, /ip4/7.7.7.7/tcp/6543 means it is using IPv4 protocol and TCP
|
||||
peerinfo,
|
||||
# manage the information of a peer, such as peer ID and public / private key
|
||||
peerid, # Implement how peers interact
|
||||
protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs
|
||||
nameresolving/dnsresolver,
|
||||
protocols/mix/curve25519,
|
||||
] # define DNS resolution
|
||||
import
|
||||
waku/[
|
||||
waku_core,
|
||||
waku_lightpush/common,
|
||||
waku_lightpush/rpc,
|
||||
waku_enr,
|
||||
discovery/waku_dnsdisc,
|
||||
waku_node,
|
||||
node/waku_metrics,
|
||||
node/peer_manager,
|
||||
factory/builder,
|
||||
common/utils/nat,
|
||||
waku_store/common,
|
||||
waku_filter_v2/client,
|
||||
common/logging,
|
||||
],
|
||||
./config_chat2mix
|
||||
|
||||
import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub
|
||||
import ../../waku/waku_rln_relay
|
||||
|
||||
logScope:
|
||||
topics = "chat2 mix"
|
||||
|
||||
const Help =
|
||||
"""
|
||||
Commands: /[?|help|connect|nick|exit]
|
||||
help: Prints this help
|
||||
connect: dials a remote peer
|
||||
nick: change nickname for current chat session
|
||||
exit: exits chat session
|
||||
"""
|
||||
|
||||
# XXX Connected is a bit annoying, because incoming connections don't trigger state change
|
||||
# Could poll connection pool or something here, I suppose
|
||||
# TODO Ensure connected turns true on incoming connections, or get rid of it
|
||||
type Chat = ref object
|
||||
node: WakuNode # waku node for publishing, subscribing, etc
|
||||
transp: StreamTransport # transport streams between read & write file descriptor
|
||||
subscribed: bool # indicates if a node is subscribed or not to a topic
|
||||
connected: bool # if the node is connected to another peer
|
||||
started: bool # if the node has started
|
||||
nick: string # nickname for this chat session
|
||||
prompt: bool # chat prompt is showing
|
||||
contentTopic: string # default content topic for chat messages
|
||||
conf: Chat2Conf # configuration for chat2
|
||||
|
||||
type
|
||||
PrivateKey* = crypto.PrivateKey
|
||||
Topic* = waku_core.PubsubTopic
|
||||
|
||||
const MinMixNodePoolSize = 4
|
||||
|
||||
#####################
|
||||
## chat2 protobufs ##
|
||||
#####################
|
||||
|
||||
type
|
||||
SelectResult*[T] = Result[T, string]
|
||||
|
||||
Chat2Message* = object
|
||||
timestamp*: int64
|
||||
nick*: string
|
||||
payload*: seq[byte]
|
||||
|
||||
proc getPubsubTopic*(
|
||||
conf: Chat2Conf, node: WakuNode, contentTopic: string
|
||||
): PubsubTopic =
|
||||
let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
|
||||
echo "Could not parse content topic: " & error
|
||||
return "" #TODO: fix this.
|
||||
return $RelayShard(clusterId: conf.clusterId, shardId: shard.shardId)
|
||||
|
||||
proc init*(T: type Chat2Message, buffer: seq[byte]): ProtoResult[T] =
|
||||
var msg = Chat2Message()
|
||||
let pb = initProtoBuffer(buffer)
|
||||
|
||||
var timestamp: uint64
|
||||
discard ?pb.getField(1, timestamp)
|
||||
msg.timestamp = int64(timestamp)
|
||||
|
||||
discard ?pb.getField(2, msg.nick)
|
||||
discard ?pb.getField(3, msg.payload)
|
||||
|
||||
ok(msg)
|
||||
|
||||
proc encode*(message: Chat2Message): ProtoBuffer =
|
||||
var serialised = initProtoBuffer()
|
||||
|
||||
serialised.write(1, uint64(message.timestamp))
|
||||
serialised.write(2, message.nick)
|
||||
serialised.write(3, message.payload)
|
||||
|
||||
return serialised
|
||||
|
||||
proc `$`*(message: Chat2Message): string =
|
||||
# Get message date and timestamp in local time
|
||||
let time = message.timestamp.fromUnix().local().format("'<'MMM' 'dd,' 'HH:mm'>'")
|
||||
|
||||
return time & " " & message.nick & ": " & string.fromBytes(message.payload)
|
||||
|
||||
#####################
|
||||
|
||||
proc connectToNodes(c: Chat, nodes: seq[string]) {.async.} =
|
||||
echo "Connecting to nodes"
|
||||
await c.node.connectToNodes(nodes)
|
||||
c.connected = true
|
||||
|
||||
proc showChatPrompt(c: Chat) =
|
||||
if not c.prompt:
|
||||
try:
|
||||
stdout.write(">> ")
|
||||
stdout.flushFile()
|
||||
c.prompt = true
|
||||
except IOError:
|
||||
discard
|
||||
|
||||
proc getChatLine(payload: seq[byte]): string =
|
||||
# No payload encoding/encryption from Waku
|
||||
let pb = Chat2Message.init(payload).valueOr:
|
||||
return string.fromBytes(payload)
|
||||
return $pb
|
||||
|
||||
proc printReceivedMessage(c: Chat, msg: WakuMessage) =
|
||||
let chatLine = getChatLine(msg.payload)
|
||||
try:
|
||||
echo &"{chatLine}"
|
||||
except ValueError:
|
||||
# Formatting fail. Print chat line in any case.
|
||||
echo chatLine
|
||||
|
||||
c.prompt = false
|
||||
showChatPrompt(c)
|
||||
trace "Printing message", chatLine, contentTopic = msg.contentTopic
|
||||
|
||||
proc readNick(transp: StreamTransport): Future[string] {.async.} =
|
||||
# Chat prompt
|
||||
stdout.write("Choose a nickname >> ")
|
||||
stdout.flushFile()
|
||||
return await transp.readLine()
|
||||
|
||||
proc startMetricsServer(
|
||||
serverIp: IpAddress, serverPort: Port
|
||||
): Result[MetricsHttpServerRef, string] =
|
||||
info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort
|
||||
|
||||
let server = MetricsHttpServerRef.new($serverIp, serverPort).valueOr:
|
||||
return err("metrics HTTP server start failed: " & $error)
|
||||
|
||||
try:
|
||||
waitFor server.start()
|
||||
except CatchableError:
|
||||
return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())
|
||||
|
||||
info "Metrics HTTP server started", serverIp = $serverIp, serverPort = $serverPort
|
||||
ok(server)
|
||||
|
||||
proc publish(c: Chat, line: string) {.async.} =
|
||||
# First create a Chat2Message protobuf with this line of text
|
||||
let time = getTime().toUnix()
|
||||
let chat2pb =
|
||||
Chat2Message(timestamp: time, nick: c.nick, payload: line.toBytes()).encode()
|
||||
|
||||
## @TODO: error handling on failure
|
||||
proc handler(response: LightPushResponse) {.gcsafe, closure.} =
|
||||
trace "lightpush response received", response = response
|
||||
|
||||
var message = WakuMessage(
|
||||
payload: chat2pb.buffer,
|
||||
contentTopic: c.contentTopic,
|
||||
version: 0,
|
||||
timestamp: getNanosecondTime(time),
|
||||
)
|
||||
|
||||
try:
|
||||
if not c.node.wakuLightpushClient.isNil():
|
||||
# Attempt lightpush with mix
|
||||
|
||||
(
|
||||
waitFor c.node.lightpushPublish(
|
||||
some(c.conf.getPubsubTopic(c.node, c.contentTopic)),
|
||||
message,
|
||||
none(RemotePeerInfo),
|
||||
true,
|
||||
)
|
||||
).isOkOr:
|
||||
error "failed to publish lightpush message", error = error
|
||||
else:
|
||||
error "failed to publish message as lightpush client is not initialized"
|
||||
except CatchableError:
|
||||
error "caught error publishing message: ", error = getCurrentExceptionMsg()
|
||||
|
||||
# TODO This should read or be subscribe handler subscribe
|
||||
proc readAndPrint(c: Chat) {.async.} =
|
||||
while true:
|
||||
# while p.connected:
|
||||
# # TODO: echo &"{p.id} -> "
|
||||
#
|
||||
# echo cast[string](await p.conn.readLp(1024))
|
||||
#echo "readAndPrint subscribe NYI"
|
||||
await sleepAsync(100)
|
||||
|
||||
# TODO Implement
|
||||
proc writeAndPrint(c: Chat) {.async.} =
|
||||
while true:
|
||||
# Connect state not updated on incoming WakuRelay connections
|
||||
# if not c.connected:
|
||||
# echo "type an address or wait for a connection:"
|
||||
# echo "type /[help|?] for help"
|
||||
|
||||
# Chat prompt
|
||||
showChatPrompt(c)
|
||||
|
||||
let line = await c.transp.readLine()
|
||||
if line.startsWith("/help") or line.startsWith("/?") or not c.started:
|
||||
echo Help
|
||||
continue
|
||||
|
||||
# if line.startsWith("/disconnect"):
|
||||
# echo "Ending current session"
|
||||
# if p.connected and p.conn.closed.not:
|
||||
# await p.conn.close()
|
||||
# p.connected = false
|
||||
elif line.startsWith("/connect"):
|
||||
# TODO Should be able to connect to multiple peers for Waku chat
|
||||
if c.connected:
|
||||
echo "already connected to at least one peer"
|
||||
continue
|
||||
|
||||
echo "enter address of remote peer"
|
||||
let address = await c.transp.readLine()
|
||||
if address.len > 0:
|
||||
await c.connectToNodes(@[address])
|
||||
elif line.startsWith("/nick"):
|
||||
# Set a new nickname
|
||||
c.nick = await readNick(c.transp)
|
||||
echo "You are now known as " & c.nick
|
||||
elif line.startsWith("/exit"):
|
||||
echo "quitting..."
|
||||
|
||||
try:
|
||||
await c.node.stop()
|
||||
except:
|
||||
echo "exception happened when stopping: " & getCurrentExceptionMsg()
|
||||
|
||||
quit(QuitSuccess)
|
||||
else:
|
||||
# XXX connected state problematic
|
||||
if c.started:
|
||||
echo "publishing message: " & line
|
||||
await c.publish(line)
|
||||
# TODO Connect to peer logic?
|
||||
else:
|
||||
try:
|
||||
if line.startsWith("/") and "p2p" in line:
|
||||
await c.connectToNodes(@[line])
|
||||
except:
|
||||
echo &"unable to dial remote peer {line}"
|
||||
echo getCurrentExceptionMsg()
|
||||
|
||||
proc readWriteLoop(c: Chat) {.async.} =
|
||||
asyncSpawn c.writeAndPrint() # execute the async function but does not block
|
||||
asyncSpawn c.readAndPrint()
|
||||
|
||||
proc readInput(wfd: AsyncFD) {.thread, raises: [Defect, CatchableError].} =
|
||||
## This procedure performs reading from `stdin` and sends data over
|
||||
## pipe to main thread.
|
||||
let transp = fromPipe(wfd)
|
||||
|
||||
while true:
|
||||
let line = stdin.readLine()
|
||||
discard waitFor transp.write(line & "\r\n")
|
||||
|
||||
var alreadyUsedServicePeers {.threadvar.}: seq[RemotePeerInfo]
|
||||
|
||||
proc selectRandomServicePeer*(
|
||||
pm: PeerManager, actualPeer: Option[RemotePeerInfo], codec: string
|
||||
): Result[RemotePeerInfo, void] =
|
||||
if actualPeer.isSome():
|
||||
alreadyUsedServicePeers.add(actualPeer.get())
|
||||
|
||||
let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt(
|
||||
it notin alreadyUsedServicePeers
|
||||
)
|
||||
if supportivePeers.len == 0:
|
||||
return err()
|
||||
|
||||
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
|
||||
return ok(supportivePeers[rndPeerIndex])
|
||||
|
||||
proc maintainSubscription(
|
||||
wakuNode: WakuNode,
|
||||
filterPubsubTopic: PubsubTopic,
|
||||
filterContentTopic: ContentTopic,
|
||||
filterPeer: RemotePeerInfo,
|
||||
preventPeerSwitch: bool,
|
||||
) {.async.} =
|
||||
var actualFilterPeer = filterPeer
|
||||
const maxFailedSubscribes = 3
|
||||
const maxFailedServiceNodeSwitches = 10
|
||||
var noFailedSubscribes = 0
|
||||
var noFailedServiceNodeSwitches = 0
|
||||
# Use chronos.Duration explicitly to avoid mismatch with std/times.Duration
|
||||
let RetryWait = chronos.seconds(2) # Quick retry interval
|
||||
let SubscriptionMaintenance = chronos.seconds(30) # Subscription maintenance interval
|
||||
while true:
|
||||
info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer)
|
||||
# First use filter-ping to check if we have an active subscription
|
||||
let pingErr = (await wakuNode.wakuFilterClient.ping(actualFilterPeer)).errorOr:
|
||||
await sleepAsync(SubscriptionMaintenance)
|
||||
info "subscription is live."
|
||||
continue
|
||||
|
||||
# No subscription found. Let's subscribe.
|
||||
error "ping failed.", error = pingErr
|
||||
trace "no subscription found. Sending subscribe request"
|
||||
|
||||
let subscribeErr = (
|
||||
await wakuNode.filterSubscribe(
|
||||
some(filterPubsubTopic), filterContentTopic, actualFilterPeer
|
||||
)
|
||||
).errorOr:
|
||||
await sleepAsync(SubscriptionMaintenance)
|
||||
if noFailedSubscribes > 0:
|
||||
noFailedSubscribes -= 1
|
||||
notice "subscribe request successful."
|
||||
continue
|
||||
|
||||
noFailedSubscribes += 1
|
||||
error "Subscribe request failed.",
|
||||
error = subscribeErr, peer = actualFilterPeer, failCount = noFailedSubscribes
|
||||
|
||||
# TODO: disconnet from failed actualFilterPeer
|
||||
# asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
|
||||
# wakunode.peerManager.peerStore.delete(actualFilterPeer)
|
||||
|
||||
if noFailedSubscribes < maxFailedSubscribes:
|
||||
await sleepAsync(RetryWait) # Wait a bit before retrying
|
||||
elif not preventPeerSwitch:
|
||||
# try again with new peer without delay
|
||||
let actualFilterPeer = selectRandomServicePeer(
|
||||
wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
|
||||
).valueOr:
|
||||
error "Failed to find new service peer. Exiting."
|
||||
noFailedServiceNodeSwitches += 1
|
||||
break
|
||||
|
||||
info "Found new peer for codec",
|
||||
codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
|
||||
|
||||
noFailedSubscribes = 0
|
||||
else:
|
||||
await sleepAsync(SubscriptionMaintenance)
|
||||
|
||||
{.pop.}
|
||||
# @TODO confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
|
||||
proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
let
|
||||
transp = fromPipe(rfd)
|
||||
conf = Chat2Conf.load()
|
||||
nodekey =
|
||||
if conf.nodekey.isSome():
|
||||
conf.nodekey.get()
|
||||
else:
|
||||
PrivateKey.random(Secp256k1, rng[]).tryGet()
|
||||
|
||||
# set log level
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
|
||||
let (extIp, extTcpPort, extUdpPort) = setupNat(
|
||||
conf.nat,
|
||||
clientId,
|
||||
Port(uint16(conf.tcpPort) + conf.portsShift),
|
||||
Port(uint16(conf.udpPort) + conf.portsShift),
|
||||
).valueOr:
|
||||
raise newException(ValueError, "setupNat error " & error)
|
||||
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
enrBuilder.withWakuRelaySharding(
|
||||
RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
|
||||
).isOkOr:
|
||||
error "failed to add sharded topics to ENR", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let node = block:
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
builder.withRecord(record)
|
||||
|
||||
builder
|
||||
.withNetworkConfigurationDetails(
|
||||
conf.listenAddress,
|
||||
Port(uint16(conf.tcpPort) + conf.portsShift),
|
||||
extIp,
|
||||
extTcpPort,
|
||||
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
|
||||
wsEnabled = conf.websocketSupport,
|
||||
wssEnabled = conf.websocketSecureSupport,
|
||||
)
|
||||
.tryGet()
|
||||
builder.build().tryGet()
|
||||
|
||||
node.mountAutoSharding(conf.clusterId, conf.numShardsInNetwork).isOkOr:
|
||||
error "failed to mount waku sharding: ", error = error
|
||||
quit(QuitFailure)
|
||||
node.mountMetadata(conf.clusterId, conf.shards).isOkOr:
|
||||
error "failed to mount waku metadata protocol: ", err = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let (mixPrivKey, mixPubKey) = generateKeyPair().valueOr:
|
||||
error "failed to generate mix key pair", error = error
|
||||
return
|
||||
|
||||
(await node.mountMix(conf.clusterId, mixPrivKey, conf.mixnodes)).isOkOr:
|
||||
error "failed to mount waku mix protocol: ", error = $error
|
||||
quit(QuitFailure)
|
||||
await node.mountRendezvousClient(conf.clusterId)
|
||||
|
||||
await node.start()
|
||||
|
||||
node.peerManager.start()
|
||||
|
||||
await node.mountLibp2pPing()
|
||||
await node.mountPeerExchangeClient()
|
||||
let pubsubTopic = conf.getPubsubTopic(node, conf.contentTopic)
|
||||
echo "pubsub topic is: " & pubsubTopic
|
||||
let nick = await readNick(transp)
|
||||
echo "Welcome, " & nick & "!"
|
||||
|
||||
var chat = Chat(
|
||||
node: node,
|
||||
transp: transp,
|
||||
subscribed: true,
|
||||
connected: false,
|
||||
started: true,
|
||||
nick: nick,
|
||||
prompt: false,
|
||||
contentTopic: conf.contentTopic,
|
||||
conf: conf,
|
||||
)
|
||||
|
||||
var dnsDiscoveryUrl = none(string)
|
||||
|
||||
if conf.fleet != Fleet.none:
|
||||
# Use DNS discovery to connect to selected fleet
|
||||
echo "Connecting to " & $conf.fleet & " fleet using DNS discovery..."
|
||||
|
||||
if conf.fleet == Fleet.test:
|
||||
dnsDiscoveryUrl = some(
|
||||
"enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im"
|
||||
)
|
||||
else:
|
||||
# Connect to sandbox by default
|
||||
dnsDiscoveryUrl = some(
|
||||
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
|
||||
)
|
||||
elif conf.dnsDiscoveryUrl != "":
|
||||
# No pre-selected fleet. Discover nodes via DNS using user config
|
||||
info "Discovering nodes using Waku DNS discovery", url = conf.dnsDiscoveryUrl
|
||||
dnsDiscoveryUrl = some(conf.dnsDiscoveryUrl)
|
||||
|
||||
var discoveredNodes: seq[RemotePeerInfo]
|
||||
|
||||
if dnsDiscoveryUrl.isSome:
|
||||
var nameServers: seq[TransportAddress]
|
||||
for ip in conf.dnsDiscoveryNameServers:
|
||||
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
|
||||
|
||||
let dnsResolver = DnsResolver.new(nameServers)
|
||||
|
||||
proc resolver(domain: string): Future[string] {.async, gcsafe.} =
|
||||
trace "resolving", domain = domain
|
||||
let resolved = await dnsResolver.resolveTxt(domain)
|
||||
return resolved[0] # Use only first answer
|
||||
|
||||
let wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl.get(), resolver)
|
||||
if wakuDnsDiscovery.isOk:
|
||||
let discoveredPeers = await wakuDnsDiscovery.get().findPeers()
|
||||
if discoveredPeers.isOk:
|
||||
info "Connecting to discovered peers"
|
||||
discoveredNodes = discoveredPeers.get()
|
||||
echo "Discovered and connecting to " & $discoveredNodes
|
||||
waitFor chat.node.connectToNodes(discoveredNodes)
|
||||
else:
|
||||
warn "Failed to find peers via DNS discovery", error = discoveredPeers.error
|
||||
else:
|
||||
warn "Failed to init Waku DNS discovery", error = wakuDnsDiscovery.error
|
||||
|
||||
let peerInfo = node.switch.peerInfo
|
||||
let listenStr = $peerInfo.addrs[0] & "/p2p/" & $peerInfo.peerId
|
||||
echo &"Listening on\n {listenStr}"
|
||||
|
||||
if (conf.storenode != "") or (conf.store == true):
|
||||
await node.mountStore()
|
||||
|
||||
var storenode: Option[RemotePeerInfo]
|
||||
|
||||
if conf.storenode != "":
|
||||
let peerInfo = parsePeerInfo(conf.storenode)
|
||||
if peerInfo.isOk():
|
||||
storenode = some(peerInfo.value)
|
||||
else:
|
||||
error "Incorrect conf.storenode", error = peerInfo.error
|
||||
elif discoveredNodes.len > 0:
|
||||
echo "Store enabled, but no store nodes configured. Choosing one at random from discovered peers"
|
||||
storenode = some(discoveredNodes[rand(0 .. len(discoveredNodes) - 1)])
|
||||
|
||||
if storenode.isSome():
|
||||
# We have a viable storenode. Let's query it for historical messages.
|
||||
echo "Connecting to storenode: " & $(storenode.get())
|
||||
|
||||
node.mountStoreClient()
|
||||
node.peerManager.addServicePeer(storenode.get(), WakuStoreCodec)
|
||||
|
||||
proc storeHandler(response: StoreQueryResponse) {.gcsafe.} =
|
||||
for msg in response.messages:
|
||||
let payload =
|
||||
if msg.message.isSome():
|
||||
msg.message.get().payload
|
||||
else:
|
||||
newSeq[byte](0)
|
||||
|
||||
let chatLine = getChatLine(payload)
|
||||
echo &"{chatLine}"
|
||||
info "Hit store handler"
|
||||
|
||||
let queryRes = await node.query(
|
||||
StoreQueryRequest(contentTopics: @[chat.contentTopic]), storenode.get()
|
||||
)
|
||||
if queryRes.isOk():
|
||||
storeHandler(queryRes.value)
|
||||
|
||||
if conf.edgemode: #Mount light protocol clients
|
||||
node.mountLightPushClient()
|
||||
await node.mountFilterClient()
|
||||
let filterHandler = proc(
|
||||
pubsubTopic: PubsubTopic, msg: WakuMessage
|
||||
): Future[void] {.async, closure.} =
|
||||
trace "Hit filter handler", contentTopic = msg.contentTopic
|
||||
chat.printReceivedMessage(msg)
|
||||
|
||||
node.wakuFilterClient.registerPushHandler(filterHandler)
|
||||
var servicePeerInfo: RemotePeerInfo
|
||||
if conf.serviceNode != "":
|
||||
servicePeerInfo = parsePeerInfo(conf.serviceNode).valueOr:
|
||||
error "Couldn't parse conf.serviceNode", error = error
|
||||
RemotePeerInfo()
|
||||
if servicePeerInfo == nil or $servicePeerInfo.peerId == "":
|
||||
# Assuming that service node supports all services
|
||||
servicePeerInfo = selectRandomServicePeer(
|
||||
node.peerManager, none(RemotePeerInfo), WakuLightpushCodec
|
||||
).valueOr:
|
||||
error "Couldn't find any service peer"
|
||||
quit(QuitFailure)
|
||||
|
||||
node.peerManager.addServicePeer(servicePeerInfo, WakuLightpushCodec)
|
||||
node.peerManager.addServicePeer(servicePeerInfo, WakuPeerExchangeCodec)
|
||||
#node.peerManager.addServicePeer(servicePeerInfo, WakuRendezVousCodec)
|
||||
|
||||
# Start maintaining subscription
|
||||
asyncSpawn maintainSubscription(
|
||||
node, pubsubTopic, conf.contentTopic, servicePeerInfo, false
|
||||
)
|
||||
echo "waiting for mix nodes to be discovered..."
|
||||
while true:
|
||||
if node.getMixNodePoolSize() >= MinMixNodePoolSize:
|
||||
break
|
||||
discard await node.fetchPeerExchangePeers()
|
||||
await sleepAsync(1000)
|
||||
|
||||
while node.getMixNodePoolSize() < MinMixNodePoolSize:
|
||||
info "waiting for mix nodes to be discovered",
|
||||
currentpoolSize = node.getMixNodePoolSize()
|
||||
await sleepAsync(1000)
|
||||
notice "ready to publish with mix node pool size ",
|
||||
currentpoolSize = node.getMixNodePoolSize()
|
||||
echo "ready to publish messages now"
|
||||
|
||||
# Once min mixnodes are discovered loop as per default setting
|
||||
node.startPeerExchangeLoop()
|
||||
|
||||
if conf.metricsLogging:
|
||||
startMetricsLog()
|
||||
|
||||
if conf.metricsServer:
|
||||
let metricsServer = startMetricsServer(
|
||||
conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift)
|
||||
)
|
||||
|
||||
await chat.readWriteLoop()
|
||||
|
||||
runForever()
|
||||
|
||||
proc main(rng: ref HmacDrbgContext) {.async.} =
|
||||
let (rfd, wfd) = createAsyncPipe()
|
||||
if rfd == asyncInvalidPipe or wfd == asyncInvalidPipe:
|
||||
raise newException(ValueError, "Could not initialize pipe!")
|
||||
|
||||
var thread: Thread[AsyncFD]
|
||||
thread.createThread(readInput, wfd)
|
||||
try:
|
||||
await processInput(rfd, rng)
|
||||
# Handle only ConfigurationError for now
|
||||
# TODO: Throw other errors from the mounting procedure
|
||||
except ConfigurationError as e:
|
||||
raise e
|
||||
|
||||
when isMainModule: # isMainModule = true when the module is compiled as the main file
|
||||
let rng = crypto.newRng()
|
||||
try:
|
||||
waitFor(main(rng))
|
||||
except CatchableError as e:
|
||||
raise e
|
||||
|
||||
## Dump of things that can be improved:
|
||||
##
|
||||
## - Incoming dialed peer does not change connected state (not relying on it for now)
|
||||
## - Unclear if staticnode argument works (can enter manually)
|
||||
## - Don't trigger self / double publish own messages
|
||||
## - Test/default to cluster node connection (diff protocol version)
|
||||
## - Redirect logs to separate file
|
||||
## - Expose basic publish/subscribe etc commands with /syntax
|
||||
## - Show part of peerid to know who sent message
|
||||
## - Deal with protobuf messages (e.g. other chat protocol, or encrypted)
|
||||
315
apps/chat2mix/config_chat2mix.nim
Normal file
315
apps/chat2mix/config_chat2mix.nim
Normal file
@ -0,0 +1,315 @@
|
||||
import chronicles, chronos, std/strutils, regex
|
||||
|
||||
import
|
||||
eth/keys,
|
||||
libp2p/crypto/crypto,
|
||||
libp2p/crypto/secp,
|
||||
libp2p/crypto/curve25519,
|
||||
libp2p/multiaddress,
|
||||
libp2p/multicodec,
|
||||
nimcrypto/utils,
|
||||
confutils,
|
||||
confutils/defs,
|
||||
confutils/std/net
|
||||
|
||||
import waku/waku_core, waku/waku_mix
|
||||
|
||||
type
|
||||
Fleet* = enum
|
||||
none
|
||||
sandbox
|
||||
test
|
||||
|
||||
EthRpcUrl* = distinct string
|
||||
|
||||
Chat2Conf* = object ## General node config
|
||||
edgemode* {.
|
||||
defaultValue: true, desc: "Run the app in edge mode", name: "edge-mode"
|
||||
.}: bool
|
||||
|
||||
logLevel* {.
|
||||
desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level"
|
||||
.}: LogLevel
|
||||
|
||||
nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}:
|
||||
Option[crypto.PrivateKey]
|
||||
|
||||
listenAddress* {.
|
||||
defaultValue: defaultListenAddress(config),
|
||||
desc: "Listening address for the LibP2P traffic.",
|
||||
name: "listen-address"
|
||||
.}: IpAddress
|
||||
|
||||
tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}:
|
||||
Port
|
||||
|
||||
udpPort* {.desc: "UDP listening port.", defaultValue: 60000, name: "udp-port".}:
|
||||
Port
|
||||
|
||||
portsShift* {.
|
||||
desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift"
|
||||
.}: uint16
|
||||
|
||||
nat* {.
|
||||
desc:
|
||||
"Specify method to use for determining public address. " &
|
||||
"Must be one of: any, none, upnp, pmp, extip:<IP>.",
|
||||
defaultValue: "any"
|
||||
.}: string
|
||||
|
||||
## Persistence config
|
||||
dbPath* {.
|
||||
desc: "The database path for peristent storage", defaultValue: "", name: "db-path"
|
||||
.}: string
|
||||
|
||||
persistPeers* {.
|
||||
desc: "Enable peer persistence: true|false",
|
||||
defaultValue: false,
|
||||
name: "persist-peers"
|
||||
.}: bool
|
||||
|
||||
persistMessages* {.
|
||||
desc: "Enable message persistence: true|false",
|
||||
defaultValue: false,
|
||||
name: "persist-messages"
|
||||
.}: bool
|
||||
|
||||
## Relay config
|
||||
relay* {.
|
||||
desc: "Enable relay protocol: true|false", defaultValue: true, name: "relay"
|
||||
.}: bool
|
||||
|
||||
staticnodes* {.
|
||||
desc: "Peer multiaddr to directly connect with. Argument may be repeated.",
|
||||
name: "staticnode",
|
||||
defaultValue: @[]
|
||||
.}: seq[string]
|
||||
|
||||
mixnodes* {.
|
||||
desc:
|
||||
"Multiaddress and mix-key of mix node to be statically specified in format multiaddr:mixPubKey. Argument may be repeated.",
|
||||
name: "mixnode"
|
||||
.}: seq[MixNodePubInfo]
|
||||
|
||||
keepAlive* {.
|
||||
desc: "Enable keep-alive for idle connections: true|false",
|
||||
defaultValue: false,
|
||||
name: "keep-alive"
|
||||
.}: bool
|
||||
|
||||
clusterId* {.
|
||||
desc:
|
||||
"Cluster id that the node is running in. Node in a different cluster id is disconnected.",
|
||||
defaultValue: 1,
|
||||
name: "cluster-id"
|
||||
.}: uint16
|
||||
|
||||
numShardsInNetwork* {.
|
||||
desc: "Number of shards in the network",
|
||||
defaultValue: 8,
|
||||
name: "num-shards-in-network"
|
||||
.}: uint32
|
||||
|
||||
shards* {.
|
||||
desc:
|
||||
"Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
|
||||
defaultValue:
|
||||
@[
|
||||
uint16(0),
|
||||
uint16(1),
|
||||
uint16(2),
|
||||
uint16(3),
|
||||
uint16(4),
|
||||
uint16(5),
|
||||
uint16(6),
|
||||
uint16(7),
|
||||
],
|
||||
name: "shard"
|
||||
.}: seq[uint16]
|
||||
|
||||
## Store config
|
||||
store* {.
|
||||
desc: "Enable store protocol: true|false", defaultValue: false, name: "store"
|
||||
.}: bool
|
||||
|
||||
storenode* {.
|
||||
desc: "Peer multiaddr to query for storage.", defaultValue: "", name: "storenode"
|
||||
.}: string
|
||||
|
||||
## Filter config
|
||||
filter* {.
|
||||
desc: "Enable filter protocol: true|false", defaultValue: false, name: "filter"
|
||||
.}: bool
|
||||
|
||||
## Lightpush config
|
||||
lightpush* {.
|
||||
desc: "Enable lightpush protocol: true|false",
|
||||
defaultValue: false,
|
||||
name: "lightpush"
|
||||
.}: bool
|
||||
|
||||
servicenode* {.
|
||||
desc: "Peer multiaddr to request lightpush and filter services",
|
||||
defaultValue: "",
|
||||
name: "servicenode"
|
||||
.}: string
|
||||
|
||||
## Metrics config
|
||||
metricsServer* {.
|
||||
desc: "Enable the metrics server: true|false",
|
||||
defaultValue: false,
|
||||
name: "metrics-server"
|
||||
.}: bool
|
||||
|
||||
metricsServerAddress* {.
|
||||
desc: "Listening address of the metrics server.",
|
||||
defaultValue: parseIpAddress("127.0.0.1"),
|
||||
name: "metrics-server-address"
|
||||
.}: IpAddress
|
||||
|
||||
metricsServerPort* {.
|
||||
desc: "Listening HTTP port of the metrics server.",
|
||||
defaultValue: 8008,
|
||||
name: "metrics-server-port"
|
||||
.}: uint16
|
||||
|
||||
metricsLogging* {.
|
||||
desc: "Enable metrics logging: true|false",
|
||||
defaultValue: true,
|
||||
name: "metrics-logging"
|
||||
.}: bool
|
||||
|
||||
## DNS discovery config
|
||||
dnsDiscovery* {.
|
||||
desc:
|
||||
"Deprecated, please set dns-discovery-url instead. Enable discovering nodes via DNS",
|
||||
defaultValue: false,
|
||||
name: "dns-discovery"
|
||||
.}: bool
|
||||
|
||||
dnsDiscoveryUrl* {.
|
||||
desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
|
||||
defaultValue: "",
|
||||
name: "dns-discovery-url"
|
||||
.}: string
|
||||
|
||||
dnsDiscoveryNameServers* {.
|
||||
desc: "DNS name server IPs to query. Argument may be repeated.",
|
||||
defaultValue: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
|
||||
name: "dns-discovery-name-server"
|
||||
.}: seq[IpAddress]
|
||||
|
||||
## Chat2 configuration
|
||||
fleet* {.
|
||||
desc:
|
||||
"Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.",
|
||||
defaultValue: Fleet.test,
|
||||
name: "fleet"
|
||||
.}: Fleet
|
||||
|
||||
contentTopic* {.
|
||||
desc: "Content topic for chat messages.",
|
||||
defaultValue: "/toy-chat-mix/2/huilong/proto",
|
||||
name: "content-topic"
|
||||
.}: string
|
||||
|
||||
## Websocket Configuration
|
||||
websocketSupport* {.
|
||||
desc: "Enable websocket: true|false",
|
||||
defaultValue: false,
|
||||
name: "websocket-support"
|
||||
.}: bool
|
||||
|
||||
websocketPort* {.
|
||||
desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port"
|
||||
.}: Port
|
||||
|
||||
websocketSecureSupport* {.
|
||||
desc: "WebSocket Secure Support.",
|
||||
defaultValue: false,
|
||||
name: "websocket-secure-support"
|
||||
.}: bool ## rln-relay configuration
|
||||
|
||||
proc parseCmdArg*(T: type MixNodePubInfo, p: string): T =
|
||||
let elements = p.split(":")
|
||||
if elements.len != 2:
|
||||
raise newException(
|
||||
ValueError, "Invalid format for mix node expected multiaddr:mixPublicKey"
|
||||
)
|
||||
let multiaddr = MultiAddress.init(elements[0]).valueOr:
|
||||
raise newException(ValueError, "Invalid multiaddress format")
|
||||
if not multiaddr.contains(multiCodec("ip4")).get():
|
||||
raise newException(
|
||||
ValueError, "Invalid format for ip address, expected a ipv4 multiaddress"
|
||||
)
|
||||
|
||||
return MixNodePubInfo(
|
||||
multiaddr: elements[0], pubKey: intoCurve25519Key(ncrutils.fromHex(elements[1]))
|
||||
)
|
||||
|
||||
# NOTE: Keys are different in nim-libp2p
|
||||
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
|
||||
try:
|
||||
let key = SkPrivateKey.init(utils.fromHex(p)).tryGet()
|
||||
# XXX: Here at the moment
|
||||
result = crypto.PrivateKey(scheme: Secp256k1, skkey: key)
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid private key")
|
||||
|
||||
proc completeCmdArg*(T: type crypto.PrivateKey, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type IpAddress, p: string): T =
|
||||
try:
|
||||
result = parseIpAddress(p)
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid IP address")
|
||||
|
||||
proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type Port, p: string): T =
|
||||
try:
|
||||
result = Port(parseInt(p))
|
||||
except CatchableError as e:
|
||||
raise newException(ValueError, "Invalid Port number")
|
||||
|
||||
proc completeCmdArg*(T: type Port, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type Option[uint], p: string): T =
|
||||
try:
|
||||
some(parseUint(p))
|
||||
except CatchableError:
|
||||
raise newException(ValueError, "Invalid unsigned integer")
|
||||
|
||||
proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] =
|
||||
return @[]
|
||||
|
||||
proc parseCmdArg*(T: type EthRpcUrl, s: string): T =
|
||||
## allowed patterns:
|
||||
## http://url:port
|
||||
## https://url:port
|
||||
## http://url:port/path
|
||||
## https://url:port/path
|
||||
## http://url/with/path
|
||||
## http://url:port/path?query
|
||||
## https://url:port/path?query
|
||||
## disallowed patterns:
|
||||
## any valid/invalid ws or wss url
|
||||
var httpPattern =
|
||||
re2"^(https?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
|
||||
var wsPattern =
|
||||
re2"^(wss?):\/\/((localhost)|([\w_-]+(?:(?:\.[\w_-]+)+)))(:[0-9]{1,5})?([\w.,@?^=%&:\/~+#-]*[\w@?^=%&\/~+#-])*"
|
||||
if regex.match(s, wsPattern):
|
||||
raise newException(
|
||||
ValueError, "Websocket RPC URL is not supported, Please use an HTTP URL"
|
||||
)
|
||||
if not regex.match(s, httpPattern):
|
||||
raise newException(ValueError, "Invalid HTTP RPC URL")
|
||||
return EthRpcUrl(s)
|
||||
|
||||
func defaultListenAddress*(conf: Chat2Conf): IpAddress =
|
||||
# TODO: How should we select between IPv4 and IPv6
|
||||
# Maybe there should be a config option for this.
|
||||
(static parseIpAddress("0.0.0.0"))
|
||||
4
apps/chat2mix/nim.cfg
Normal file
4
apps/chat2mix/nim.cfg
Normal file
@ -0,0 +1,4 @@
|
||||
-d:chronicles_line_numbers
|
||||
-d:chronicles_runtime_filtering:on
|
||||
-d:discv5_protocol_id:d5waku
|
||||
path = "../.."
|
||||
@ -1,37 +1,33 @@
|
||||
# TESTING IMAGE --------------------------------------------------------------
|
||||
# TESTING IMAGE --------------------------------------------------------------
|
||||
|
||||
## NOTICE: This is a short cut build file for ubuntu users who compiles nwaku in ubuntu distro.
|
||||
## This is used for faster turnaround time for testing the compiled binary.
|
||||
## Prerequisites: compiled liteprotocoltester binary in build/ directory
|
||||
## NOTICE: This is a short cut build file for ubuntu users who compiles nwaku in ubuntu distro.
|
||||
## This is used for faster turnaround time for testing the compiled binary.
|
||||
## Prerequisites: compiled liteprotocoltester binary in build/ directory
|
||||
|
||||
FROM ubuntu:noble AS prod
|
||||
FROM ubuntu:noble AS prod
|
||||
|
||||
LABEL maintainer="zoltan@status.im"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Lite Protocol Tester: Waku light-client"
|
||||
LABEL commit="unknown"
|
||||
LABEL version="unknown"
|
||||
LABEL maintainer="zoltan@status.im"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Lite Protocol Tester: Waku light-client"
|
||||
LABEL commit="unknown"
|
||||
LABEL version="unknown"
|
||||
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libgcc1 \
|
||||
libpcre3 \
|
||||
libpq-dev \
|
||||
wget \
|
||||
iproute2 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
# Referenced in the binary
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libgcc1 \
|
||||
libpq-dev \
|
||||
wget \
|
||||
iproute2 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
COPY build/liteprotocoltester /usr/bin/
|
||||
COPY apps/liteprotocoltester/run_tester_node.sh /usr/bin/
|
||||
COPY apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/
|
||||
|
||||
COPY build/liteprotocoltester /usr/bin/
|
||||
COPY apps/liteprotocoltester/run_tester_node.sh /usr/bin/
|
||||
COPY apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/
|
||||
ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"]
|
||||
|
||||
ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"]
|
||||
|
||||
# # By default just show help if called without arguments
|
||||
CMD ["--help"]
|
||||
# # By default just show help if called without arguments
|
||||
CMD ["--help"]
|
||||
|
||||
@ -7,7 +7,7 @@ ARG NIM_COMMIT
|
||||
ARG LOG_LEVEL=TRACE
|
||||
|
||||
# Get build tools and required header files
|
||||
RUN apk add --no-cache bash git build-base openssl-dev pcre-dev linux-headers curl jq
|
||||
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq
|
||||
|
||||
WORKDIR /app
|
||||
COPY . .
|
||||
@ -40,14 +40,11 @@ LABEL version="unknown"
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apk add --no-cache libgcc pcre-dev libpq-dev \
|
||||
RUN apk add --no-cache libgcc libpq-dev \
|
||||
wget \
|
||||
iproute2 \
|
||||
python3
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
|
||||
COPY --from=nim-build /app/build/liteprotocoltester /usr/bin/
|
||||
RUN chmod +x /usr/bin/liteprotocoltester
|
||||
|
||||
|
||||
@ -14,8 +14,8 @@ import
|
||||
libp2p/wire
|
||||
|
||||
import
|
||||
tools/confutils/cli_args,
|
||||
waku/[
|
||||
factory/external_config,
|
||||
node/peer_manager,
|
||||
waku_lightpush/common,
|
||||
waku_relay,
|
||||
@ -27,19 +27,6 @@ import
|
||||
logScope:
|
||||
topics = "diagnose connections"
|
||||
|
||||
proc `$`*(cap: Capabilities): string =
|
||||
case cap
|
||||
of Capabilities.Relay:
|
||||
return "Relay"
|
||||
of Capabilities.Store:
|
||||
return "Store"
|
||||
of Capabilities.Filter:
|
||||
return "Filter"
|
||||
of Capabilities.Lightpush:
|
||||
return "Lightpush"
|
||||
of Capabilities.Sync:
|
||||
return "Sync"
|
||||
|
||||
proc allPeers(pm: PeerManager): string =
|
||||
var allStr: string = ""
|
||||
for idx, peer in pm.switch.peerStore.peers():
|
||||
@ -72,7 +59,4 @@ proc logSelfPeers*(pm: PeerManager) =
|
||||
{allPeers(pm)}
|
||||
*------------------------------------------------------------------------------------------*""".fmt()
|
||||
|
||||
if printable.isErr():
|
||||
echo "Error while printing statistics: " & printable.error().msg
|
||||
else:
|
||||
echo printable.get()
|
||||
echo printable.valueOr("Error while printing statistics: " & error.msg)
|
||||
|
||||
@ -9,7 +9,7 @@ x-logging: &logging
|
||||
x-eth-client-address: ð_client_address ${ETH_CLIENT_ADDRESS:-} # Add your ETH_CLIENT_ADDRESS after the "-"
|
||||
|
||||
x-rln-environment: &rln_env
|
||||
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4}
|
||||
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6}
|
||||
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
|
||||
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
|
||||
|
||||
|
||||
@ -11,11 +11,11 @@ import
|
||||
confutils
|
||||
|
||||
import
|
||||
tools/confutils/cli_args,
|
||||
waku/[
|
||||
common/enr,
|
||||
common/logging,
|
||||
factory/waku as waku_factory,
|
||||
factory/external_config,
|
||||
waku_node,
|
||||
node/waku_metrics,
|
||||
node/peer_manager,
|
||||
@ -49,13 +49,10 @@ when isMainModule:
|
||||
|
||||
const versionString = "version / git commit hash: " & waku_factory.git_version
|
||||
|
||||
let confRes = LiteProtocolTesterConf.load(version = versionString)
|
||||
if confRes.isErr():
|
||||
error "failure while loading the configuration", error = confRes.error
|
||||
let conf = LiteProtocolTesterConf.load(version = versionString).valueOr:
|
||||
error "failure while loading the configuration", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var conf = confRes.get()
|
||||
|
||||
## Logging setup
|
||||
logging.setupLog(conf.logLevel, conf.logFormat)
|
||||
|
||||
@ -122,7 +119,7 @@ when isMainModule:
|
||||
error "Issue converting toWakuConf", error = $error
|
||||
quit(QuitFailure)
|
||||
|
||||
var waku = Waku.new(wakuConf).valueOr:
|
||||
var waku = (waitFor Waku.new(wakuConf)).valueOr:
|
||||
error "Waku initialization failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
@ -130,7 +127,7 @@ when isMainModule:
|
||||
error "Starting waku failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
debug "Setting up shutdown hooks"
|
||||
info "Setting up shutdown hooks"
|
||||
|
||||
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
|
||||
await waku.stop()
|
||||
@ -187,7 +184,7 @@ when isMainModule:
|
||||
error "Service node not found in time via PX"
|
||||
quit(QuitFailure)
|
||||
|
||||
if futForServiceNode.read().isErr():
|
||||
futForServiceNode.read().isOkOr:
|
||||
error "Service node for test not found via PX"
|
||||
quit(QuitFailure)
|
||||
|
||||
|
||||
@ -89,10 +89,7 @@ proc reportSentMessages() =
|
||||
|{numMessagesToSend+failedToSendCount:>11} |{messagesSent:>11} |{failedToSendCount:>11} |
|
||||
*----------------------------------------*""".fmt()
|
||||
|
||||
if report.isErr:
|
||||
echo "Error while printing statistics"
|
||||
else:
|
||||
echo report.get()
|
||||
echo report.valueOr("Error while printing statistics")
|
||||
|
||||
echo "*--------------------------------------------------------------------------------------------------*"
|
||||
echo "| Failure cause | count |"
|
||||
@ -190,25 +187,22 @@ proc publishMessages(
|
||||
)
|
||||
if not preventPeerSwitch and noFailedPush > maxFailedPush:
|
||||
info "Max push failure limit reached, Try switching peer."
|
||||
let peerOpt = selectRandomServicePeer(
|
||||
actualServicePeer = selectRandomServicePeer(
|
||||
wakuNode.peerManager, some(actualServicePeer), WakuLightPushCodec
|
||||
)
|
||||
if peerOpt.isOk():
|
||||
actualServicePeer = peerOpt.get()
|
||||
|
||||
info "New service peer in use",
|
||||
codec = lightpushPubsubTopic,
|
||||
peer = constructMultiaddrStr(actualServicePeer)
|
||||
|
||||
noFailedPush = 0
|
||||
noOfServicePeerSwitches += 1
|
||||
lpt_change_service_peer_count.inc(labelValues = ["publisher"])
|
||||
continue # try again with new peer without delay
|
||||
else:
|
||||
).valueOr:
|
||||
error "Failed to find new service peer. Exiting."
|
||||
noFailedServiceNodeSwitches += 1
|
||||
break
|
||||
|
||||
info "New service peer in use",
|
||||
codec = lightpushPubsubTopic,
|
||||
peer = constructMultiaddrStr(actualServicePeer)
|
||||
|
||||
noFailedPush = 0
|
||||
noOfServicePeerSwitches += 1
|
||||
lpt_change_service_peer_count.inc(labelValues = ["publisher"])
|
||||
continue # try again with new peer without delay
|
||||
|
||||
await sleepAsync(messageInterval)
|
||||
|
||||
proc setupAndPublish*(
|
||||
|
||||
@ -54,67 +54,65 @@ proc maintainSubscription(
|
||||
var noFailedSubscribes = 0
|
||||
var noFailedServiceNodeSwitches = 0
|
||||
var isFirstPingOnNewPeer = true
|
||||
const RetryWaitMs = 2.seconds # Quick retry interval
|
||||
const SubscriptionMaintenanceMs = 30.seconds # Subscription maintenance interval
|
||||
while true:
|
||||
info "maintaining subscription at", peer = constructMultiaddrStr(actualFilterPeer)
|
||||
# First use filter-ping to check if we have an active subscription
|
||||
let pingRes = await wakuNode.wakuFilterClient.ping(actualFilterPeer)
|
||||
if pingRes.isErr():
|
||||
if isFirstPingOnNewPeer == false:
|
||||
# Very first ping expected to fail as we have not yet subscribed at all
|
||||
lpt_receiver_lost_subscription_count.inc()
|
||||
isFirstPingOnNewPeer = false
|
||||
# No subscription found. Let's subscribe.
|
||||
error "ping failed.", err = pingRes.error
|
||||
trace "no subscription found. Sending subscribe request"
|
||||
let pingErr = (await wakuNode.wakuFilterClient.ping(actualFilterPeer)).errorOr:
|
||||
await sleepAsync(SubscriptionMaintenanceMs)
|
||||
info "subscription is live."
|
||||
continue
|
||||
|
||||
let subscribeRes = await wakuNode.filterSubscribe(
|
||||
if isFirstPingOnNewPeer == false:
|
||||
# Very first ping expected to fail as we have not yet subscribed at all
|
||||
lpt_receiver_lost_subscription_count.inc()
|
||||
isFirstPingOnNewPeer = false
|
||||
# No subscription found. Let's subscribe.
|
||||
error "ping failed.", error = pingErr
|
||||
trace "no subscription found. Sending subscribe request"
|
||||
|
||||
let subscribeErr = (
|
||||
await wakuNode.filterSubscribe(
|
||||
some(filterPubsubTopic), filterContentTopic, actualFilterPeer
|
||||
)
|
||||
).errorOr:
|
||||
await sleepAsync(SubscriptionMaintenanceMs)
|
||||
if noFailedSubscribes > 0:
|
||||
noFailedSubscribes -= 1
|
||||
notice "subscribe request successful."
|
||||
continue
|
||||
|
||||
if subscribeRes.isErr():
|
||||
noFailedSubscribes += 1
|
||||
lpt_service_peer_failure_count.inc(
|
||||
labelValues = ["receiver", actualFilterPeer.getAgent()]
|
||||
)
|
||||
error "Subscribe request failed.",
|
||||
err = subscribeRes.error,
|
||||
peer = actualFilterPeer,
|
||||
failCount = noFailedSubscribes
|
||||
noFailedSubscribes += 1
|
||||
lpt_service_peer_failure_count.inc(
|
||||
labelValues = ["receiver", actualFilterPeer.getAgent()]
|
||||
)
|
||||
error "Subscribe request failed.",
|
||||
err = subscribeErr, peer = actualFilterPeer, failCount = noFailedSubscribes
|
||||
|
||||
# TODO: disconnet from failed actualFilterPeer
|
||||
# asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
|
||||
# wakunode.peerManager.peerStore.delete(actualFilterPeer)
|
||||
# TODO: disconnet from failed actualFilterPeer
|
||||
# asyncSpawn(wakuNode.peerManager.switch.disconnect(p))
|
||||
# wakunode.peerManager.peerStore.delete(actualFilterPeer)
|
||||
|
||||
if noFailedSubscribes < maxFailedSubscribes:
|
||||
await sleepAsync(2.seconds) # Wait a bit before retrying
|
||||
continue
|
||||
elif not preventPeerSwitch:
|
||||
let peerOpt = selectRandomServicePeer(
|
||||
wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
|
||||
)
|
||||
if peerOpt.isOk():
|
||||
actualFilterPeer = peerOpt.get()
|
||||
if noFailedSubscribes < maxFailedSubscribes:
|
||||
await sleepAsync(RetryWaitMs) # Wait a bit before retrying
|
||||
elif not preventPeerSwitch:
|
||||
# try again with new peer without delay
|
||||
actualFilterPeer = selectRandomServicePeer(
|
||||
wakuNode.peerManager, some(actualFilterPeer), WakuFilterSubscribeCodec
|
||||
).valueOr:
|
||||
error "Failed to find new service peer. Exiting."
|
||||
noFailedServiceNodeSwitches += 1
|
||||
break
|
||||
|
||||
info "Found new peer for codec",
|
||||
codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
|
||||
info "Found new peer for codec",
|
||||
codec = filterPubsubTopic, peer = constructMultiaddrStr(actualFilterPeer)
|
||||
|
||||
noFailedSubscribes = 0
|
||||
lpt_change_service_peer_count.inc(labelValues = ["receiver"])
|
||||
isFirstPingOnNewPeer = true
|
||||
continue # try again with new peer without delay
|
||||
else:
|
||||
error "Failed to find new service peer. Exiting."
|
||||
noFailedServiceNodeSwitches += 1
|
||||
break
|
||||
else:
|
||||
if noFailedSubscribes > 0:
|
||||
noFailedSubscribes -= 1
|
||||
|
||||
notice "subscribe request successful."
|
||||
noFailedSubscribes = 0
|
||||
lpt_change_service_peer_count.inc(labelValues = ["receiver"])
|
||||
isFirstPingOnNewPeer = true
|
||||
else:
|
||||
info "subscription is live."
|
||||
|
||||
await sleepAsync(30.seconds) # Subscription maintenance interval
|
||||
await sleepAsync(SubscriptionMaintenanceMs)
|
||||
|
||||
proc setupAndListen*(
|
||||
wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
|
||||
|
||||
@ -11,8 +11,8 @@ import
|
||||
libp2p/wire
|
||||
|
||||
import
|
||||
tools/confutils/cli_args,
|
||||
waku/[
|
||||
factory/external_config,
|
||||
common/enr,
|
||||
waku_node,
|
||||
node/peer_manager,
|
||||
@ -73,7 +73,7 @@ proc selectRandomCapablePeer*(
|
||||
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
|
||||
let randomPeer = supportivePeers[rndPeerIndex]
|
||||
|
||||
debug "Dialing random peer",
|
||||
info "Dialing random peer",
|
||||
idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)
|
||||
|
||||
supportivePeers.delete(rndPeerIndex .. rndPeerIndex)
|
||||
@ -82,12 +82,12 @@ proc selectRandomCapablePeer*(
|
||||
if (await connOpt.withTimeout(10.seconds)):
|
||||
if connOpt.value().isSome():
|
||||
found = some(randomPeer)
|
||||
debug "Dialing successful",
|
||||
info "Dialing successful",
|
||||
peer = constructMultiaddrStr(randomPeer), codec = codec
|
||||
else:
|
||||
debug "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec
|
||||
info "Dialing failed", peer = constructMultiaddrStr(randomPeer), codec = codec
|
||||
else:
|
||||
debug "Timeout dialing service peer",
|
||||
info "Timeout dialing service peer",
|
||||
peer = constructMultiaddrStr(randomPeer), codec = codec
|
||||
|
||||
return found
|
||||
@ -105,8 +105,8 @@ proc tryCallAllPxPeers*(
|
||||
var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability)
|
||||
|
||||
lpt_px_peers.set(supportivePeers.len)
|
||||
debug "Found supportive peers count", count = supportivePeers.len()
|
||||
debug "Found supportive peers", supportivePeers = $supportivePeers
|
||||
info "Found supportive peers count", count = supportivePeers.len()
|
||||
info "Found supportive peers", supportivePeers = $supportivePeers
|
||||
if supportivePeers.len == 0:
|
||||
return none(seq[RemotePeerInfo])
|
||||
|
||||
@ -116,7 +116,7 @@ proc tryCallAllPxPeers*(
|
||||
let rndPeerIndex = rand(0 .. supportivePeers.len - 1)
|
||||
let randomPeer = supportivePeers[rndPeerIndex]
|
||||
|
||||
debug "Dialing random peer",
|
||||
info "Dialing random peer",
|
||||
idx = $rndPeerIndex, peer = constructMultiaddrStr(randomPeer)
|
||||
|
||||
supportivePeers.delete(rndPeerIndex, rndPeerIndex)
|
||||
@ -181,7 +181,7 @@ proc pxLookupServiceNode*(
|
||||
if not await futPeers.withTimeout(30.seconds):
|
||||
notice "Cannot get peers from PX", round = 5 - trialCount
|
||||
else:
|
||||
if futPeers.value().isErr():
|
||||
futPeers.value().isOkOr:
|
||||
info "PeerExchange reported error", error = futPeers.read().error
|
||||
return err()
|
||||
|
||||
|
||||
@ -8,6 +8,8 @@ import
|
||||
results,
|
||||
libp2p/peerid
|
||||
|
||||
from std/sugar import `=>`
|
||||
|
||||
import ./tester_message, ./lpt_metrics
|
||||
|
||||
type
|
||||
@ -114,12 +116,7 @@ proc addMessage*(
|
||||
if not self.contains(peerId):
|
||||
self[peerId] = Statistics.init()
|
||||
|
||||
let shortSenderId = block:
|
||||
let senderPeer = PeerId.init(msg.sender)
|
||||
if senderPeer.isErr():
|
||||
msg.sender
|
||||
else:
|
||||
senderPeer.get().shortLog()
|
||||
let shortSenderId = PeerId.init(msg.sender).map(p => p.shortLog()).valueOr(msg.sender)
|
||||
|
||||
discard catch:
|
||||
self[peerId].addMessage(shortSenderId, msg, msgHash)
|
||||
@ -220,10 +217,7 @@ proc echoStat*(self: Statistics, peerId: string) =
|
||||
| {self.missingIndices()} |
|
||||
*------------------------------------------------------------------------------------------*""".fmt()
|
||||
|
||||
if printable.isErr():
|
||||
echo "Error while printing statistics: " & printable.error().msg
|
||||
else:
|
||||
echo printable.get()
|
||||
echo printable.valueOr("Error while printing statistics: " & error.msg)
|
||||
|
||||
proc jsonStat*(self: Statistics): string =
|
||||
let minL, maxL, avgL = self.calcLatency()
|
||||
@ -243,20 +237,18 @@ proc jsonStat*(self: Statistics): string =
|
||||
}},
|
||||
"lostIndices": {self.missingIndices()}
|
||||
}}""".fmt()
|
||||
if json.isErr:
|
||||
return "{\"result:\": \"" & json.error.msg & "\"}"
|
||||
|
||||
return json.get()
|
||||
return json.valueOr("{\"result:\": \"" & error.msg & "\"}")
|
||||
|
||||
proc echoStats*(self: var PerPeerStatistics) =
|
||||
for peerId, stats in self.pairs:
|
||||
let peerLine = catch:
|
||||
"Receiver statistics from peer {peerId}".fmt()
|
||||
if peerLine.isErr:
|
||||
peerLine.isOkOr:
|
||||
echo "Error while printing statistics"
|
||||
else:
|
||||
echo peerLine.get()
|
||||
stats.echoStat(peerId)
|
||||
continue
|
||||
echo peerLine.get()
|
||||
stats.echoStat(peerId)
|
||||
|
||||
proc jsonStats*(self: PerPeerStatistics): string =
|
||||
try:
|
||||
|
||||
@ -12,14 +12,9 @@ import
|
||||
secp256k1
|
||||
|
||||
import
|
||||
waku/[
|
||||
common/confutils/envvar/defs as confEnvvarDefs,
|
||||
common/confutils/envvar/std/net as confEnvvarNet,
|
||||
common/logging,
|
||||
factory/external_config,
|
||||
waku_core,
|
||||
waku_core/topics/pubsub_topic,
|
||||
]
|
||||
../../tools/confutils/
|
||||
[cli_args, envvar as confEnvvarDefs, envvar_net as confEnvvarNet],
|
||||
waku/[common/logging, waku_core, waku_core/topics/pubsub_topic]
|
||||
|
||||
export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet
|
||||
|
||||
|
||||
@ -6,7 +6,7 @@ import
|
||||
json_serialization/std/options,
|
||||
json_serialization/lexer
|
||||
|
||||
import ../../waku/waku_api/rest/serdes
|
||||
import waku/rest_api/endpoint/serdes
|
||||
|
||||
type ProtocolTesterMessage* = object
|
||||
sender*: string
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
import results, options, chronos
|
||||
import waku/[waku_node, waku_core, waku_lightpush]
|
||||
import waku/[waku_node, waku_core, waku_lightpush, waku_lightpush/common]
|
||||
import publisher_base
|
||||
|
||||
type V3Publisher* = ref object of PublisherBase
|
||||
@ -20,7 +20,7 @@ method send*(
|
||||
discard (
|
||||
await self.wakuNode.lightpushPublish(some(topic), message, some(servicePeer))
|
||||
).valueOr:
|
||||
if error.code == NO_PEERS_TO_RELAY and
|
||||
if error.code == LightPushErrorCode.NO_PEERS_TO_RELAY and
|
||||
error.desc != some("No peers for topic, skipping publish"):
|
||||
# TODO: We need better separation of errors happening on the client side or the server side.-
|
||||
return err("dial_failure")
|
||||
|
||||
@ -29,7 +29,6 @@ The following options are available:
|
||||
--rln-relay Enable spam protection through rln-relay: true|false [=true].
|
||||
--rln-relay-dynamic Enable waku-rln-relay with on-chain dynamic group management: true|false
|
||||
[=true].
|
||||
--rln-relay-tree-path Path to the RLN merkle tree sled db (https://github.com/spacejam/sled).
|
||||
--rln-relay-eth-client-address HTTP address of an Ethereum testnet client e.g., http://localhost:8540/
|
||||
[=http://localhost:8540/].
|
||||
--rln-relay-eth-contract-address Address of membership contract on an Ethereum testnet.
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[net, tables, strutils, times, sequtils, random],
|
||||
std/[net, tables, strutils, times, sequtils, random, sugar],
|
||||
results,
|
||||
chronicles,
|
||||
chronicles/topics_registry,
|
||||
@ -183,16 +183,14 @@ proc setConnectedPeersMetrics(
|
||||
for maddr in peerInfo.addrs:
|
||||
if $maddr notin customPeerInfo.maddrs:
|
||||
customPeerInfo.maddrs.add $maddr
|
||||
let typedRecord = discNode.toTypedRecord()
|
||||
if not typedRecord.isOk():
|
||||
let typedRecord = discNode.toTypedRecord().valueOr:
|
||||
warn "could not convert record to typed record", record = discNode
|
||||
continue
|
||||
if not typedRecord.get().ip.isSome():
|
||||
warn "ip field is not set", record = typedRecord.get()
|
||||
let ipAddr = typedRecord.ip.valueOr:
|
||||
warn "ip field is not set", record = typedRecord
|
||||
continue
|
||||
|
||||
let ip = $typedRecord.get().ip.get().join(".")
|
||||
customPeerInfo.ip = ip
|
||||
customPeerInfo.ip = $ipAddr.join(".")
|
||||
|
||||
# try to ping the peer
|
||||
if shouldReconnect(customPeerInfo):
|
||||
@ -215,7 +213,7 @@ proc setConnectedPeersMetrics(
|
||||
continue
|
||||
var customPeerInfo = allPeers[peerIdStr]
|
||||
|
||||
debug "connected to peer", peer = customPeerInfo[]
|
||||
info "connected to peer", peer = customPeerInfo[]
|
||||
|
||||
# after connection, get supported protocols
|
||||
let lp2pPeerStore = node.switch.peerStore
|
||||
@ -354,16 +352,16 @@ proc crawlNetwork(
|
||||
await sleepAsync(crawlInterval.millis - elapsed.millis)
|
||||
|
||||
proc retrieveDynamicBootstrapNodes(
|
||||
dnsDiscoveryUrl: string, dnsDiscoveryNameServers: seq[IpAddress]
|
||||
dnsDiscoveryUrl: string, dnsAddrsNameServers: seq[IpAddress]
|
||||
): Future[Result[seq[RemotePeerInfo], string]] {.async.} =
|
||||
## Retrieve dynamic bootstrap nodes (DNS discovery)
|
||||
|
||||
if dnsDiscoveryUrl != "":
|
||||
# DNS discovery
|
||||
debug "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl
|
||||
info "Discovering nodes using Waku DNS discovery", url = dnsDiscoveryUrl
|
||||
|
||||
var nameServers: seq[TransportAddress]
|
||||
for ip in dnsDiscoveryNameServers:
|
||||
for ip in dnsAddrsNameServers:
|
||||
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
|
||||
|
||||
let dnsResolver = DnsResolver.new(nameServers)
|
||||
@ -374,16 +372,11 @@ proc retrieveDynamicBootstrapNodes(
|
||||
if resolved.len > 0:
|
||||
return resolved[0] # Use only first answer
|
||||
|
||||
var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver)
|
||||
if wakuDnsDiscovery.isOk():
|
||||
return (await wakuDnsDiscovery.get().findPeers()).mapErr(
|
||||
proc(e: cstring): string =
|
||||
$e
|
||||
)
|
||||
else:
|
||||
warn "Failed to init Waku DNS discovery"
|
||||
var wakuDnsDiscovery = WakuDnsDiscovery.init(dnsDiscoveryUrl, resolver).errorOr:
|
||||
return (await value.findPeers()).mapErr(e => $e)
|
||||
warn "Failed to init Waku DNS discovery"
|
||||
|
||||
debug "No method for retrieving dynamic bootstrap nodes specified."
|
||||
info "No method for retrieving dynamic bootstrap nodes specified."
|
||||
ok(newSeq[RemotePeerInfo]()) # Return an empty seq by default
|
||||
|
||||
proc getBootstrapFromDiscDns(
|
||||
@ -391,11 +384,10 @@ proc getBootstrapFromDiscDns(
|
||||
): Future[Result[seq[enr.Record], string]] {.async.} =
|
||||
try:
|
||||
let dnsNameServers = @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
|
||||
let dynamicBootstrapNodesRes =
|
||||
let dynamicBootstrapNodes = (
|
||||
await retrieveDynamicBootstrapNodes(conf.dnsDiscoveryUrl, dnsNameServers)
|
||||
if not dynamicBootstrapNodesRes.isOk():
|
||||
error("failed discovering peers from DNS")
|
||||
let dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
|
||||
).valueOr:
|
||||
return err("Failed retrieving dynamic bootstrap nodes: " & $error)
|
||||
|
||||
# select dynamic bootstrap nodes that have an ENR containing a udp port.
|
||||
# Discv5 only supports UDP https://github.com/ethereum/devp2p/blob/master/discv5/discv5-theory.md)
|
||||
@ -411,7 +403,7 @@ proc getBootstrapFromDiscDns(
|
||||
discv5BootstrapEnrs.add(enr)
|
||||
return ok(discv5BootstrapEnrs)
|
||||
except CatchableError:
|
||||
error("failed discovering peers from DNS")
|
||||
error("failed discovering peers from DNS: " & getCurrentExceptionMsg())
|
||||
|
||||
proc initAndStartApp(
|
||||
conf: NetworkMonitorConf
|
||||
@ -451,12 +443,8 @@ proc initAndStartApp(
|
||||
error "failed to add sharded topics to ENR", error = error
|
||||
return err("failed to add sharded topics to ENR: " & $error)
|
||||
|
||||
let recordRes = builder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
return err("cannot build record: " & $recordRes.error)
|
||||
else:
|
||||
recordRes.get()
|
||||
let record = builder.build().valueOr:
|
||||
return err("cannot build record: " & $error)
|
||||
|
||||
var nodeBuilder = WakuNodeBuilder.init()
|
||||
|
||||
@ -469,21 +457,15 @@ proc initAndStartApp(
|
||||
relayServiceRatio = "13.33:86.67",
|
||||
shardAware = true,
|
||||
)
|
||||
let res = nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort)
|
||||
if res.isErr():
|
||||
return err("node building error" & $res.error)
|
||||
nodeBuilder.withNetworkConfigurationDetails(bindIp, nodeTcpPort).isOkOr:
|
||||
return err("node building error" & $error)
|
||||
|
||||
let nodeRes = nodeBuilder.build()
|
||||
let node =
|
||||
if nodeRes.isErr():
|
||||
return err("node building error" & $res.error)
|
||||
else:
|
||||
nodeRes.get()
|
||||
let node = nodeBuilder.build().valueOr:
|
||||
return err("node building error" & $error)
|
||||
|
||||
var discv5BootstrapEnrsRes = await getBootstrapFromDiscDns(conf)
|
||||
if discv5BootstrapEnrsRes.isErr():
|
||||
var discv5BootstrapEnrs = (await getBootstrapFromDiscDns(conf)).valueOr:
|
||||
error("failed discovering peers from DNS")
|
||||
var discv5BootstrapEnrs = discv5BootstrapEnrsRes.get()
|
||||
quit(QuitFailure)
|
||||
|
||||
# parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
|
||||
for enrUri in conf.bootstrapNodes:
|
||||
@ -561,26 +543,25 @@ proc subscribeAndHandleMessages(
|
||||
when isMainModule:
|
||||
# known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
|
||||
{.pop.}
|
||||
let confRes = NetworkMonitorConf.loadConfig()
|
||||
if confRes.isErr():
|
||||
error "could not load cli variables", err = confRes.error
|
||||
quit(1)
|
||||
var conf = NetworkMonitorConf.loadConfig().valueOr:
|
||||
error "could not load cli variables", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var conf = confRes.get()
|
||||
info "cli flags", conf = conf
|
||||
|
||||
if conf.clusterId == 1:
|
||||
let twnClusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
|
||||
|
||||
conf.bootstrapNodes = twnClusterConf.discv5BootstrapNodes
|
||||
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
|
||||
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
|
||||
conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
|
||||
conf.numShardsInNetwork = twnClusterConf.numShardsInNetwork
|
||||
conf.bootstrapNodes = twnNetworkConf.discv5BootstrapNodes
|
||||
conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
|
||||
conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
|
||||
conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
|
||||
conf.numShardsInNetwork = twnNetworkConf.shardingConf.numShardsInCluster
|
||||
|
||||
if conf.shards.len == 0:
|
||||
conf.shards = toSeq(uint16(0) .. uint16(twnClusterConf.numShardsInNetwork - 1))
|
||||
conf.shards =
|
||||
toSeq(uint16(0) .. uint16(twnNetworkConf.shardingConf.numShardsInCluster - 1))
|
||||
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
@ -593,37 +574,30 @@ when isMainModule:
|
||||
|
||||
# start metrics server
|
||||
if conf.metricsServer:
|
||||
let res =
|
||||
startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort))
|
||||
if res.isErr():
|
||||
error "could not start metrics server", err = res.error
|
||||
quit(1)
|
||||
startMetricsServer(conf.metricsServerAddress, Port(conf.metricsServerPort)).isOkOr:
|
||||
error "could not start metrics server", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
# start rest server for custom metrics
|
||||
let res = startRestApiServer(conf, allPeersInfo, msgPerContentTopic)
|
||||
if res.isErr():
|
||||
error "could not start rest api server", err = res.error
|
||||
quit(1)
|
||||
startRestApiServer(conf, allPeersInfo, msgPerContentTopic).isOkOr:
|
||||
error "could not start rest api server", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
# create a rest client
|
||||
let clientRest =
|
||||
RestClientRef.new(url = "http://ip-api.com", connectTimeout = ctime.seconds(2))
|
||||
if clientRest.isErr():
|
||||
error "could not start rest api client", err = res.error
|
||||
quit(1)
|
||||
let restClient = clientRest.get()
|
||||
let restClient = RestClientRef.new(
|
||||
url = "http://ip-api.com", connectTimeout = ctime.seconds(2)
|
||||
).valueOr:
|
||||
error "could not start rest api client", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
# start waku node
|
||||
let nodeRes = waitFor initAndStartApp(conf)
|
||||
if nodeRes.isErr():
|
||||
error "could not start node"
|
||||
quit 1
|
||||
|
||||
let (node, discv5) = nodeRes.get()
|
||||
let (node, discv5) = (waitFor initAndStartApp(conf)).valueOr:
|
||||
error "could not start node", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
(waitFor node.mountRelay()).isOkOr:
|
||||
error "failed to mount waku relay protocol: ", err = error
|
||||
quit 1
|
||||
error "failed to mount waku relay protocol: ", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
waitFor node.mountLibp2pPing()
|
||||
|
||||
@ -639,7 +613,6 @@ when isMainModule:
|
||||
credIndex: some(uint(0)),
|
||||
ethContractAddress: conf.rlnRelayEthContractAddress,
|
||||
ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
|
||||
treePath: conf.rlnRelayTreePath,
|
||||
epochSizeSec: conf.rlnEpochSizeSec,
|
||||
creds: none(RlnRelayCreds),
|
||||
onFatalErrorAction: onFatalErrorAction,
|
||||
@ -648,12 +621,12 @@ when isMainModule:
|
||||
try:
|
||||
waitFor node.mountRlnRelay(rlnConf)
|
||||
except CatchableError:
|
||||
error "failed to setup RLN", err = getCurrentExceptionMsg()
|
||||
quit 1
|
||||
error "failed to setup RLN", error = getCurrentExceptionMsg()
|
||||
quit(QuitFailure)
|
||||
|
||||
node.mountMetadata(conf.clusterId).isOkOr:
|
||||
error "failed to mount waku metadata protocol: ", err = error
|
||||
quit 1
|
||||
node.mountMetadata(conf.clusterId, conf.shards).isOkOr:
|
||||
error "failed to mount waku metadata protocol: ", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
for shard in conf.shards:
|
||||
# Subscribe the node to the shards, to count messages
|
||||
|
||||
@ -7,8 +7,13 @@ import
|
||||
results,
|
||||
regex
|
||||
|
||||
const git_version* {.strdefine.} = "n/a"
|
||||
|
||||
type EthRpcUrl* = distinct string
|
||||
|
||||
proc `$`*(u: EthRpcUrl): string =
|
||||
string(u)
|
||||
|
||||
type NetworkMonitorConf* = object
|
||||
logLevel* {.
|
||||
desc: "Sets the log level",
|
||||
@ -75,12 +80,6 @@ type NetworkMonitorConf* = object
|
||||
name: "rln-relay-dynamic"
|
||||
.}: bool
|
||||
|
||||
rlnRelayTreePath* {.
|
||||
desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
|
||||
defaultValue: "",
|
||||
name: "rln-relay-tree-path"
|
||||
.}: string
|
||||
|
||||
ethClientUrls* {.
|
||||
desc:
|
||||
"HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
|
||||
|
||||
@ -31,7 +31,7 @@ proc decodeBytes*(
|
||||
try:
|
||||
let jsonContent = parseJson(res)
|
||||
if $jsonContent["status"].getStr() != "success":
|
||||
error "query failed", result = jsonContent
|
||||
error "query failed", result = $jsonContent
|
||||
return err("query failed")
|
||||
return ok(
|
||||
NodeLocation(
|
||||
|
||||
@ -1,12 +1,20 @@
|
||||
# RPC URL for accessing testnet via HTTP.
|
||||
# e.g. https://sepolia.infura.io/v3/123aa110320f4aec179150fba1e1b1b1
|
||||
# e.g. https://linea-sepolia.infura.io/v3/123aa110320f4aec179150fba1e1b1b1
|
||||
RLN_RELAY_ETH_CLIENT_ADDRESS=
|
||||
|
||||
# Private key of testnet where you have sepolia ETH that would be staked into RLN contract.
|
||||
# Account of testnet where you have Linea Sepolia ETH that would be staked into RLN contract.
|
||||
ETH_TESTNET_ACCOUNT=
|
||||
|
||||
# Private key of testnet where you have Linea Sepolia ETH that would be staked into RLN contract.
|
||||
# Note: make sure you don't use the '0x' prefix.
|
||||
# e.g. 0116196e9a8abed42dd1a22eb63fa2a5a17b0c27d716b87ded2c54f1bf192a0b
|
||||
ETH_TESTNET_KEY=
|
||||
|
||||
# Address of the RLN contract on Linea Sepolia.
|
||||
RLN_CONTRACT_ADDRESS=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6
|
||||
# Address of the RLN Membership Token contract on Linea Sepolia used to pay for membership.
|
||||
TOKEN_CONTRACT_ADDRESS=0x185A0015aC462a0aECb81beCc0497b649a64B9ea
|
||||
|
||||
# Password you would like to use to protect your RLN membership.
|
||||
RLN_RELAY_CRED_PASSWORD=
|
||||
|
||||
@ -15,7 +23,8 @@ NWAKU_IMAGE=
|
||||
NODEKEY=
|
||||
DOMAIN=
|
||||
EXTRA_ARGS=
|
||||
RLN_RELAY_CONTRACT_ADDRESS=
|
||||
STORAGE_SIZE=
|
||||
|
||||
|
||||
# -------------------- SONDA CONFIG ------------------
|
||||
METRICS_PORT=8004
|
||||
|
||||
@ -30,13 +30,13 @@ It works by running a `nwaku` node, publishing a message from it every fixed int
|
||||
2. If you want to query nodes in `cluster-id` 1, then you have to follow the steps of registering an RLN membership. Otherwise, you can skip this step.
|
||||
|
||||
For it, you need:
|
||||
* Ethereum Sepolia WebSocket endpoint. Get one free from [Infura](https://www.infura.io/).
|
||||
* Ethereum Sepolia account with some balance <0.01 Eth. Get some [here](https://www.infura.io/faucet/sepolia).
|
||||
* Ethereum Linea Sepolia WebSocket endpoint. Get one free from [Infura](https://linea-sepolia.infura.io/).
|
||||
* Ethereum Linea Sepolia account with minimum 0.01ETH. Get some [here](https://docs.metamask.io/developer-tools/faucet/).
|
||||
* A password to protect your rln membership.
|
||||
|
||||
Fill the `RLN_RELAY_ETH_CLIENT_ADDRESS`, `ETH_TESTNET_KEY` and `RLN_RELAY_CRED_PASSWORD` env variables and run
|
||||
|
||||
```
|
||||
```
|
||||
./register_rln.sh
|
||||
```
|
||||
|
||||
|
||||
@ -61,7 +61,6 @@ fi
|
||||
|
||||
if [ "${CLUSTER_ID}" -eq 1 ]; then
|
||||
RLN_RELAY_CRED_PATH=--rln-relay-cred-path=${RLN_RELAY_CRED_PATH:-/keystore/keystore.json}
|
||||
RLN_TREE_PATH=--rln-relay-tree-path="/etc/rln_tree"
|
||||
fi
|
||||
|
||||
if [ -n "${RLN_RELAY_CRED_PASSWORD}" ]; then
|
||||
|
||||
@ -32,21 +32,31 @@ $ make wakucanary
|
||||
And used as follows. A reachable node that supports both `store` and `filter` protocols.
|
||||
|
||||
```console
|
||||
$ ./build/wakucanary --address=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV --protocol=store --protocol=filter
|
||||
$ ./build/wakucanary \
|
||||
--address=/dns4/store-01.do-ams3.status.staging.status.im/tcp/30303/p2p/16Uiu2HAm3xVDaz6SRJ6kErwC21zBJEZjavVXg7VSkoWzaV1aMA3F \
|
||||
--protocol=store \
|
||||
--protocol=filter \
|
||||
--cluster-id=16 \
|
||||
--shard=64
|
||||
$ echo $?
|
||||
0
|
||||
```
|
||||
|
||||
A node that can't be reached.
|
||||
```console
|
||||
$ ./build/wakucanary --address=/dns4/node-01.ac-cn-hongkong-c.waku.sandbox.status.im/tcp/1000/p2p/16Uiu2HAmSJvSJphxRdbnigUV5bjRRZFBhTtWFTSyiKaQByCjwmpV --protocol=store --protocol=filter
|
||||
$ ./build/wakucanary \
|
||||
--address=/dns4/store-01.do-ams3.status.staging.status.im/tcp/1000/p2p/16Uiu2HAm3xVDaz6SRJ6kErwC21zBJEZjavVXg7VSkoWzaV1aMA3F \
|
||||
--protocol=store \
|
||||
--protocol=filter \
|
||||
--cluster-id=16 \
|
||||
--shard=64
|
||||
$ echo $?
|
||||
1
|
||||
```
|
||||
|
||||
Note that a domain name can also be used.
|
||||
```console
|
||||
$ ./build/wakucanary --address=/dns4/node-01.do-ams3.status.test.status.im/tcp/30303/p2p/16Uiu2HAkukebeXjTQ9QDBeNDWuGfbaSg79wkkhK4vPocLgR6QFDf --protocol=store --protocol=filter
|
||||
--- not defined yet
|
||||
$ echo $?
|
||||
0
|
||||
```
|
||||
|
||||
50
apps/wakucanary/scripts/run_waku_canary.sh
Executable file
50
apps/wakucanary/scripts/run_waku_canary.sh
Executable file
@ -0,0 +1,50 @@
|
||||
#!/bin/bash
|
||||
|
||||
#this script build the canary app and make basic run to connect to well-known peer via TCP .
|
||||
set -e
|
||||
|
||||
PEER_ADDRESS="/dns4/store-01.do-ams3.status.staging.status.im/tcp/30303/p2p/16Uiu2HAm3xVDaz6SRJ6kErwC21zBJEZjavVXg7VSkoWzaV1aMA3F"
|
||||
PROTOCOL="relay"
|
||||
LOG_DIR="logs"
|
||||
CLUSTER="16"
|
||||
SHARD="64"
|
||||
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
|
||||
LOG_FILE="$LOG_DIR/canary_run_$TIMESTAMP.log"
|
||||
|
||||
mkdir -p "$LOG_DIR"
|
||||
|
||||
echo "Building Waku Canary app..."
|
||||
( cd ../../../ && make wakucanary ) >> "$LOG_FILE" 2>&1
|
||||
|
||||
echo "Running Waku Canary against:"
|
||||
echo " Peer : $PEER_ADDRESS"
|
||||
echo " Protocol: $PROTOCOL"
|
||||
echo "Log file : $LOG_FILE"
|
||||
echo "-----------------------------------"
|
||||
|
||||
{
|
||||
echo "=== Canary Run: $TIMESTAMP ==="
|
||||
echo "Peer : $PEER_ADDRESS"
|
||||
echo "Protocol : $PROTOCOL"
|
||||
echo "LogLevel : DEBUG"
|
||||
echo "-----------------------------------"
|
||||
../../../build/wakucanary \
|
||||
--address="$PEER_ADDRESS" \
|
||||
--protocol="$PROTOCOL" \
|
||||
--cluster-id="$CLUSTER"\
|
||||
--shard="$SHARD"\
|
||||
--log-level=DEBUG
|
||||
echo "-----------------------------------"
|
||||
echo "Exit code: $?"
|
||||
} 2>&1 | tee "$LOG_FILE"
|
||||
|
||||
EXIT_CODE=${PIPESTATUS[0]}
|
||||
|
||||
|
||||
if [ $EXIT_CODE -eq 0 ]; then
|
||||
echo "SUCCESS: Connected to peer and protocol '$PROTOCOL' is supported."
|
||||
else
|
||||
echo "FAILURE: Could not connect or protocol '$PROTOCOL' is unsupported."
|
||||
fi
|
||||
|
||||
exit $EXIT_CODE
|
||||
46
apps/wakucanary/scripts/test_protocols.sh
Executable file
46
apps/wakucanary/scripts/test_protocols.sh
Executable file
@ -0,0 +1,46 @@
|
||||
#!/bin/bash
|
||||
|
||||
# === Configuration ===
|
||||
WAKUCANARY_BINARY="../../../build/wakucanary"
|
||||
PEER_ADDRESS="/dns4/store-01.do-ams3.status.staging.status.im/tcp/30303/p2p/16Uiu2HAm3xVDaz6SRJ6kErwC21zBJEZjavVXg7VSkoWzaV1aMA3F"
|
||||
TIMEOUT=5
|
||||
LOG_LEVEL="info"
|
||||
PROTOCOLS=("store" "relay" "lightpush" "filter")
|
||||
|
||||
# === Logging Setup ===
|
||||
LOG_DIR="logs"
|
||||
mkdir -p "$LOG_DIR"
|
||||
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
|
||||
LOG_FILE="$LOG_DIR/ping_test_$TIMESTAMP.log"
|
||||
|
||||
echo "Building Waku Canary app..."
|
||||
( cd ../../../ && make wakucanary ) >> "$LOG_FILE" 2>&1
|
||||
|
||||
echo "Protocol Support Test - $TIMESTAMP" | tee -a "$LOG_FILE"
|
||||
echo "Peer: $PEER_ADDRESS" | tee -a "$LOG_FILE"
|
||||
echo "---------------------------------------" | tee -a "$LOG_FILE"
|
||||
|
||||
# === Protocol Testing Loop ===
|
||||
for PROTOCOL in "${PROTOCOLS[@]}"; do
|
||||
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
|
||||
LOG_FILE="$LOG_DIR/ping_test_${PROTOCOL}_$TIMESTAMP.log"
|
||||
|
||||
{
|
||||
echo "=== Canary Run: $TIMESTAMP ==="
|
||||
echo "Peer : $PEER_ADDRESS"
|
||||
echo "Protocol : $PROTOCOL"
|
||||
echo "LogLevel : DEBUG"
|
||||
echo "-----------------------------------"
|
||||
$WAKUCANARY_BINARY \
|
||||
--address="$PEER_ADDRESS" \
|
||||
--protocol="$PROTOCOL" \
|
||||
--log-level=DEBUG
|
||||
echo "-----------------------------------"
|
||||
echo "Exit code: $?"
|
||||
} 2>&1 | tee "$LOG_FILE"
|
||||
|
||||
echo "✅ Log saved to: $LOG_FILE"
|
||||
echo ""
|
||||
done
|
||||
|
||||
echo "All protocol checks completed. Log saved to: $LOG_FILE"
|
||||
51
apps/wakucanary/scripts/web_socket.sh
Executable file
51
apps/wakucanary/scripts/web_socket.sh
Executable file
@ -0,0 +1,51 @@
|
||||
#!/bin/bash
|
||||
|
||||
#this script build the canary app and make basic run to connect to well-known peer via TCP .
|
||||
set -e
|
||||
|
||||
PEER_ADDRESS="/ip4/127.0.0.1/tcp/7777/ws/p2p/16Uiu2HAm4ng2DaLPniRoZtMQbLdjYYWnXjrrJkGoXWCoBWAdn1tu"
|
||||
PROTOCOL="relay"
|
||||
LOG_DIR="logs"
|
||||
CLUSTER="16"
|
||||
SHARD="64"
|
||||
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
|
||||
LOG_FILE="$LOG_DIR/canary_run_$TIMESTAMP.log"
|
||||
|
||||
mkdir -p "$LOG_DIR"
|
||||
|
||||
echo "Building Waku Canary app..."
|
||||
( cd ../../../ && make wakucanary ) >> "$LOG_FILE" 2>&1
|
||||
|
||||
|
||||
echo "Running Waku Canary against:"
|
||||
echo " Peer : $PEER_ADDRESS"
|
||||
echo " Protocol: $PROTOCOL"
|
||||
echo "Log file : $LOG_FILE"
|
||||
echo "-----------------------------------"
|
||||
|
||||
{
|
||||
echo "=== Canary Run: $TIMESTAMP ==="
|
||||
echo "Peer : $PEER_ADDRESS"
|
||||
echo "Protocol : $PROTOCOL"
|
||||
echo "LogLevel : DEBUG"
|
||||
echo "-----------------------------------"
|
||||
../../../build/wakucanary \
|
||||
--address="$PEER_ADDRESS" \
|
||||
--protocol="$PROTOCOL" \
|
||||
--cluster-id="$CLUSTER"\
|
||||
--shard="$SHARD"\
|
||||
--log-level=DEBUG
|
||||
echo "-----------------------------------"
|
||||
echo "Exit code: $?"
|
||||
} 2>&1 | tee "$LOG_FILE"
|
||||
|
||||
EXIT_CODE=${PIPESTATUS[0]}
|
||||
|
||||
|
||||
if [ $EXIT_CODE -eq 0 ]; then
|
||||
echo "SUCCESS: Connected to peer and protocol '$PROTOCOL' is supported."
|
||||
else
|
||||
echo "FAILURE: Could not connect or protocol '$PROTOCOL' is unsupported."
|
||||
fi
|
||||
|
||||
exit $EXIT_CODE
|
||||
43
apps/wakucanary/scripts/web_socket_certitficate.sh
Normal file
43
apps/wakucanary/scripts/web_socket_certitficate.sh
Normal file
@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
|
||||
WAKUCANARY_BINARY="../../../build/wakucanary"
|
||||
NODE_PORT=60000
|
||||
WSS_PORT=$((NODE_PORT + 1000))
|
||||
PEER_ID="16Uiu2HAmB6JQpewXScGoQ2syqmimbe4GviLxRwfsR8dCpwaGBPSE"
|
||||
PROTOCOL="relay"
|
||||
KEY_PATH="./certs/client.key"
|
||||
CERT_PATH="./certs/client.crt"
|
||||
LOG_DIR="logs"
|
||||
mkdir -p "$LOG_DIR"
|
||||
|
||||
PEER_ADDRESS="/ip4/127.0.0.1/tcp/$WSS_PORT/wss/p2p/$PEER_ID"
|
||||
TIMESTAMP=$(date +"%Y-%m-%d_%H-%M-%S")
|
||||
LOG_FILE="$LOG_DIR/wss_cert_test_$TIMESTAMP.log"
|
||||
|
||||
echo "Building Waku Canary app..."
|
||||
( cd ../../../ && make wakucanary ) >> "$LOG_FILE" 2>&1
|
||||
|
||||
{
|
||||
echo "=== Canary WSS + Cert Test ==="
|
||||
echo "Timestamp : $TIMESTAMP"
|
||||
echo "Node Port : $NODE_PORT"
|
||||
echo "WSS Port : $WSS_PORT"
|
||||
echo "Peer ID : $PEER_ID"
|
||||
echo "Protocol : $PROTOCOL"
|
||||
echo "Key Path : $KEY_PATH"
|
||||
echo "Cert Path : $CERT_PATH"
|
||||
echo "Address : $PEER_ADDRESS"
|
||||
echo "------------------------------------------"
|
||||
|
||||
$WAKUCANARY_BINARY \
|
||||
--address="$PEER_ADDRESS" \
|
||||
--protocol="$PROTOCOL" \
|
||||
--log-level=DEBUG \
|
||||
--websocket-secure-key-path="$KEY_PATH" \
|
||||
--websocket-secure-cert-path="$CERT_PATH"
|
||||
|
||||
echo "------------------------------------------"
|
||||
echo "Exit code: $?"
|
||||
} 2>&1 | tee "$LOG_FILE"
|
||||
|
||||
echo "✅ Log saved to: $LOG_FILE"
|
||||
@ -28,6 +28,7 @@ const ProtocolsTable = {
|
||||
"rendezvous": "/rendezvous/",
|
||||
"ipfs-ping": "/ipfs/ping/",
|
||||
"peer-exchange": "/vac/waku/peer-exchange/",
|
||||
"mix": "mix/1.0.0",
|
||||
}.toTable
|
||||
|
||||
const WebSocketPortOffset = 1000
|
||||
@ -122,7 +123,7 @@ proc areProtocolsSupported(
|
||||
|
||||
for rawProtocol in toValidateProtocols:
|
||||
let protocolTag = ProtocolsTable[rawProtocol]
|
||||
debug "Checking if protocol is supported", expected_protocol_tag = protocolTag
|
||||
info "Checking if protocol is supported", expected_protocol_tag = protocolTag
|
||||
|
||||
var protocolSupported = false
|
||||
for nodeProtocol in nodeProtocols:
|
||||
@ -142,16 +143,18 @@ proc areProtocolsSupported(
|
||||
|
||||
proc pingNode(
|
||||
node: WakuNode, peerInfo: RemotePeerInfo
|
||||
): Future[void] {.async, gcsafe.} =
|
||||
): Future[bool] {.async, gcsafe.} =
|
||||
try:
|
||||
let conn = await node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
|
||||
let pingDelay = await node.libp2pPing.ping(conn)
|
||||
info "Peer response time (ms)", peerId = peerInfo.peerId, ping = pingDelay.millis
|
||||
return true
|
||||
except CatchableError:
|
||||
var msg = getCurrentExceptionMsg()
|
||||
if msg == "Future operation cancelled!":
|
||||
msg = "timedout"
|
||||
error "Failed to ping the peer", peer = peerInfo, err = msg
|
||||
return false
|
||||
|
||||
proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
|
||||
let conf: WakuCanaryConf = WakuCanaryConf.load()
|
||||
@ -180,13 +183,10 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
|
||||
protocols = conf.protocols,
|
||||
logLevel = conf.logLevel
|
||||
|
||||
let peerRes = parsePeerInfo(conf.address)
|
||||
if peerRes.isErr():
|
||||
error "Couldn't parse 'conf.address'", error = peerRes.error
|
||||
let peer = parsePeerInfo(conf.address).valueOr:
|
||||
error "Couldn't parse 'conf.address'", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let peer = peerRes.value
|
||||
|
||||
let
|
||||
nodeKey = crypto.PrivateKey.random(Secp256k1, rng[])[]
|
||||
bindIp = parseIpAddress("0.0.0.0")
|
||||
@ -224,13 +224,9 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
|
||||
error "could not initialize ENR with shards", error
|
||||
quit(QuitFailure)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
if isWss and
|
||||
(conf.websocketSecureKeyPath.len == 0 or conf.websocketSecureCertPath.len == 0):
|
||||
@ -256,7 +252,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
|
||||
error "failed to mount libp2p ping protocol: " & getCurrentExceptionMsg()
|
||||
quit(QuitFailure)
|
||||
|
||||
node.mountMetadata(conf.clusterId).isOkOr:
|
||||
node.mountMetadata(conf.clusterId, conf.shards).isOkOr:
|
||||
error "failed to mount metadata protocol", error
|
||||
quit(QuitFailure)
|
||||
|
||||
@ -274,8 +270,13 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
|
||||
let lp2pPeerStore = node.switch.peerStore
|
||||
let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId]
|
||||
|
||||
var pingSuccess = true
|
||||
if conf.ping:
|
||||
discard await pingFut
|
||||
try:
|
||||
pingSuccess = await pingFut
|
||||
except CatchableError as exc:
|
||||
pingSuccess = false
|
||||
error "Ping operation failed or timed out", error = exc.msg
|
||||
|
||||
if conStatus in [Connected, CanConnect]:
|
||||
let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId]
|
||||
@ -284,6 +285,11 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
|
||||
error "Not all protocols are supported",
|
||||
expected = conf.protocols, supported = nodeProtocols
|
||||
quit(QuitFailure)
|
||||
|
||||
# Check ping result if ping was enabled
|
||||
if conf.ping and not pingSuccess:
|
||||
error "Node is reachable and supports protocols but ping failed - connection may be unstable"
|
||||
quit(QuitFailure)
|
||||
elif conStatus == CannotConnect:
|
||||
error "Could not connect", peerId = peer.peerId
|
||||
quit(QuitFailure)
|
||||
|
||||
@ -9,14 +9,13 @@ import
|
||||
system/ansi_c,
|
||||
libp2p/crypto/crypto
|
||||
import
|
||||
../../tools/rln_keystore_generator/rln_keystore_generator,
|
||||
../../tools/rln_db_inspector/rln_db_inspector,
|
||||
../../tools/[rln_keystore_generator/rln_keystore_generator, confutils/cli_args],
|
||||
waku/[
|
||||
common/logging,
|
||||
factory/external_config,
|
||||
factory/waku,
|
||||
node/health_monitor,
|
||||
waku_api/rest/builder as rest_server_builder,
|
||||
rest_api/endpoint/builder as rest_server_builder,
|
||||
waku_core/message/default_values,
|
||||
]
|
||||
|
||||
logScope:
|
||||
@ -48,15 +47,12 @@ when isMainModule:
|
||||
of generateRlnKeystore:
|
||||
let conf = wakuNodeConf.toKeystoreGeneratorConf()
|
||||
doRlnKeystoreGenerator(conf)
|
||||
of inspectRlnDb:
|
||||
let conf = wakuNodeConf.toInspectRlnDbConf()
|
||||
doInspectRlnDb(conf)
|
||||
of noCommand:
|
||||
let conf = wakuNodeConf.toWakuConf().valueOr:
|
||||
error "Waku configuration failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var waku = Waku.new(conf).valueOr:
|
||||
var waku = (waitFor Waku.new(conf)).valueOr:
|
||||
error "Waku initialization failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
@ -64,7 +60,7 @@ when isMainModule:
|
||||
error "Starting waku failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
debug "Setting up shutdown hooks"
|
||||
info "Setting up shutdown hooks"
|
||||
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
|
||||
await waku.stop()
|
||||
quit(QuitSuccess)
|
||||
|
||||
@ -2,11 +2,19 @@
|
||||
library 'status-jenkins-lib@v1.8.17'
|
||||
|
||||
pipeline {
|
||||
agent { label 'linux' }
|
||||
agent {
|
||||
docker {
|
||||
label 'linuxcontainer'
|
||||
image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0'
|
||||
args '--volume=/var/run/docker.sock:/var/run/docker.sock ' +
|
||||
'--user jenkins'
|
||||
}
|
||||
}
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
timeout(time: 20, unit: 'MINUTES')
|
||||
disableRestartFromStage()
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
daysToKeepStr: '30',
|
||||
|
||||
@ -36,6 +36,7 @@ pipeline {
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableRestartFromStage()
|
||||
/* Prevent Jenkins jobs from running forever */
|
||||
timeout(time: 30, unit: 'MINUTES')
|
||||
/* Limit builds retained. */
|
||||
|
||||
@ -2,10 +2,18 @@
|
||||
library 'status-jenkins-lib@v1.8.17'
|
||||
|
||||
pipeline {
|
||||
agent { label 'linux' }
|
||||
agent {
|
||||
docker {
|
||||
label 'linuxcontainer'
|
||||
image 'harbor.status.im/infra/ci-build-containers:linux-base-1.0.0'
|
||||
args '--volume=/var/run/docker.sock:/var/run/docker.sock ' +
|
||||
'--user jenkins'
|
||||
}
|
||||
}
|
||||
|
||||
options {
|
||||
timestamps()
|
||||
disableRestartFromStage()
|
||||
timeout(time: 20, unit: 'MINUTES')
|
||||
buildDiscarder(logRotator(
|
||||
numToKeepStr: '10',
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
# Dockerfile to build a distributable container image from pre-existing binaries
|
||||
FROM debian:stable-slim AS prod
|
||||
FROM debian:bookworm-slim AS prod
|
||||
|
||||
ARG MAKE_TARGET=wakunode2
|
||||
|
||||
@ -13,12 +13,9 @@ EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apt-get update &&\
|
||||
apt-get install -y libpcre3 libpq-dev curl iproute2 wget dnsutils &&\
|
||||
apt-get install -y libpq-dev curl iproute2 wget dnsutils &&\
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
|
||||
# Copy to separate location to accomodate different MAKE_TARGET values
|
||||
ADD ./build/$MAKE_TARGET /usr/local/bin/
|
||||
|
||||
|
||||
@ -14,12 +14,9 @@ EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apt-get update &&\
|
||||
apt-get install -y libpcre3 libpq-dev curl iproute2 wget jq dnsutils &&\
|
||||
apt-get install -y libpq-dev curl iproute2 wget jq dnsutils &&\
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
|
||||
# Copy to separate location to accomodate different MAKE_TARGET values
|
||||
ADD ./build/$MAKE_TARGET /usr/local/bin/
|
||||
|
||||
|
||||
@ -38,6 +38,9 @@ A particular OpenAPI spec can be easily imported into [Postman](https://www.post
|
||||
curl http://localhost:8645/debug/v1/info -s | jq
|
||||
```
|
||||
|
||||
### Store API
|
||||
|
||||
The `page_size` flag in the Store API has a default value of 20 and a max value of 100.
|
||||
|
||||
### Node configuration
|
||||
Find details [here](https://github.com/waku-org/nwaku/tree/master/docs/operators/how-to/configure-rest-api.md)
|
||||
|
||||
@ -6,44 +6,52 @@ For more context, see https://trunkbaseddevelopment.com/branch-for-release/
|
||||
|
||||
## How to do releases
|
||||
|
||||
### Before release
|
||||
### Prerequisites
|
||||
|
||||
- All issues under the corresponding release [milestone](https://github.com/waku-org/nwaku/milestones) have been closed or, after consultation, deferred to the next release.
|
||||
- All submodules are up to date.
|
||||
> Updating submodules requires a PR (and very often several "fixes" to maintain compatibility with the changes in submodules). That PR process must be done and merged a couple of days before the release.
|
||||
|
||||
Ensure all items in this list are ticked:
|
||||
- [ ] All issues under the corresponding release [milestone](https://github.com/waku-org/nwaku/milestones) has been closed or, after consultation, deferred to a next release.
|
||||
- [ ] All submodules are up to date.
|
||||
> **IMPORTANT:** Updating submodules requires a PR (and very often several "fixes" to maintain compatibility with the changes in submodules). That PR process must be done and merged a couple of days before the release.
|
||||
> In case the submodules update has a low effort and/or risk for the release, follow the ["Update submodules"](./git-submodules.md) instructions.
|
||||
> If the effort or risk is too high, consider postponing the submodules upgrade for the subsequent release or delaying the current release until the submodules updates are included in the release candidate.
|
||||
- [ ] The [js-waku CI tests](https://github.com/waku-org/js-waku/actions/workflows/ci.yml) pass against the release candidate (i.e. nwaku latest `master`).
|
||||
> **NOTE:** This serves as a basic regression test against typical clients of nwaku.
|
||||
> The specific job that needs to pass is named `node_with_nwaku_master`.
|
||||
|
||||
### Performing the release
|
||||
> If the effort or risk is too high, consider postponing the submodules upgrade for the subsequent release or delaying the current release until the submodules updates are included in the release candidate.
|
||||
|
||||
### Release types
|
||||
|
||||
- **Full release**: follow the entire [Release process](#release-process--step-by-step).
|
||||
|
||||
- **Beta release**: skip just `6a` and `6c` steps from [Release process](#release-process--step-by-step).
|
||||
|
||||
- Choose the appropriate release process based on the release type:
|
||||
- [Full Release](../../.github/ISSUE_TEMPLATE/prepare_full_release.md)
|
||||
- [Beta Release](../../.github/ISSUE_TEMPLATE/prepare_beta_release.md)
|
||||
|
||||
### Release process ( step by step )
|
||||
|
||||
1. Checkout a release branch from master
|
||||
|
||||
```
|
||||
git checkout -b release/v0.1.0
|
||||
git checkout -b release/v0.X.0
|
||||
```
|
||||
|
||||
1. Update `CHANGELOG.md` and ensure it is up to date. Use the helper Make target to get PR based release-notes/changelog update.
|
||||
2. Update `CHANGELOG.md` and ensure it is up to date. Use the helper Make target to get PR based release-notes/changelog update.
|
||||
|
||||
```
|
||||
make release-notes
|
||||
```
|
||||
|
||||
1. Create a release-candidate tag with the same name as release and `-rc.N` suffix a few days before the official release and push it
|
||||
3. Create a release-candidate tag with the same name as release and `-rc.N` suffix a few days before the official release and push it
|
||||
|
||||
```
|
||||
git tag -as v0.1.0-rc.0 -m "Initial release."
|
||||
git push origin v0.1.0-rc.0
|
||||
git tag -as v0.X.0-rc.0 -m "Initial release."
|
||||
git push origin v0.X.0-rc.0
|
||||
```
|
||||
|
||||
This will trigger a [workflow](../../.github/workflows/pre-release.yml) which will build RC artifacts and create and publish a Github release
|
||||
This will trigger a [workflow](../../.github/workflows/pre-release.yml) which will build RC artifacts and create and publish a GitHub release
|
||||
|
||||
1. Open a PR from the release branch for others to review the included changes and the release-notes
|
||||
4. Open a PR from the release branch for others to review the included changes and the release-notes
|
||||
|
||||
1. In case additional changes are needed, create a new RC tag
|
||||
5. In case additional changes are needed, create a new RC tag
|
||||
|
||||
Make sure the new tag is associated
|
||||
with CHANGELOG update.
|
||||
@ -52,25 +60,57 @@ Ensure all items in this list are ticked:
|
||||
# Make changes, rebase and create new tag
|
||||
# Squash to one commit and make a nice commit message
|
||||
git rebase -i origin/master
|
||||
git tag -as v0.1.0-rc.1 -m "Initial release."
|
||||
git push origin v0.1.0-rc.1
|
||||
git tag -as v0.X.0-rc.1 -m "Initial release."
|
||||
git push origin v0.X.0-rc.1
|
||||
```
|
||||
|
||||
1. Validate the release. For the release validation process, please refer to the following [guide](https://www.notion.so/Release-Process-61234f335b904cd0943a5033ed8f42b4#47af557e7f9744c68fdbe5240bf93ca9)
|
||||
Similarly use v0.X.0-rc.2, v0.X.0-rc.3 etc. for additional RC tags.
|
||||
|
||||
1. Once the release-candidate has been validated, create a final release tag and push it.
|
||||
We also need to merge release branch back to master as a final step.
|
||||
6. **Validation of release candidate**
|
||||
|
||||
6a. **Automated testing**
|
||||
- Ensure all the unit tests (specifically js-waku tests) are green against the release candidate.
|
||||
- Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams.
|
||||
|
||||
> We need an additional report like [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f) specifically from the DST team.
|
||||
|
||||
6b. **Waku fleet testing**
|
||||
- Start job on `waku.sandbox` and `waku.test` [Deployment job](https://ci.infra.status.im/job/nim-waku/), wait for completion of the job. If it fails, then debug it.
|
||||
- After completion, disable [deployment job](https://ci.infra.status.im/job/nim-waku/) so that its version is not updated on every merge to `master`.
|
||||
- Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate version.
|
||||
- Check if the image is created at [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
|
||||
- Search _Kibana_ logs from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test` and `waku.sandbox`.
|
||||
- Most relevant logs are `(fleet: "waku.test" AND message: "SIGSEGV")` OR `(fleet: "waku.sandbox" AND message: "SIGSEGV")`.
|
||||
- Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit.
|
||||
|
||||
6c. **Status fleet testing**
|
||||
- Deploy release candidate to `status.staging`
|
||||
- Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue.
|
||||
- Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client.
|
||||
- 1:1 Chats with each other
|
||||
- Send and receive messages in a community
|
||||
- Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store
|
||||
- Perform checks based on _end-user impact_.
|
||||
- Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues from their Discord server or [Status community](https://status.app) (not a blocking point).
|
||||
- Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested.
|
||||
- Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`.
|
||||
- Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC.
|
||||
- **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities.
|
||||
|
||||
7. Once the release-candidate has been validated, create a final release tag and push it.
|
||||
We also need to merge the release branch back into master as a final step.
|
||||
|
||||
```
|
||||
git checkout release/v0.1.0
|
||||
git tag -as v0.1.0 -m "Initial release."
|
||||
git push origin v0.1.0
|
||||
git checkout release/v0.X.0
|
||||
git tag -as v0.X.0 -m "final release." (use v0.X.0-beta as the tag if you are creating a beta release)
|
||||
git push origin v0.X.0
|
||||
git switch master
|
||||
git pull
|
||||
git merge release/v0.1.0
|
||||
git merge release/v0.X.0
|
||||
```
|
||||
8. Update `waku-rust-bindings`, `waku-simulator` and `nwaku-compose` to use the new release.
|
||||
|
||||
1. Create a [Github release](https://github.com/waku-org/nwaku/releases) from the release tag.
|
||||
9. Create a [GitHub release](https://github.com/waku-org/nwaku/releases) from the release tag.
|
||||
|
||||
* Add binaries produced by the ["Upload Release Asset"](https://github.com/waku-org/nwaku/actions/workflows/release-assets.yml) workflow. Where possible, test the binaries before uploading to the release.
|
||||
|
||||
@ -80,22 +120,10 @@ We also need to merge release branch back to master as a final step.
|
||||
2. Deploy the release image to [Dockerhub](https://hub.docker.com/r/wakuorg/nwaku) by triggering [the manual Jenkins deployment job](https://ci.infra.status.im/job/nim-waku/job/docker-manual/).
|
||||
> Ensure the following build parameters are set:
|
||||
> - `MAKE_TARGET`: `wakunode2`
|
||||
> - `IMAGE_TAG`: the release tag (e.g. `v0.16.0`)
|
||||
> - `IMAGE_TAG`: the release tag (e.g. `v0.36.0`)
|
||||
> - `IMAGE_NAME`: `wakuorg/nwaku`
|
||||
> - `NIMFLAGS`: `--colors:off -d:disableMarchNative -d:chronicles_colors:none -d:postgres`
|
||||
> - `GIT_REF` the release tag (e.g. `v0.16.0`)
|
||||
3. Update the default nwaku image in [nwaku-compose](https://github.com/waku-org/nwaku-compose/blob/master/docker-compose.yml)
|
||||
4. Deploy the release to appropriate fleets:
|
||||
- Inform clients
|
||||
> **NOTE:** known clients are currently using some version of js-waku, go-waku, nwaku or waku-rs.
|
||||
> Clients are reachable via the corresponding channels on the Vac Discord server.
|
||||
> It should be enough to inform clients on the `#nwaku` and `#announce` channels on Discord.
|
||||
> Informal conversations with specific repo maintainers are often part of this process.
|
||||
- Check if nwaku configuration parameters changed. If so [update fleet configuration](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) in [infra-nim-waku](https://github.com/status-im/infra-nim-waku)
|
||||
- Deploy release to the `waku.sandbox` fleet from [Jenkins](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
|
||||
- Ensure that nodes successfully start up and monitor health using [Grafana](https://grafana.infra.status.im/d/qrp_ZCTGz/nim-waku-v2?orgId=1) and [Kibana](https://kibana.infra.status.im/goto/a7728e70-eb26-11ec-81d1-210eb3022c76).
|
||||
- If necessary, revert by deploying the previous release. Download logs and open a bug report issue.
|
||||
5. Submit a PR to merge the release branch back to `master`. Make sure you use the option `Merge pull request (Create a merge commit)` to perform such merge.
|
||||
> - `GIT_REF` the release tag (e.g. `v0.36.0`)
|
||||
|
||||
### Performing a patch release
|
||||
|
||||
@ -116,4 +144,14 @@ We also need to merge release branch back to master as a final step.
|
||||
|
||||
4. Once the release-candidate has been validated and changelog PR got merged, cherry-pick the changelog update from master to the release branch. Create a final release tag and push it.
|
||||
|
||||
5. Create a [Github release](https://github.com/waku-org/nwaku/releases) from the release tag and follow the same post-release process as usual.
|
||||
5. Create a [GitHub release](https://github.com/waku-org/nwaku/releases) from the release tag and follow the same post-release process as usual.
|
||||
|
||||
### Links
|
||||
|
||||
- [Release process](https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md)
|
||||
- [Release notes](https://github.com/waku-org/nwaku/blob/master/CHANGELOG.md)
|
||||
- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64)
|
||||
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
|
||||
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
|
||||
- [Fleets](https://fleets.waku.org/)
|
||||
- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab)
|
||||
@ -9,7 +9,6 @@ The following command line options are available:
|
||||
```
|
||||
--dns-discovery Enable DNS Discovery
|
||||
--dns-discovery-url URL for DNS node list in format 'enrtree://<key>@<fqdn>'
|
||||
--dns-discovery-name-server DNS name server IPs to query. Argument may be repeated.
|
||||
```
|
||||
|
||||
- `--dns-discovery` is used to enable DNS discovery on the node.
|
||||
@ -17,8 +16,6 @@ Waku DNS discovery is disabled by default.
|
||||
- `--dns-discovery-url` is mandatory if DNS discovery is enabled.
|
||||
It contains the URL for the node list.
|
||||
The URL must be in the format `enrtree://<key>@<fqdn>` where `<fqdn>` is the fully qualified domain name and `<key>` is the base32 encoding of the compressed 32-byte public key that signed the list at that location.
|
||||
- `--dns-discovery-name-server` is optional and contains the IP(s) of the DNS name servers to query.
|
||||
If left unspecified, the Cloudflare servers `1.1.1.1` and `1.0.0.1` will be used by default.
|
||||
|
||||
A node will attempt connection to all discovered nodes.
|
||||
|
||||
|
||||
@ -1,4 +1,3 @@
|
||||
|
||||
# Configure a REST API node
|
||||
|
||||
A subset of the node configuration can be used to modify the behaviour of the HTTP REST API.
|
||||
@ -21,3 +20,5 @@ Example:
|
||||
```shell
|
||||
wakunode2 --rest=true
|
||||
```
|
||||
|
||||
The `page_size` flag in the Store API has a default value of 20 and a max value of 100.
|
||||
|
||||
@ -33,8 +33,8 @@ make wakunode2
|
||||
Follow [Step 10](../droplet-quickstart.md#10-run-nwaku) of the [droplet quickstart](../droplet-quickstart.md) guide, while replacing the run command with -
|
||||
|
||||
```bash
|
||||
export SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Sepolia Node>
|
||||
export RLN_RELAY_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4" # Replace this with any compatible implementation
|
||||
export LINEA_SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Linea Sepolia Node>
|
||||
export RLN_RELAY_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6" # Replace this with any compatible implementation
|
||||
$WAKUNODE_DIR/wakunode2 \
|
||||
--store:true \
|
||||
--persist-messages \
|
||||
@ -44,7 +44,7 @@ $WAKUNODE_DIR/wakunode2 \
|
||||
--rln-relay:true \
|
||||
--rln-relay-dynamic:true \
|
||||
--rln-relay-eth-contract-address:"$RLN_RELAY_CONTRACT_ADDRESS" \
|
||||
--rln-relay-eth-client-address:"$SEPOLIA_HTTP_NODE_ADDRESS"
|
||||
--rln-relay-eth-client-address:"$LINEA_SEPOLIA_HTTP_NODE_ADDRESS"
|
||||
```
|
||||
|
||||
OR
|
||||
@ -53,9 +53,9 @@ If you are running the nwaku node within docker, follow [Step 2](../docker-quick
|
||||
|
||||
```bash
|
||||
export WAKU_FLEET=<entree of the fleet>
|
||||
export SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Sepolia Node>
|
||||
export RLN_RELAY_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4" # Replace this with any compatible implementation
|
||||
docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.20.0 \
|
||||
export LINEA_SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Sepolia Node>
|
||||
export RLN_RELAY_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6" # Replace this with any compatible implementation
|
||||
docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.36.0 \
|
||||
--dns-discovery:true \
|
||||
--dns-discovery-url:"$WAKU_FLEET" \
|
||||
--discv5-discovery \
|
||||
@ -63,7 +63,7 @@ docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.20.0 \
|
||||
--rln-relay:true \
|
||||
--rln-relay-dynamic:true \
|
||||
--rln-relay-eth-contract-address:"$RLN_RELAY_CONTRACT_ADDRESS" \
|
||||
--rln-relay-eth-client-address:"$SEPOLIA_HTTP_NODE_ADDRESS"
|
||||
--rln-relay-eth-client-address:"$LINEA_SEPOLIA_HTTP_NODE_ADDRESS"
|
||||
```
|
||||
|
||||
> Note: You can choose to keep connections to other nodes alive by adding the `--keep-alive` flag.
|
||||
@ -74,7 +74,7 @@ runtime arguments -
|
||||
1. `--rln-relay`: Allows waku-rln-relay to be mounted into the setup of the nwaku node
|
||||
2. `--rln-relay-dynamic`: Enables waku-rln-relay to connect to an ethereum node to fetch the membership group
|
||||
3. `--rln-relay-eth-contract-address`: The contract address of an RLN membership group
|
||||
4. `--rln-relay-eth-client-address`: The HTTP url to a Sepolia ethereum node
|
||||
4. `--rln-relay-eth-client-address`: The HTTP url to a Linea Sepolia ethereum node
|
||||
|
||||
You should now have nwaku running, with RLN enabled!
|
||||
|
||||
|
||||
@ -33,12 +33,10 @@ The following command line options are available for both `wakunode2` or `chat2`
|
||||
```
|
||||
--dns-discovery Enable DNS Discovery
|
||||
--dns-discovery-url URL for DNS node list in format 'enrtree://<key>@<fqdn>'
|
||||
--dns-discovery-name-server DNS name server IPs to query. Argument may be repeated.
|
||||
```
|
||||
|
||||
- `--dns-discovery` is used to enable DNS discovery on the node. Waku DNS discovery is disabled by default.
|
||||
- `--dns-discovery-url` is mandatory if DNS discovery is enabled. It contains the URL for the node list. The URL must be in the format `enrtree://<key>@<fqdn>` where `<fqdn>` is the fully qualified domain name and `<key>` is the base32 encoding of the compressed 32-byte public key that signed the list at that location. See [EIP-1459](https://eips.ethereum.org/EIPS/eip-1459#specification) or the example below to illustrate.
|
||||
- `--dns-discovery-name-server` is optional and contains the IP(s) of the DNS name servers to query. If left unspecified, the Cloudflare servers `1.1.1.1` and `1.0.0.1` will be used by default.
|
||||
|
||||
A node will attempt connection to all discovered nodes.
|
||||
|
||||
@ -63,9 +61,9 @@ Similarly, for `chat2`:
|
||||
|
||||
The node will discover and attempt connection to all `waku.test` nodes during setup procedures.
|
||||
|
||||
To use specific DNS name servers, one or more `--dns-discovery-name-server` arguments can be added:
|
||||
To use specific DNS name servers, one or more `--dns-addrs-name-server` arguments can be added:
|
||||
|
||||
```
|
||||
./build/wakunode2 --dns-discovery:true --dns-discovery-url:enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im --dns-dis
|
||||
covery-name-server:8.8.8.8 --dns-discovery-name-server:8.8.4.4
|
||||
covery-name-server:8.8.8.8 --dns-addrs-name-server:8.8.4.4
|
||||
```
|
||||
|
||||
@ -33,20 +33,33 @@ It operates in two modes:
|
||||
- `sudo apt install libkf5kio-dev`
|
||||
- `sudo apt install libkf5iconthemes-dev`
|
||||
- `make`
|
||||
- On completion, the `bin/heaptrack_gui` and `bin/heaptrack` binaries will get generated.
|
||||
- On completion, the `bin/heaptrack_gui` and `bin/heaptrack` binaries will be generated.
|
||||
- heaptrack: needed to generate the report.
|
||||
- heaptrack_gui: needed to analyse the report.
|
||||
|
||||
## Heaptrack & Nwaku
|
||||
nwaku supports heaptrack but it needs a special compilation setting.
|
||||
nwaku supports heaptrack, but it needs a special compilation setting.
|
||||
|
||||
### Patch Nim compiler to register allocations on Heaptrack
|
||||
|
||||
Currently, we rely on the official Nim repository. So we need to patch the Nim compiler to register allocations and deallocations on Heaptrack.
|
||||
For Nim 2.2.4 version, we created a patch that can be applied as:
|
||||
```bash
|
||||
git apply --directory=vendor/nimbus-build-system/vendor/Nim docs/tutorial/nim.2.2.4_heaptracker_addon.patch
|
||||
git add .
|
||||
git commit -m "Add heaptrack support to Nim compiler - temporary patch"
|
||||
```
|
||||
|
||||
> Until heaptrack support is not available in official Nim, so it is important to keep it in the `nimbus-build-system` repository.
|
||||
> Commit ensures that `make update` will not override the patch unintentionally.
|
||||
|
||||
> We are planning to make it available through an official PR for Nim.
|
||||
|
||||
When the patch is applied, we can build wakunode2 with heaptrack support.
|
||||
|
||||
### Build nwaku with heaptrack support
|
||||
|
||||
The make command should have the 'NIM_COMMIT' setting as:
|
||||
|
||||
`make -j<nproc> NIM_COMMIT=heaptrack_support ...`
|
||||
|
||||
This is to force the `nimbus-build-system` to use the Nim compiler that points at the [heaptrack_support](https://github.com/status-im/nim/tree/heaptrack_support) branch.
|
||||
`make -j<nproc> HEAPTRACKER=1 wakunode2`
|
||||
|
||||
### Create nwaku memory report with heaptrack
|
||||
|
||||
@ -69,9 +82,18 @@ Having Docker properly installed in your machine, do the next:
|
||||
|
||||
- cd to the `nwaku` root folder.
|
||||
- ```sudo make docker-image DOCKER_IMAGE_NAME=docker_repo:docker_tag HEAPTRACKER=1```
|
||||
- alternatively you can use the `docker-quick-image` target, this is faster but creates an ubuntu based image, so your local build environment must match.
|
||||
|
||||
That will create a Docker image with both nwaku and heaptrack. The container's entry point is `ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"]`, so the memory report starts being generated from the beginning.
|
||||
|
||||
#### Notice for using heaptrack supporting image with `docker compose`
|
||||
|
||||
Take care that wakunode2 should be started as
|
||||
```
|
||||
exec /heaptrack/build/bin/heaptrack /usr/bin/wakunode\
|
||||
... all the arguments you want to pass to wakunode ...
|
||||
```
|
||||
|
||||
### Extract report file from a running Docker container
|
||||
Bear in mind that if you restart the container, the previous report will get lost. Therefore, before restarting, it is important to extract it from the container once you consider it has enough information.
|
||||
|
||||
|
||||
44
docs/tutorial/nim.2.2.4_heaptracker_addon.patch
Normal file
44
docs/tutorial/nim.2.2.4_heaptracker_addon.patch
Normal file
@ -0,0 +1,44 @@
|
||||
diff --git a/lib/system/alloc.nim b/lib/system/alloc.nim
|
||||
index e2dd43075..7f8c8e04e 100644
|
||||
--- a/lib/system/alloc.nim
|
||||
+++ b/lib/system/alloc.nim
|
||||
@@ -1,4 +1,4 @@
|
||||
-#
|
||||
+#!fmt: off
|
||||
#
|
||||
# Nim's Runtime Library
|
||||
# (c) Copyright 2012 Andreas Rumpf
|
||||
@@ -862,6 +862,15 @@ when defined(gcDestructors):
|
||||
dec maxIters
|
||||
if it == nil: break
|
||||
|
||||
+when defined(heaptracker):
|
||||
+ const heaptrackLib =
|
||||
+ when defined(heaptracker_inject):
|
||||
+ "libheaptrack_inject.so"
|
||||
+ else:
|
||||
+ "libheaptrack_preload.so"
|
||||
+ proc heaptrack_malloc(a: pointer, size: int) {.cdecl, importc, dynlib: heaptrackLib.}
|
||||
+ proc heaptrack_free(a: pointer) {.cdecl, importc, dynlib: heaptrackLib.}
|
||||
+
|
||||
proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
|
||||
when defined(nimTypeNames):
|
||||
inc(a.allocCounter)
|
||||
@@ -984,6 +993,8 @@ proc rawAlloc(a: var MemRegion, requestedSize: int): pointer =
|
||||
sysAssert(isAccessible(a, result), "rawAlloc 14")
|
||||
sysAssert(allocInv(a), "rawAlloc: end")
|
||||
when logAlloc: cprintf("var pointer_%p = alloc(%ld) # %p\n", result, requestedSize, addr a)
|
||||
+ when defined(heaptracker):
|
||||
+ heaptrack_malloc(result, requestedSize)
|
||||
|
||||
proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer =
|
||||
result = rawAlloc(a, requestedSize)
|
||||
@@ -992,6 +1003,8 @@ proc rawAlloc0(a: var MemRegion, requestedSize: int): pointer =
|
||||
proc rawDealloc(a: var MemRegion, p: pointer) =
|
||||
when defined(nimTypeNames):
|
||||
inc(a.deallocCounter)
|
||||
+ when defined(heaptracker):
|
||||
+ heaptrack_free(p)
|
||||
#sysAssert(isAllocatedPtr(a, p), "rawDealloc: no allocated pointer")
|
||||
sysAssert(allocInv(a), "rawDealloc: begin")
|
||||
var c = pageAddr(p)
|
||||
@ -1,7 +1,7 @@
|
||||
# Spam-protected chat2 application with on-chain group management
|
||||
|
||||
This document is a tutorial on how to run the chat2 application in the spam-protected mode using the Waku-RLN-Relay protocol and with dynamic/on-chain group management.
|
||||
In the on-chain/dynamic group management, the state of the group members i.e., their identity commitment keys is moderated via a membership smart contract deployed on the Sepolia network which is one of the Ethereum test-nets.
|
||||
In the on-chain/dynamic group management, the state of the group members i.e., their identity commitment keys is moderated via a membership smart contract deployed on the Linea Sepolia network which is one of the test-nets.
|
||||
Members can be dynamically added to the group and the group size can grow up to 2^20 members.
|
||||
This differs from the prior test scenarios in which the RLN group was static and the set of members' keys was hardcoded and fixed.
|
||||
|
||||
@ -45,7 +45,7 @@ Run the following command to set up your chat2 client.
|
||||
--content-topic:/toy-chat/3/mingde/proto \
|
||||
--rln-relay:true \
|
||||
--rln-relay-dynamic:true \
|
||||
--rln-relay-eth-contract-address:0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 \
|
||||
--rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \
|
||||
--rln-relay-cred-path:xxx/xx/rlnKeystore.json \
|
||||
--rln-relay-cred-password:xxxx \
|
||||
--rln-relay-eth-client-address:xxxx \
|
||||
@ -58,11 +58,11 @@ In this command
|
||||
- the `rln-relay` flag is set to `true` to enable the Waku-RLN-Relay protocol for spam protection.
|
||||
- the `--rln-relay-dynamic` flag is set to `true` to enable the on-chain mode of Waku-RLN-Relay protocol with dynamic group management.
|
||||
- the `--rln-relay-eth-contract-address` option gets the address of the membership contract.
|
||||
The current address of the contract is `0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4`.
|
||||
You may check the state of the contract on the [Sepolia testnet](https://sepolia.etherscan.io/address/0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4).
|
||||
The current address of the contract is `0xB9cd878C90E49F797B4431fBF4fb333108CB90e6`.
|
||||
You may check the state of the contract on the [Linea Sepolia testnet](https://sepolia.lineascan.build/address/0xB9cd878C90E49F797B4431fBF4fb333108CB90e6).
|
||||
- the `--rln-relay-cred-path` option denotes the path to the keystore file described above
|
||||
- the `--rln-relay-cred-password` option denotes the password to the keystore
|
||||
- the `rln-relay-eth-client-address` is the WebSocket address of the hosted node on the Sepolia testnet.
|
||||
- the `rln-relay-eth-client-address` is the WebSocket address of the hosted node on the Linea Sepolia testnet.
|
||||
You need to replace the `xxxx` with the actual node's address.
|
||||
|
||||
For `rln-relay-eth-client-address`, if you do not know how to obtain it, you may use the following tutorial on the [prerequisites of running on-chain spam-protected chat2](./pre-requisites-of-running-on-chain-spam-protected-chat2.md).
|
||||
@ -166,7 +166,7 @@ You can check this fact by looking at `Bob`'s console, where `message3` is missi
|
||||
|
||||
**Alice**
|
||||
```bash
|
||||
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=1
|
||||
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=1
|
||||
```
|
||||
|
||||
```
|
||||
@ -209,7 +209,7 @@ your rln identity commitment key is: bd093cbf14fb933d53f596c33f98b3df83b7e9f7a19
|
||||
|
||||
**Bob**
|
||||
```bash
|
||||
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-index:1 --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=2
|
||||
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-index:1 --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=2
|
||||
```
|
||||
|
||||
```
|
||||
|
||||
@ -1,36 +0,0 @@
|
||||
# rln-db-inspector
|
||||
|
||||
This document describes how to run and use the `rln-db-inspector` tool.
|
||||
It is meant to be used to debug and fetch the metadata stored in the RLN tree db.
|
||||
|
||||
## Pre-requisites
|
||||
|
||||
1. An existing RLN tree db
|
||||
|
||||
## Usage
|
||||
|
||||
1. First, we compile the binary
|
||||
|
||||
```bash
|
||||
make -j16 wakunode2
|
||||
```
|
||||
This command will fetch the rln static library and link it automatically.
|
||||
|
||||
|
||||
2. Define the arguments you wish to use
|
||||
|
||||
```bash
|
||||
export RLN_TREE_DB_PATH="xxx"
|
||||
```
|
||||
|
||||
3. Run the db inspector
|
||||
|
||||
```bash
|
||||
./build/wakunode2 inspectRlnDb \
|
||||
--rln-relay-tree-path:$RLN_TREE_DB_PATH
|
||||
```
|
||||
|
||||
What this does is -
|
||||
a. loads the tree db from the path provided
|
||||
b. Logs out the metadata, including, number of leaves set, past 5 merkle roots, last synced block number
|
||||
|
||||
@ -21,9 +21,9 @@ It is meant to be used to generate and persist a set of valid RLN credentials to
|
||||
2. Define the arguments you wish to use
|
||||
|
||||
```bash
|
||||
export RPC_URL="https://sepolia.infura.io/v3/..."
|
||||
export RPC_URL="https://linea-sepolia.infura.io/v3/..."
|
||||
export PRIVATE_KEY="0x..."
|
||||
export RLN_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4"
|
||||
export RLN_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6"
|
||||
export RLN_CREDENTIAL_PATH="rlnKeystore.json"
|
||||
export RLN_CREDENTIAL_PASSWORD="xxx"
|
||||
```
|
||||
|
||||
@ -7,9 +7,21 @@ Make all examples.
|
||||
make example2
|
||||
```
|
||||
|
||||
## basic2
|
||||
## Waku API
|
||||
|
||||
TODO
|
||||
Uses the simplified Waku API to create and start a node,
|
||||
you need an RPC endpoint for Linea Sepolia for RLN:
|
||||
|
||||
```console
|
||||
./build/waku_api --ethRpcEndpoint=https://linea-sepolia.infura.io/v3/<your key>
|
||||
```
|
||||
|
||||
If you can't be bothered but still want to see some action,
|
||||
just run the binary and it will use a non-RLN network:
|
||||
|
||||
```console
|
||||
./build/waku_api
|
||||
```
|
||||
|
||||
## publisher/subscriber
|
||||
|
||||
|
||||
@ -19,285 +19,312 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
|
||||
int callback_executed = 0;
|
||||
|
||||
void waitForCallback() {
|
||||
pthread_mutex_lock(&mutex);
|
||||
while (!callback_executed) {
|
||||
pthread_cond_wait(&cond, &mutex);
|
||||
}
|
||||
callback_executed = 0;
|
||||
pthread_mutex_unlock(&mutex);
|
||||
void waitForCallback()
|
||||
{
|
||||
pthread_mutex_lock(&mutex);
|
||||
while (!callback_executed)
|
||||
{
|
||||
pthread_cond_wait(&cond, &mutex);
|
||||
}
|
||||
callback_executed = 0;
|
||||
pthread_mutex_unlock(&mutex);
|
||||
}
|
||||
|
||||
#define WAKU_CALL(call) \
|
||||
do { \
|
||||
int ret = call; \
|
||||
if (ret != 0) { \
|
||||
printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \
|
||||
exit(1); \
|
||||
} \
|
||||
waitForCallback(); \
|
||||
} while (0)
|
||||
#define WAKU_CALL(call) \
|
||||
do \
|
||||
{ \
|
||||
int ret = call; \
|
||||
if (ret != 0) \
|
||||
{ \
|
||||
printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \
|
||||
exit(1); \
|
||||
} \
|
||||
waitForCallback(); \
|
||||
} while (0)
|
||||
|
||||
struct ConfigNode {
|
||||
char host[128];
|
||||
int port;
|
||||
char key[128];
|
||||
int relay;
|
||||
char peers[2048];
|
||||
int store;
|
||||
char storeNode[2048];
|
||||
char storeRetentionPolicy[64];
|
||||
char storeDbUrl[256];
|
||||
int storeVacuum;
|
||||
int storeDbMigration;
|
||||
int storeMaxNumDbConnections;
|
||||
struct ConfigNode
|
||||
{
|
||||
char host[128];
|
||||
int port;
|
||||
char key[128];
|
||||
int relay;
|
||||
char peers[2048];
|
||||
int store;
|
||||
char storeNode[2048];
|
||||
char storeRetentionPolicy[64];
|
||||
char storeDbUrl[256];
|
||||
int storeVacuum;
|
||||
int storeDbMigration;
|
||||
int storeMaxNumDbConnections;
|
||||
};
|
||||
|
||||
// libwaku Context
|
||||
void* ctx;
|
||||
void *ctx;
|
||||
|
||||
// For the case of C language we don't need to store a particular userData
|
||||
void* userData = NULL;
|
||||
void *userData = NULL;
|
||||
|
||||
// Arguments parsing
|
||||
static char doc[] = "\nC example that shows how to use the waku library.";
|
||||
static char args_doc[] = "";
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{ "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
|
||||
{ "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
|
||||
{ "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
|
||||
{ "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
|
||||
{ "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
|
||||
{"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
|
||||
{"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
|
||||
{"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
|
||||
{"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
|
||||
{"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
|
||||
to. (default: \"\") e.g. \"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""},
|
||||
{ 0 }
|
||||
};
|
||||
{0}};
|
||||
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
|
||||
struct ConfigNode *cfgNode = state->input;
|
||||
switch (key) {
|
||||
case 'h':
|
||||
snprintf(cfgNode->host, 128, "%s", arg);
|
||||
break;
|
||||
case 'p':
|
||||
cfgNode->port = atoi(arg);
|
||||
break;
|
||||
case 'k':
|
||||
snprintf(cfgNode->key, 128, "%s", arg);
|
||||
break;
|
||||
case 'r':
|
||||
cfgNode->relay = atoi(arg);
|
||||
break;
|
||||
case 'a':
|
||||
snprintf(cfgNode->peers, 2048, "%s", arg);
|
||||
break;
|
||||
case ARGP_KEY_ARG:
|
||||
if (state->arg_num >= 1) /* Too many arguments. */
|
||||
argp_usage(state);
|
||||
break;
|
||||
case ARGP_KEY_END:
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
struct ConfigNode *cfgNode = state->input;
|
||||
switch (key)
|
||||
{
|
||||
case 'h':
|
||||
snprintf(cfgNode->host, 128, "%s", arg);
|
||||
break;
|
||||
case 'p':
|
||||
cfgNode->port = atoi(arg);
|
||||
break;
|
||||
case 'k':
|
||||
snprintf(cfgNode->key, 128, "%s", arg);
|
||||
break;
|
||||
case 'r':
|
||||
cfgNode->relay = atoi(arg);
|
||||
break;
|
||||
case 'a':
|
||||
snprintf(cfgNode->peers, 2048, "%s", arg);
|
||||
break;
|
||||
case ARGP_KEY_ARG:
|
||||
if (state->arg_num >= 1) /* Too many arguments. */
|
||||
argp_usage(state);
|
||||
break;
|
||||
case ARGP_KEY_END:
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void signal_cond() {
|
||||
pthread_mutex_lock(&mutex);
|
||||
callback_executed = 1;
|
||||
pthread_cond_signal(&cond);
|
||||
pthread_mutex_unlock(&mutex);
|
||||
void signal_cond()
|
||||
{
|
||||
pthread_mutex_lock(&mutex);
|
||||
callback_executed = 1;
|
||||
pthread_cond_signal(&cond);
|
||||
pthread_mutex_unlock(&mutex);
|
||||
}
|
||||
|
||||
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
|
||||
static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0};
|
||||
|
||||
void event_handler(int callerRet, const char* msg, size_t len, void* userData) {
|
||||
if (callerRet == RET_ERR) {
|
||||
printf("Error: %s\n", msg);
|
||||
exit(1);
|
||||
}
|
||||
else if (callerRet == RET_OK) {
|
||||
printf("Receiving event: %s\n", msg);
|
||||
}
|
||||
void event_handler(int callerRet, const char *msg, size_t len, void *userData)
|
||||
{
|
||||
if (callerRet == RET_ERR)
|
||||
{
|
||||
printf("Error: %s\n", msg);
|
||||
exit(1);
|
||||
}
|
||||
else if (callerRet == RET_OK)
|
||||
{
|
||||
printf("Receiving event: %s\n", msg);
|
||||
}
|
||||
|
||||
signal_cond();
|
||||
signal_cond();
|
||||
}
|
||||
|
||||
void on_event_received(int callerRet, const char* msg, size_t len, void* userData) {
|
||||
if (callerRet == RET_ERR) {
|
||||
printf("Error: %s\n", msg);
|
||||
exit(1);
|
||||
}
|
||||
else if (callerRet == RET_OK) {
|
||||
printf("Receiving event: %s\n", msg);
|
||||
}
|
||||
void on_event_received(int callerRet, const char *msg, size_t len, void *userData)
|
||||
{
|
||||
if (callerRet == RET_ERR)
|
||||
{
|
||||
printf("Error: %s\n", msg);
|
||||
exit(1);
|
||||
}
|
||||
else if (callerRet == RET_OK)
|
||||
{
|
||||
printf("Receiving event: %s\n", msg);
|
||||
}
|
||||
}
|
||||
|
||||
char* contentTopic = NULL;
|
||||
void handle_content_topic(int callerRet, const char* msg, size_t len, void* userData) {
|
||||
if (contentTopic != NULL) {
|
||||
free(contentTopic);
|
||||
}
|
||||
char *contentTopic = NULL;
|
||||
void handle_content_topic(int callerRet, const char *msg, size_t len, void *userData)
|
||||
{
|
||||
if (contentTopic != NULL)
|
||||
{
|
||||
free(contentTopic);
|
||||
}
|
||||
|
||||
contentTopic = malloc(len * sizeof(char) + 1);
|
||||
strcpy(contentTopic, msg);
|
||||
signal_cond();
|
||||
contentTopic = malloc(len * sizeof(char) + 1);
|
||||
strcpy(contentTopic, msg);
|
||||
signal_cond();
|
||||
}
|
||||
|
||||
char* publishResponse = NULL;
|
||||
void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userData) {
|
||||
printf("Publish Ok: %s %lu\n", msg, len);
|
||||
char *publishResponse = NULL;
|
||||
void handle_publish_ok(int callerRet, const char *msg, size_t len, void *userData)
|
||||
{
|
||||
printf("Publish Ok: %s %lu\n", msg, len);
|
||||
|
||||
if (publishResponse != NULL) {
|
||||
free(publishResponse);
|
||||
}
|
||||
if (publishResponse != NULL)
|
||||
{
|
||||
free(publishResponse);
|
||||
}
|
||||
|
||||
publishResponse = malloc(len * sizeof(char) + 1);
|
||||
strcpy(publishResponse, msg);
|
||||
publishResponse = malloc(len * sizeof(char) + 1);
|
||||
strcpy(publishResponse, msg);
|
||||
}
|
||||
|
||||
#define MAX_MSG_SIZE 65535
|
||||
|
||||
void publish_message(const char* msg) {
|
||||
char jsonWakuMsg[MAX_MSG_SIZE];
|
||||
char *msgPayload = b64_encode(msg, strlen(msg));
|
||||
void publish_message(const char *msg)
|
||||
{
|
||||
char jsonWakuMsg[MAX_MSG_SIZE];
|
||||
char *msgPayload = b64_encode(msg, strlen(msg));
|
||||
|
||||
WAKU_CALL( waku_content_topic(ctx,
|
||||
"appName",
|
||||
1,
|
||||
"contentTopicName",
|
||||
"encoding",
|
||||
handle_content_topic,
|
||||
userData) );
|
||||
snprintf(jsonWakuMsg,
|
||||
MAX_MSG_SIZE,
|
||||
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
|
||||
msgPayload, contentTopic);
|
||||
WAKU_CALL(waku_content_topic(ctx,
|
||||
handle_content_topic,
|
||||
userData,
|
||||
"appName",
|
||||
1,
|
||||
"contentTopicName",
|
||||
"encoding"));
|
||||
snprintf(jsonWakuMsg,
|
||||
MAX_MSG_SIZE,
|
||||
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
|
||||
msgPayload, contentTopic);
|
||||
|
||||
free(msgPayload);
|
||||
free(msgPayload);
|
||||
|
||||
WAKU_CALL( waku_relay_publish(ctx,
|
||||
"/waku/2/rs/16/32",
|
||||
jsonWakuMsg,
|
||||
10000 /*timeout ms*/,
|
||||
event_handler,
|
||||
userData) );
|
||||
WAKU_CALL(waku_relay_publish(ctx,
|
||||
event_handler,
|
||||
userData,
|
||||
"/waku/2/rs/16/32",
|
||||
jsonWakuMsg,
|
||||
10000 /*timeout ms*/));
|
||||
}
|
||||
|
||||
void show_help_and_exit() {
|
||||
printf("Wrong parameters\n");
|
||||
exit(1);
|
||||
void show_help_and_exit()
|
||||
{
|
||||
printf("Wrong parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
void print_default_pubsub_topic(int callerRet, const char* msg, size_t len, void* userData) {
|
||||
printf("Default pubsub topic: %s\n", msg);
|
||||
signal_cond();
|
||||
void print_default_pubsub_topic(int callerRet, const char *msg, size_t len, void *userData)
|
||||
{
|
||||
printf("Default pubsub topic: %s\n", msg);
|
||||
signal_cond();
|
||||
}
|
||||
|
||||
void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) {
|
||||
printf("Git Version: %s\n", msg);
|
||||
signal_cond();
|
||||
void print_waku_version(int callerRet, const char *msg, size_t len, void *userData)
|
||||
{
|
||||
printf("Git Version: %s\n", msg);
|
||||
signal_cond();
|
||||
}
|
||||
|
||||
// Beginning of UI program logic
|
||||
|
||||
enum PROGRAM_STATE {
|
||||
MAIN_MENU,
|
||||
SUBSCRIBE_TOPIC_MENU,
|
||||
CONNECT_TO_OTHER_NODE_MENU,
|
||||
PUBLISH_MESSAGE_MENU
|
||||
enum PROGRAM_STATE
|
||||
{
|
||||
MAIN_MENU,
|
||||
SUBSCRIBE_TOPIC_MENU,
|
||||
CONNECT_TO_OTHER_NODE_MENU,
|
||||
PUBLISH_MESSAGE_MENU
|
||||
};
|
||||
|
||||
enum PROGRAM_STATE current_state = MAIN_MENU;
|
||||
|
||||
void show_main_menu() {
|
||||
printf("\nPlease, select an option:\n");
|
||||
printf("\t1.) Subscribe to topic\n");
|
||||
printf("\t2.) Connect to other node\n");
|
||||
printf("\t3.) Publish a message\n");
|
||||
void show_main_menu()
|
||||
{
|
||||
printf("\nPlease, select an option:\n");
|
||||
printf("\t1.) Subscribe to topic\n");
|
||||
printf("\t2.) Connect to other node\n");
|
||||
printf("\t3.) Publish a message\n");
|
||||
}
|
||||
|
||||
void handle_user_input() {
|
||||
char cmd[1024];
|
||||
memset(cmd, 0, 1024);
|
||||
int numRead = read(0, cmd, 1024);
|
||||
if (numRead <= 0) {
|
||||
return;
|
||||
}
|
||||
void handle_user_input()
|
||||
{
|
||||
char cmd[1024];
|
||||
memset(cmd, 0, 1024);
|
||||
int numRead = read(0, cmd, 1024);
|
||||
if (numRead <= 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
switch (atoi(cmd))
|
||||
{
|
||||
case SUBSCRIBE_TOPIC_MENU:
|
||||
{
|
||||
printf("Indicate the Pubsubtopic to subscribe:\n");
|
||||
char pubsubTopic[128];
|
||||
scanf("%127s", pubsubTopic);
|
||||
switch (atoi(cmd))
|
||||
{
|
||||
case SUBSCRIBE_TOPIC_MENU:
|
||||
{
|
||||
printf("Indicate the Pubsubtopic to subscribe:\n");
|
||||
char pubsubTopic[128];
|
||||
scanf("%127s", pubsubTopic);
|
||||
|
||||
WAKU_CALL( waku_relay_subscribe(ctx,
|
||||
pubsubTopic,
|
||||
event_handler,
|
||||
userData) );
|
||||
printf("The subscription went well\n");
|
||||
WAKU_CALL(waku_relay_subscribe(ctx,
|
||||
event_handler,
|
||||
userData,
|
||||
pubsubTopic));
|
||||
printf("The subscription went well\n");
|
||||
|
||||
show_main_menu();
|
||||
}
|
||||
show_main_menu();
|
||||
}
|
||||
break;
|
||||
|
||||
case CONNECT_TO_OTHER_NODE_MENU:
|
||||
// printf("Connecting to a node. Please indicate the peer Multiaddress:\n");
|
||||
// printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
|
||||
// char peerAddr[512];
|
||||
// scanf("%511s", peerAddr);
|
||||
// WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
|
||||
show_main_menu();
|
||||
break;
|
||||
|
||||
case CONNECT_TO_OTHER_NODE_MENU:
|
||||
printf("Connecting to a node. Please indicate the peer Multiaddress:\n");
|
||||
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
|
||||
char peerAddr[512];
|
||||
scanf("%511s", peerAddr);
|
||||
WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
|
||||
show_main_menu();
|
||||
case PUBLISH_MESSAGE_MENU:
|
||||
{
|
||||
printf("Type the message to publish:\n");
|
||||
char msg[1024];
|
||||
scanf("%1023s", msg);
|
||||
|
||||
publish_message(msg);
|
||||
|
||||
show_main_menu();
|
||||
}
|
||||
break;
|
||||
|
||||
case MAIN_MENU:
|
||||
break;
|
||||
|
||||
case PUBLISH_MESSAGE_MENU:
|
||||
{
|
||||
printf("Type the message to publish:\n");
|
||||
char msg[1024];
|
||||
scanf("%1023s", msg);
|
||||
|
||||
publish_message(msg);
|
||||
|
||||
show_main_menu();
|
||||
}
|
||||
break;
|
||||
|
||||
case MAIN_MENU:
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// End of UI program logic
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
struct ConfigNode cfgNode;
|
||||
// default values
|
||||
snprintf(cfgNode.host, 128, "0.0.0.0");
|
||||
cfgNode.port = 60000;
|
||||
cfgNode.relay = 1;
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct ConfigNode cfgNode;
|
||||
// default values
|
||||
snprintf(cfgNode.host, 128, "0.0.0.0");
|
||||
cfgNode.port = 60000;
|
||||
cfgNode.relay = 1;
|
||||
|
||||
cfgNode.store = 0;
|
||||
snprintf(cfgNode.storeNode, 2048, "");
|
||||
snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000");
|
||||
snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres");
|
||||
cfgNode.storeVacuum = 0;
|
||||
cfgNode.storeDbMigration = 0;
|
||||
cfgNode.storeMaxNumDbConnections = 30;
|
||||
cfgNode.store = 0;
|
||||
snprintf(cfgNode.storeNode, 2048, "");
|
||||
snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000");
|
||||
snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres");
|
||||
cfgNode.storeVacuum = 0;
|
||||
cfgNode.storeDbMigration = 0;
|
||||
cfgNode.storeMaxNumDbConnections = 30;
|
||||
|
||||
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode)
|
||||
== ARGP_ERR_UNKNOWN) {
|
||||
show_help_and_exit();
|
||||
}
|
||||
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN)
|
||||
{
|
||||
show_help_and_exit();
|
||||
}
|
||||
|
||||
char jsonConfig[5000];
|
||||
snprintf(jsonConfig, 5000, "{ \
|
||||
char jsonConfig[5000];
|
||||
snprintf(jsonConfig, 5000, "{ \
|
||||
\"clusterId\": 16, \
|
||||
\"shards\": [ 1, 32, 64, 128, 256 ], \
|
||||
\"numShardsInNetwork\": 257, \
|
||||
\"listenAddress\": \"%s\", \
|
||||
\"tcpPort\": %d, \
|
||||
\"relay\": %s, \
|
||||
@ -312,54 +339,56 @@ int main(int argc, char** argv) {
|
||||
\"discv5UdpPort\": 9999, \
|
||||
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
|
||||
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
|
||||
}", cfgNode.host,
|
||||
cfgNode.port,
|
||||
cfgNode.relay ? "true":"false",
|
||||
cfgNode.store ? "true":"false",
|
||||
cfgNode.storeDbUrl,
|
||||
cfgNode.storeRetentionPolicy,
|
||||
cfgNode.storeMaxNumDbConnections);
|
||||
}",
|
||||
cfgNode.host,
|
||||
cfgNode.port,
|
||||
cfgNode.relay ? "true" : "false",
|
||||
cfgNode.store ? "true" : "false",
|
||||
cfgNode.storeDbUrl,
|
||||
cfgNode.storeRetentionPolicy,
|
||||
cfgNode.storeMaxNumDbConnections);
|
||||
|
||||
ctx = waku_new(jsonConfig, event_handler, userData);
|
||||
waitForCallback();
|
||||
ctx = waku_new(jsonConfig, event_handler, userData);
|
||||
waitForCallback();
|
||||
|
||||
WAKU_CALL( waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData) );
|
||||
WAKU_CALL( waku_version(ctx, print_waku_version, userData) );
|
||||
WAKU_CALL(waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData));
|
||||
WAKU_CALL(waku_version(ctx, print_waku_version, userData));
|
||||
|
||||
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
|
||||
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
|
||||
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
|
||||
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES" : "NO");
|
||||
|
||||
waku_set_event_callback(ctx, on_event_received, userData);
|
||||
set_event_callback(ctx, on_event_received, userData);
|
||||
|
||||
waku_start(ctx, event_handler, userData);
|
||||
waitForCallback();
|
||||
waku_start(ctx, event_handler, userData);
|
||||
waitForCallback();
|
||||
|
||||
WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) );
|
||||
WAKU_CALL(waku_listen_addresses(ctx, event_handler, userData));
|
||||
|
||||
WAKU_CALL( waku_relay_subscribe(ctx,
|
||||
"/waku/2/rs/0/0",
|
||||
event_handler,
|
||||
userData) );
|
||||
WAKU_CALL(waku_relay_subscribe(ctx,
|
||||
event_handler,
|
||||
userData,
|
||||
"/waku/2/rs/16/32"));
|
||||
|
||||
WAKU_CALL( waku_discv5_update_bootnodes(ctx,
|
||||
"[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]",
|
||||
event_handler,
|
||||
userData) );
|
||||
WAKU_CALL(waku_discv5_update_bootnodes(ctx,
|
||||
event_handler,
|
||||
userData,
|
||||
"[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]"));
|
||||
|
||||
WAKU_CALL( waku_get_peerids_from_peerstore(ctx,
|
||||
event_handler,
|
||||
userData) );
|
||||
WAKU_CALL(waku_get_peerids_from_peerstore(ctx,
|
||||
event_handler,
|
||||
userData));
|
||||
|
||||
show_main_menu();
|
||||
while(1) {
|
||||
handle_user_input();
|
||||
show_main_menu();
|
||||
while (1)
|
||||
{
|
||||
handle_user_input();
|
||||
|
||||
// Uncomment the following if need to test the metrics retrieval
|
||||
// WAKU_CALL( waku_get_metrics(ctx,
|
||||
// event_handler,
|
||||
// userData) );
|
||||
}
|
||||
// Uncomment the following if need to test the metrics retrieval
|
||||
// WAKU_CALL( waku_get_metrics(ctx,
|
||||
// event_handler,
|
||||
// userData) );
|
||||
}
|
||||
|
||||
pthread_mutex_destroy(&mutex);
|
||||
pthread_cond_destroy(&cond);
|
||||
pthread_mutex_destroy(&mutex);
|
||||
pthread_cond_destroy(&cond);
|
||||
}
|
||||
|
||||
@ -21,37 +21,43 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
|
||||
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
|
||||
int callback_executed = 0;
|
||||
|
||||
void waitForCallback() {
|
||||
void waitForCallback()
|
||||
{
|
||||
pthread_mutex_lock(&mutex);
|
||||
while (!callback_executed) {
|
||||
while (!callback_executed)
|
||||
{
|
||||
pthread_cond_wait(&cond, &mutex);
|
||||
}
|
||||
callback_executed = 0;
|
||||
pthread_mutex_unlock(&mutex);
|
||||
}
|
||||
|
||||
void signal_cond() {
|
||||
void signal_cond()
|
||||
{
|
||||
pthread_mutex_lock(&mutex);
|
||||
callback_executed = 1;
|
||||
pthread_cond_signal(&cond);
|
||||
pthread_mutex_unlock(&mutex);
|
||||
}
|
||||
|
||||
#define WAKU_CALL(call) \
|
||||
do { \
|
||||
int ret = call; \
|
||||
if (ret != 0) { \
|
||||
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
|
||||
} \
|
||||
waitForCallback(); \
|
||||
} while (0)
|
||||
#define WAKU_CALL(call) \
|
||||
do \
|
||||
{ \
|
||||
int ret = call; \
|
||||
if (ret != 0) \
|
||||
{ \
|
||||
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
|
||||
} \
|
||||
waitForCallback(); \
|
||||
} while (0)
|
||||
|
||||
struct ConfigNode {
|
||||
char host[128];
|
||||
int port;
|
||||
char key[128];
|
||||
int relay;
|
||||
char peers[2048];
|
||||
struct ConfigNode
|
||||
{
|
||||
char host[128];
|
||||
int port;
|
||||
char key[128];
|
||||
int relay;
|
||||
char peers[2048];
|
||||
};
|
||||
|
||||
// Arguments parsing
|
||||
@ -59,70 +65,76 @@ static char doc[] = "\nC example that shows how to use the waku library.";
|
||||
static char args_doc[] = "";
|
||||
|
||||
static struct argp_option options[] = {
|
||||
{ "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
|
||||
{ "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
|
||||
{ "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
|
||||
{ "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
|
||||
{ "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
|
||||
{"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
|
||||
{"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
|
||||
{"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
|
||||
{"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
|
||||
{"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
|
||||
to. (default: \"\") e.g. \"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""},
|
||||
{ 0 }
|
||||
};
|
||||
{0}};
|
||||
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
|
||||
static error_t parse_opt(int key, char *arg, struct argp_state *state)
|
||||
{
|
||||
|
||||
struct ConfigNode *cfgNode = (ConfigNode *) state->input;
|
||||
switch (key) {
|
||||
case 'h':
|
||||
snprintf(cfgNode->host, 128, "%s", arg);
|
||||
break;
|
||||
case 'p':
|
||||
cfgNode->port = atoi(arg);
|
||||
break;
|
||||
case 'k':
|
||||
snprintf(cfgNode->key, 128, "%s", arg);
|
||||
break;
|
||||
case 'r':
|
||||
cfgNode->relay = atoi(arg);
|
||||
break;
|
||||
case 'a':
|
||||
snprintf(cfgNode->peers, 2048, "%s", arg);
|
||||
break;
|
||||
case ARGP_KEY_ARG:
|
||||
if (state->arg_num >= 1) /* Too many arguments. */
|
||||
struct ConfigNode *cfgNode = (ConfigNode *)state->input;
|
||||
switch (key)
|
||||
{
|
||||
case 'h':
|
||||
snprintf(cfgNode->host, 128, "%s", arg);
|
||||
break;
|
||||
case 'p':
|
||||
cfgNode->port = atoi(arg);
|
||||
break;
|
||||
case 'k':
|
||||
snprintf(cfgNode->key, 128, "%s", arg);
|
||||
break;
|
||||
case 'r':
|
||||
cfgNode->relay = atoi(arg);
|
||||
break;
|
||||
case 'a':
|
||||
snprintf(cfgNode->peers, 2048, "%s", arg);
|
||||
break;
|
||||
case ARGP_KEY_ARG:
|
||||
if (state->arg_num >= 1) /* Too many arguments. */
|
||||
argp_usage(state);
|
||||
break;
|
||||
case ARGP_KEY_END:
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
break;
|
||||
case ARGP_KEY_END:
|
||||
break;
|
||||
default:
|
||||
return ARGP_ERR_UNKNOWN;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void event_handler(const char* msg, size_t len) {
|
||||
void event_handler(const char *msg, size_t len)
|
||||
{
|
||||
printf("Receiving event: %s\n", msg);
|
||||
}
|
||||
|
||||
void handle_error(const char* msg, size_t len) {
|
||||
void handle_error(const char *msg, size_t len)
|
||||
{
|
||||
printf("handle_error: %s\n", msg);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
template <class F>
|
||||
auto cify(F&& f) {
|
||||
static F fn = std::forward<F>(f);
|
||||
return [](int callerRet, const char* msg, size_t len, void* userData) {
|
||||
signal_cond();
|
||||
return fn(msg, len);
|
||||
};
|
||||
auto cify(F &&f)
|
||||
{
|
||||
static F fn = std::forward<F>(f);
|
||||
return [](int callerRet, const char *msg, size_t len, void *userData)
|
||||
{
|
||||
signal_cond();
|
||||
return fn(msg, len);
|
||||
};
|
||||
}
|
||||
|
||||
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
|
||||
static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0};
|
||||
|
||||
// Beginning of UI program logic
|
||||
|
||||
enum PROGRAM_STATE {
|
||||
enum PROGRAM_STATE
|
||||
{
|
||||
MAIN_MENU,
|
||||
SUBSCRIBE_TOPIC_MENU,
|
||||
CONNECT_TO_OTHER_NODE_MENU,
|
||||
@ -131,18 +143,21 @@ enum PROGRAM_STATE {
|
||||
|
||||
enum PROGRAM_STATE current_state = MAIN_MENU;
|
||||
|
||||
void show_main_menu() {
|
||||
void show_main_menu()
|
||||
{
|
||||
printf("\nPlease, select an option:\n");
|
||||
printf("\t1.) Subscribe to topic\n");
|
||||
printf("\t2.) Connect to other node\n");
|
||||
printf("\t3.) Publish a message\n");
|
||||
}
|
||||
|
||||
void handle_user_input(void* ctx) {
|
||||
void handle_user_input(void *ctx)
|
||||
{
|
||||
char cmd[1024];
|
||||
memset(cmd, 0, 1024);
|
||||
int numRead = read(0, cmd, 1024);
|
||||
if (numRead <= 0) {
|
||||
if (numRead <= 0)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
@ -154,12 +169,11 @@ void handle_user_input(void* ctx) {
|
||||
char pubsubTopic[128];
|
||||
scanf("%127s", pubsubTopic);
|
||||
|
||||
WAKU_CALL( waku_relay_subscribe(ctx,
|
||||
pubsubTopic,
|
||||
cify([&](const char* msg, size_t len) {
|
||||
event_handler(msg, len);
|
||||
}),
|
||||
nullptr) );
|
||||
WAKU_CALL(waku_relay_subscribe(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ event_handler(msg, len); }),
|
||||
nullptr,
|
||||
pubsubTopic));
|
||||
printf("The subscription went well\n");
|
||||
|
||||
show_main_menu();
|
||||
@ -171,15 +185,14 @@ void handle_user_input(void* ctx) {
|
||||
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
|
||||
char peerAddr[512];
|
||||
scanf("%511s", peerAddr);
|
||||
WAKU_CALL( waku_connect(ctx,
|
||||
peerAddr,
|
||||
10000 /* timeoutMs */,
|
||||
cify([&](const char* msg, size_t len) {
|
||||
event_handler(msg, len);
|
||||
}),
|
||||
nullptr));
|
||||
WAKU_CALL(waku_connect(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ event_handler(msg, len); }),
|
||||
nullptr,
|
||||
peerAddr,
|
||||
10000 /* timeoutMs */));
|
||||
show_main_menu();
|
||||
break;
|
||||
break;
|
||||
|
||||
case PUBLISH_MESSAGE_MENU:
|
||||
{
|
||||
@ -193,28 +206,26 @@ void handle_user_input(void* ctx) {
|
||||
|
||||
std::string contentTopic;
|
||||
waku_content_topic(ctx,
|
||||
cify([&contentTopic](const char *msg, size_t len)
|
||||
{ contentTopic = msg; }),
|
||||
nullptr,
|
||||
"appName",
|
||||
1,
|
||||
"contentTopicName",
|
||||
"encoding",
|
||||
cify([&contentTopic](const char* msg, size_t len) {
|
||||
contentTopic = msg;
|
||||
}),
|
||||
nullptr);
|
||||
1,
|
||||
"contentTopicName",
|
||||
"encoding");
|
||||
|
||||
snprintf(jsonWakuMsg,
|
||||
2048,
|
||||
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
|
||||
msgPayload.data(), contentTopic.c_str());
|
||||
|
||||
WAKU_CALL( waku_relay_publish(ctx,
|
||||
"/waku/2/rs/16/32",
|
||||
jsonWakuMsg,
|
||||
10000 /*timeout ms*/,
|
||||
cify([&](const char* msg, size_t len) {
|
||||
event_handler(msg, len);
|
||||
}),
|
||||
nullptr) );
|
||||
WAKU_CALL(waku_relay_publish(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ event_handler(msg, len); }),
|
||||
nullptr,
|
||||
"/waku/2/rs/16/32",
|
||||
jsonWakuMsg,
|
||||
10000 /*timeout ms*/));
|
||||
|
||||
show_main_menu();
|
||||
}
|
||||
@ -227,12 +238,14 @@ void handle_user_input(void* ctx) {
|
||||
|
||||
// End of UI program logic
|
||||
|
||||
void show_help_and_exit() {
|
||||
void show_help_and_exit()
|
||||
{
|
||||
printf("Wrong parameters\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
struct ConfigNode cfgNode;
|
||||
// default values
|
||||
snprintf(cfgNode.host, 128, "0.0.0.0");
|
||||
@ -241,8 +254,8 @@ int main(int argc, char** argv) {
|
||||
cfgNode.port = 60000;
|
||||
cfgNode.relay = 1;
|
||||
|
||||
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode)
|
||||
== ARGP_ERR_UNKNOWN) {
|
||||
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN)
|
||||
{
|
||||
show_help_and_exit();
|
||||
}
|
||||
|
||||
@ -260,72 +273,64 @@ int main(int argc, char** argv) {
|
||||
\"discv5UdpPort\": 9999, \
|
||||
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
|
||||
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
|
||||
}", cfgNode.host,
|
||||
cfgNode.port);
|
||||
}",
|
||||
cfgNode.host,
|
||||
cfgNode.port);
|
||||
|
||||
void* ctx =
|
||||
void *ctx =
|
||||
waku_new(jsonConfig,
|
||||
cify([](const char* msg, size_t len) {
|
||||
std::cout << "waku_new feedback: " << msg << std::endl;
|
||||
}
|
||||
),
|
||||
nullptr
|
||||
);
|
||||
cify([](const char *msg, size_t len)
|
||||
{ std::cout << "waku_new feedback: " << msg << std::endl; }),
|
||||
nullptr);
|
||||
waitForCallback();
|
||||
|
||||
// example on how to retrieve a value from the `libwaku` callback.
|
||||
std::string defaultPubsubTopic;
|
||||
WAKU_CALL(
|
||||
waku_default_pubsub_topic(
|
||||
ctx,
|
||||
cify([&defaultPubsubTopic](const char* msg, size_t len) {
|
||||
defaultPubsubTopic = msg;
|
||||
}
|
||||
),
|
||||
nullptr));
|
||||
ctx,
|
||||
cify([&defaultPubsubTopic](const char *msg, size_t len)
|
||||
{ defaultPubsubTopic = msg; }),
|
||||
nullptr));
|
||||
|
||||
std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl;
|
||||
|
||||
WAKU_CALL(waku_version(ctx,
|
||||
cify([&](const char* msg, size_t len) {
|
||||
std::cout << "Git Version: " << msg << std::endl;
|
||||
}),
|
||||
WAKU_CALL(waku_version(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ std::cout << "Git Version: " << msg << std::endl; }),
|
||||
nullptr));
|
||||
|
||||
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
|
||||
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
|
||||
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES" : "NO");
|
||||
|
||||
std::string pubsubTopic;
|
||||
WAKU_CALL(waku_pubsub_topic(ctx,
|
||||
"example",
|
||||
cify([&](const char* msg, size_t len) {
|
||||
pubsubTopic = msg;
|
||||
}),
|
||||
nullptr));
|
||||
WAKU_CALL(waku_pubsub_topic(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ pubsubTopic = msg; }),
|
||||
nullptr,
|
||||
"example"));
|
||||
|
||||
std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl;
|
||||
|
||||
waku_set_event_callback(ctx,
|
||||
cify([&](const char* msg, size_t len) {
|
||||
event_handler(msg, len);
|
||||
}),
|
||||
nullptr);
|
||||
set_event_callback(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ event_handler(msg, len); }),
|
||||
nullptr);
|
||||
|
||||
WAKU_CALL( waku_start(ctx,
|
||||
cify([&](const char* msg, size_t len) {
|
||||
event_handler(msg, len);
|
||||
}),
|
||||
nullptr));
|
||||
WAKU_CALL(waku_start(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ event_handler(msg, len); }),
|
||||
nullptr));
|
||||
|
||||
WAKU_CALL( waku_relay_subscribe(ctx,
|
||||
defaultPubsubTopic.c_str(),
|
||||
cify([&](const char* msg, size_t len) {
|
||||
event_handler(msg, len);
|
||||
}),
|
||||
nullptr) );
|
||||
WAKU_CALL(waku_relay_subscribe(ctx,
|
||||
cify([&](const char *msg, size_t len)
|
||||
{ event_handler(msg, len); }),
|
||||
nullptr,
|
||||
defaultPubsubTopic.c_str()));
|
||||
|
||||
show_main_menu();
|
||||
while(1) {
|
||||
while (1)
|
||||
{
|
||||
handle_user_input(ctx);
|
||||
}
|
||||
}
|
||||
|
||||
@ -62,13 +62,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
|
||||
"Building ENR with relay sharding failed"
|
||||
)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
@ -76,8 +72,10 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
|
||||
builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet()
|
||||
let node = builder.build().tryGet()
|
||||
|
||||
node.mountMetadata(clusterId).expect("failed to mount waku metadata protocol")
|
||||
waitFor node.mountFilterClient()
|
||||
node.mountMetadata(clusterId, shardId).expect(
|
||||
"failed to mount waku metadata protocol"
|
||||
)
|
||||
await node.mountFilterClient()
|
||||
|
||||
await node.start()
|
||||
|
||||
@ -90,20 +88,18 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
|
||||
while true:
|
||||
notice "maintaining subscription"
|
||||
# First use filter-ping to check if we have an active subscription
|
||||
let pingRes = await node.wakuFilterClient.ping(filterPeer)
|
||||
if pingRes.isErr():
|
||||
if (await node.wakuFilterClient.ping(filterPeer)).isErr():
|
||||
# No subscription found. Let's subscribe.
|
||||
notice "no subscription found. Sending subscribe request"
|
||||
|
||||
let subscribeRes = await node.wakuFilterClient.subscribe(
|
||||
filterPeer, FilterPubsubTopic, @[FilterContentTopic]
|
||||
)
|
||||
|
||||
if subscribeRes.isErr():
|
||||
notice "subscribe request failed. Quitting.", err = subscribeRes.error
|
||||
(
|
||||
await node.wakuFilterClient.subscribe(
|
||||
filterPeer, FilterPubsubTopic, @[FilterContentTopic]
|
||||
)
|
||||
).isOkOr:
|
||||
notice "subscribe request failed. Quitting.", error = error
|
||||
break
|
||||
else:
|
||||
notice "subscribe request successful."
|
||||
notice "subscribe request successful."
|
||||
else:
|
||||
notice "subscription found."
|
||||
|
||||
|
||||
@ -71,32 +71,32 @@ package main
|
||||
|
||||
static void* cGoWakuNew(const char* configJson, void* resp) {
|
||||
// We pass NULL because we are not interested in retrieving data from this callback
|
||||
void* ret = waku_new(configJson, (WakuCallBack) callback, resp);
|
||||
void* ret = waku_new(configJson, (FFICallBack) callback, resp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void cGoWakuStart(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL(waku_start(wakuCtx, (WakuCallBack) callback, resp));
|
||||
WAKU_CALL(waku_start(wakuCtx, (FFICallBack) callback, resp));
|
||||
}
|
||||
|
||||
static void cGoWakuStop(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL(waku_stop(wakuCtx, (WakuCallBack) callback, resp));
|
||||
WAKU_CALL(waku_stop(wakuCtx, (FFICallBack) callback, resp));
|
||||
}
|
||||
|
||||
static void cGoWakuDestroy(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL(waku_destroy(wakuCtx, (WakuCallBack) callback, resp));
|
||||
WAKU_CALL(waku_destroy(wakuCtx, (FFICallBack) callback, resp));
|
||||
}
|
||||
|
||||
static void cGoWakuStartDiscV5(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL(waku_start_discv5(wakuCtx, (WakuCallBack) callback, resp));
|
||||
WAKU_CALL(waku_start_discv5(wakuCtx, (FFICallBack) callback, resp));
|
||||
}
|
||||
|
||||
static void cGoWakuStopDiscV5(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL(waku_stop_discv5(wakuCtx, (WakuCallBack) callback, resp));
|
||||
WAKU_CALL(waku_stop_discv5(wakuCtx, (FFICallBack) callback, resp));
|
||||
}
|
||||
|
||||
static void cGoWakuVersion(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL(waku_version(wakuCtx, (WakuCallBack) callback, resp));
|
||||
WAKU_CALL(waku_version(wakuCtx, (FFICallBack) callback, resp));
|
||||
}
|
||||
|
||||
static void cGoWakuSetEventCallback(void* wakuCtx) {
|
||||
@ -112,7 +112,7 @@ package main
|
||||
|
||||
// This technique is needed because cgo only allows to export Go functions and not methods.
|
||||
|
||||
waku_set_event_callback(wakuCtx, (WakuCallBack) globalEventCallback, wakuCtx);
|
||||
set_event_callback(wakuCtx, (FFICallBack) globalEventCallback, wakuCtx);
|
||||
}
|
||||
|
||||
static void cGoWakuContentTopic(void* wakuCtx,
|
||||
@ -123,20 +123,21 @@ package main
|
||||
void* resp) {
|
||||
|
||||
WAKU_CALL( waku_content_topic(wakuCtx,
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
appName,
|
||||
appVersion,
|
||||
contentTopicName,
|
||||
encoding,
|
||||
(WakuCallBack) callback,
|
||||
resp) );
|
||||
encoding
|
||||
) );
|
||||
}
|
||||
|
||||
static void cGoWakuPubsubTopic(void* wakuCtx, char* topicName, void* resp) {
|
||||
WAKU_CALL( waku_pubsub_topic(wakuCtx, topicName, (WakuCallBack) callback, resp) );
|
||||
WAKU_CALL( waku_pubsub_topic(wakuCtx, (FFICallBack) callback, resp, topicName) );
|
||||
}
|
||||
|
||||
static void cGoWakuDefaultPubsubTopic(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (WakuCallBack) callback, resp));
|
||||
WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (FFICallBack) callback, resp));
|
||||
}
|
||||
|
||||
static void cGoWakuRelayPublish(void* wakuCtx,
|
||||
@ -146,34 +147,36 @@ package main
|
||||
void* resp) {
|
||||
|
||||
WAKU_CALL (waku_relay_publish(wakuCtx,
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
pubSubTopic,
|
||||
jsonWakuMessage,
|
||||
timeoutMs,
|
||||
(WakuCallBack) callback,
|
||||
resp));
|
||||
timeoutMs
|
||||
));
|
||||
}
|
||||
|
||||
static void cGoWakuRelaySubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
|
||||
WAKU_CALL ( waku_relay_subscribe(wakuCtx,
|
||||
pubSubTopic,
|
||||
(WakuCallBack) callback,
|
||||
resp) );
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
pubSubTopic) );
|
||||
}
|
||||
|
||||
static void cGoWakuRelayUnsubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
|
||||
|
||||
WAKU_CALL ( waku_relay_unsubscribe(wakuCtx,
|
||||
pubSubTopic,
|
||||
(WakuCallBack) callback,
|
||||
resp) );
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
pubSubTopic) );
|
||||
}
|
||||
|
||||
static void cGoWakuConnect(void* wakuCtx, char* peerMultiAddr, int timeoutMs, void* resp) {
|
||||
WAKU_CALL( waku_connect(wakuCtx,
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
peerMultiAddr,
|
||||
timeoutMs,
|
||||
(WakuCallBack) callback,
|
||||
resp) );
|
||||
timeoutMs
|
||||
) );
|
||||
}
|
||||
|
||||
static void cGoWakuDialPeerById(void* wakuCtx,
|
||||
@ -183,42 +186,44 @@ package main
|
||||
void* resp) {
|
||||
|
||||
WAKU_CALL( waku_dial_peer_by_id(wakuCtx,
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
peerId,
|
||||
protocol,
|
||||
timeoutMs,
|
||||
(WakuCallBack) callback,
|
||||
resp) );
|
||||
timeoutMs
|
||||
) );
|
||||
}
|
||||
|
||||
static void cGoWakuDisconnectPeerById(void* wakuCtx, char* peerId, void* resp) {
|
||||
WAKU_CALL( waku_disconnect_peer_by_id(wakuCtx,
|
||||
peerId,
|
||||
(WakuCallBack) callback,
|
||||
resp) );
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
peerId
|
||||
) );
|
||||
}
|
||||
|
||||
static void cGoWakuListenAddresses(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL (waku_listen_addresses(wakuCtx, (WakuCallBack) callback, resp) );
|
||||
WAKU_CALL (waku_listen_addresses(wakuCtx, (FFICallBack) callback, resp) );
|
||||
}
|
||||
|
||||
static void cGoWakuGetMyENR(void* ctx, void* resp) {
|
||||
WAKU_CALL (waku_get_my_enr(ctx, (WakuCallBack) callback, resp) );
|
||||
WAKU_CALL (waku_get_my_enr(ctx, (FFICallBack) callback, resp) );
|
||||
}
|
||||
|
||||
static void cGoWakuGetMyPeerId(void* ctx, void* resp) {
|
||||
WAKU_CALL (waku_get_my_peerid(ctx, (WakuCallBack) callback, resp) );
|
||||
WAKU_CALL (waku_get_my_peerid(ctx, (FFICallBack) callback, resp) );
|
||||
}
|
||||
|
||||
static void cGoWakuListPeersInMesh(void* ctx, char* pubSubTopic, void* resp) {
|
||||
WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, pubSubTopic, (WakuCallBack) callback, resp) );
|
||||
WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, (FFICallBack) callback, resp, pubSubTopic) );
|
||||
}
|
||||
|
||||
static void cGoWakuGetNumConnectedPeers(void* ctx, char* pubSubTopic, void* resp) {
|
||||
WAKU_CALL (waku_relay_get_num_connected_peers(ctx, pubSubTopic, (WakuCallBack) callback, resp) );
|
||||
WAKU_CALL (waku_relay_get_num_connected_peers(ctx, (FFICallBack) callback, resp, pubSubTopic) );
|
||||
}
|
||||
|
||||
static void cGoWakuGetPeerIdsFromPeerStore(void* wakuCtx, void* resp) {
|
||||
WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (WakuCallBack) callback, resp) );
|
||||
WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (FFICallBack) callback, resp) );
|
||||
}
|
||||
|
||||
static void cGoWakuLightpushPublish(void* wakuCtx,
|
||||
@ -227,10 +232,11 @@ package main
|
||||
void* resp) {
|
||||
|
||||
WAKU_CALL (waku_lightpush_publish(wakuCtx,
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
pubSubTopic,
|
||||
jsonWakuMessage,
|
||||
(WakuCallBack) callback,
|
||||
resp));
|
||||
jsonWakuMessage
|
||||
));
|
||||
}
|
||||
|
||||
static void cGoWakuStoreQuery(void* wakuCtx,
|
||||
@ -240,11 +246,12 @@ package main
|
||||
void* resp) {
|
||||
|
||||
WAKU_CALL (waku_store_query(wakuCtx,
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
jsonQuery,
|
||||
peerAddr,
|
||||
timeoutMs,
|
||||
(WakuCallBack) callback,
|
||||
resp));
|
||||
timeoutMs
|
||||
));
|
||||
}
|
||||
|
||||
static void cGoWakuPeerExchangeQuery(void* wakuCtx,
|
||||
@ -252,9 +259,10 @@ package main
|
||||
void* resp) {
|
||||
|
||||
WAKU_CALL (waku_peer_exchange_request(wakuCtx,
|
||||
numPeers,
|
||||
(WakuCallBack) callback,
|
||||
resp));
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
numPeers
|
||||
));
|
||||
}
|
||||
|
||||
static void cGoWakuGetPeerIdsByProtocol(void* wakuCtx,
|
||||
@ -262,9 +270,10 @@ package main
|
||||
void* resp) {
|
||||
|
||||
WAKU_CALL (waku_get_peerids_by_protocol(wakuCtx,
|
||||
protocol,
|
||||
(WakuCallBack) callback,
|
||||
resp));
|
||||
(FFICallBack) callback,
|
||||
resp,
|
||||
protocol
|
||||
));
|
||||
}
|
||||
|
||||
*/
|
||||
|
||||
331
examples/ios/WakuExample.xcodeproj/project.pbxproj
Normal file
331
examples/ios/WakuExample.xcodeproj/project.pbxproj
Normal file
@ -0,0 +1,331 @@
|
||||
// !$*UTF8*$!
|
||||
{
|
||||
archiveVersion = 1;
|
||||
classes = {
|
||||
};
|
||||
objectVersion = 63;
|
||||
objects = {
|
||||
|
||||
/* Begin PBXBuildFile section */
|
||||
45714AF6D1D12AF5C36694FB /* WakuExampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */; };
|
||||
6468FA3F5F760D3FCAD6CDBF /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7D8744E36DADC11F38A1CC99 /* ContentView.swift */; };
|
||||
C4EA202B782038F96336401F /* WakuNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = 638A565C495A63CFF7396FBC /* WakuNode.swift */; };
|
||||
/* End PBXBuildFile section */
|
||||
|
||||
/* Begin PBXFileReference section */
|
||||
0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WakuExampleApp.swift; sourceTree = "<group>"; };
|
||||
31BE20DB2755A11000723420 /* libwaku.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = libwaku.h; sourceTree = "<group>"; };
|
||||
5C5AAC91E0166D28BFA986DB /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist; path = Info.plist; sourceTree = "<group>"; };
|
||||
638A565C495A63CFF7396FBC /* WakuNode.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WakuNode.swift; sourceTree = "<group>"; };
|
||||
7D8744E36DADC11F38A1CC99 /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = "<group>"; };
|
||||
A8655016B3DF9B0877631CE5 /* WakuExample-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "WakuExample-Bridging-Header.h"; sourceTree = "<group>"; };
|
||||
CFBE844B6E18ACB81C65F83B /* WakuExample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = WakuExample.app; sourceTree = BUILT_PRODUCTS_DIR; };
|
||||
/* End PBXFileReference section */
|
||||
|
||||
/* Begin PBXGroup section */
|
||||
34547A6259485BD047D6375C /* Products */ = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
CFBE844B6E18ACB81C65F83B /* WakuExample.app */,
|
||||
);
|
||||
name = Products;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
4F76CB85EC44E951B8E75522 /* WakuExample */ = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
7D8744E36DADC11F38A1CC99 /* ContentView.swift */,
|
||||
5C5AAC91E0166D28BFA986DB /* Info.plist */,
|
||||
31BE20DB2755A11000723420 /* libwaku.h */,
|
||||
A8655016B3DF9B0877631CE5 /* WakuExample-Bridging-Header.h */,
|
||||
0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */,
|
||||
638A565C495A63CFF7396FBC /* WakuNode.swift */,
|
||||
);
|
||||
path = WakuExample;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
D40CD2446F177CAABB0A747A = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
4F76CB85EC44E951B8E75522 /* WakuExample */,
|
||||
34547A6259485BD047D6375C /* Products */,
|
||||
);
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
/* End PBXGroup section */
|
||||
|
||||
/* Begin PBXNativeTarget section */
|
||||
F751EF8294AD21F713D47FDA /* WakuExample */ = {
|
||||
isa = PBXNativeTarget;
|
||||
buildConfigurationList = 757FA0123629BD63CB254113 /* Build configuration list for PBXNativeTarget "WakuExample" */;
|
||||
buildPhases = (
|
||||
D3AFD8C4DA68BF5C4F7D8E10 /* Sources */,
|
||||
);
|
||||
buildRules = (
|
||||
);
|
||||
dependencies = (
|
||||
);
|
||||
name = WakuExample;
|
||||
packageProductDependencies = (
|
||||
);
|
||||
productName = WakuExample;
|
||||
productReference = CFBE844B6E18ACB81C65F83B /* WakuExample.app */;
|
||||
productType = "com.apple.product-type.application";
|
||||
};
|
||||
/* End PBXNativeTarget section */
|
||||
|
||||
/* Begin PBXProject section */
|
||||
4FF82F0F4AF8E1E34728F150 /* Project object */ = {
|
||||
isa = PBXProject;
|
||||
attributes = {
|
||||
BuildIndependentTargetsInParallel = YES;
|
||||
LastUpgradeCheck = 1500;
|
||||
};
|
||||
buildConfigurationList = B3A4F48294254543E79767C4 /* Build configuration list for PBXProject "WakuExample" */;
|
||||
compatibilityVersion = "Xcode 14.0";
|
||||
developmentRegion = en;
|
||||
hasScannedForEncodings = 0;
|
||||
knownRegions = (
|
||||
Base,
|
||||
en,
|
||||
);
|
||||
mainGroup = D40CD2446F177CAABB0A747A;
|
||||
minimizedProjectReferenceProxies = 1;
|
||||
projectDirPath = "";
|
||||
projectRoot = "";
|
||||
targets = (
|
||||
F751EF8294AD21F713D47FDA /* WakuExample */,
|
||||
);
|
||||
};
|
||||
/* End PBXProject section */
|
||||
|
||||
/* Begin PBXSourcesBuildPhase section */
|
||||
D3AFD8C4DA68BF5C4F7D8E10 /* Sources */ = {
|
||||
isa = PBXSourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
6468FA3F5F760D3FCAD6CDBF /* ContentView.swift in Sources */,
|
||||
45714AF6D1D12AF5C36694FB /* WakuExampleApp.swift in Sources */,
|
||||
C4EA202B782038F96336401F /* WakuNode.swift in Sources */,
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXSourcesBuildPhase section */
|
||||
|
||||
/* Begin XCBuildConfiguration section */
|
||||
36939122077C66DD94082311 /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
|
||||
CODE_SIGN_IDENTITY = "iPhone Developer";
|
||||
DEVELOPMENT_TEAM = 2Q52K2W84K;
|
||||
HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/WakuExample";
|
||||
INFOPLIST_FILE = WakuExample/Info.plist;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/Frameworks",
|
||||
);
|
||||
"LIBRARY_SEARCH_PATHS[sdk=iphoneos*]" = "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64";
|
||||
"LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]" = "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64";
|
||||
MACOSX_DEPLOYMENT_TARGET = 15.6;
|
||||
OTHER_LDFLAGS = (
|
||||
"-lc++",
|
||||
"-force_load",
|
||||
"$(PROJECT_DIR)/../../build/ios/iphoneos-arm64/libwaku.a",
|
||||
"-lsqlite3",
|
||||
"-lz",
|
||||
);
|
||||
PRODUCT_BUNDLE_IDENTIFIER = org.waku.example;
|
||||
SDKROOT = iphoneos;
|
||||
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
|
||||
SUPPORTS_MACCATALYST = NO;
|
||||
SUPPORTS_MAC_DESIGNED_FOR_IPHONE_IPAD = YES;
|
||||
SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = YES;
|
||||
SWIFT_OBJC_BRIDGING_HEADER = "WakuExample/WakuExample-Bridging-Header.h";
|
||||
TARGETED_DEVICE_FAMILY = "1,2";
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
9BA833A09EEDB4B3FCCD8F8E /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ALWAYS_SEARCH_USER_PATHS = NO;
|
||||
CLANG_ANALYZER_NONNULL = YES;
|
||||
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
|
||||
CLANG_CXX_LANGUAGE_STANDARD = "gnu++14";
|
||||
CLANG_CXX_LIBRARY = "libc++";
|
||||
CLANG_ENABLE_MODULES = YES;
|
||||
CLANG_ENABLE_OBJC_ARC = YES;
|
||||
CLANG_ENABLE_OBJC_WEAK = YES;
|
||||
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
|
||||
CLANG_WARN_BOOL_CONVERSION = YES;
|
||||
CLANG_WARN_COMMA = YES;
|
||||
CLANG_WARN_CONSTANT_CONVERSION = YES;
|
||||
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
|
||||
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
|
||||
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
|
||||
CLANG_WARN_EMPTY_BODY = YES;
|
||||
CLANG_WARN_ENUM_CONVERSION = YES;
|
||||
CLANG_WARN_INFINITE_RECURSION = YES;
|
||||
CLANG_WARN_INT_CONVERSION = YES;
|
||||
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
|
||||
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
|
||||
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
|
||||
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
|
||||
CLANG_WARN_STRICT_PROTOTYPES = YES;
|
||||
CLANG_WARN_SUSPICIOUS_MOVE = YES;
|
||||
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
|
||||
CLANG_WARN_UNREACHABLE_CODE = YES;
|
||||
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
|
||||
COPY_PHASE_STRIP = NO;
|
||||
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
|
||||
ENABLE_NS_ASSERTIONS = NO;
|
||||
ENABLE_STRICT_OBJC_MSGSEND = YES;
|
||||
GCC_C_LANGUAGE_STANDARD = gnu11;
|
||||
GCC_NO_COMMON_BLOCKS = YES;
|
||||
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
|
||||
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
|
||||
GCC_WARN_UNDECLARED_SELECTOR = YES;
|
||||
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
|
||||
GCC_WARN_UNUSED_FUNCTION = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
|
||||
MTL_ENABLE_DEBUG_INFO = NO;
|
||||
MTL_FAST_MATH = YES;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SDKROOT = iphoneos;
|
||||
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
|
||||
SUPPORTS_MACCATALYST = NO;
|
||||
SWIFT_COMPILATION_MODE = wholemodule;
|
||||
SWIFT_OPTIMIZATION_LEVEL = "-O";
|
||||
SWIFT_VERSION = 5.0;
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
A59ABFB792FED8974231E5AC /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ALWAYS_SEARCH_USER_PATHS = NO;
|
||||
CLANG_ANALYZER_NONNULL = YES;
|
||||
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
|
||||
CLANG_CXX_LANGUAGE_STANDARD = "gnu++14";
|
||||
CLANG_CXX_LIBRARY = "libc++";
|
||||
CLANG_ENABLE_MODULES = YES;
|
||||
CLANG_ENABLE_OBJC_ARC = YES;
|
||||
CLANG_ENABLE_OBJC_WEAK = YES;
|
||||
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
|
||||
CLANG_WARN_BOOL_CONVERSION = YES;
|
||||
CLANG_WARN_COMMA = YES;
|
||||
CLANG_WARN_CONSTANT_CONVERSION = YES;
|
||||
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
|
||||
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
|
||||
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
|
||||
CLANG_WARN_EMPTY_BODY = YES;
|
||||
CLANG_WARN_ENUM_CONVERSION = YES;
|
||||
CLANG_WARN_INFINITE_RECURSION = YES;
|
||||
CLANG_WARN_INT_CONVERSION = YES;
|
||||
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
|
||||
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
|
||||
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
|
||||
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
|
||||
CLANG_WARN_STRICT_PROTOTYPES = YES;
|
||||
CLANG_WARN_SUSPICIOUS_MOVE = YES;
|
||||
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
|
||||
CLANG_WARN_UNREACHABLE_CODE = YES;
|
||||
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
|
||||
COPY_PHASE_STRIP = NO;
|
||||
DEBUG_INFORMATION_FORMAT = dwarf;
|
||||
ENABLE_STRICT_OBJC_MSGSEND = YES;
|
||||
ENABLE_TESTABILITY = YES;
|
||||
GCC_C_LANGUAGE_STANDARD = gnu11;
|
||||
GCC_DYNAMIC_NO_PIC = NO;
|
||||
GCC_NO_COMMON_BLOCKS = YES;
|
||||
GCC_OPTIMIZATION_LEVEL = 0;
|
||||
GCC_PREPROCESSOR_DEFINITIONS = (
|
||||
"$(inherited)",
|
||||
"DEBUG=1",
|
||||
);
|
||||
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
|
||||
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
|
||||
GCC_WARN_UNDECLARED_SELECTOR = YES;
|
||||
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
|
||||
GCC_WARN_UNUSED_FUNCTION = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
|
||||
MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
|
||||
MTL_FAST_MATH = YES;
|
||||
ONLY_ACTIVE_ARCH = YES;
|
||||
PRODUCT_NAME = "$(TARGET_NAME)";
|
||||
SDKROOT = iphoneos;
|
||||
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
|
||||
SUPPORTS_MACCATALYST = NO;
|
||||
SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;
|
||||
SWIFT_OPTIMIZATION_LEVEL = "-Onone";
|
||||
SWIFT_VERSION = 5.0;
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
AF5ADDAA865B1F6BD4E70A79 /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
|
||||
CODE_SIGN_IDENTITY = "iPhone Developer";
|
||||
DEVELOPMENT_TEAM = 2Q52K2W84K;
|
||||
HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/WakuExample";
|
||||
INFOPLIST_FILE = WakuExample/Info.plist;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/Frameworks",
|
||||
);
|
||||
"LIBRARY_SEARCH_PATHS[sdk=iphoneos*]" = "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64";
|
||||
"LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]" = "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64";
|
||||
MACOSX_DEPLOYMENT_TARGET = 15.6;
|
||||
OTHER_LDFLAGS = (
|
||||
"-lc++",
|
||||
"-force_load",
|
||||
"$(PROJECT_DIR)/../../build/ios/iphoneos-arm64/libwaku.a",
|
||||
"-lsqlite3",
|
||||
"-lz",
|
||||
);
|
||||
PRODUCT_BUNDLE_IDENTIFIER = org.waku.example;
|
||||
SDKROOT = iphoneos;
|
||||
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
|
||||
SUPPORTS_MACCATALYST = NO;
|
||||
SUPPORTS_MAC_DESIGNED_FOR_IPHONE_IPAD = YES;
|
||||
SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = YES;
|
||||
SWIFT_OBJC_BRIDGING_HEADER = "WakuExample/WakuExample-Bridging-Header.h";
|
||||
TARGETED_DEVICE_FAMILY = "1,2";
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
/* End XCBuildConfiguration section */
|
||||
|
||||
/* Begin XCConfigurationList section */
|
||||
757FA0123629BD63CB254113 /* Build configuration list for PBXNativeTarget "WakuExample" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
AF5ADDAA865B1F6BD4E70A79 /* Debug */,
|
||||
36939122077C66DD94082311 /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Debug;
|
||||
};
|
||||
B3A4F48294254543E79767C4 /* Build configuration list for PBXProject "WakuExample" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
A59ABFB792FED8974231E5AC /* Debug */,
|
||||
9BA833A09EEDB4B3FCCD8F8E /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Debug;
|
||||
};
|
||||
/* End XCConfigurationList section */
|
||||
};
|
||||
rootObject = 4FF82F0F4AF8E1E34728F150 /* Project object */;
|
||||
}
|
||||
7
examples/ios/WakuExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata
generated
Normal file
7
examples/ios/WakuExample.xcodeproj/project.xcworkspace/contents.xcworkspacedata
generated
Normal file
@ -0,0 +1,7 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Workspace
|
||||
version = "1.0">
|
||||
<FileRef
|
||||
location = "self:">
|
||||
</FileRef>
|
||||
</Workspace>
|
||||
229
examples/ios/WakuExample/ContentView.swift
Normal file
229
examples/ios/WakuExample/ContentView.swift
Normal file
@ -0,0 +1,229 @@
|
||||
//
|
||||
// ContentView.swift
|
||||
// WakuExample
|
||||
//
|
||||
// Minimal chat PoC using libwaku on iOS
|
||||
//
|
||||
|
||||
import SwiftUI
|
||||
|
||||
struct ContentView: View {
|
||||
@StateObject private var wakuNode = WakuNode()
|
||||
@State private var messageText = ""
|
||||
|
||||
var body: some View {
|
||||
ZStack {
|
||||
// Main content
|
||||
VStack(spacing: 0) {
|
||||
// Header with status
|
||||
HStack {
|
||||
Circle()
|
||||
.fill(statusColor)
|
||||
.frame(width: 10, height: 10)
|
||||
VStack(alignment: .leading, spacing: 2) {
|
||||
Text(wakuNode.status.rawValue)
|
||||
.font(.caption)
|
||||
if wakuNode.status == .running {
|
||||
HStack(spacing: 4) {
|
||||
Text(wakuNode.isConnected ? "Connected" : "Discovering...")
|
||||
Text("•")
|
||||
filterStatusView
|
||||
}
|
||||
.font(.caption2)
|
||||
.foregroundColor(.secondary)
|
||||
|
||||
// Subscription maintenance status
|
||||
if wakuNode.subscriptionMaintenanceActive {
|
||||
HStack(spacing: 4) {
|
||||
Image(systemName: "arrow.triangle.2.circlepath")
|
||||
.foregroundColor(.blue)
|
||||
Text("Maintenance active")
|
||||
if wakuNode.failedSubscribeAttempts > 0 {
|
||||
Text("(\(wakuNode.failedSubscribeAttempts) retries)")
|
||||
.foregroundColor(.orange)
|
||||
}
|
||||
}
|
||||
.font(.caption2)
|
||||
.foregroundColor(.secondary)
|
||||
}
|
||||
}
|
||||
}
|
||||
Spacer()
|
||||
if wakuNode.status == .stopped {
|
||||
Button("Start") {
|
||||
wakuNode.start()
|
||||
}
|
||||
.buttonStyle(.borderedProminent)
|
||||
.controlSize(.small)
|
||||
} else if wakuNode.status == .running {
|
||||
if !wakuNode.filterSubscribed {
|
||||
Button("Resub") {
|
||||
wakuNode.resubscribe()
|
||||
}
|
||||
.buttonStyle(.bordered)
|
||||
.controlSize(.small)
|
||||
}
|
||||
Button("Stop") {
|
||||
wakuNode.stop()
|
||||
}
|
||||
.buttonStyle(.bordered)
|
||||
.controlSize(.small)
|
||||
}
|
||||
}
|
||||
.padding()
|
||||
.background(Color.gray.opacity(0.1))
|
||||
|
||||
// Messages list
|
||||
ScrollViewReader { proxy in
|
||||
ScrollView {
|
||||
LazyVStack(alignment: .leading, spacing: 8) {
|
||||
ForEach(wakuNode.receivedMessages.reversed()) { message in
|
||||
MessageBubble(message: message)
|
||||
.id(message.id)
|
||||
}
|
||||
}
|
||||
.padding()
|
||||
}
|
||||
.onChange(of: wakuNode.receivedMessages.count) { _, newCount in
|
||||
if let lastMessage = wakuNode.receivedMessages.first {
|
||||
withAnimation {
|
||||
proxy.scrollTo(lastMessage.id, anchor: .bottom)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Divider()
|
||||
|
||||
// Message input
|
||||
HStack(spacing: 12) {
|
||||
TextField("Message", text: $messageText)
|
||||
.textFieldStyle(.roundedBorder)
|
||||
.disabled(wakuNode.status != .running)
|
||||
|
||||
Button(action: sendMessage) {
|
||||
Image(systemName: "paperplane.fill")
|
||||
.foregroundColor(.white)
|
||||
.padding(10)
|
||||
.background(canSend ? Color.blue : Color.gray)
|
||||
.clipShape(Circle())
|
||||
}
|
||||
.disabled(!canSend)
|
||||
}
|
||||
.padding()
|
||||
.background(Color.gray.opacity(0.1))
|
||||
}
|
||||
|
||||
// Toast overlay for errors
|
||||
VStack {
|
||||
ForEach(wakuNode.errorQueue) { error in
|
||||
ToastView(error: error) {
|
||||
wakuNode.dismissError(error)
|
||||
}
|
||||
.transition(.asymmetric(
|
||||
insertion: .move(edge: .top).combined(with: .opacity),
|
||||
removal: .opacity
|
||||
))
|
||||
}
|
||||
Spacer()
|
||||
}
|
||||
.padding(.top, 8)
|
||||
.animation(.easeInOut(duration: 0.3), value: wakuNode.errorQueue)
|
||||
}
|
||||
}
|
||||
|
||||
private var statusColor: Color {
|
||||
switch wakuNode.status {
|
||||
case .stopped: return .gray
|
||||
case .starting: return .yellow
|
||||
case .running: return .green
|
||||
case .error: return .red
|
||||
}
|
||||
}
|
||||
|
||||
@ViewBuilder
|
||||
private var filterStatusView: some View {
|
||||
if wakuNode.filterSubscribed {
|
||||
Text("Filter OK")
|
||||
.foregroundColor(.green)
|
||||
} else if wakuNode.failedSubscribeAttempts > 0 {
|
||||
Text("Filter retrying (\(wakuNode.failedSubscribeAttempts))")
|
||||
.foregroundColor(.orange)
|
||||
} else {
|
||||
Text("Filter pending")
|
||||
.foregroundColor(.orange)
|
||||
}
|
||||
}
|
||||
|
||||
private var canSend: Bool {
|
||||
wakuNode.status == .running && wakuNode.isConnected && !messageText.trimmingCharacters(in: .whitespaces).isEmpty
|
||||
}
|
||||
|
||||
private func sendMessage() {
|
||||
let text = messageText.trimmingCharacters(in: .whitespaces)
|
||||
guard !text.isEmpty else { return }
|
||||
|
||||
wakuNode.publish(message: text)
|
||||
messageText = ""
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Toast View
|
||||
|
||||
struct ToastView: View {
|
||||
let error: TimestampedError
|
||||
let onDismiss: () -> Void
|
||||
|
||||
var body: some View {
|
||||
HStack(spacing: 12) {
|
||||
Image(systemName: "exclamationmark.triangle.fill")
|
||||
.foregroundColor(.white)
|
||||
|
||||
Text(error.message)
|
||||
.font(.subheadline)
|
||||
.foregroundColor(.white)
|
||||
.lineLimit(2)
|
||||
|
||||
Spacer()
|
||||
|
||||
Button(action: onDismiss) {
|
||||
Image(systemName: "xmark.circle.fill")
|
||||
.foregroundColor(.white.opacity(0.8))
|
||||
.font(.title3)
|
||||
}
|
||||
.buttonStyle(.plain)
|
||||
}
|
||||
.padding(.horizontal, 16)
|
||||
.padding(.vertical, 12)
|
||||
.background(
|
||||
RoundedRectangle(cornerRadius: 12)
|
||||
.fill(Color.red.opacity(0.9))
|
||||
.shadow(color: .black.opacity(0.2), radius: 8, x: 0, y: 4)
|
||||
)
|
||||
.padding(.horizontal, 16)
|
||||
.padding(.vertical, 4)
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Message Bubble
|
||||
|
||||
struct MessageBubble: View {
|
||||
let message: WakuMessage
|
||||
|
||||
var body: some View {
|
||||
VStack(alignment: .leading, spacing: 4) {
|
||||
Text(message.payload)
|
||||
.padding(10)
|
||||
.background(Color.blue.opacity(0.1))
|
||||
.cornerRadius(12)
|
||||
|
||||
Text(message.timestamp, style: .time)
|
||||
.font(.caption2)
|
||||
.foregroundColor(.secondary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#Preview {
|
||||
ContentView()
|
||||
}
|
||||
36
examples/ios/WakuExample/Info.plist
Normal file
36
examples/ios/WakuExample/Info.plist
Normal file
@ -0,0 +1,36 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>CFBundleDevelopmentRegion</key>
|
||||
<string>$(DEVELOPMENT_LANGUAGE)</string>
|
||||
<key>CFBundleDisplayName</key>
|
||||
<string>Waku Example</string>
|
||||
<key>CFBundleExecutable</key>
|
||||
<string>$(EXECUTABLE_NAME)</string>
|
||||
<key>CFBundleIdentifier</key>
|
||||
<string>org.waku.example</string>
|
||||
<key>CFBundleInfoDictionaryVersion</key>
|
||||
<string>6.0</string>
|
||||
<key>CFBundleName</key>
|
||||
<string>WakuExample</string>
|
||||
<key>CFBundlePackageType</key>
|
||||
<string>APPL</string>
|
||||
<key>CFBundleShortVersionString</key>
|
||||
<string>1.0</string>
|
||||
<key>CFBundleVersion</key>
|
||||
<string>1</string>
|
||||
<key>NSAppTransportSecurity</key>
|
||||
<dict>
|
||||
<key>NSAllowsArbitraryLoads</key>
|
||||
<true/>
|
||||
</dict>
|
||||
<key>UILaunchScreen</key>
|
||||
<dict/>
|
||||
<key>UISupportedInterfaceOrientations</key>
|
||||
<array>
|
||||
<string>UIInterfaceOrientationPortrait</string>
|
||||
</array>
|
||||
</dict>
|
||||
</plist>
|
||||
|
||||
15
examples/ios/WakuExample/WakuExample-Bridging-Header.h
Normal file
15
examples/ios/WakuExample/WakuExample-Bridging-Header.h
Normal file
@ -0,0 +1,15 @@
|
||||
//
|
||||
// WakuExample-Bridging-Header.h
|
||||
// WakuExample
|
||||
//
|
||||
// Bridging header to expose libwaku C functions to Swift
|
||||
//
|
||||
|
||||
#ifndef WakuExample_Bridging_Header_h
|
||||
#define WakuExample_Bridging_Header_h
|
||||
|
||||
#import "libwaku.h"
|
||||
|
||||
#endif /* WakuExample_Bridging_Header_h */
|
||||
|
||||
|
||||
19
examples/ios/WakuExample/WakuExampleApp.swift
Normal file
19
examples/ios/WakuExample/WakuExampleApp.swift
Normal file
@ -0,0 +1,19 @@
|
||||
//
|
||||
// WakuExampleApp.swift
|
||||
// WakuExample
|
||||
//
|
||||
// SwiftUI app entry point for Waku iOS example
|
||||
//
|
||||
|
||||
import SwiftUI
|
||||
|
||||
@main
|
||||
struct WakuExampleApp: App {
|
||||
var body: some Scene {
|
||||
WindowGroup {
|
||||
ContentView()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
739
examples/ios/WakuExample/WakuNode.swift
Normal file
739
examples/ios/WakuExample/WakuNode.swift
Normal file
@ -0,0 +1,739 @@
|
||||
//
|
||||
// WakuNode.swift
|
||||
// WakuExample
|
||||
//
|
||||
// Swift wrapper around libwaku C API for edge mode (lightpush + filter)
|
||||
// Uses Swift actors for thread safety and UI responsiveness
|
||||
//
|
||||
|
||||
import Foundation
|
||||
|
||||
// MARK: - Data Types
|
||||
|
||||
/// Message received from Waku network
|
||||
struct WakuMessage: Identifiable, Equatable, Sendable {
|
||||
let id: String // messageHash from Waku - unique identifier for deduplication
|
||||
let payload: String
|
||||
let contentTopic: String
|
||||
let timestamp: Date
|
||||
}
|
||||
|
||||
/// Waku node status
|
||||
enum WakuNodeStatus: String, Sendable {
|
||||
case stopped = "Stopped"
|
||||
case starting = "Starting..."
|
||||
case running = "Running"
|
||||
case error = "Error"
|
||||
}
|
||||
|
||||
/// Status updates from WakuActor to WakuNode
|
||||
enum WakuStatusUpdate: Sendable {
|
||||
case statusChanged(WakuNodeStatus)
|
||||
case connectionChanged(isConnected: Bool)
|
||||
case filterSubscriptionChanged(subscribed: Bool, failedAttempts: Int)
|
||||
case maintenanceChanged(active: Bool)
|
||||
case error(String)
|
||||
}
|
||||
|
||||
/// Error with timestamp for toast queue
|
||||
struct TimestampedError: Identifiable, Equatable {
|
||||
let id = UUID()
|
||||
let message: String
|
||||
let timestamp: Date
|
||||
|
||||
static func == (lhs: TimestampedError, rhs: TimestampedError) -> Bool {
|
||||
lhs.id == rhs.id
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - Callback Context for C API
|
||||
|
||||
private final class CallbackContext: @unchecked Sendable {
|
||||
private let lock = NSLock()
|
||||
private var _continuation: CheckedContinuation<(success: Bool, result: String?), Never>?
|
||||
private var _resumed = false
|
||||
var success: Bool = false
|
||||
var result: String?
|
||||
|
||||
var continuation: CheckedContinuation<(success: Bool, result: String?), Never>? {
|
||||
get {
|
||||
lock.lock()
|
||||
defer { lock.unlock() }
|
||||
return _continuation
|
||||
}
|
||||
set {
|
||||
lock.lock()
|
||||
defer { lock.unlock() }
|
||||
_continuation = newValue
|
||||
}
|
||||
}
|
||||
|
||||
/// Thread-safe resume - ensures continuation is only resumed once
|
||||
/// Returns true if this call actually resumed, false if already resumed
|
||||
@discardableResult
|
||||
func resumeOnce(returning value: (success: Bool, result: String?)) -> Bool {
|
||||
lock.lock()
|
||||
defer { lock.unlock() }
|
||||
|
||||
guard !_resumed, let cont = _continuation else {
|
||||
return false
|
||||
}
|
||||
|
||||
_resumed = true
|
||||
_continuation = nil
|
||||
cont.resume(returning: value)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// MARK: - WakuActor
|
||||
|
||||
/// Actor that isolates all Waku operations from the main thread
|
||||
/// All C API calls and mutable state are contained here
|
||||
actor WakuActor {
|
||||
|
||||
// MARK: - State
|
||||
|
||||
private var ctx: UnsafeMutableRawPointer?
|
||||
private var seenMessageHashes: Set<String> = []
|
||||
private var isSubscribed: Bool = false
|
||||
private var isSubscribing: Bool = false
|
||||
private var hasPeers: Bool = false
|
||||
private var maintenanceTask: Task<Void, Never>?
|
||||
private var eventProcessingTask: Task<Void, Never>?
|
||||
|
||||
// Stream continuations for communicating with UI
|
||||
private var messageContinuation: AsyncStream<WakuMessage>.Continuation?
|
||||
private var statusContinuation: AsyncStream<WakuStatusUpdate>.Continuation?
|
||||
|
||||
// Event stream from C callbacks
|
||||
private var eventContinuation: AsyncStream<String>.Continuation?
|
||||
|
||||
// Configuration
|
||||
let defaultPubsubTopic = "/waku/2/rs/1/0"
|
||||
let defaultContentTopic = "/waku-ios-example/1/chat/proto"
|
||||
private let staticPeer = "/dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ"
|
||||
|
||||
// Subscription maintenance settings
|
||||
private let maxFailedSubscribes = 3
|
||||
private let retryWaitSeconds: UInt64 = 2_000_000_000 // 2 seconds in nanoseconds
|
||||
private let maintenanceIntervalSeconds: UInt64 = 30_000_000_000 // 30 seconds in nanoseconds
|
||||
private let maxSeenHashes = 1000
|
||||
|
||||
// MARK: - Static callback storage (for C callbacks)
|
||||
|
||||
// We need a way for C callbacks to reach the actor
|
||||
// Using a simple static reference (safe because we only have one instance)
|
||||
private static var sharedEventContinuation: AsyncStream<String>.Continuation?
|
||||
|
||||
private static let eventCallback: WakuCallBack = { ret, msg, len, userData in
|
||||
guard ret == RET_OK, let msg = msg else { return }
|
||||
let str = String(cString: msg)
|
||||
WakuActor.sharedEventContinuation?.yield(str)
|
||||
}
|
||||
|
||||
private static let syncCallback: WakuCallBack = { ret, msg, len, userData in
|
||||
guard let userData = userData else { return }
|
||||
let context = Unmanaged<CallbackContext>.fromOpaque(userData).takeUnretainedValue()
|
||||
let success = (ret == RET_OK)
|
||||
var resultStr: String? = nil
|
||||
if let msg = msg {
|
||||
resultStr = String(cString: msg)
|
||||
}
|
||||
context.resumeOnce(returning: (success, resultStr))
|
||||
}
|
||||
|
||||
// MARK: - Stream Setup
|
||||
|
||||
func setMessageContinuation(_ continuation: AsyncStream<WakuMessage>.Continuation?) {
|
||||
self.messageContinuation = continuation
|
||||
}
|
||||
|
||||
func setStatusContinuation(_ continuation: AsyncStream<WakuStatusUpdate>.Continuation?) {
|
||||
self.statusContinuation = continuation
|
||||
}
|
||||
|
||||
// MARK: - Public API
|
||||
|
||||
var isRunning: Bool {
|
||||
ctx != nil
|
||||
}
|
||||
|
||||
var hasConnectedPeers: Bool {
|
||||
hasPeers
|
||||
}
|
||||
|
||||
func start() async {
|
||||
guard ctx == nil else {
|
||||
print("[WakuActor] Already started")
|
||||
return
|
||||
}
|
||||
|
||||
statusContinuation?.yield(.statusChanged(.starting))
|
||||
|
||||
// Create event stream for C callbacks
|
||||
let eventStream = AsyncStream<String> { continuation in
|
||||
self.eventContinuation = continuation
|
||||
WakuActor.sharedEventContinuation = continuation
|
||||
}
|
||||
|
||||
// Start event processing task
|
||||
eventProcessingTask = Task { [weak self] in
|
||||
for await eventJson in eventStream {
|
||||
await self?.handleEvent(eventJson)
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize the node
|
||||
let success = await initializeNode()
|
||||
|
||||
if success {
|
||||
statusContinuation?.yield(.statusChanged(.running))
|
||||
|
||||
// Connect to peer
|
||||
let connected = await connectToPeer()
|
||||
if connected {
|
||||
hasPeers = true
|
||||
statusContinuation?.yield(.connectionChanged(isConnected: true))
|
||||
|
||||
// Start maintenance loop
|
||||
startMaintenanceLoop()
|
||||
} else {
|
||||
statusContinuation?.yield(.error("Failed to connect to service peer"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Tears the node down: cancels background tasks, closes the event stream,
/// emits final status updates, resets actor state, and then unsubscribes /
/// stops / destroys the C node in a detached fire-and-forget task so the
/// caller is not blocked on (potentially slow) FFI teardown.
func stop() async {
    guard let context = ctx else { return }

    // Stop maintenance loop
    maintenanceTask?.cancel()
    maintenanceTask = nil

    // Stop event processing
    eventProcessingTask?.cancel()
    eventProcessingTask = nil

    // Close event stream (including the static mirror used by the C callback)
    eventContinuation?.finish()
    eventContinuation = nil
    WakuActor.sharedEventContinuation = nil

    statusContinuation?.yield(.statusChanged(.stopped))
    statusContinuation?.yield(.connectionChanged(isConnected: false))
    statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0))
    statusContinuation?.yield(.maintenanceChanged(active: false))

    // Reset state. The C context is captured locally first so the detached
    // task below can still drive teardown after `ctx` is nilled out.
    let ctxToStop = context
    ctx = nil
    isSubscribed = false
    isSubscribing = false
    hasPeers = false
    seenMessageHashes.removeAll()

    // Unsubscribe and stop in background (fire and forget). Order matters:
    // unsubscribe from the filter, then stop the node, then destroy the
    // context — destroying first would invalidate the pointer.
    Task.detached {
        // Unsubscribe
        _ = await self.callWakuSync { waku_filter_unsubscribe_all(ctxToStop, WakuActor.syncCallback, $0) }
        print("[WakuActor] Unsubscribed from filter")

        // Stop
        _ = await self.callWakuSync { waku_stop(ctxToStop, WakuActor.syncCallback, $0) }
        print("[WakuActor] Node stopped")

        // Destroy
        _ = await self.callWakuSync { waku_destroy(ctxToStop, WakuActor.syncCallback, $0) }
        print("[WakuActor] Node destroyed")
    }
}
|
||||
|
||||
/// Publishes a UTF-8 text message over lightpush on the default pubsub topic.
/// - Parameters:
///   - message: Plain-text payload; base64-encoded into the Waku payload.
///   - contentTopic: Optional content topic; defaults to `defaultContentTopic`.
/// Requires a started node and at least one connected peer; otherwise the
/// failure is reported via the status stream and the message is dropped.
func publish(message: String, contentTopic: String? = nil) async {
    guard let context = ctx else {
        print("[WakuActor] Node not started")
        return
    }

    guard hasPeers else {
        print("[WakuActor] No peers connected yet")
        statusContinuation?.yield(.error("No peers connected yet. Please wait..."))
        return
    }

    let topic = contentTopic ?? defaultContentTopic
    guard let payloadData = message.data(using: .utf8) else { return }

    // Nanosecond-resolution timestamp, matching the Waku message schema.
    let timestamp = Int64(Date().timeIntervalSince1970 * 1_000_000_000)

    // Bug fix: build the message JSON with JSONSerialization instead of
    // string interpolation, so a topic containing quotes or backslashes is
    // escaped correctly and cannot produce malformed JSON.
    let messageObject: [String: Any] = [
        "payload": payloadData.base64EncodedString(),
        "contentTopic": topic,
        "timestamp": timestamp,
    ]
    guard let jsonData = try? JSONSerialization.data(withJSONObject: messageObject),
          let jsonMessage = String(data: jsonData, encoding: .utf8) else {
        statusContinuation?.yield(.error("Failed to send message"))
        return
    }

    let result = await callWakuSync { userData in
        waku_lightpush_publish(
            context,
            self.defaultPubsubTopic,
            jsonMessage,
            WakuActor.syncCallback,
            userData
        )
    }

    if result.success {
        print("[WakuActor] Published message")
    } else {
        print("[WakuActor] Publish error: \(result.result ?? "unknown")")
        statusContinuation?.yield(.error("Failed to send message"))
    }
}
|
||||
|
||||
/// Discards the cached filter-subscription state and immediately issues a
/// fresh subscribe request, surfacing the reset on the status stream first.
func resubscribe() async {
    print("[WakuActor] Force resubscribe requested")
    resetSubscriptionState()
    statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0))
    _ = await subscribe()
}
|
||||
|
||||
// MARK: - Private Methods
|
||||
|
||||
/// Creates (waku_new) and starts (waku_start) the underlying C node.
/// Returns true on success; on failure an error status is emitted and `ctx`
/// is left nil.
private func initializeNode() async -> Bool {
    // Node configuration consumed by waku_new: lightpush/filter client mode
    // (relay disabled), with discv5 bootstrap nodes and DNS discovery.
    let config = """
    {
        "tcpPort": 60000,
        "clusterId": 1,
        "shards": [0],
        "relay": false,
        "lightpush": true,
        "filter": true,
        "logLevel": "DEBUG",
        "discv5Discovery": true,
        "discv5BootstrapNodes": [
            "enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw",
            "enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw"
        ],
        "discv5UdpPort": 9999,
        "dnsDiscovery": true,
        "dnsDiscoveryUrl": "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im",
        "dnsDiscoveryNameServers": ["8.8.8.8", "1.0.0.1"]
    }
    """

    // Create node - waku_new is special, it returns the context directly
    // (the callback only reports success/error text).
    let createResult = await withCheckedContinuation { (continuation: CheckedContinuation<(ctx: UnsafeMutableRawPointer?, success: Bool, result: String?), Never>) in
        let callbackCtx = CallbackContext()
        // Retained across the C boundary; released in the asyncAfter below.
        let userDataPtr = Unmanaged.passRetained(callbackCtx).toOpaque()

        // Set up a simple callback for waku_new
        let newCtx = waku_new(config, { ret, msg, len, userData in
            guard let userData = userData else { return }
            let context = Unmanaged<CallbackContext>.fromOpaque(userData).takeUnretainedValue()
            context.success = (ret == RET_OK)
            if let msg = msg {
                context.result = String(cString: msg)
            }
        }, userDataPtr)

        // Small delay to ensure callback completes
        // NOTE(review): assumes waku_new's callback fires within 100 ms; if
        // it is slower, success/result are read before being set — confirm
        // against libwaku's callback timing.
        DispatchQueue.global().asyncAfter(deadline: .now() + 0.1) {
            Unmanaged<CallbackContext>.fromOpaque(userDataPtr).release()
            continuation.resume(returning: (newCtx, callbackCtx.success, callbackCtx.result))
        }
    }

    guard createResult.ctx != nil else {
        statusContinuation?.yield(.statusChanged(.error))
        statusContinuation?.yield(.error("Failed to create node: \(createResult.result ?? "unknown")"))
        return false
    }

    ctx = createResult.ctx

    // Set event callback (events are routed through the static shared
    // continuation installed in start()).
    waku_set_event_callback(ctx, WakuActor.eventCallback, nil)

    // Start node
    let startResult = await callWakuSync { userData in
        waku_start(self.ctx, WakuActor.syncCallback, userData)
    }

    guard startResult.success else {
        statusContinuation?.yield(.statusChanged(.error))
        statusContinuation?.yield(.error("Failed to start node: \(startResult.result ?? "unknown")"))
        ctx = nil
        return false
    }

    print("[WakuActor] Node started")
    return true
}
|
||||
|
||||
/// Dials the configured static service peer with a 10 s timeout.
/// - Returns: true when the connection was established.
private func connectToPeer() async -> Bool {
    guard let context = ctx else { return false }

    print("[WakuActor] Connecting to static peer...")

    let outcome = await callWakuSync { userData in
        waku_connect(context, self.staticPeer, 10000, WakuActor.syncCallback, userData)
    }

    guard outcome.success else {
        print("[WakuActor] Failed to connect: \(outcome.result ?? "unknown")")
        return false
    }

    print("[WakuActor] Connected to peer successfully")
    return true
}
|
||||
|
||||
/// Issues a filter subscribe request for the given (or default) content
/// topic on the default pubsub topic. Re-entrant calls while already
/// subscribed or mid-subscribe short-circuit and report the current state.
/// - Returns: true when the subscribe request succeeded.
private func subscribe(contentTopic: String? = nil) async -> Bool {
    guard let context = ctx else { return false }
    guard !isSubscribed && !isSubscribing else { return isSubscribed }

    isSubscribing = true
    let targetTopic = contentTopic ?? defaultContentTopic

    let outcome = await callWakuSync { userData in
        waku_filter_subscribe(
            context,
            self.defaultPubsubTopic,
            targetTopic,
            WakuActor.syncCallback,
            userData
        )
    }

    isSubscribing = false

    guard outcome.success else {
        print("[WakuActor] Subscribe error: \(outcome.result ?? "unknown")")
        isSubscribed = false
        return false
    }

    print("[WakuActor] Subscribe request successful to \(targetTopic)")
    isSubscribed = true
    statusContinuation?.yield(.filterSubscriptionChanged(subscribed: true, failedAttempts: 0))
    return true
}
|
||||
|
||||
/// Pings the static service peer with a 10 s timeout.
/// - Returns: true when the peer answered the ping.
private func pingFilterPeer() async -> Bool {
    guard let context = ctx else { return false }

    let outcome = await callWakuSync { userData in
        waku_ping_peer(context, self.staticPeer, 10000, WakuActor.syncCallback, userData)
    }
    return outcome.success
}
|
||||
|
||||
// MARK: - Subscription Maintenance
|
||||
|
||||
/// Launches the long-running task that keeps the filter subscription alive:
/// it periodically pings the filter peer and re-subscribes whenever the ping
/// or the cached subscription state indicates the subscription was lost.
private func startMaintenanceLoop() {
    guard maintenanceTask == nil else {
        print("[WakuActor] Maintenance loop already running")
        return
    }

    statusContinuation?.yield(.maintenanceChanged(active: true))
    print("[WakuActor] Starting subscription maintenance loop")

    maintenanceTask = Task { [weak self] in
        guard let self = self else { return }

        var failedSubscribes = 0
        // The first failed ping after connecting is not treated as a lost
        // subscription — none has been established yet at that point.
        var isFirstPingOnConnection = true

        while !Task.isCancelled {
            guard await self.isRunning else { break }

            print("[WakuActor] Maintaining subscription...")

            let pingSuccess = await self.pingFilterPeer()
            let currentlySubscribed = await self.isSubscribed

            // Healthy path: peer reachable and subscription believed live.
            // NOTE(review): maintenanceIntervalSeconds is fed to
            // Task.sleep(nanoseconds:); despite the "Seconds" name it
            // presumably already stores a nanosecond value — confirm at the
            // property's declaration.
            if pingSuccess && currentlySubscribed {
                print("[WakuActor] Subscription is live, waiting 30s")
                try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds)
                continue
            }

            if !isFirstPingOnConnection && !pingSuccess {
                print("[WakuActor] Ping failed - subscription may be lost")
                await self.statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: failedSubscribes))
            }
            isFirstPingOnConnection = false

            print("[WakuActor] No active subscription found. Sending subscribe request...")

            await self.resetSubscriptionState()
            let subscribeSuccess = await self.subscribe()

            if subscribeSuccess {
                print("[WakuActor] Subscribe request successful")
                failedSubscribes = 0
                try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds)
                continue
            }

            failedSubscribes += 1
            await self.statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: failedSubscribes))
            print("[WakuActor] Subscribe request failed. Attempt \(failedSubscribes)/\(self.maxFailedSubscribes)")

            if failedSubscribes < self.maxFailedSubscribes {
                print("[WakuActor] Retrying in 2s...")
                try? await Task.sleep(nanoseconds: self.retryWaitSeconds)
            } else {
                // Give up for this round: report the failure, reset the
                // counter, and back off a full maintenance interval.
                print("[WakuActor] Max subscribe failures reached")
                await self.statusContinuation?.yield(.error("Filter subscription failed after \(self.maxFailedSubscribes) attempts"))
                failedSubscribes = 0
                try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds)
            }
        }

        print("[WakuActor] Subscription maintenance loop stopped")
        await self.statusContinuation?.yield(.maintenanceChanged(active: false))
    }
}
|
||||
|
||||
/// Clears the cached filter-subscription flags so the next subscribe() call
/// issues a fresh request instead of short-circuiting.
private func resetSubscriptionState() {
    isSubscribed = false
    isSubscribing = false
}
|
||||
|
||||
// MARK: - Event Handling
|
||||
|
||||
/// Parses one JSON event string emitted by the C event callback and routes
/// it by its "eventType" field; unknown or unparsable events are ignored.
private func handleEvent(_ eventJson: String) {
    guard let data = eventJson.data(using: .utf8),
          let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
          let eventType = json["eventType"] as? String else {
        return
    }

    switch eventType {
    case "connection_change":
        handleConnectionChange(json)
    case "message":
        handleMessage(json)
    default:
        break
    }
}
|
||||
|
||||
/// Handles a "connection_change" event: "Joined"/"Identified" mark the node
/// as having peers; "Left" flags the filter subscription as lost so the
/// maintenance loop re-subscribes.
/// NOTE(review): `hasPeers` is not cleared on "Left" — presumably because
/// other peers may remain connected; confirm this is intentional.
private func handleConnectionChange(_ json: [String: Any]) {
    guard let peerEvent = json["peerEvent"] as? String else { return }

    if peerEvent == "Joined" || peerEvent == "Identified" {
        hasPeers = true
        statusContinuation?.yield(.connectionChanged(isConnected: true))
    } else if peerEvent == "Left" {
        statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0))
    }
}
|
||||
|
||||
/// Handles an incoming "message" event: validates and decodes the payload,
/// deduplicates by message hash, and forwards the decoded message to the
/// UI message stream. Malformed or non-UTF-8 payloads are dropped silently.
private func handleMessage(_ json: [String: Any]) {
    guard let messageHash = json["messageHash"] as? String,
          let wakuMessage = json["wakuMessage"] as? [String: Any],
          let payloadBase64 = wakuMessage["payload"] as? String,
          let contentTopic = wakuMessage["contentTopic"] as? String,
          let payloadData = Data(base64Encoded: payloadBase64),
          let payloadString = String(data: payloadData, encoding: .utf8) else {
        return
    }

    // Deduplicate
    guard !seenMessageHashes.contains(messageHash) else {
        return
    }

    seenMessageHashes.insert(messageHash)

    // Limit memory usage. Bug fix: after flushing the cache, re-insert the
    // current hash — previously removeAll() also dropped the hash just
    // inserted above, so the triggering message could be delivered again.
    if seenMessageHashes.count > maxSeenHashes {
        seenMessageHashes.removeAll()
        seenMessageHashes.insert(messageHash)
    }

    let message = WakuMessage(
        id: messageHash,
        payload: payloadString,
        contentTopic: contentTopic,
        timestamp: Date()
    )

    messageContinuation?.yield(message)
}
|
||||
|
||||
// MARK: - Helper for synchronous C calls
|
||||
|
||||
/// Bridges a callback-style libwaku C call into async/await.
///
/// The closure receives an opaque userData pointer that must be forwarded to
/// the C call; the C-side callback resumes the continuation with
/// (success, result). A 15 s watchdog resumes with a timeout instead if the
/// callback never fires (resumeOnce guarantees single resumption) and
/// releases the retained CallbackContext either way.
/// NOTE(review): the context is released only by the watchdog, i.e. 15 s
/// after every call — presumably acceptable for this example app; confirm
/// no earlier cleanup is required.
private func callWakuSync(_ work: @escaping (UnsafeMutableRawPointer) -> Void) async -> (success: Bool, result: String?) {
    await withCheckedContinuation { continuation in
        let context = CallbackContext()
        context.continuation = continuation
        // Retain the context across the C boundary; balanced by the release
        // in the watchdog block below.
        let userDataPtr = Unmanaged.passRetained(context).toOpaque()

        work(userDataPtr)

        // Set a timeout to avoid hanging forever
        DispatchQueue.global().asyncAfter(deadline: .now() + 15) {
            // Try to resume with timeout - will be ignored if callback already resumed
            let didTimeout = context.resumeOnce(returning: (false, "Timeout"))
            if didTimeout {
                print("[WakuActor] Call timed out")
            }
            Unmanaged<CallbackContext>.fromOpaque(userDataPtr).release()
        }
    }
}
|
||||
}
|
||||
|
||||
// MARK: - WakuNode (MainActor UI Wrapper)
|
||||
|
||||
/// Main-thread UI wrapper that consumes updates from WakuActor via AsyncStreams
|
||||
@MainActor
class WakuNode: ObservableObject {

    // MARK: - Published Properties (UI State)

    @Published var status: WakuNodeStatus = .stopped
    @Published var receivedMessages: [WakuMessage] = []
    @Published var errorQueue: [TimestampedError] = []
    @Published var isConnected: Bool = false
    @Published var filterSubscribed: Bool = false
    @Published var subscriptionMaintenanceActive: Bool = false
    @Published var failedSubscribeAttempts: Int = 0

    // Topics (read-only access to actor's config)
    var defaultPubsubTopic: String { "/waku/2/rs/1/0" }
    var defaultContentTopic: String { "/waku-ios-example/1/chat/proto" }

    // MARK: - Private Properties

    private let actor = WakuActor()
    // Long-lived tasks that drain the actor's message and status streams.
    private var messageTask: Task<Void, Never>?
    private var statusTask: Task<Void, Never>?

    // MARK: - Initialization

    init() {}

    deinit {
        // Cancel the stream-draining tasks so they do not outlive the wrapper.
        messageTask?.cancel()
        statusTask?.cancel()
    }

    // MARK: - Public API

    /// Wires the message/status AsyncStreams into the actor, starts the
    /// MainActor tasks that mirror them into @Published state, and then
    /// starts the underlying WakuActor. No-op unless currently stopped or
    /// in the error state.
    func start() {
        guard status == .stopped || status == .error else {
            print("[WakuNode] Already started or starting")
            return
        }

        // Create message stream
        let messageStream = AsyncStream<WakuMessage> { continuation in
            Task {
                await self.actor.setMessageContinuation(continuation)
            }
        }

        // Create status stream
        let statusStream = AsyncStream<WakuStatusUpdate> { continuation in
            Task {
                await self.actor.setStatusContinuation(continuation)
            }
        }

        // Start consuming messages: newest first, capped at 100 entries.
        messageTask = Task { @MainActor in
            for await message in messageStream {
                self.receivedMessages.insert(message, at: 0)
                if self.receivedMessages.count > 100 {
                    self.receivedMessages.removeLast()
                }
            }
        }

        // Start consuming status updates
        statusTask = Task { @MainActor in
            for await update in statusStream {
                self.handleStatusUpdate(update)
            }
        }

        // Start the actor
        Task {
            await actor.start()
        }
    }

    /// Cancels the stream-draining tasks, asks the actor to stop in the
    /// background, and resets the published UI state immediately so the UI
    /// does not wait for the (async) node teardown.
    func stop() {
        messageTask?.cancel()
        messageTask = nil
        statusTask?.cancel()
        statusTask = nil

        Task {
            await actor.stop()
        }

        // Immediate UI update
        status = .stopped
        isConnected = false
        filterSubscribed = false
        subscriptionMaintenanceActive = false
        failedSubscribeAttempts = 0
    }

    /// Fire-and-forget publish of a text message via the actor.
    func publish(message: String, contentTopic: String? = nil) {
        Task {
            await actor.publish(message: message, contentTopic: contentTopic)
        }
    }

    /// Fire-and-forget forced filter resubscription via the actor.
    func resubscribe() {
        Task {
            await actor.resubscribe()
        }
    }

    /// Removes a single error from the error banner queue.
    func dismissError(_ error: TimestampedError) {
        errorQueue.removeAll { $0.id == error.id }
    }

    /// Clears the entire error banner queue.
    func dismissAllErrors() {
        errorQueue.removeAll()
    }

    // MARK: - Private Methods

    /// Mirrors one actor status update into the corresponding @Published
    /// properties; errors are queued and auto-dismissed after 10 seconds.
    private func handleStatusUpdate(_ update: WakuStatusUpdate) {
        switch update {
        case .statusChanged(let newStatus):
            status = newStatus

        case .connectionChanged(let connected):
            isConnected = connected

        case .filterSubscriptionChanged(let subscribed, let attempts):
            filterSubscribed = subscribed
            failedSubscribeAttempts = attempts

        case .maintenanceChanged(let active):
            subscriptionMaintenanceActive = active

        case .error(let message):
            let error = TimestampedError(message: message, timestamp: Date())
            errorQueue.append(error)

            // Schedule auto-dismiss after 10 seconds; keyed by id so a
            // manually-dismissed error is not removed twice.
            let errorId = error.id
            Task { @MainActor in
                try? await Task.sleep(nanoseconds: 10_000_000_000)
                self.errorQueue.removeAll { $0.id == errorId }
            }
        }
    }
}
|
||||
253
examples/ios/WakuExample/libwaku.h
Normal file
253
examples/ios/WakuExample/libwaku.h
Normal file
@ -0,0 +1,253 @@
|
||||
|
||||
// Generated manually and inspired by the one generated by the Nim Compiler.
// In order to see the header file generated by Nim just run `make libwaku`
// from the root repo folder and the header should be created in
// nimcache/release/libwaku/libwaku.h
#ifndef __libwaku__
#define __libwaku__

#include <stddef.h>
#include <stdint.h>

// The possible returned values for the functions that return int
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2

#ifdef __cplusplus
extern "C" {
#endif

// Common callback signature for all asynchronous libwaku calls: callerRet
// carries one of the RET_* codes, msg/len the response buffer, and userData
// echoes back the pointer supplied at call time.
typedef void (*WakuCallBack) (int callerRet, const char* msg, size_t len, void* userData);

// --- Node lifecycle --------------------------------------------------------

// Creates a new instance of the waku node.
// Sets up the waku node from the given configuration.
// Returns a pointer to the Context needed by the rest of the API functions.
void* waku_new(
    const char* configJson,
    WakuCallBack callback,
    void* userData);

int waku_start(void* ctx,
               WakuCallBack callback,
               void* userData);

int waku_stop(void* ctx,
              WakuCallBack callback,
              void* userData);

// Destroys an instance of a waku node created with waku_new
int waku_destroy(void* ctx,
                 WakuCallBack callback,
                 void* userData);

int waku_version(void* ctx,
                 WakuCallBack callback,
                 void* userData);

// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
void waku_set_event_callback(void* ctx,
                             WakuCallBack callback,
                             void* userData);

// --- Topic helpers ---------------------------------------------------------

int waku_content_topic(void* ctx,
                       const char* appName,
                       unsigned int appVersion,
                       const char* contentTopicName,
                       const char* encoding,
                       WakuCallBack callback,
                       void* userData);

int waku_pubsub_topic(void* ctx,
                      const char* topicName,
                      WakuCallBack callback,
                      void* userData);

int waku_default_pubsub_topic(void* ctx,
                              WakuCallBack callback,
                              void* userData);

// --- Publish / subscribe (relay, lightpush, filter) ------------------------

int waku_relay_publish(void* ctx,
                       const char* pubSubTopic,
                       const char* jsonWakuMessage,
                       unsigned int timeoutMs,
                       WakuCallBack callback,
                       void* userData);

int waku_lightpush_publish(void* ctx,
                           const char* pubSubTopic,
                           const char* jsonWakuMessage,
                           WakuCallBack callback,
                           void* userData);

int waku_relay_subscribe(void* ctx,
                         const char* pubSubTopic,
                         WakuCallBack callback,
                         void* userData);

int waku_relay_add_protected_shard(void* ctx,
                                   int clusterId,
                                   int shardId,
                                   char* publicKey,
                                   WakuCallBack callback,
                                   void* userData);

int waku_relay_unsubscribe(void* ctx,
                           const char* pubSubTopic,
                           WakuCallBack callback,
                           void* userData);

int waku_filter_subscribe(void* ctx,
                          const char* pubSubTopic,
                          const char* contentTopics,
                          WakuCallBack callback,
                          void* userData);

int waku_filter_unsubscribe(void* ctx,
                            const char* pubSubTopic,
                            const char* contentTopics,
                            WakuCallBack callback,
                            void* userData);

int waku_filter_unsubscribe_all(void* ctx,
                                WakuCallBack callback,
                                void* userData);

// --- Relay peer inspection -------------------------------------------------

int waku_relay_get_num_connected_peers(void* ctx,
                                       const char* pubSubTopic,
                                       WakuCallBack callback,
                                       void* userData);

int waku_relay_get_connected_peers(void* ctx,
                                   const char* pubSubTopic,
                                   WakuCallBack callback,
                                   void* userData);

int waku_relay_get_num_peers_in_mesh(void* ctx,
                                     const char* pubSubTopic,
                                     WakuCallBack callback,
                                     void* userData);

int waku_relay_get_peers_in_mesh(void* ctx,
                                 const char* pubSubTopic,
                                 WakuCallBack callback,
                                 void* userData);

// --- Store -----------------------------------------------------------------

int waku_store_query(void* ctx,
                     const char* jsonQuery,
                     const char* peerAddr,
                     int timeoutMs,
                     WakuCallBack callback,
                     void* userData);

// --- Peer management -------------------------------------------------------

int waku_connect(void* ctx,
                 const char* peerMultiAddr,
                 unsigned int timeoutMs,
                 WakuCallBack callback,
                 void* userData);

int waku_disconnect_peer_by_id(void* ctx,
                               const char* peerId,
                               WakuCallBack callback,
                               void* userData);

int waku_disconnect_all_peers(void* ctx,
                              WakuCallBack callback,
                              void* userData);

int waku_dial_peer(void* ctx,
                   const char* peerMultiAddr,
                   const char* protocol,
                   int timeoutMs,
                   WakuCallBack callback,
                   void* userData);

int waku_dial_peer_by_id(void* ctx,
                         const char* peerId,
                         const char* protocol,
                         int timeoutMs,
                         WakuCallBack callback,
                         void* userData);

int waku_get_peerids_from_peerstore(void* ctx,
                                    WakuCallBack callback,
                                    void* userData);

int waku_get_connected_peers_info(void* ctx,
                                  WakuCallBack callback,
                                  void* userData);

int waku_get_peerids_by_protocol(void* ctx,
                                 const char* protocol,
                                 WakuCallBack callback,
                                 void* userData);

int waku_listen_addresses(void* ctx,
                          WakuCallBack callback,
                          void* userData);

int waku_get_connected_peers(void* ctx,
                             WakuCallBack callback,
                             void* userData);

// --- Discovery -------------------------------------------------------------

// Returns a list of multiaddress given a url to a DNS discoverable ENR tree
// Parameters
// char* entTreeUrl: URL containing a discoverable ENR tree
// char* nameDnsServer: The nameserver to resolve the ENR tree url.
// int timeoutMs: Timeout value in milliseconds to execute the call.
int waku_dns_discovery(void* ctx,
                       const char* entTreeUrl,
                       const char* nameDnsServer,
                       int timeoutMs,
                       WakuCallBack callback,
                       void* userData);

// Updates the bootnode list used for discovering new peers via DiscoveryV5
// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
int waku_discv5_update_bootnodes(void* ctx,
                                 char* bootnodes,
                                 WakuCallBack callback,
                                 void* userData);

int waku_start_discv5(void* ctx,
                      WakuCallBack callback,
                      void* userData);

int waku_stop_discv5(void* ctx,
                     WakuCallBack callback,
                     void* userData);

// --- Node information & misc -----------------------------------------------

// Retrieves the ENR information
int waku_get_my_enr(void* ctx,
                    WakuCallBack callback,
                    void* userData);

int waku_get_my_peerid(void* ctx,
                       WakuCallBack callback,
                       void* userData);

int waku_get_metrics(void* ctx,
                     WakuCallBack callback,
                     void* userData);

int waku_peer_exchange_request(void* ctx,
                               int numPeers,
                               WakuCallBack callback,
                               void* userData);

int waku_ping_peer(void* ctx,
                   const char* peerAddr,
                   int timeoutMs,
                   WakuCallBack callback,
                   void* userData);

int waku_is_online(void* ctx,
                   WakuCallBack callback,
                   void* userData);

#ifdef __cplusplus
}
#endif

#endif /* __libwaku__ */
|
||||
47
examples/ios/project.yml
Normal file
47
examples/ios/project.yml
Normal file
@ -0,0 +1,47 @@
|
||||
# XcodeGen project specification for the WakuExample iOS app.
name: WakuExample
options:
  bundleIdPrefix: org.waku
  deploymentTarget:
    iOS: "14.0"
  xcodeVersion: "15.0"

settings:
  SWIFT_VERSION: "5.0"
  # iPhone/simulator only; Mac Catalyst is explicitly disabled.
  SUPPORTED_PLATFORMS: "iphoneos iphonesimulator"
  SUPPORTS_MACCATALYST: "NO"

targets:
  WakuExample:
    type: application
    platform: iOS
    supportedDestinations: [iOS]
    sources:
      - WakuExample
    settings:
      INFOPLIST_FILE: WakuExample/Info.plist
      PRODUCT_BUNDLE_IDENTIFIER: org.waku.example
      # Bridging header that exposes the libwaku C API (libwaku.h) to Swift.
      SWIFT_OBJC_BRIDGING_HEADER: WakuExample/WakuExample-Bridging-Header.h
      HEADER_SEARCH_PATHS:
        - "$(PROJECT_DIR)/WakuExample"
      # Per-SDK library paths: separate device and simulator builds of libwaku.
      "LIBRARY_SEARCH_PATHS[sdk=iphoneos*]":
        - "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64"
      "LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]":
        - "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64"
      OTHER_LDFLAGS:
        - "-lc++"
        - "-lwaku"
      IPHONEOS_DEPLOYMENT_TARGET: "14.0"
    info:
      path: WakuExample/Info.plist
      properties:
        CFBundleName: WakuExample
        CFBundleDisplayName: Waku Example
        CFBundleIdentifier: org.waku.example
        CFBundleVersion: "1"
        CFBundleShortVersionString: "1.0"
        UILaunchScreen: {}
        UISupportedInterfaceOrientations:
          - UIInterfaceOrientationPortrait
        # NOTE(review): arbitrary network loads are enabled — presumably for
        # connecting to non-HTTPS peers in this example; confirm before any
        # production use.
        NSAppTransportSecurity:
          NSAllowsArbitraryLoads: true
|
||||
|
||||
201
examples/lightpush_mix/lightpush_publisher_mix.nim
Normal file
201
examples/lightpush_mix/lightpush_publisher_mix.nim
Normal file
@ -0,0 +1,201 @@
|
||||
import
|
||||
std/[tables, times, sequtils, strutils],
|
||||
stew/byteutils,
|
||||
chronicles,
|
||||
results,
|
||||
chronos,
|
||||
confutils,
|
||||
libp2p/crypto/crypto,
|
||||
libp2p/crypto/curve25519,
|
||||
libp2p/protocols/mix,
|
||||
libp2p/protocols/mix/curve25519,
|
||||
libp2p/multiaddress,
|
||||
eth/keys,
|
||||
eth/p2p/discoveryv5/enr,
|
||||
metrics,
|
||||
metrics/chronos_httpserver
|
||||
|
||||
import
|
||||
waku/[
|
||||
common/logging,
|
||||
node/peer_manager,
|
||||
waku_core,
|
||||
waku_core/codecs,
|
||||
waku_node,
|
||||
waku_enr,
|
||||
discovery/waku_discv5,
|
||||
factory/builder,
|
||||
waku_lightpush/client,
|
||||
],
|
||||
./lightpush_publisher_mix_config,
|
||||
./lightpush_publisher_mix_metrics
|
||||
|
||||
const clusterId = 66
|
||||
const shardId = @[0'u16]
|
||||
|
||||
const
|
||||
LightpushPubsubTopic = PubsubTopic("/waku/2/rs/66/0")
|
||||
LightpushContentTopic = ContentTopic("/examples/1/light-pubsub-mix-example/proto")
|
||||
|
||||
proc splitPeerIdAndAddr(maddr: string): (string, string) =
  ## Splits a full multiaddress of the form "<addr>/p2p/<peerId>" into its
  ## transport address and peer-id components. A malformed input is logged
  ## and the default ("", "") tuple is returned.
  let pieces = maddr.split("/p2p/")
  if pieces.len != 2:
    error "Invalid multiaddress format", parts = pieces
    return
  (pieces[0], pieces[1])
|
||||
|
||||
proc setupAndPublish(rng: ref HmacDrbgContext, conf: LightPushMixConf) {.async.} =
|
||||
# use notice to filter all waku messaging
|
||||
setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT)
|
||||
notice "starting publisher", wakuPort = conf.port
|
||||
|
||||
let
|
||||
nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).get()
|
||||
ip = parseIpAddress("0.0.0.0")
|
||||
flags = CapabilitiesBitfield.init(relay = true)
|
||||
|
||||
let relayShards = RelayShards.init(clusterId, shardId).valueOr:
|
||||
error "Relay shards initialization failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
enrBuilder.withWakuRelaySharding(relayShards).expect(
|
||||
"Building ENR with relay sharding failed"
|
||||
)
|
||||
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
setLogLevel(logging.LogLevel.TRACE)
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
builder.withRecord(record)
|
||||
builder.withNetworkConfigurationDetails(ip, Port(conf.port)).tryGet()
|
||||
|
||||
let node = builder.build().tryGet()
|
||||
|
||||
node.mountMetadata(clusterId, shardId).expect(
|
||||
"failed to mount waku metadata protocol"
|
||||
)
|
||||
node.mountLightPushClient()
|
||||
try:
|
||||
await node.mountPeerExchange(some(uint16(clusterId)))
|
||||
except CatchableError:
|
||||
error "failed to mount waku peer-exchange protocol",
|
||||
error = getCurrentExceptionMsg()
|
||||
return
|
||||
|
||||
let (destPeerAddr, destPeerId) = splitPeerIdAndAddr(conf.destPeerAddr)
|
||||
let (pxPeerAddr, pxPeerId) = splitPeerIdAndAddr(conf.pxAddr)
|
||||
info "dest peer address", destPeerAddr = destPeerAddr, destPeerId = destPeerId
|
||||
info "peer exchange address", pxPeerAddr = pxPeerAddr, pxPeerId = pxPeerId
|
||||
let pxPeerInfo =
|
||||
RemotePeerInfo.init(destPeerId, @[MultiAddress.init(destPeerAddr).get()])
|
||||
node.peerManager.addServicePeer(pxPeerInfo, WakuPeerExchangeCodec)
|
||||
|
||||
let pxPeerInfo1 =
|
||||
RemotePeerInfo.init(pxPeerId, @[MultiAddress.init(pxPeerAddr).get()])
|
||||
node.peerManager.addServicePeer(pxPeerInfo1, WakuPeerExchangeCodec)
|
||||
|
||||
if not conf.mixDisabled:
|
||||
let (mixPrivKey, mixPubKey) = generateKeyPair().valueOr:
|
||||
error "failed to generate mix key pair", error = error
|
||||
return
|
||||
(await node.mountMix(clusterId, mixPrivKey, conf.mixnodes)).isOkOr:
|
||||
error "failed to mount waku mix protocol: ", error = $error
|
||||
return
|
||||
|
||||
let dPeerId = PeerId.init(destPeerId).valueOr:
|
||||
error "Failed to initialize PeerId", error = error
|
||||
return
|
||||
|
||||
await node.mountRendezvousClient(clusterId)
|
||||
await node.start()
|
||||
node.peerManager.start()
|
||||
node.startPeerExchangeLoop()
|
||||
try:
|
||||
startMetricsHttpServer("0.0.0.0", Port(8008))
|
||||
except Exception:
|
||||
error "failed to start metrics server: ", error = getCurrentExceptionMsg()
|
||||
(await node.fetchPeerExchangePeers()).isOkOr:
|
||||
warn "Cannot fetch peers from peer exchange", cause = error
|
||||
|
||||
if not conf.mixDisabled:
|
||||
while node.getMixNodePoolSize() < conf.minMixPoolSize:
|
||||
info "waiting for mix nodes to be discovered",
|
||||
currentpoolSize = node.getMixNodePoolSize()
|
||||
await sleepAsync(1000)
|
||||
notice "publisher service started with mix node pool size ",
|
||||
currentpoolSize = node.getMixNodePoolSize()
|
||||
|
||||
var i = 0
|
||||
while i < conf.numMsgs:
|
||||
var conn: Connection
|
||||
if conf.mixDisabled:
|
||||
let connOpt = await node.peerManager.dialPeer(dPeerId, WakuLightPushCodec)
|
||||
if connOpt.isNone():
|
||||
error "failed to dial peer with WakuLightPushCodec", target_peer_id = dPeerId
|
||||
return
|
||||
conn = connOpt.get()
|
||||
else:
|
||||
conn = node.wakuMix.toConnection(
|
||||
MixDestination.exitNode(dPeerId), # destination lightpush peer
|
||||
WakuLightPushCodec, # protocol codec which will be used over the mix connection
|
||||
MixParameters(expectReply: Opt.some(true), numSurbs: Opt.some(byte(1))),
|
||||
# mix parameters indicating we expect a single reply
|
||||
).valueOr:
|
||||
error "failed to create mix connection", error = error
|
||||
return
|
||||
i = i + 1
|
||||
let text =
|
||||
"""Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam venenatis magna ut tortor faucibus, in vestibulum nibh commodo. Aenean eget vestibulum augue. Nullam suscipit urna non nunc efficitur, at iaculis nisl consequat. Mauris quis ultrices elit. Suspendisse lobortis odio vitae laoreet facilisis. Cras ornare sem felis, at vulputate magna aliquam ac. Duis quis est ultricies, euismod nulla ac, interdum dui. Maecenas sit amet est vitae enim commodo gravida. Proin vitae elit nulla. Donec tempor dolor lectus, in faucibus velit elementum quis. Donec non mauris eu nibh faucibus cursus ut egestas dolor. Aliquam venenatis ligula id velit pulvinar malesuada. Vestibulum scelerisque, justo non porta gravida, nulla justo tempor purus, at sollicitudin erat erat vel libero.
|
||||
Fusce nec eros eu metus tristique aliquet.
|
||||
This is message #""" &
|
||||
$i & """ sent from a publisher using mix. End of transmission."""
|
||||
let message = WakuMessage(
|
||||
payload: toBytes(text), # content of the message
|
||||
contentTopic: LightpushContentTopic, # content topic to publish to
|
||||
ephemeral: true, # tell store nodes to not store it
|
||||
timestamp: getNowInNanosecondTime(),
|
||||
) # current timestamp
|
||||
|
||||
let res =
|
||||
await node.wakuLightpushClient.publish(some(LightpushPubsubTopic), message, conn)
|
||||
|
||||
let startTime = getNowInNanosecondTime()
|
||||
|
||||
(
|
||||
await node.wakuLightpushClient.publishWithConn(
|
||||
LightpushPubsubTopic, message, conn, dPeerId
|
||||
)
|
||||
).isOkOr:
|
||||
error "failed to publish message via mix", error = error.desc
|
||||
lp_mix_failed.inc(labelValues = ["publish_error"])
|
||||
return
|
||||
|
||||
let latency = float64(getNowInNanosecondTime() - startTime) / 1_000_000.0
|
||||
lp_mix_latency.observe(latency)
|
||||
lp_mix_success.inc()
|
||||
notice "published message",
|
||||
text = text,
|
||||
timestamp = message.timestamp,
|
||||
latency = latency,
|
||||
psTopic = LightpushPubsubTopic,
|
||||
contentTopic = LightpushContentTopic
|
||||
|
||||
if conf.mixDisabled:
|
||||
await conn.close()
|
||||
await sleepAsync(conf.msgIntervalMilliseconds)
|
||||
info "Sent all messages via mix"
|
||||
quit(0)
|
||||
|
||||
when isMainModule:
|
||||
let conf = LightPushMixConf.load()
|
||||
let rng = crypto.newRng()
|
||||
asyncSpawn setupAndPublish(rng, conf)
|
||||
runForever()
|
||||
58
examples/lightpush_mix/lightpush_publisher_mix_config.nim
Normal file
58
examples/lightpush_mix/lightpush_publisher_mix_config.nim
Normal file
@ -0,0 +1,58 @@
|
||||
import
|
||||
confutils/defs,
|
||||
libp2p/crypto/curve25519,
|
||||
libp2p/multiaddress,
|
||||
libp2p/multicodec,
|
||||
nimcrypto/utils as ncrutils
|
||||
|
||||
import waku/waku_mix
|
||||
|
||||
type LightPushMixConf* = object
|
||||
destPeerAddr* {.desc: "Destination peer address with peerId.", name: "dp-addr".}:
|
||||
string
|
||||
|
||||
pxAddr* {.desc: "Peer exchange address with peerId.", name: "px-addr".}: string
|
||||
|
||||
port* {.desc: "Port to listen on.", defaultValue: 50000, name: "port".}: int
|
||||
|
||||
numMsgs* {.desc: "Number of messages to send.", defaultValue: 1, name: "num-msgs".}:
|
||||
int
|
||||
|
||||
msgIntervalMilliseconds* {.
|
||||
desc: "Interval between messages in milliseconds.",
|
||||
defaultValue: 1000,
|
||||
name: "msg-interval"
|
||||
.}: int
|
||||
|
||||
minMixPoolSize* {.
|
||||
desc: "Number of mix nodes to be discovered before sending lightpush messages.",
|
||||
defaultValue: 3,
|
||||
name: "min-mix-pool-size"
|
||||
.}: int
|
||||
|
||||
mixDisabled* {.
|
||||
desc: "Do not use mix for publishing.", defaultValue: false, name: "without-mix"
|
||||
.}: bool
|
||||
|
||||
mixnodes* {.
|
||||
desc:
|
||||
"Multiaddress and mix-key of mix node to be statically specified in format multiaddr:mixPubKey. Argument may be repeated.",
|
||||
name: "mixnode"
|
||||
.}: seq[MixNodePubInfo]
|
||||
|
||||
proc parseCmdArg*(T: typedesc[MixNodePubInfo], p: string): T =
|
||||
let elements = p.split(":")
|
||||
if elements.len != 2:
|
||||
raise newException(
|
||||
ValueError, "Invalid format for mix node expected multiaddr:mixPublicKey"
|
||||
)
|
||||
|
||||
let multiaddr = MultiAddress.init(elements[0]).valueOr:
|
||||
raise newException(ValueError, "Invalid multiaddress format")
|
||||
if not multiaddr.contains(multiCodec("ip4")).get():
|
||||
raise newException(
|
||||
ValueError, "Invalid format for ip address, expected a ipv4 multiaddress"
|
||||
)
|
||||
return MixNodePubInfo(
|
||||
multiaddr: elements[0], pubKey: intoCurve25519Key(ncrutils.fromHex(elements[1]))
|
||||
)
|
||||
11
examples/lightpush_mix/lightpush_publisher_mix_metrics.nim
Normal file
11
examples/lightpush_mix/lightpush_publisher_mix_metrics.nim
Normal file
@ -0,0 +1,11 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import metrics
|
||||
|
||||
declarePublicCounter lp_mix_success, "number of lightpush messages sent via mix"
|
||||
|
||||
declarePublicCounter lp_mix_failed,
|
||||
"number of lightpush messages failed via mix", labels = ["error"]
|
||||
|
||||
declarePublicHistogram lp_mix_latency,
|
||||
"lightpush publish latency via mix in milliseconds"
|
||||
@ -54,13 +54,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
|
||||
"Building ENR with relay sharding failed"
|
||||
)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
@ -68,7 +64,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
|
||||
builder.withNetworkConfigurationDetails(ip, Port(wakuPort)).tryGet()
|
||||
let node = builder.build().tryGet()
|
||||
|
||||
node.mountMetadata(clusterId).expect("failed to mount waku metadata protocol")
|
||||
node.mountMetadata(clusterId, shardId).expect(
|
||||
"failed to mount waku metadata protocol"
|
||||
)
|
||||
node.mountLegacyLightPushClient()
|
||||
|
||||
await node.start()
|
||||
@ -84,7 +82,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
|
||||
timestamp: now(),
|
||||
) # current timestamp
|
||||
|
||||
let lightpushPeer = parsePeerInfo(LightpushPeer).get()
|
||||
let lightpushPeer = parsePeerInfo(LightpushPeer).valueOr:
|
||||
error "failed to parse LightpushPeer", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let res = await node.legacyLightpushPublish(
|
||||
some(LightpushPubsubTopic), message, lightpushPeer
|
||||
|
||||
@ -49,13 +49,9 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
|
||||
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
|
||||
@ -102,8 +102,8 @@ print("Waku Relay enabled: {}".format(args.relay))
|
||||
# Set the event callback
|
||||
callback = callback_type(handle_event) # This line is important so that the callback is not gc'ed
|
||||
|
||||
libwaku.waku_set_event_callback.argtypes = [callback_type, ctypes.c_void_p]
|
||||
libwaku.waku_set_event_callback(callback, ctypes.c_void_p(0))
|
||||
libwaku.set_event_callback.argtypes = [callback_type, ctypes.c_void_p]
|
||||
libwaku.set_event_callback(callback, ctypes.c_void_p(0))
|
||||
|
||||
# Start the node
|
||||
libwaku.waku_start.argtypes = [ctypes.c_void_p,
|
||||
@ -117,32 +117,32 @@ libwaku.waku_start(ctx,
|
||||
|
||||
# Subscribe to the default pubsub topic
|
||||
libwaku.waku_relay_subscribe.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_char_p,
|
||||
callback_type,
|
||||
ctypes.c_void_p]
|
||||
ctypes.c_void_p,
|
||||
ctypes.c_char_p]
|
||||
libwaku.waku_relay_subscribe(ctx,
|
||||
default_pubsub_topic.encode('utf-8'),
|
||||
callback_type(
|
||||
#onErrCb
|
||||
lambda ret, msg, len:
|
||||
print("Error calling waku_relay_subscribe: %s" %
|
||||
msg.decode('utf-8'))
|
||||
),
|
||||
ctypes.c_void_p(0))
|
||||
ctypes.c_void_p(0),
|
||||
default_pubsub_topic.encode('utf-8'))
|
||||
|
||||
libwaku.waku_connect.argtypes = [ctypes.c_void_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_int,
|
||||
callback_type,
|
||||
ctypes.c_void_p]
|
||||
ctypes.c_void_p,
|
||||
ctypes.c_char_p,
|
||||
ctypes.c_int]
|
||||
libwaku.waku_connect(ctx,
|
||||
args.peer.encode('utf-8'),
|
||||
10000,
|
||||
# onErrCb
|
||||
callback_type(
|
||||
lambda ret, msg, len:
|
||||
print("Error calling waku_connect: %s" % msg.decode('utf-8'))),
|
||||
ctypes.c_void_p(0))
|
||||
ctypes.c_void_p(0),
|
||||
args.peer.encode('utf-8'),
|
||||
10000)
|
||||
|
||||
# app = Flask(__name__)
|
||||
# @app.route("/")
|
||||
|
||||
@ -27,7 +27,7 @@ public:
|
||||
void initialize(const QString& jsonConfig, WakuCallBack event_handler, void* userData) {
|
||||
ctx = waku_new(jsonConfig.toUtf8().constData(), WakuCallBack(event_handler), userData);
|
||||
|
||||
waku_set_event_callback(ctx, on_event_received, userData);
|
||||
set_event_callback(ctx, on_event_received, userData);
|
||||
qDebug() << "Waku context initialized, ready to start.";
|
||||
}
|
||||
|
||||
|
||||
@ -3,22 +3,22 @@ use std::ffi::CString;
|
||||
use std::os::raw::{c_char, c_int, c_void};
|
||||
use std::{slice, thread, time};
|
||||
|
||||
pub type WakuCallback = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void);
|
||||
pub type FFICallBack = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void);
|
||||
|
||||
extern "C" {
|
||||
pub fn waku_new(
|
||||
config_json: *const u8,
|
||||
cb: WakuCallback,
|
||||
cb: FFICallBack,
|
||||
user_data: *const c_void,
|
||||
) -> *mut c_void;
|
||||
|
||||
pub fn waku_version(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int;
|
||||
pub fn waku_version(ctx: *const c_void, cb: FFICallBack, user_data: *const c_void) -> c_int;
|
||||
|
||||
pub fn waku_start(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int;
|
||||
pub fn waku_start(ctx: *const c_void, cb: FFICallBack, user_data: *const c_void) -> c_int;
|
||||
|
||||
pub fn waku_default_pubsub_topic(
|
||||
ctx: *mut c_void,
|
||||
cb: WakuCallback,
|
||||
cb: FFICallBack,
|
||||
user_data: *const c_void,
|
||||
) -> *mut c_void;
|
||||
}
|
||||
@ -40,7 +40,7 @@ pub unsafe extern "C" fn trampoline<C>(
|
||||
closure(return_val, &buffer_utf8);
|
||||
}
|
||||
|
||||
pub fn get_trampoline<C>(_closure: &C) -> WakuCallback
|
||||
pub fn get_trampoline<C>(_closure: &C) -> FFICallBack
|
||||
where
|
||||
C: FnMut(i32, &str),
|
||||
{
|
||||
|
||||
@ -47,13 +47,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
|
||||
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
let recordRes = enrBuilder.build()
|
||||
let record =
|
||||
if recordRes.isErr():
|
||||
error "failed to create enr record", error = recordRes.error
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
recordRes.get()
|
||||
let record = enrBuilder.build().valueOr:
|
||||
error "failed to create enr record", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var builder = WakuNodeBuilder.init()
|
||||
builder.withNodeKey(nodeKey)
|
||||
|
||||
40
examples/waku_example.nim
Normal file
40
examples/waku_example.nim
Normal file
@ -0,0 +1,40 @@
|
||||
import std/options
|
||||
import chronos, results, confutils, confutils/defs
|
||||
import waku
|
||||
|
||||
type CliArgs = object
|
||||
ethRpcEndpoint* {.
|
||||
defaultValue: "", desc: "ETH RPC Endpoint, if passed, RLN is enabled"
|
||||
.}: string
|
||||
|
||||
when isMainModule:
|
||||
let args = CliArgs.load()
|
||||
|
||||
echo "Starting Waku node..."
|
||||
|
||||
let config =
|
||||
if (args.ethRpcEndpoint == ""):
|
||||
# Create a basic configuration for the Waku node
|
||||
# No RLN as we don't have an ETH RPC Endpoint
|
||||
NodeConfig.init(
|
||||
protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 42)
|
||||
)
|
||||
else:
|
||||
# Connect to TWN, use ETH RPC Endpoint for RLN
|
||||
NodeConfig.init(ethRpcEndpoints = @[args.ethRpcEndpoint])
|
||||
|
||||
# Create the node using the library API's createNode function
|
||||
let node = (waitFor createNode(config)).valueOr:
|
||||
echo "Failed to create node: ", error
|
||||
quit(QuitFailure)
|
||||
|
||||
echo("Waku node created successfully!")
|
||||
|
||||
# Start the node
|
||||
(waitFor startWaku(addr node)).isOkOr:
|
||||
echo "Failed to start node: ", error
|
||||
quit(QuitFailure)
|
||||
|
||||
echo "Node started successfully!"
|
||||
|
||||
runForever()
|
||||
@ -1,6 +1,7 @@
|
||||
{.push raises: [].}
|
||||
|
||||
import waku/[common/logging, factory/[waku, networks_config, external_config]]
|
||||
import tools/confutils/cli_args
|
||||
import waku/[common/logging, factory/[waku, networks_config]]
|
||||
import
|
||||
std/[options, strutils, os, sequtils],
|
||||
chronicles,
|
||||
@ -17,36 +18,33 @@ proc setup*(): Waku =
|
||||
const versionString = "version / git commit hash: " & waku.git_version
|
||||
let rng = crypto.newRng()
|
||||
|
||||
let confRes = WakuNodeConf.load(version = versionString)
|
||||
if confRes.isErr():
|
||||
error "failure while loading the configuration", error = $confRes.error
|
||||
let conf = WakuNodeConf.load(version = versionString).valueOr:
|
||||
error "failure while loading the configuration", error = $error
|
||||
quit(QuitFailure)
|
||||
|
||||
var conf = confRes.get()
|
||||
|
||||
let twnClusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
|
||||
if len(conf.shards) != 0:
|
||||
conf.pubsubTopics = conf.shards.mapIt(twnClusterConf.pubsubTopics[it.uint16])
|
||||
conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16])
|
||||
else:
|
||||
conf.pubsubTopics = twnClusterConf.pubsubTopics
|
||||
conf.pubsubTopics = twnNetworkConf.pubsubTopics
|
||||
|
||||
# Override configuration
|
||||
conf.maxMessageSize = twnClusterConf.maxMessageSize
|
||||
conf.clusterId = twnClusterConf.clusterId
|
||||
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
|
||||
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
|
||||
conf.discv5Discovery = twnClusterConf.discv5Discovery
|
||||
conf.maxMessageSize = twnNetworkConf.maxMessageSize
|
||||
conf.clusterId = twnNetworkConf.clusterId
|
||||
conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
|
||||
conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
|
||||
conf.discv5Discovery = twnNetworkConf.discv5Discovery
|
||||
conf.discv5BootstrapNodes =
|
||||
conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
|
||||
conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
|
||||
conf.discv5BootstrapNodes & twnNetworkConf.discv5BootstrapNodes
|
||||
conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
|
||||
|
||||
# Only set rlnRelay to true if relay is configured
|
||||
if conf.relay:
|
||||
conf.rlnRelay = twnClusterConf.rlnRelay
|
||||
conf.rlnRelay = twnNetworkConf.rlnRelay
|
||||
|
||||
debug "Starting node"
|
||||
var waku = Waku.new(conf).valueOr:
|
||||
info "Starting node"
|
||||
var waku = (waitFor Waku.new(conf)).valueOr:
|
||||
error "Waku initialization failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
|
||||
@ -52,7 +52,7 @@ proc sendThruWaku*(
|
||||
(await self.waku.node.publish(some(DefaultPubsubTopic), message)).isOkOr:
|
||||
return err("failed to publish message: " & $error)
|
||||
|
||||
debug "rate limit proof is appended to the message"
|
||||
info "rate limit proof is appended to the message"
|
||||
|
||||
return ok()
|
||||
|
||||
@ -95,61 +95,54 @@ proc sendResponse*(
|
||||
type SCPHandler* = proc(msg: WakuMessage): Future[void] {.async.}
|
||||
proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler =
|
||||
let handler = proc(msg: WakuMessage): Future[void] {.async.} =
|
||||
let decodedRes = WakuStealthCommitmentMsg.decode(msg.payload)
|
||||
if decodedRes.isErr():
|
||||
error "could not decode scp message"
|
||||
let decoded = decodedRes.get()
|
||||
let decoded = WakuStealthCommitmentMsg.decode(msg.payload).valueOr:
|
||||
error "could not decode scp message", error = error
|
||||
quit(QuitFailure)
|
||||
if decoded.request == false:
|
||||
# check if the generated stealth commitment belongs to the receiver
|
||||
# if not, continue
|
||||
let ephemeralPubKeyRes =
|
||||
deserialize(StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get())
|
||||
if ephemeralPubKeyRes.isErr():
|
||||
error "could not deserialize ephemeral public key: ",
|
||||
err = ephemeralPubKeyRes.error()
|
||||
let ephemeralPubKey = ephemeralPubKeyRes.get()
|
||||
let stealthCommitmentPrivateKeyRes = StealthCommitmentFFI.generateStealthPrivateKey(
|
||||
let ephemeralPubKey = deserialize(
|
||||
StealthCommitmentFFI.PublicKey, decoded.ephemeralPubKey.get()
|
||||
).valueOr:
|
||||
error "could not deserialize ephemeral public key: ", error = error
|
||||
quit(QuitFailure)
|
||||
let stealthCommitmentPrivateKey = StealthCommitmentFFI.generateStealthPrivateKey(
|
||||
ephemeralPubKey,
|
||||
self.spendingKeyPair.privateKey,
|
||||
self.viewingKeyPair.privateKey,
|
||||
decoded.viewTag.get(),
|
||||
)
|
||||
if stealthCommitmentPrivateKeyRes.isErr():
|
||||
info "received stealth commitment does not belong to the receiver: ",
|
||||
err = stealthCommitmentPrivateKeyRes.error()
|
||||
|
||||
let stealthCommitmentPrivateKey = stealthCommitmentPrivateKeyRes.get()
|
||||
).valueOr:
|
||||
error "received stealth commitment does not belong to the receiver: ",
|
||||
error = error
|
||||
quit(QuitFailure)
|
||||
info "received stealth commitment belongs to the receiver: ",
|
||||
stealthCommitmentPrivateKey,
|
||||
stealthCommitmentPubKey = decoded.stealthCommitment.get()
|
||||
return
|
||||
# send response
|
||||
# deseralize the keys
|
||||
let spendingKeyRes =
|
||||
deserialize(StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get())
|
||||
if spendingKeyRes.isErr():
|
||||
error "could not deserialize spending key: ", err = spendingKeyRes.error()
|
||||
let spendingKey = spendingKeyRes.get()
|
||||
let viewingKeyRes =
|
||||
(deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get()))
|
||||
if viewingKeyRes.isErr():
|
||||
error "could not deserialize viewing key: ", err = viewingKeyRes.error()
|
||||
let viewingKey = viewingKeyRes.get()
|
||||
let spendingKey = deserialize(
|
||||
StealthCommitmentFFI.PublicKey, decoded.spendingPubKey.get()
|
||||
).valueOr:
|
||||
error "could not deserialize spending key: ", error = error
|
||||
quit(QuitFailure)
|
||||
let viewingKey = (
|
||||
deserialize(StealthCommitmentFFI.PublicKey, decoded.viewingPubKey.get())
|
||||
).valueOr:
|
||||
error "could not deserialize viewing key: ", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
info "received spending key", spendingKey
|
||||
info "received viewing key", viewingKey
|
||||
let ephemeralKeyPairRes = StealthCommitmentFFI.generateKeyPair()
|
||||
if ephemeralKeyPairRes.isErr():
|
||||
error "could not generate ephemeral key pair: ", err = ephemeralKeyPairRes.error()
|
||||
let ephemeralKeyPair = ephemeralKeyPairRes.get()
|
||||
let ephemeralKeyPair = StealthCommitmentFFI.generateKeyPair().valueOr:
|
||||
error "could not generate ephemeral key pair: ", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
let stealthCommitmentRes = StealthCommitmentFFI.generateStealthCommitment(
|
||||
let stealthCommitment = StealthCommitmentFFI.generateStealthCommitment(
|
||||
spendingKey, viewingKey, ephemeralKeyPair.privateKey
|
||||
)
|
||||
if stealthCommitmentRes.isErr():
|
||||
error "could not generate stealth commitment: ",
|
||||
err = stealthCommitmentRes.error()
|
||||
let stealthCommitment = stealthCommitmentRes.get()
|
||||
).valueOr:
|
||||
error "could not generate stealth commitment: ", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
(
|
||||
await self.sendResponse(
|
||||
@ -157,7 +150,7 @@ proc getSCPHandler(self: StealthCommitmentProtocol): SCPHandler =
|
||||
stealthCommitment.viewTag,
|
||||
)
|
||||
).isOkOr:
|
||||
error "could not send response: ", err = $error
|
||||
error "could not send response: ", error = $error
|
||||
|
||||
return handler
|
||||
|
||||
|
||||
32
flake.lock
generated
32
flake.lock
generated
@ -22,24 +22,46 @@
|
||||
"zerokit": "zerokit"
|
||||
}
|
||||
},
|
||||
"zerokit": {
|
||||
"rust-overlay": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"zerokit",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1743756626,
|
||||
"narHash": "sha256-SvhfEl0bJcRsCd79jYvZbxQecGV2aT+TXjJ57WVv7Aw=",
|
||||
"lastModified": 1748399823,
|
||||
"narHash": "sha256-kahD8D5hOXOsGbNdoLLnqCL887cjHkx98Izc37nDjlA=",
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"rev": "d68a69dc71bc19beb3479800392112c2f6218159",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "oxalica",
|
||||
"repo": "rust-overlay",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"zerokit": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
],
|
||||
"rust-overlay": "rust-overlay"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1749115386,
|
||||
"narHash": "sha256-UexIE2D7zr6aRajwnKongXwCZCeRZDXOL0kfjhqUFSU=",
|
||||
"owner": "vacp2p",
|
||||
"repo": "zerokit",
|
||||
"rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582",
|
||||
"rev": "dc0b31752c91e7b4fefc441cfa6a8210ad7dba7b",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "vacp2p",
|
||||
"repo": "zerokit",
|
||||
"rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582",
|
||||
"rev": "dc0b31752c91e7b4fefc441cfa6a8210ad7dba7b",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
|
||||
17
flake.nix
17
flake.nix
@ -9,7 +9,7 @@
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49";
|
||||
zerokit = {
|
||||
url = "github:vacp2p/zerokit?rev=c60e0c33fc6350a4b1c20e6b6727c44317129582";
|
||||
url = "github:vacp2p/zerokit?rev=dc0b31752c91e7b4fefc441cfa6a8210ad7dba7b";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
};
|
||||
@ -49,11 +49,18 @@
|
||||
libwaku-android-arm64 = pkgs.callPackage ./nix/default.nix {
|
||||
inherit stableSystems;
|
||||
src = self;
|
||||
targets = ["libwaku-android-arm64"];
|
||||
androidArch = "aarch64-linux-android";
|
||||
targets = ["libwaku-android-arm64"];
|
||||
abidir = "arm64-v8a";
|
||||
zerokitPkg = zerokit.packages.${system}.zerokit-android-arm64;
|
||||
zerokitRln = zerokit.packages.${system}.rln-android-arm64;
|
||||
};
|
||||
|
||||
wakucanary = pkgs.callPackage ./nix/default.nix {
|
||||
inherit stableSystems;
|
||||
src = self;
|
||||
targets = ["wakucanary"];
|
||||
zerokitRln = zerokit.packages.${system}.rln;
|
||||
};
|
||||
|
||||
default = libwaku-android-arm64;
|
||||
});
|
||||
|
||||
@ -61,4 +68,4 @@
|
||||
default = pkgsFor.${system}.callPackage ./nix/shell.nix {};
|
||||
});
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,42 +0,0 @@
|
||||
## Can be shared safely between threads
|
||||
type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int]
|
||||
|
||||
proc alloc*(str: cstring): cstring =
|
||||
# Byte allocation from the given address.
|
||||
# There should be the corresponding manual deallocation with deallocShared !
|
||||
if str.isNil():
|
||||
var ret = cast[cstring](allocShared(1)) # Allocate memory for the null terminator
|
||||
ret[0] = '\0' # Set the null terminator
|
||||
return ret
|
||||
|
||||
let ret = cast[cstring](allocShared(len(str) + 1))
|
||||
copyMem(ret, str, len(str) + 1)
|
||||
return ret
|
||||
|
||||
proc alloc*(str: string): cstring =
|
||||
## Byte allocation from the given address.
|
||||
## There should be the corresponding manual deallocation with deallocShared !
|
||||
var ret = cast[cstring](allocShared(str.len + 1))
|
||||
let s = cast[seq[char]](str)
|
||||
for i in 0 ..< str.len:
|
||||
ret[i] = s[i]
|
||||
ret[str.len] = '\0'
|
||||
return ret
|
||||
|
||||
proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] =
|
||||
let data = allocShared(sizeof(T) * s.len)
|
||||
if s.len != 0:
|
||||
copyMem(data, unsafeAddr s[0], s.len)
|
||||
return (cast[ptr UncheckedArray[T]](data), s.len)
|
||||
|
||||
proc deallocSharedSeq*[T](s: var SharedSeq[T]) =
|
||||
deallocShared(s.data)
|
||||
s.len = 0
|
||||
|
||||
proc toSeq*[T](s: SharedSeq[T]): seq[T] =
|
||||
## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required
|
||||
## as req[T] is a GC managed type.
|
||||
var ret = newSeq[T]()
|
||||
for i in 0 ..< s.len:
|
||||
ret.add(s.data[i])
|
||||
return ret
|
||||
10
library/declare_lib.nim
Normal file
10
library/declare_lib.nim
Normal file
@ -0,0 +1,10 @@
|
||||
import ffi
|
||||
import waku/factory/waku
|
||||
|
||||
declareLibrary("waku")
|
||||
|
||||
proc set_event_callback(
|
||||
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
|
||||
) {.dynlib, exportc, cdecl.} =
|
||||
ctx[].eventCallback = cast[pointer](callback)
|
||||
ctx[].eventUserData = userData
|
||||
@ -1,30 +0,0 @@
|
||||
################################################################################
|
||||
### Exported types
|
||||
|
||||
type WakuCallBack* = proc(
|
||||
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
|
||||
) {.cdecl, gcsafe, raises: [].}
|
||||
|
||||
const RET_OK*: cint = 0
|
||||
const RET_ERR*: cint = 1
|
||||
const RET_MISSING_CALLBACK*: cint = 2
|
||||
|
||||
### End of exported types
|
||||
################################################################################
|
||||
|
||||
################################################################################
|
||||
### FFI utils
|
||||
|
||||
template foreignThreadGc*(body: untyped) =
|
||||
when declared(setupForeignThreadGc):
|
||||
setupForeignThreadGc()
|
||||
|
||||
body
|
||||
|
||||
when declared(tearDownForeignThreadGc):
|
||||
tearDownForeignThreadGc()
|
||||
|
||||
type onDone* = proc()
|
||||
|
||||
### End of FFI utils
|
||||
################################################################################
|
||||
32
library/ios_bearssl_stubs.c
Normal file
32
library/ios_bearssl_stubs.c
Normal file
@ -0,0 +1,32 @@
|
||||
/**
|
||||
* iOS stubs for BearSSL tools functions not normally included in the library.
|
||||
* These are typically from the BearSSL tools/ directory which is for CLI tools.
|
||||
*/
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
/* x509_noanchor context - simplified stub */
|
||||
typedef struct {
|
||||
void *vtable;
|
||||
void *inner;
|
||||
} x509_noanchor_context;
|
||||
|
||||
/* Stub for x509_noanchor_init - used to skip anchor validation */
|
||||
void x509_noanchor_init(x509_noanchor_context *xwc, const void **inner) {
|
||||
if (xwc && inner) {
|
||||
xwc->inner = (void*)*inner;
|
||||
xwc->vtable = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* TAs (Trust Anchors) - empty array stub */
|
||||
/* This is typically defined by applications with their CA certificates */
|
||||
typedef struct {
|
||||
void *dn;
|
||||
size_t dn_len;
|
||||
unsigned flags;
|
||||
void *pkey;
|
||||
} br_x509_trust_anchor;
|
||||
|
||||
const br_x509_trust_anchor TAs[1] = {{0}};
|
||||
const size_t TAs_NUM = 0;
|
||||
14
library/ios_natpmp_stubs.c
Normal file
14
library/ios_natpmp_stubs.c
Normal file
@ -0,0 +1,14 @@
|
||||
/**
|
||||
* iOS stub for getgateway.c functions.
|
||||
* iOS doesn't have net/route.h, so we provide a stub that returns failure.
|
||||
* NAT-PMP functionality won't work but the library will link.
|
||||
*/
|
||||
|
||||
#include <stdint.h>
|
||||
#include <netinet/in.h>
|
||||
|
||||
/* getdefaultgateway - returns -1 (failure) on iOS */
|
||||
int getdefaultgateway(in_addr_t *addr) {
|
||||
(void)addr; /* unused */
|
||||
return -1; /* failure - not supported on iOS */
|
||||
}
|
||||
49
library/kernel_api/debug_node_api.nim
Normal file
49
library/kernel_api/debug_node_api.nim
Normal file
@ -0,0 +1,49 @@
|
||||
import std/json
|
||||
import
|
||||
chronicles,
|
||||
chronos,
|
||||
results,
|
||||
eth/p2p/discoveryv5/enr,
|
||||
strutils,
|
||||
libp2p/peerid,
|
||||
metrics,
|
||||
ffi
|
||||
import waku/factory/waku, waku/node/waku_node, waku/node/health_monitor, library/declare_lib
|
||||
|
||||
proc getMultiaddresses(node: WakuNode): seq[string] =
|
||||
return node.info().listenAddresses
|
||||
|
||||
proc getMetrics(): string =
|
||||
{.gcsafe.}:
|
||||
return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module
|
||||
|
||||
proc waku_version(
|
||||
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
|
||||
) {.ffi.} =
|
||||
return ok(WakuNodeVersionString)
|
||||
|
||||
proc waku_listen_addresses(
|
||||
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
|
||||
) {.ffi.} =
|
||||
## returns a comma-separated string of the listen addresses
|
||||
return ok(ctx.myLib[].node.getMultiaddresses().join(","))
|
||||
|
||||
proc waku_get_my_enr(
|
||||
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
|
||||
) {.ffi.} =
|
||||
return ok(ctx.myLib[].node.enr.toURI())
|
||||
|
||||
proc waku_get_my_peerid(
|
||||
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
|
||||
) {.ffi.} =
|
||||
return ok($ctx.myLib[].node.peerId())
|
||||
|
||||
proc waku_get_metrics(
|
||||
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
|
||||
) {.ffi.} =
|
||||
return ok(getMetrics())
|
||||
|
||||
proc waku_is_online(
|
||||
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
|
||||
) {.ffi.} =
|
||||
return ok($ctx.myLib[].healthMonitor.onlineMonitor.amIOnline())
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user