Merge commit '2477c4980f15df0efc2eedf27d7593e0dd2b1e1b' into feat-waku-api-send

This commit is contained in:
NagyZoltanPeter 2025-12-15 15:42:32 +01:00
commit cfa229ccab
No known key found for this signature in database
GPG Key ID: 3E1F97CF4A7B6F42
38 changed files with 1128 additions and 323 deletions

View File

@ -10,7 +10,7 @@ assignees: ''
<!-- <!--
Add appropriate release number to title! Add appropriate release number to title!
For detailed info on the release process refer to https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md For detailed info on the release process refer to https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md
--> -->
### Items to complete ### Items to complete
@ -34,10 +34,10 @@ All items below are to be completed by the owner of the given release.
- [ ] **Proceed with release** - [ ] **Proceed with release**
- [ ] Assign a final release tag (`v0.X.0-beta`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0-beta-rc.N`) and submit a PR from the release branch to `master`. - [ ] Assign a final release tag (`v0.X.0-beta`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0-beta-rc.N`) and submit a PR from the release branch to `master`.
- [ ] Update [nwaku-compose](https://github.com/waku-org/nwaku-compose) and [waku-simulator](https://github.com/waku-org/waku-simulator) according to the new release. - [ ] Update [nwaku-compose](https://github.com/logos-messaging/nwaku-compose) and [waku-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release.
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/waku-org/waku-rust-bindings) and make sure all examples and tests work. - [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/logos-messaging/waku-rust-bindings) and make sure all examples and tests work.
- [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/waku-org/waku-go-bindings) and make sure all tests work. - [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/logos-messaging/waku-go-bindings) and make sure all tests work.
- [ ] Create GitHub release (https://github.com/waku-org/nwaku/releases). - [ ] Create GitHub release (https://github.com/logos-messaging/nwaku/releases).
- [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available. - [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available.
- [ ] **Promote release to fleets** - [ ] **Promote release to fleets**
@ -47,8 +47,8 @@ All items below are to be completed by the owner of the given release.
### Links ### Links
- [Release process](https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md) - [Release process](https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/waku-org/nwaku/blob/master/CHANGELOG.md) - [Release notes](https://github.com/logos-messaging/nwaku/blob/master/CHANGELOG.md)
- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) - [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64)
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) - [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
- [Jenkins](https://ci.infra.status.im/job/nim-waku/) - [Jenkins](https://ci.infra.status.im/job/nim-waku/)

View File

@ -10,7 +10,7 @@ assignees: ''
<!-- <!--
Add appropriate release number to title! Add appropriate release number to title!
For detailed info on the release process refer to https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md For detailed info on the release process refer to https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md
--> -->
### Items to complete ### Items to complete
@ -54,11 +54,11 @@ All items below are to be completed by the owner of the given release.
- [ ] **Proceed with release** - [ ] **Proceed with release**
- [ ] Assign a final release tag (`v0.X.0`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0`). - [ ] Assign a final release tag (`v0.X.0`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0`).
- [ ] Update [nwaku-compose](https://github.com/waku-org/nwaku-compose) and [waku-simulator](https://github.com/waku-org/waku-simulator) according to the new release. - [ ] Update [nwaku-compose](https://github.com/logos-messaging/nwaku-compose) and [waku-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release.
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/waku-org/waku-rust-bindings) and make sure all examples and tests work. - [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/logos-messaging/waku-rust-bindings) and make sure all examples and tests work.
- [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/waku-org/waku-go-bindings) and make sure all tests work. - [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/logos-messaging/waku-go-bindings) and make sure all tests work.
- [ ] Create GitHub release (https://github.com/waku-org/nwaku/releases). - [ ] Create GitHub release (https://github.com/logos-messaging/nwaku/releases).
- [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available. - [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available.
- [ ] **Promote release to fleets** - [ ] **Promote release to fleets**
@ -67,8 +67,8 @@ All items below are to be completed by the owner of the given release.
### Links ### Links
- [Release process](https://github.com/waku-org/nwaku/blob/master/docs/contributors/release-process.md) - [Release process](https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/waku-org/nwaku/blob/master/CHANGELOG.md) - [Release notes](https://github.com/logos-messaging/nwaku/blob/master/CHANGELOG.md)
- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64) - [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64)
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku) - [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
- [Jenkins](https://ci.infra.status.im/job/nim-waku/) - [Jenkins](https://ci.infra.status.im/job/nim-waku/)

View File

@ -76,9 +76,12 @@ jobs:
.git/modules .git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Make update
run: make update
- name: Build binaries - name: Build binaries
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
build-windows: build-windows:
needs: changes needs: changes
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }} if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
@ -114,6 +117,9 @@ jobs:
.git/modules .git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }} key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Make update
run: make update
- name: Run tests - name: Run tests
run: | run: |
postgres_enabled=0 postgres_enabled=0
@ -121,7 +127,7 @@ jobs:
sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18 sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18
postgres_enabled=1 postgres_enabled=1
fi fi
export MAKEFLAGS="-j1" export MAKEFLAGS="-j1"
export NIMFLAGS="--colors:off -d:chronicles_colors:none" export NIMFLAGS="--colors:off -d:chronicles_colors:none"
export USE_LIBBACKTRACE=0 export USE_LIBBACKTRACE=0
@ -132,12 +138,12 @@ jobs:
build-docker-image: build-docker-image:
needs: changes needs: changes
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }} if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }}
uses: waku-org/nwaku/.github/workflows/container-image.yml@master uses: logos-messaging/logos-messaging-nim/.github/workflows/container-image.yml@10dc3d3eb4b6a3d4313f7b2cc4a85a925e9ce039
secrets: inherit secrets: inherit
nwaku-nwaku-interop-tests: nwaku-nwaku-interop-tests:
needs: build-docker-image needs: build-docker-image
uses: waku-org/waku-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1 uses: logos-messaging/logos-messaging-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1
with: with:
node_nwaku: ${{ needs.build-docker-image.outputs.image }} node_nwaku: ${{ needs.build-docker-image.outputs.image }}
@ -145,14 +151,14 @@ jobs:
js-waku-node: js-waku-node:
needs: build-docker-image needs: build-docker-image
uses: waku-org/js-waku/.github/workflows/test-node.yml@master uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
with: with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node test_type: node
js-waku-node-optional: js-waku-node-optional:
needs: build-docker-image needs: build-docker-image
uses: waku-org/js-waku/.github/workflows/test-node.yml@master uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
with: with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node-optional test_type: node-optional

View File

@ -41,7 +41,7 @@ jobs:
env: env:
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }} QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
QUAY_USER: ${{ secrets.QUAY_USER }} QUAY_USER: ${{ secrets.QUAY_USER }}
- name: Checkout code - name: Checkout code
if: ${{ steps.secrets.outcome == 'success' }} if: ${{ steps.secrets.outcome == 'success' }}
uses: actions/checkout@v4 uses: actions/checkout@v4
@ -65,6 +65,7 @@ jobs:
id: build id: build
if: ${{ steps.secrets.outcome == 'success' }} if: ${{ steps.secrets.outcome == 'success' }}
run: | run: |
make update
make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" wakunode2 make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" wakunode2

View File

@ -47,7 +47,7 @@ jobs:
- name: prep variables - name: prep variables
id: vars id: vars
run: | run: |
ARCH=${{matrix.arch}} ARCH=${{matrix.arch}}
echo "arch=${ARCH}" >> $GITHUB_OUTPUT echo "arch=${ARCH}" >> $GITHUB_OUTPUT
@ -91,14 +91,14 @@ jobs:
build-docker-image: build-docker-image:
needs: tag-name needs: tag-name
uses: waku-org/nwaku/.github/workflows/container-image.yml@master uses: logos-messaging/nwaku/.github/workflows/container-image.yml@master
with: with:
image_tag: ${{ needs.tag-name.outputs.tag }} image_tag: ${{ needs.tag-name.outputs.tag }}
secrets: inherit secrets: inherit
js-waku-node: js-waku-node:
needs: build-docker-image needs: build-docker-image
uses: waku-org/js-waku/.github/workflows/test-node.yml@master uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
with: with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node test_type: node
@ -106,7 +106,7 @@ jobs:
js-waku-node-optional: js-waku-node-optional:
needs: build-docker-image needs: build-docker-image
uses: waku-org/js-waku/.github/workflows/test-node.yml@master uses: logos-messaging/js-waku/.github/workflows/test-node.yml@master
with: with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }} nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node-optional test_type: node-optional
@ -150,7 +150,7 @@ jobs:
-u $(id -u) \ -u $(id -u) \
docker.io/wakuorg/sv4git:latest \ docker.io/wakuorg/sv4git:latest \
release-notes ${RELEASE_NOTES_TAG} --previous $(git tag -l --sort -creatordate | grep -e "^v[0-9]*\.[0-9]*\.[0-9]*$") |\ release-notes ${RELEASE_NOTES_TAG} --previous $(git tag -l --sort -creatordate | grep -e "^v[0-9]*\.[0-9]*\.[0-9]*$") |\
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' > release_notes.md sed -E 's@#([0-9]+)@[#\1](https://github.com/logos-messaging/nwaku/issues/\1)@g' > release_notes.md
sed -i "s/^## .*/Generated at $(date)/" release_notes.md sed -i "s/^## .*/Generated at $(date)/" release_notes.md

View File

@ -41,25 +41,84 @@ jobs:
.git/modules .git/modules
key: ${{ runner.os }}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }} key: ${{ runner.os }}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }}
- name: prep variables - name: Get tag
id: version
run: |
# Use full tag, e.g., v0.37.0
echo "version=${GITHUB_REF_NAME}" >> $GITHUB_OUTPUT
- name: Prep variables
id: vars id: vars
run: | run: |
NWAKU_ARTIFACT_NAME=$(echo "nwaku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]") VERSION=${{ steps.version.outputs.version }}
echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT NWAKU_ARTIFACT_NAME=$(echo "waku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
echo "waku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
- name: Install dependencies if [[ "${{ runner.os }}" == "Linux" ]]; then
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-${{runner.os}}-linux.deb" | tr "[:upper:]" "[:lower:]")
fi
if [[ "${{ runner.os }}" == "macOS" ]]; then
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-macos.tar.gz" | tr "[:upper:]" "[:lower:]")
fi
echo "libwaku=${LIBWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
- name: Install build dependencies
run: |
if [[ "${{ runner.os }}" == "Linux" ]]; then
sudo apt-get update && sudo apt-get install -y build-essential dpkg-dev
fi
- name: Build Waku artifacts
run: | run: |
OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux") OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux")
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2 make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2 make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2
tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/ tar -cvzf ${{steps.vars.outputs.waku}} ./build/
- name: Upload asset make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false libwaku
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false STATIC=1 libwaku
- name: Create distributable libwaku package
run: |
VERSION=${{ steps.version.outputs.version }}
if [[ "${{ runner.os }}" == "Linux" ]]; then
rm -rf pkg
mkdir -p pkg/DEBIAN pkg/usr/local/lib pkg/usr/local/include
cp build/libwaku.so pkg/usr/local/lib/
cp build/libwaku.a pkg/usr/local/lib/
cp library/libwaku.h pkg/usr/local/include/
echo "Package: waku" >> pkg/DEBIAN/control
echo "Version: ${VERSION}" >> pkg/DEBIAN/control
echo "Priority: optional" >> pkg/DEBIAN/control
echo "Section: libs" >> pkg/DEBIAN/control
echo "Architecture: ${{matrix.arch}}" >> pkg/DEBIAN/control
echo "Maintainer: Waku Team <ivansete@status.im>" >> pkg/DEBIAN/control
echo "Description: Waku library" >> pkg/DEBIAN/control
dpkg-deb --build pkg ${{steps.vars.outputs.libwaku}}
fi
if [[ "${{ runner.os }}" == "macOS" ]]; then
tar -cvzf ${{steps.vars.outputs.libwaku}} ./build/libwaku.dylib ./build/libwaku.a ./library/libwaku.h
fi
- name: Upload waku artifact
uses: actions/upload-artifact@v4.4.0 uses: actions/upload-artifact@v4.4.0
with: with:
name: ${{steps.vars.outputs.nwaku}} name: waku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
path: ${{steps.vars.outputs.nwaku}} path: ${{ steps.vars.outputs.waku }}
if-no-files-found: error
- name: Upload libwaku artifact
uses: actions/upload-artifact@v4.4.0
with:
name: libwaku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
path: ${{ steps.vars.outputs.libwaku }}
if-no-files-found: error if-no-files-found: error

2
.gitmodules vendored
View File

@ -181,6 +181,6 @@
branch = master branch = master
[submodule "vendor/waku-rlnv2-contract"] [submodule "vendor/waku-rlnv2-contract"]
path = vendor/waku-rlnv2-contract path = vendor/waku-rlnv2-contract
url = https://github.com/waku-org/waku-rlnv2-contract.git url = https://github.com/logos-messaging/waku-rlnv2-contract.git
ignore = untracked ignore = untracked
branch = master branch = master

509
AGENTS.md Normal file
View File

@ -0,0 +1,509 @@
# AGENTS.md - AI Coding Context
This file provides essential context for LLMs assisting with Logos Messaging development.
## Project Identity
Logos Messaging is designed as a shared public network for generalized messaging, not application-specific infrastructure.
This project is a Nim implementation of a libp2p protocol suite for private, censorship-resistant P2P messaging. It targets resource-restricted devices and privacy-preserving communication.
Logos Messaging was formerly known as Waku. Waku-related terminology remains within the codebase for historical reasons.
### Design Philosophy
Key architectural decisions:
Resource-restricted first: Protocols differentiate between full nodes (relay) and light clients (filter, lightpush, store). Light clients can participate without maintaining full message history or relay capabilities. This explains the client/server split in protocol implementations.
Privacy through unlinkability: RLN (Rate Limiting Nullifier) provides DoS protection while preserving sender anonymity. Messages are routed through pubsub topics with automatic sharding across 8 shards. Code prioritizes metadata privacy alongside content encryption.
Scalability via sharding: The network uses automatic content-topic-based sharding to distribute traffic. This is why you'll see sharding logic throughout the codebase and why pubsub topic selection is protocol-level, not application-level.
See [documentation](https://docs.waku.org/learn/) for architectural details.
### Core Protocols
- Relay: Pub/sub message routing using GossipSub
- Store: Historical message retrieval and persistence
- Filter: Lightweight message filtering for resource-restricted clients
- Lightpush: Lightweight message publishing for clients
- Peer Exchange: Peer discovery mechanism
- RLN Relay: Rate limiting nullifier for spam protection
- Metadata: Cluster and shard metadata exchange between peers
- Mix: Mixnet protocol for enhanced privacy through onion routing
- Rendezvous: Alternative peer discovery mechanism
### Key Terminology
- ENR (Ethereum Node Record): Node identity and capability advertisement
- Multiaddr: libp2p addressing format (e.g., `/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2...`)
- PubsubTopic: Gossipsub topic for message routing (e.g., `/waku/2/default-waku/proto`)
- ContentTopic: Application-level message categorization (e.g., `/my-app/1/chat/proto`)
- Sharding: Partitioning network traffic across topics (static or auto-sharding)
- RLN (Rate Limiting Nullifier): Zero-knowledge proof system for spam prevention
### Specifications
All specs are at [rfc.vac.dev/waku](https://rfc.vac.dev/waku). RFCs use `WAKU2-XXX` format (not legacy `WAKU-XXX`).
## Architecture
### Protocol Module Pattern
Each protocol typically follows this structure:
```
waku_<protocol>/
├── protocol.nim # Main protocol type and handler logic
├── client.nim # Client-side API
├── rpc.nim # RPC message types
├── rpc_codec.nim # Protobuf encoding/decoding
├── common.nim # Shared types and constants
└── protocol_metrics.nim # Prometheus metrics
```
### WakuNode Architecture
- WakuNode (`waku/node/waku_node.nim`) is the central orchestrator
- Protocols are "mounted" onto the node's switch (libp2p component)
- PeerManager handles peer selection and connection management
- Switch provides libp2p transport, security, and multiplexing
Example protocol type definition:
```nim
type WakuFilter* = ref object of LPProtocol
subscriptions*: FilterSubscriptions
peerManager: PeerManager
messageCache: TimedCache[string]
```
## Development Essentials
### Build Requirements
- Nim 2.x (check `waku.nimble` for minimum version)
- Rust toolchain (required for RLN dependencies)
- Build system: Make with nimbus-build-system
### Build System
The project uses Makefile with nimbus-build-system (Status's Nim build framework):
```bash
# Initial build (updates submodules)
make wakunode2
# After git pull, update submodules
make update
# Build with custom flags
make wakunode2 NIMFLAGS="-d:chronicles_log_level=DEBUG"
```
Note: The build system uses `--mm:refc` memory management (automatically enforced). Only relevant if compiling outside the standard build system.
### Common Make Targets
```bash
make wakunode2 # Build main node binary
make test # Run all tests
make testcommon # Run common tests only
make libwakuStatic # Build static C library
make chat2 # Build chat example
make install-nph # Install git hook for auto-formatting
```
### Testing
```bash
# Run all tests
make test
# Run specific test file
make test tests/test_waku_enr.nim
# Run specific test case from file
make test tests/test_waku_enr.nim "check capabilities support"
# Build and run test separately (for development iteration)
make test tests/test_waku_enr.nim
```
Test structure uses `testutils/unittests`:
```nim
import testutils/unittests
suite "Waku ENR - Capabilities":
test "check capabilities support":
## Given
let bitfield: CapabilitiesBitfield = 0b0000_1101u8
## Then
check:
bitfield.supportsCapability(Capabilities.Relay)
not bitfield.supportsCapability(Capabilities.Store)
```
### Code Formatting
Mandatory: All code must be formatted with `nph` (vendored in `vendor/nph`)
```bash
# Format specific file
make nph/waku/waku_core.nim
# Install git pre-commit hook (auto-formats on commit)
make install-nph
```
The nph formatter handles all formatting details automatically, especially with the pre-commit hook installed. Focus on semantic correctness.
### Logging
Uses `chronicles` library with compile-time configuration:
```nim
import chronicles
logScope:
topics = "waku lightpush"
info "handling request", peerId = peerId, topic = pubsubTopic
error "request failed", error = msg
```
Compile with log level:
```bash
nim c -d:chronicles_log_level=TRACE myfile.nim
```
## Code Conventions
Common pitfalls:
- Always handle Result types explicitly
- Avoid global mutable state: Pass state through parameters
- Keep functions focused: Under 50 lines when possible
- Prefer compile-time checks (`static assert`) over runtime checks
### Naming
- Files/Directories: `snake_case` (e.g., `waku_lightpush`, `peer_manager`)
- Procedures: `camelCase` (e.g., `handleRequest`, `pushMessage`)
- Types: `PascalCase` (e.g., `WakuFilter`, `PubsubTopic`)
- Constants: `PascalCase` (e.g., `MaxContentTopicsPerRequest`)
- Constructors: `func init(T: type Xxx, params): T`
- For ref types: `func new(T: type Xxx, params): ref T`
- Exceptions: `XxxError` for CatchableError, `XxxDefect` for Defect
- ref object types: `XxxRef` suffix
### Imports Organization
Group imports: stdlib, external libs, internal modules:
```nim
import
std/[options, sequtils], # stdlib
results, chronicles, chronos, # external
libp2p/peerid
import
../node/peer_manager, # internal (separate import block)
../waku_core,
./common
```
### Async Programming
Uses chronos, not stdlib `asyncdispatch`:
```nim
proc handleRequest(
wl: WakuLightPush, peerId: PeerId
): Future[WakuLightPushResult] {.async.} =
let res = await wl.pushHandler(peerId, pubsubTopic, message)
return res
```
### Error Handling
The project uses both Result types and exceptions:
Result types from nim-results are used for protocol and API-level errors:
```nim
proc subscribe(
wf: WakuFilter, peerId: PeerID
): Future[FilterSubscribeResult] {.async.} =
if contentTopics.len > MaxContentTopicsPerRequest:
return err(FilterSubscribeError.badRequest("exceeds maximum"))
# Handle Result with isOkOr
(await wf.subscriptions.addSubscription(peerId, criteria)).isOkOr:
return err(FilterSubscribeError.serviceUnavailable(error))
ok()
```
Exceptions still used for:
- chronos async failures (CancelledError, etc.)
- Database/system errors
- Library interop
Most files start with `{.push raises: [].}` to disable exception tracking, then use try/catch blocks where needed.
### Pragma Usage
```nim
{.push raises: [].} # Disable default exception tracking (at file top)
proc myProc(): Result[T, E] {.async.} = # Async proc
```
### Protocol Inheritance
Protocols inherit from libp2p's `LPProtocol`:
```nim
type WakuLightPush* = ref object of LPProtocol
rng*: ref rand.HmacDrbgContext
peerManager*: PeerManager
pushHandler*: PushMessageHandler
```
### Type Visibility
- Public exports use `*` suffix: `type WakuFilter* = ...`
- Fields without `*` are module-private
## Style Guide Essentials
This section summarizes key Nim style guidelines relevant to this project. Full guide: https://status-im.github.io/nim-style-guide/
### Language Features
Import and Export
- Use explicit import paths with std/ prefix for stdlib
- Group imports: stdlib, external, internal (separate blocks)
- Export modules whose types appear in public API
- Avoid include
Macros and Templates
- Avoid macros and templates - prefer simple constructs
- Avoid generating public API with macros
- Put logic in templates, use macros only for glue code
Object Construction
- Prefer Type(field: value) syntax
- Use Type.init(params) convention for constructors
- Default zero-initialization should be valid state
- Avoid using result variable for construction
ref object Types
- Avoid ref object unless needed for:
- Resource handles requiring reference semantics
- Shared ownership
- Reference-based data structures (trees, lists)
- Stable pointer for FFI
- Use explicit ref MyType where possible
- Name ref object types with Ref suffix: XxxRef
Memory Management
- Prefer stack-based and statically sized types in core code
- Use heap allocation in glue layers
- Avoid alloca
- For FFI: use create/dealloc or createShared/deallocShared
Variable Usage
- Use most restrictive of const, let, var (prefer const over let over var)
- Prefer expressions for initialization over var then assignment
- Avoid result variable - use explicit return or expression-based returns
Functions
- Prefer func over proc
- Avoid public (*) symbols not part of intended API
- Prefer openArray over seq for function parameters
Methods (runtime polymorphism)
- Avoid method keyword for dynamic dispatch
- Prefer manual vtable with proc closures for polymorphism
- Methods lack support for generics
Miscellaneous
- Annotate callback proc types with {.raises: [], gcsafe.}
- Avoid explicit {.inline.} pragma
- Avoid converters
- Avoid finalizers
Type Guidelines
Binary Data
- Use byte for binary data
- Use seq[byte] for dynamic arrays
- Convert string to seq[byte] early if stdlib returns binary as string
Integers
- Prefer signed (int, int64) for counting, lengths, indexing
- Use unsigned with explicit size (uint8, uint64) for binary data, bit ops
- Avoid Natural
- Check ranges before converting to int
- Avoid casting pointers to int
- Avoid range types
Strings
- Use string for text
- Use seq[byte] for binary data instead of string
### Error Handling
Philosophy
- Prefer Result, Opt for explicit error handling
- Use Exceptions only for legacy code compatibility
Result Types
- Use Result[T, E] for operations that can fail
- Use cstring for simple error messages: Result[T, cstring]
- Use enum for errors needing differentiation: Result[T, SomeErrorEnum]
- Use Opt[T] for simple optional values
- Annotate all modules: {.push raises: [].} at top
Exceptions (when unavoidable)
- Inherit from CatchableError, name XxxError
- Use Defect for panics/logic errors, name XxxDefect
- Annotate functions explicitly: {.raises: [SpecificError].}
- Catch specific error types, avoid catching CatchableError
- Use expression-based try blocks
- Isolate legacy exception code with try/except, convert to Result
Common Defect Sources
- Overflow in signed arithmetic
- Array/seq indexing with []
- Implicit range type conversions
Status Codes
- Avoid status code pattern
- Use Result instead
### Library Usage
Standard Library
- Use judiciously, prefer focused packages
- Prefer these replacements:
- async: chronos
- bitops: stew/bitops2
- endians: stew/endians2
- exceptions: results
- io: stew/io2
Results Library
- Use cstring errors for diagnostics without differentiation
- Use enum errors when caller needs to act on specific errors
- Use complex types when additional error context needed
- Use isOkOr pattern for chaining
Wrappers (C/FFI)
- Prefer native Nim when available
- For C libraries: use {.compile.} to build from source
- Create xxx_abi.nim for raw ABI wrapper
- Avoid C++ libraries
Miscellaneous
- Print hex output in lowercase, accept both cases
### Common Pitfalls
- Defects lack tracking by {.raises.}
- nil ref causes runtime crashes
- result variable disables branch checking
- Exception hierarchy unclear between Nim versions
- Range types have compiler bugs
- Finalizers infect all instances of type
## Common Workflows
### Adding a New Protocol
1. Create directory: `waku/waku_myprotocol/`
2. Define core files:
- `rpc.nim` - Message types
- `rpc_codec.nim` - Protobuf encoding
- `protocol.nim` - Protocol handler
- `client.nim` - Client API
- `common.nim` - Shared types
3. Define protocol type in `protocol.nim`:
```nim
type WakuMyProtocol* = ref object of LPProtocol
peerManager: PeerManager
# ... fields
```
4. Implement request handler
5. Mount in WakuNode (`waku/node/waku_node.nim`)
6. Add tests in `tests/waku_myprotocol/`
7. Export module via `waku/waku_myprotocol.nim`
### Adding a REST API Endpoint
1. Define handler in `waku/rest_api/endpoint/myprotocol/`
2. Implement endpoint following pattern:
```nim
proc installMyProtocolApiHandlers*(
router: var RestRouter, node: WakuNode
) =
router.api(MethodGet, "/waku/v2/myprotocol/endpoint") do () -> RestApiResponse:
# Implementation
return RestApiResponse.jsonResponse(data, status = Http200)
```
3. Register in `waku/rest_api/handlers.nim`
### Adding Database Migration
For message_store (SQLite):
1. Create `migrations/message_store/NNNNN_description.up.sql`
2. Create corresponding `.down.sql` for rollback
3. Increment version number sequentially
4. Test migration locally before committing
For PostgreSQL: add in `migrations/message_store_postgres/`
### Running Single Test During Development
```bash
# Build test binary
make test tests/waku_filter_v2/test_waku_client.nim
# Binary location
./build/tests/waku_filter_v2/test_waku_client.nim.bin
# Or combine
make test tests/waku_filter_v2/test_waku_client.nim "specific test name"
```
### Debugging with Chronicles
Set log level and filter topics:
```bash
nim c -r \
-d:chronicles_log_level=TRACE \
-d:chronicles_disabled_topics="eth,dnsdisc" \
tests/mytest.nim
```
## Key Constraints
### Vendor Directory
- Never edit files directly in vendor - it is auto-generated from git submodules
- Always run `make update` after pulling changes
- Managed by `nimbus-build-system`
### Chronicles Performance
- Log levels are configured at compile time for performance
- Runtime filtering is available but should be used sparingly: `-d:chronicles_runtime_filtering=on`
- Default sinks are optimized for production
### Memory Management
- Uses `refc` (reference counting with cycle collection)
- Automatically enforced by the build system (hardcoded in `waku.nimble`)
- Do not override unless absolutely necessary, as it breaks compatibility
### RLN Dependencies
- RLN code requires a Rust toolchain, which explains Rust imports in some modules
- Pre-built `librln` libraries are checked into the repository
## Quick Reference
Language: Nim 2.x | License: MIT or Apache 2.0
### Important Files
- `Makefile` - Primary build interface
- `waku.nimble` - Package definition and build tasks (called via nimbus-build-system)
- `vendor/nimbus-build-system/` - Status's build framework
- `waku/node/waku_node.nim` - Core node implementation
- `apps/wakunode2/wakunode2.nim` - Main CLI application
- `waku/factory/waku_conf.nim` - Configuration types
- `library/libwaku.nim` - C bindings entry point
### Testing Entry Points
- `tests/all_tests_waku.nim` - All Waku protocol tests
- `tests/all_tests_wakunode2.nim` - Node application tests
- `tests/all_tests_common.nim` - Common utilities tests
### Key Dependencies
- `chronos` - Async framework
- `nim-results` - Result type for error handling
- `chronicles` - Logging
- `libp2p` - P2P networking
- `confutils` - CLI argument parsing
- `presto` - REST server
- `nimcrypto` - Cryptographic primitives
Note: For specific version requirements, check `waku.nimble`.

View File

@ -1,4 +1,10 @@
## v0.37.0 (2025-10-01) ## v0.37.1-beta (2025-12-10)
### Bug Fixes
- Remove ENR cache from peer exchange ([#3652](https://github.com/logos-messaging/logos-messaging-nim/pull/3652)) ([7920368a](https://github.com/logos-messaging/logos-messaging-nim/commit/7920368a36687cd5f12afa52d59866792d8457ca))
## v0.37.0-beta (2025-10-01)
### Notes ### Notes

View File

@ -430,18 +430,27 @@ docker-liteprotocoltester-push:
.PHONY: cbindings cwaku_example libwaku .PHONY: cbindings cwaku_example libwaku
STATIC ?= 0 STATIC ?= 0
BUILD_COMMAND ?= libwakuDynamic
ifeq ($(detected_OS),Windows)
LIB_EXT_DYNAMIC = dll
LIB_EXT_STATIC = lib
else ifeq ($(detected_OS),Darwin)
LIB_EXT_DYNAMIC = dylib
LIB_EXT_STATIC = a
else ifeq ($(detected_OS),Linux)
LIB_EXT_DYNAMIC = so
LIB_EXT_STATIC = a
endif
LIB_EXT := $(LIB_EXT_DYNAMIC)
ifeq ($(STATIC), 1)
LIB_EXT = $(LIB_EXT_STATIC)
BUILD_COMMAND = libwakuStatic
endif
libwaku: | build deps librln libwaku: | build deps librln
rm -f build/libwaku* echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
else ifeq ($(detected_OS),Windows)
make -f scripts/libwaku_windows_setup.mk windows-setup
echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
endif
##################### #####################
## Mobile Bindings ## ## Mobile Bindings ##
@ -553,4 +562,3 @@ release-notes:
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g' sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g'
# I could not get the tool to replace issue ids with links, so using sed for now, # I could not get the tool to replace issue ids with links, so using sed for now,
# asked here: https://github.com/bvieira/sv4git/discussions/101 # asked here: https://github.com/bvieira/sv4git/discussions/101

View File

@ -480,7 +480,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
if conf.lightpushnode != "": if conf.lightpushnode != "":
let peerInfo = parsePeerInfo(conf.lightpushnode) let peerInfo = parsePeerInfo(conf.lightpushnode)
if peerInfo.isOk(): if peerInfo.isOk():
await mountLegacyLightPush(node) (await node.mountLegacyLightPush()).isOkOr:
error "failed to mount legacy lightpush", error = error
quit(QuitFailure)
node.mountLegacyLightPushClient() node.mountLegacyLightPushClient()
node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec) node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec)
else: else:

View File

@ -38,6 +38,9 @@ A particular OpenAPI spec can be easily imported into [Postman](https://www.post
curl http://localhost:8645/debug/v1/info -s | jq curl http://localhost:8645/debug/v1/info -s | jq
``` ```
### Store API
The `page_size` flag in the Store API has a default value of 20 and a max value of 100.
### Node configuration ### Node configuration
Find details [here](https://github.com/waku-org/nwaku/tree/master/docs/operators/how-to/configure-rest-api.md) Find details [here](https://github.com/waku-org/nwaku/tree/master/docs/operators/how-to/configure-rest-api.md)

View File

@ -1,4 +1,3 @@
# Configure a REST API node # Configure a REST API node
A subset of the node configuration can be used to modify the behaviour of the HTTP REST API. A subset of the node configuration can be used to modify the behaviour of the HTTP REST API.
@ -21,3 +20,5 @@ Example:
```shell ```shell
wakunode2 --rest=true wakunode2 --rest=true
``` ```
The `page_size` flag in the Store API has a default value of 20 and a max value of 100.

View File

@ -13,6 +13,7 @@ import
node/peer_manager, node/peer_manager,
node/waku_node, node/waku_node,
node/kernel_api, node/kernel_api,
node/kernel_api/lightpush,
waku_lightpush_legacy, waku_lightpush_legacy,
waku_lightpush_legacy/common, waku_lightpush_legacy/common,
waku_lightpush_legacy/protocol_metrics, waku_lightpush_legacy/protocol_metrics,
@ -56,7 +57,7 @@ suite "Waku Legacy Lightpush - End To End":
(await server.mountRelay()).isOkOr: (await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
await server.mountLegacyLightpush() # without rln-relay check (await server.mountLegacyLightpush()).isOk() # without rln-relay
client.mountLegacyLightpushClient() client.mountLegacyLightpushClient()
serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
@ -135,8 +136,8 @@ suite "RLN Proofs as a Lightpush Service":
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
anvilProc = runAnvil() anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager() manager = waitFor setupOnchainGroupManager(deployContracts = false)
# mount rln-relay # mount rln-relay
let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
@ -147,7 +148,7 @@ suite "RLN Proofs as a Lightpush Service":
(await server.mountRelay()).isOkOr: (await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig) await server.mountRlnRelay(wakuRlnConfig)
await server.mountLegacyLightPush() check (await server.mountLegacyLightPush()).isOk()
client.mountLegacyLightPushClient() client.mountLegacyLightPushClient()
let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager)
@ -213,7 +214,7 @@ suite "Waku Legacy Lightpush message delivery":
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
(await bridgeNode.mountRelay()).isOkOr: (await bridgeNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
await bridgeNode.mountLegacyLightPush() check (await bridgeNode.mountLegacyLightPush()).isOk()
lightNode.mountLegacyLightPushClient() lightNode.mountLegacyLightPushClient()
discard await lightNode.peerManager.dialPeer( discard await lightNode.peerManager.dialPeer(
@ -249,3 +250,19 @@ suite "Waku Legacy Lightpush message delivery":
## Cleanup ## Cleanup
await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop())
suite "Waku Legacy Lightpush mounting behavior":
asyncTest "fails to mount when relay is not mounted":
## Given a node without Relay mounted
## (Legacy Lightpush forwards pushed messages over Relay, so Relay is a hard prerequisite)
let
key = generateSecp256k1Key()
node = newTestWakuNode(key, parseIpAddress("0.0.0.0"), Port(0))
# Do not mount Relay on purpose
check node.wakuRelay.isNil()
## Then mounting Legacy Lightpush must fail with the dedicated error value
let res = await node.mountLegacyLightPush()
check:
res.isErr()
res.error == MountWithoutRelayError

View File

@ -13,6 +13,7 @@ import
node/peer_manager, node/peer_manager,
node/waku_node, node/waku_node,
node/kernel_api, node/kernel_api,
node/kernel_api/lightpush,
waku_lightpush, waku_lightpush,
waku_rln_relay, waku_rln_relay,
], ],
@ -55,7 +56,7 @@ suite "Waku Lightpush - End To End":
(await server.mountRelay()).isOkOr: (await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
await server.mountLightpush() # without rln-relay check (await server.mountLightpush()).isOk() # without rln-relay
client.mountLightpushClient() client.mountLightpushClient()
serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo() serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
@ -135,8 +136,8 @@ suite "RLN Proofs as a Lightpush Service":
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0)) server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0)) client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
anvilProc = runAnvil() anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager() manager = waitFor setupOnchainGroupManager(deployContracts = false)
# mount rln-relay # mount rln-relay
let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1)) let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
@ -147,7 +148,7 @@ suite "RLN Proofs as a Lightpush Service":
(await server.mountRelay()).isOkOr: (await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig) await server.mountRlnRelay(wakuRlnConfig)
await server.mountLightPush() check (await server.mountLightPush()).isOk()
client.mountLightPushClient() client.mountLightPushClient()
let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager) let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager)
@ -213,7 +214,7 @@ suite "Waku Lightpush message delivery":
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
(await bridgeNode.mountRelay()).isOkOr: (await bridgeNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
await bridgeNode.mountLightPush() check (await bridgeNode.mountLightPush()).isOk()
lightNode.mountLightPushClient() lightNode.mountLightPushClient()
discard await lightNode.peerManager.dialPeer( discard await lightNode.peerManager.dialPeer(
@ -251,3 +252,19 @@ suite "Waku Lightpush message delivery":
## Cleanup ## Cleanup
await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop()) await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop())
suite "Waku Lightpush mounting behavior":
asyncTest "fails to mount when relay is not mounted":
## Given a node without Relay mounted
## (Lightpush forwards pushed messages over Relay, so Relay is a hard prerequisite)
let
key = generateSecp256k1Key()
node = newTestWakuNode(key, parseIpAddress("0.0.0.0"), Port(0))
# Do not mount Relay on purpose
check node.wakuRelay.isNil()
## Then mounting Lightpush must fail with the dedicated error value
let res = await node.mountLightPush()
check:
res.isErr()
res.error == MountWithoutRelayError

View File

@ -66,15 +66,17 @@ suite "Waku Peer Exchange":
suite "fetchPeerExchangePeers": suite "fetchPeerExchangePeers":
var node2 {.threadvar.}: WakuNode var node2 {.threadvar.}: WakuNode
var node3 {.threadvar.}: WakuNode
asyncSetup: asyncSetup:
node = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort) node = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort)
node2 = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort) node2 = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort)
node3 = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort)
await allFutures(node.start(), node2.start()) await allFutures(node.start(), node2.start(), node3.start())
asyncTeardown: asyncTeardown:
await allFutures(node.stop(), node2.stop()) await allFutures(node.stop(), node2.stop(), node3.stop())
asyncTest "Node fetches without mounting peer exchange": asyncTest "Node fetches without mounting peer exchange":
# When a node, without peer exchange mounted, fetches peers # When a node, without peer exchange mounted, fetches peers
@ -104,12 +106,10 @@ suite "Waku Peer Exchange":
await allFutures([node.mountPeerExchangeClient(), node2.mountPeerExchange()]) await allFutures([node.mountPeerExchangeClient(), node2.mountPeerExchange()])
check node.peerManager.switch.peerStore.peers.len == 0 check node.peerManager.switch.peerStore.peers.len == 0
# Mock that we discovered a node (to avoid running discv5) # Simulate node2 discovering node3 via Discv5
var enr = enr.Record() var rpInfo = node3.peerInfo.toRemotePeerInfo()
assert enr.fromUri( rpInfo.enr = some(node3.enr)
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" node2.peerManager.addPeer(rpInfo, PeerOrigin.Discv5)
), "Failed to parse ENR"
node2.wakuPeerExchange.enrCache.add(enr)
# Set node2 as service peer (default one) for px protocol # Set node2 as service peer (default one) for px protocol
node.peerManager.addServicePeer( node.peerManager.addServicePeer(
@ -121,10 +121,8 @@ suite "Waku Peer Exchange":
check res.tryGet() == 1 check res.tryGet() == 1
# Check that the peer ended up in the peerstore # Check that the peer ended up in the peerstore
let rpInfo = enr.toRemotePeerInfo.get()
check: check:
node.peerManager.switch.peerStore.peers.anyIt(it.peerId == rpInfo.peerId) node.peerManager.switch.peerStore.peers.anyIt(it.peerId == rpInfo.peerId)
node.peerManager.switch.peerStore.peers.anyIt(it.addrs == rpInfo.addrs)
suite "setPeerExchangePeer": suite "setPeerExchangePeer":
var node2 {.threadvar.}: WakuNode var node2 {.threadvar.}: WakuNode

View File

@ -282,7 +282,7 @@ suite "Sharding":
asyncTest "lightpush": asyncTest "lightpush":
# Given a connected server and client subscribed to the same pubsub topic # Given a connected server and client subscribed to the same pubsub topic
client.mountLegacyLightPushClient() client.mountLegacyLightPushClient()
await server.mountLightpush() check (await server.mountLightpush()).isOk()
let let
topic = "/waku/2/rs/0/1" topic = "/waku/2/rs/0/1"
@ -405,7 +405,7 @@ suite "Sharding":
asyncTest "lightpush (automatic sharding filtering)": asyncTest "lightpush (automatic sharding filtering)":
# Given a connected server and client using the same content topic (with two different formats) # Given a connected server and client using the same content topic (with two different formats)
client.mountLegacyLightPushClient() client.mountLegacyLightPushClient()
await server.mountLightpush() check (await server.mountLightpush()).isOk()
let let
contentTopicShort = "/toychat/2/huilong/proto" contentTopicShort = "/toychat/2/huilong/proto"
@ -563,7 +563,7 @@ suite "Sharding":
asyncTest "lightpush - exclusion (automatic sharding filtering)": asyncTest "lightpush - exclusion (automatic sharding filtering)":
# Given a connected server and client using different content topics # Given a connected server and client using different content topics
client.mountLegacyLightPushClient() client.mountLegacyLightPushClient()
await server.mountLightpush() check (await server.mountLightpush()).isOk()
let let
contentTopic1 = "/toychat/2/huilong/proto" contentTopic1 = "/toychat/2/huilong/proto"
@ -874,7 +874,7 @@ suite "Sharding":
asyncTest "Waku LightPush Sharding (Static Sharding)": asyncTest "Waku LightPush Sharding (Static Sharding)":
# Given a connected server and client using two different pubsub topics # Given a connected server and client using two different pubsub topics
client.mountLegacyLightPushClient() client.mountLegacyLightPushClient()
await server.mountLightpush() check (await server.mountLightpush()).isOk()
# Given a connected server and client subscribed to multiple pubsub topics # Given a connected server and client subscribed to multiple pubsub topics
let let

View File

@ -142,9 +142,13 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 = node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node4 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange # Start and mount peer exchange
await allFutures([node1.start(), node2.start()]) await allFutures([node1.start(), node2.start(), node3.start(), node4.start()])
await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()]) await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()])
# Create connection # Create connection
@ -154,18 +158,15 @@ suite "Waku Peer Exchange":
require: require:
connOpt.isSome connOpt.isSome
# Create some enr and add to peer exchange (simulating disv5) # Simulate node1 discovering node3 via Discv5
var enr1, enr2 = enr.Record() var info3 = node3.peerInfo.toRemotePeerInfo()
check enr1.fromUri( info3.enr = some(node3.enr)
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
)
check enr2.fromUri(
"enr:-Iu4QGJllOWlviPIh_SGR-VVm55nhnBIU5L-s3ran7ARz_4oDdtJPtUs3Bc5aqZHCiPQX6qzNYF2ARHER0JPX97TFbEBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQP3ULycvday4EkvtVu0VqbBdmOkbfVLJx8fPe0lE_dRkIN0Y3CC6mCFd2FrdTIB"
)
# Mock that we have discovered these enrs # Simulate node1 discovering node4 via Discv5
node1.wakuPeerExchange.enrCache.add(enr1) var info4 = node4.peerInfo.toRemotePeerInfo()
node1.wakuPeerExchange.enrCache.add(enr2) info4.enr = some(node4.enr)
node1.peerManager.addPeer(info4, PeerOrigin.Discv5)
# Request 2 peer from px. Test all request variants # Request 2 peer from px. Test all request variants
let response1 = await node2.wakuPeerExchangeClient.request(2) let response1 = await node2.wakuPeerExchangeClient.request(2)
@ -185,12 +186,12 @@ suite "Waku Peer Exchange":
response3.get().peerInfos.len == 2 response3.get().peerInfos.len == 2
# Since it can return duplicates test that at least one of the enrs is in the response # Since it can return duplicates test that at least one of the enrs is in the response
response1.get().peerInfos.anyIt(it.enr == enr1.raw) or response1.get().peerInfos.anyIt(it.enr == node3.enr.raw) or
response1.get().peerInfos.anyIt(it.enr == enr2.raw) response1.get().peerInfos.anyIt(it.enr == node4.enr.raw)
response2.get().peerInfos.anyIt(it.enr == enr1.raw) or response2.get().peerInfos.anyIt(it.enr == node3.enr.raw) or
response2.get().peerInfos.anyIt(it.enr == enr2.raw) response2.get().peerInfos.anyIt(it.enr == node4.enr.raw)
response3.get().peerInfos.anyIt(it.enr == enr1.raw) or response3.get().peerInfos.anyIt(it.enr == node3.enr.raw) or
response3.get().peerInfos.anyIt(it.enr == enr2.raw) response3.get().peerInfos.anyIt(it.enr == node4.enr.raw)
asyncTest "Request fails gracefully": asyncTest "Request fails gracefully":
let let
@ -265,8 +266,8 @@ suite "Waku Peer Exchange":
peerInfo2.origin = PeerOrigin.Discv5 peerInfo2.origin = PeerOrigin.Discv5
check: check:
not poolFilter(cluster, peerInfo1) poolFilter(cluster, peerInfo1).isErr()
poolFilter(cluster, peerInfo2) poolFilter(cluster, peerInfo2).isOk()
asyncTest "Request 0 peers, with 1 peer in PeerExchange": asyncTest "Request 0 peers, with 1 peer in PeerExchange":
# Given two valid nodes with PeerExchange # Given two valid nodes with PeerExchange
@ -275,9 +276,11 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 = node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange # Start and mount peer exchange
await allFutures([node1.start(), node2.start()]) await allFutures([node1.start(), node2.start(), node3.start()])
await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()]) await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()])
# Connect the nodes # Connect the nodes
@ -286,12 +289,10 @@ suite "Waku Peer Exchange":
) )
assert dialResponse.isSome assert dialResponse.isSome
# Mock that we have discovered one enr # Simulate node1 discovering node3 via Discv5
var record = enr.Record() var info3 = node3.peerInfo.toRemotePeerInfo()
check record.fromUri( info3.enr = some(node3.enr)
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
)
node1.wakuPeerExchange.enrCache.add(record)
# When requesting 0 peers # When requesting 0 peers
let response = await node2.wakuPeerExchangeClient.request(0) let response = await node2.wakuPeerExchangeClient.request(0)
@ -312,13 +313,6 @@ suite "Waku Peer Exchange":
await allFutures([node1.start(), node2.start()]) await allFutures([node1.start(), node2.start()])
await allFutures([node1.mountPeerExchangeClient(), node2.mountPeerExchange()]) await allFutures([node1.mountPeerExchangeClient(), node2.mountPeerExchange()])
# Mock that we have discovered one enr
var record = enr.Record()
check record.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
)
node2.wakuPeerExchange.enrCache.add(record)
# When making any request with an invalid peer info # When making any request with an invalid peer info
var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo() var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo()
remotePeerInfo2.peerId.data.add(255.byte) remotePeerInfo2.peerId.data.add(255.byte)
@ -362,17 +356,17 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 = node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange # Start and mount peer exchange
await allFutures([node1.start(), node2.start()]) await allFutures([node1.start(), node2.start(), node3.start()])
await allFutures([node1.mountPeerExchange(), node2.mountPeerExchange()]) await allFutures([node1.mountPeerExchange(), node2.mountPeerExchange()])
# Mock that we have discovered these enrs # Simulate node1 discovering node3 via Discv5
var enr1 = enr.Record() var info3 = node3.peerInfo.toRemotePeerInfo()
check enr1.fromUri( info3.enr = some(node3.enr)
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB" node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
)
node1.wakuPeerExchange.enrCache.add(enr1)
# Create connection # Create connection
let connOpt = await node2.peerManager.dialPeer( let connOpt = await node2.peerManager.dialPeer(
@ -396,7 +390,7 @@ suite "Waku Peer Exchange":
check: check:
decodedBuff.get().response.status_code == PeerExchangeResponseStatusCode.SUCCESS decodedBuff.get().response.status_code == PeerExchangeResponseStatusCode.SUCCESS
decodedBuff.get().response.peerInfos.len == 1 decodedBuff.get().response.peerInfos.len == 1
decodedBuff.get().response.peerInfos[0].enr == enr1.raw decodedBuff.get().response.peerInfos[0].enr == node3.enr.raw
asyncTest "RateLimit as expected": asyncTest "RateLimit as expected":
let let
@ -404,9 +398,11 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 = node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange # Start and mount peer exchange
await allFutures([node1.start(), node2.start()]) await allFutures([node1.start(), node2.start(), node3.start()])
await allFutures( await allFutures(
[ [
node1.mountPeerExchange(rateLimit = (1, 150.milliseconds)), node1.mountPeerExchange(rateLimit = (1, 150.milliseconds)),
@ -414,6 +410,11 @@ suite "Waku Peer Exchange":
] ]
) )
# Simulate node1 discovering node3 via Discv5
var info3 = node3.peerInfo.toRemotePeerInfo()
info3.enr = some(node3.enr)
node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
# Create connection # Create connection
let connOpt = await node2.peerManager.dialPeer( let connOpt = await node2.peerManager.dialPeer(
node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec
@ -421,19 +422,6 @@ suite "Waku Peer Exchange":
require: require:
connOpt.isSome connOpt.isSome
# Create some enr and add to peer exchange (simulating disv5)
var enr1, enr2 = enr.Record()
check enr1.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
)
check enr2.fromUri(
"enr:-Iu4QGJllOWlviPIh_SGR-VVm55nhnBIU5L-s3ran7ARz_4oDdtJPtUs3Bc5aqZHCiPQX6qzNYF2ARHER0JPX97TFbEBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQP3ULycvday4EkvtVu0VqbBdmOkbfVLJx8fPe0lE_dRkIN0Y3CC6mCFd2FrdTIB"
)
# Mock that we have discovered these enrs
node1.wakuPeerExchange.enrCache.add(enr1)
node1.wakuPeerExchange.enrCache.add(enr2)
await sleepAsync(150.milliseconds) await sleepAsync(150.milliseconds)
# Request 2 peer from px. Test all request variants # Request 2 peer from px. Test all request variants

View File

@ -0,0 +1,29 @@
{.used.}
{.push raises: [].}
import std/[options, os], results, testutils/unittests, chronos, web3
import
waku/[
waku_rln_relay,
waku_rln_relay/conversion_utils,
waku_rln_relay/group_manager/on_chain/group_manager,
],
./utils_onchain
suite "Token and RLN Contract Deployment":
test "anvil should dump state to file on exit":
# git will ignore this file; if the contract has been updated and the state file
# needs to be regenerated, this file can be renamed to replace the one in the repo
# (tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json)
let testStateFile = some("tests/waku_rln_relay/anvil_state/anvil_state.ignore.json")
# Start anvil so it writes its chain state to testStateFile when it exits
let anvilProc = runAnvil(stateFile = testStateFile, dumpStateOnExit = true)
# Deploy the contracts so the dumped state contains them
# NOTE(review): `manager` is unused afterwards — presumably only its deployment
# side effect matters; confirm this is intended
let manager = waitFor setupOnchainGroupManager(deployContracts = true)
stopAnvil(anvilProc)
check:
fileExists(testStateFile.get())
# The test should still pass even if the compression fails
compressGzipFile(testStateFile.get(), testStateFile.get() & ".gz").isOkOr:
error "Failed to compress state file", error = error

View File

@ -33,8 +33,8 @@ suite "Onchain group manager":
var manager {.threadVar.}: OnchainGroupManager var manager {.threadVar.}: OnchainGroupManager
setup: setup:
anvilProc = runAnvil() anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager() manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown: teardown:
stopAnvil(anvilProc) stopAnvil(anvilProc)

View File

@ -27,8 +27,8 @@ suite "Waku rln relay":
var manager {.threadVar.}: OnchainGroupManager var manager {.threadVar.}: OnchainGroupManager
setup: setup:
anvilProc = runAnvil() anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager() manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown: teardown:
stopAnvil(anvilProc) stopAnvil(anvilProc)

View File

@ -30,8 +30,8 @@ procSuite "WakuNode - RLN relay":
var manager {.threadVar.}: OnchainGroupManager var manager {.threadVar.}: OnchainGroupManager
setup: setup:
anvilProc = runAnvil() anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager() manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown: teardown:
stopAnvil(anvilProc) stopAnvil(anvilProc)

View File

@ -3,7 +3,7 @@
{.push raises: [].} {.push raises: [].}
import import
std/[options, os, osproc, deques, streams, strutils, tempfiles, strformat], std/[options, os, osproc, streams, strutils, strformat],
results, results,
stew/byteutils, stew/byteutils,
testutils/unittests, testutils/unittests,
@ -14,7 +14,6 @@ import
web3/conversions, web3/conversions,
web3/eth_api_types, web3/eth_api_types,
json_rpc/rpcclient, json_rpc/rpcclient,
json,
libp2p/crypto/crypto, libp2p/crypto/crypto,
eth/keys, eth/keys,
results results
@ -24,25 +23,19 @@ import
waku_rln_relay, waku_rln_relay,
waku_rln_relay/protocol_types, waku_rln_relay/protocol_types,
waku_rln_relay/constants, waku_rln_relay/constants,
waku_rln_relay/contract,
waku_rln_relay/rln, waku_rln_relay/rln,
], ],
../testlib/common, ../testlib/common
./utils
const CHAIN_ID* = 1234'u256 const CHAIN_ID* = 1234'u256
template skip0xPrefix(hexStr: string): int = # Path to the file which Anvil loads at startup to initialize the chain with pre-deployed contracts, an account funded with tokens and approved for spending
## Returns the index of the first meaningful char in `hexStr` by skipping const DEFAULT_ANVIL_STATE_PATH* =
## "0x" prefix "tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json.gz"
if hexStr.len > 1 and hexStr[0] == '0' and hexStr[1] in {'x', 'X'}: 2 else: 0 # The contract address of the TestStableToken used for the RLN Membership registration fee
const TOKEN_ADDRESS* = "0x5FbDB2315678afecb367f032d93F642f64180aa3"
func strip0xPrefix(s: string): string = # The contract address used ti interact with the WakuRLNV2 contract via the proxy
let prefixLen = skip0xPrefix(s) const WAKU_RLNV2_PROXY_ADDRESS* = "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707"
if prefixLen != 0:
s[prefixLen .. ^1]
else:
s
proc generateCredentials*(): IdentityCredential = proc generateCredentials*(): IdentityCredential =
let credRes = membershipKeyGen() let credRes = membershipKeyGen()
@ -106,7 +99,7 @@ proc sendMintCall(
recipientAddress: Address, recipientAddress: Address,
amountTokens: UInt256, amountTokens: UInt256,
recipientBalanceBeforeExpectedTokens: Option[UInt256] = none(UInt256), recipientBalanceBeforeExpectedTokens: Option[UInt256] = none(UInt256),
): Future[TxHash] {.async.} = ): Future[void] {.async.} =
let doBalanceAssert = recipientBalanceBeforeExpectedTokens.isSome() let doBalanceAssert = recipientBalanceBeforeExpectedTokens.isSome()
if doBalanceAssert: if doBalanceAssert:
@ -142,7 +135,7 @@ proc sendMintCall(
tx.data = Opt.some(byteutils.hexToSeqByte(mintCallData)) tx.data = Opt.some(byteutils.hexToSeqByte(mintCallData))
trace "Sending mint call" trace "Sending mint call"
let txHash = await web3.send(tx) discard await web3.send(tx)
let balanceOfSelector = "0x70a08231" let balanceOfSelector = "0x70a08231"
let balanceCallData = balanceOfSelector & paddedAddress let balanceCallData = balanceOfSelector & paddedAddress
@ -157,8 +150,6 @@ proc sendMintCall(
assert balanceAfterMint == balanceAfterExpectedTokens, assert balanceAfterMint == balanceAfterExpectedTokens,
fmt"Balance is {balanceAfterMint} after transfer but expected {balanceAfterExpectedTokens}" fmt"Balance is {balanceAfterMint} after transfer but expected {balanceAfterExpectedTokens}"
return txHash
# Check how many tokens a spender (the RLN contract) is allowed to spend on behalf of the owner (account which wishes to register a membership) # Check how many tokens a spender (the RLN contract) is allowed to spend on behalf of the owner (account which wishes to register a membership)
proc checkTokenAllowance( proc checkTokenAllowance(
web3: Web3, tokenAddress: Address, owner: Address, spender: Address web3: Web3, tokenAddress: Address, owner: Address, spender: Address
@ -487,20 +478,64 @@ proc getAnvilPath*(): string =
anvilPath = joinPath(anvilPath, ".foundry/bin/anvil") anvilPath = joinPath(anvilPath, ".foundry/bin/anvil")
return $anvilPath return $anvilPath
proc decompressGzipFile*(
compressedPath: string, targetPath: string
): Result[void, string] =
## Decompress a gzipped file using the gunzip command-line utility
let cmd = fmt"gunzip -c {compressedPath} > {targetPath}"
try:
let (output, exitCode) = execCmdEx(cmd)
if exitCode != 0:
return err(
"Failed to decompress '" & compressedPath & "' to '" & targetPath & "': " &
output
)
except OSError as e:
return err("Failed to execute gunzip command: " & e.msg)
except IOError as e:
return err("Failed to execute gunzip command: " & e.msg)
ok()
proc compressGzipFile*(sourcePath: string, targetPath: string): Result[void, string] =
## Compress a file with gzip using the gzip command-line utility
let cmd = fmt"gzip -c {sourcePath} > {targetPath}"
try:
let (output, exitCode) = execCmdEx(cmd)
if exitCode != 0:
return err(
"Failed to compress '" & sourcePath & "' to '" & targetPath & "': " & output
)
except OSError as e:
return err("Failed to execute gzip command: " & e.msg)
except IOError as e:
return err("Failed to execute gzip command: " & e.msg)
ok()
# Runs Anvil daemon # Runs Anvil daemon
proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process = proc runAnvil*(
port: int = 8540,
chainId: string = "1234",
stateFile: Option[string] = none(string),
dumpStateOnExit: bool = false,
): Process =
# Passed options are # Passed options are
# --port Port to listen on. # --port Port to listen on.
# --gas-limit Sets the block gas limit in WEI. # --gas-limit Sets the block gas limit in WEI.
# --balance The default account balance, specified in ether. # --balance The default account balance, specified in ether.
# --chain-id Chain ID of the network. # --chain-id Chain ID of the network.
# --load-state Initialize the chain from a previously saved state snapshot (read-only)
# --dump-state Dump the state on exit to the given file (write-only)
# See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details # See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details
try: try:
let anvilPath = getAnvilPath() let anvilPath = getAnvilPath()
info "Anvil path", anvilPath info "Anvil path", anvilPath
let runAnvil = startProcess(
anvilPath, var args =
args = [ @[
"--port", "--port",
$port, $port,
"--gas-limit", "--gas-limit",
@ -509,9 +544,54 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
"1000000000", "1000000000",
"--chain-id", "--chain-id",
$chainId, $chainId,
], ]
options = {poUsePath, poStdErrToStdOut},
) # Add state file argument if provided
if stateFile.isSome():
var statePath = stateFile.get()
info "State file parameter provided",
statePath = statePath,
dumpStateOnExit = dumpStateOnExit,
absolutePath = absolutePath(statePath)
# Check if the file is gzip compressed and handle decompression
if statePath.endsWith(".gz"):
let decompressedPath = statePath[0 .. ^4] # Remove .gz extension
debug "Gzip compressed state file detected",
compressedPath = statePath, decompressedPath = decompressedPath
if not fileExists(decompressedPath):
decompressGzipFile(statePath, decompressedPath).isOkOr:
error "Failed to decompress state file", error = error
return nil
statePath = decompressedPath
if dumpStateOnExit:
# Ensure the directory exists
let stateDir = parentDir(statePath)
if not dirExists(stateDir):
createDir(stateDir)
# Fresh deployment: start clean and dump state on exit
args.add("--dump-state")
args.add(statePath)
debug "Anvil configured to dump state on exit", path = statePath
else:
# Using cache: only load state, don't overwrite it (preserves clean cached state)
if fileExists(statePath):
args.add("--load-state")
args.add(statePath)
debug "Anvil configured to load state file (read-only)", path = statePath
else:
warn "State file does not exist, anvil will start fresh",
path = statePath, absolutePath = absolutePath(statePath)
else:
info "No state file provided, anvil will start fresh without state persistence"
info "Starting anvil with arguments", args = args.join(" ")
let runAnvil =
startProcess(anvilPath, args = args, options = {poUsePath, poStdErrToStdOut})
let anvilPID = runAnvil.processID let anvilPID = runAnvil.processID
# We read stdout from Anvil to see when daemon is ready # We read stdout from Anvil to see when daemon is ready
@ -549,7 +629,14 @@ proc stopAnvil*(runAnvil: Process) {.used.} =
# Send termination signals # Send termination signals
when not defined(windows): when not defined(windows):
discard execCmdEx(fmt"kill -TERM {anvilPID}") discard execCmdEx(fmt"kill -TERM {anvilPID}")
discard execCmdEx(fmt"kill -9 {anvilPID}") # Wait for graceful shutdown to allow state dumping
sleep(200)
# Only force kill if process is still running
let checkResult = execCmdEx(fmt"kill -0 {anvilPID} 2>/dev/null")
if checkResult.exitCode == 0:
info "Anvil process still running after TERM signal, sending KILL",
anvilPID = anvilPID
discard execCmdEx(fmt"kill -9 {anvilPID}")
else: else:
discard execCmdEx(fmt"taskkill /F /PID {anvilPID}") discard execCmdEx(fmt"taskkill /F /PID {anvilPID}")
@ -560,52 +647,100 @@ proc stopAnvil*(runAnvil: Process) {.used.} =
info "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg info "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg
proc setupOnchainGroupManager*( proc setupOnchainGroupManager*(
ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256 ethClientUrl: string = EthClient,
amountEth: UInt256 = 10.u256,
deployContracts: bool = true,
): Future[OnchainGroupManager] {.async.} = ): Future[OnchainGroupManager] {.async.} =
## Setup an onchain group manager for testing
## If deployContracts is false, it will assume that the Anvil testnet already has the required contracts deployed, this significantly speeds up test runs.
## To run Anvil with a cached state file containing pre-deployed contracts, see runAnvil documentation.
##
## To generate/update the cached state file:
## 1. Call runAnvil with stateFile and dumpStateOnExit=true
## 2. Run setupOnchainGroupManager with deployContracts=true to deploy contracts
## 3. The state will be saved to the specified file when anvil exits
## 4. Commit this file to git
##
## To use cached state:
## 1. Call runAnvil with stateFile and dumpStateOnExit=false
## 2. Anvil loads state in read-only mode (won't overwrite the cached file)
## 3. Call setupOnchainGroupManager with deployContracts=false
## 4. Tests run fast using pre-deployed contracts
let rlnInstanceRes = createRlnInstance() let rlnInstanceRes = createRlnInstance()
check: check:
rlnInstanceRes.isOk() rlnInstanceRes.isOk()
let rlnInstance = rlnInstanceRes.get() let rlnInstance = rlnInstanceRes.get()
# connect to the eth client
let web3 = await newWeb3(ethClientUrl) let web3 = await newWeb3(ethClientUrl)
let accounts = await web3.provider.eth_accounts() let accounts = await web3.provider.eth_accounts()
web3.defaultAccount = accounts[1] web3.defaultAccount = accounts[1]
let (privateKey, acc) = createEthAccount(web3) var privateKey: keys.PrivateKey
var acc: Address
var testTokenAddress: Address
var contractAddress: Address
# we just need to fund the default account if not deployContracts:
# the send procedure returns a tx hash that we don't use, hence discard info "Using contract addresses from constants"
discard await sendEthTransfer(
web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256)
)
let testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr: testTokenAddress = Address(hexToByteArray[20](TOKEN_ADDRESS))
assert false, "Failed to deploy test token contract: " & $error contractAddress = Address(hexToByteArray[20](WAKU_RLNV2_PROXY_ADDRESS))
return
# mint the token from the generated account (privateKey, acc) = createEthAccount(web3)
discard await sendMintCall(
web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256), some(0.u256)
)
let contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr: # Fund the test account
assert false, "Failed to deploy RLN contract: " & $error discard await sendEthTransfer(web3, web3.defaultAccount, acc, ethToWei(1000.u256))
return
# If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens # Mint tokens to the test account
let tokenApprovalResult = await approveTokenAllowanceAndVerify( await sendMintCall(
web3, web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256)
acc, )
privateKey,
testTokenAddress,
contractAddress,
ethToWei(200.u256),
some(0.u256),
)
assert tokenApprovalResult.isOk, tokenApprovalResult.error() # Approve the contract to spend tokens
let tokenApprovalResult = await approveTokenAllowanceAndVerify(
web3, acc, privateKey, testTokenAddress, contractAddress, ethToWei(200.u256)
)
assert tokenApprovalResult.isOk(), tokenApprovalResult.error
else:
info "Performing Token and RLN contracts deployment"
(privateKey, acc) = createEthAccount(web3)
# fund the default account
discard await sendEthTransfer(
web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256)
)
testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy test token contract: " & $error
return
# mint the token from the generated account
await sendMintCall(
web3,
web3.defaultAccount,
testTokenAddress,
acc,
ethToWei(1000.u256),
some(0.u256),
)
contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy RLN contract: " & $error
return
# If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens
let tokenApprovalResult = await approveTokenAllowanceAndVerify(
web3,
acc,
privateKey,
testTokenAddress,
contractAddress,
ethToWei(200.u256),
some(0.u256),
)
assert tokenApprovalResult.isOk(), tokenApprovalResult.error
let manager = OnchainGroupManager( let manager = OnchainGroupManager(
ethClientUrls: @[ethClientUrl], ethClientUrls: @[ethClientUrl],

View File

@ -41,8 +41,8 @@ suite "Waku v2 REST API - health":
var manager {.threadVar.}: OnchainGroupManager var manager {.threadVar.}: OnchainGroupManager
setup: setup:
anvilProc = runAnvil() anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager() manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown: teardown:
stopAnvil(anvilProc) stopAnvil(anvilProc)

View File

@ -61,7 +61,7 @@ proc init(
assert false, "Failed to mount relay: " & $error assert false, "Failed to mount relay: " & $error
(await testSetup.serviceNode.mountRelay()).isOkOr: (await testSetup.serviceNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay: " & $error assert false, "Failed to mount relay: " & $error
await testSetup.serviceNode.mountLightPush(rateLimit) check (await testSetup.serviceNode.mountLightPush(rateLimit)).isOk()
testSetup.pushNode.mountLightPushClient() testSetup.pushNode.mountLightPushClient()
testSetup.serviceNode.peerManager.addServicePeer( testSetup.serviceNode.peerManager.addServicePeer(

View File

@ -61,7 +61,7 @@ proc init(
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
(await testSetup.serviceNode.mountRelay()).isOkOr: (await testSetup.serviceNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay" assert false, "Failed to mount relay"
await testSetup.serviceNode.mountLegacyLightPush(rateLimit) check (await testSetup.serviceNode.mountLegacyLightPush(rateLimit)).isOk()
testSetup.pushNode.mountLegacyLightPushClient() testSetup.pushNode.mountLegacyLightPushClient()
testSetup.serviceNode.peerManager.addServicePeer( testSetup.serviceNode.peerManager.addServicePeer(

@ -1 +1 @@
Subproject commit 900d4f95e0e618bdeb4c241f7a4b6347df6bb950 Subproject commit 8a338f354481e8a3f3d64a72e38fad4c62e32dcd

View File

@ -61,27 +61,21 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " & exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " &
srcDir & name & ".nim" srcDir & name & ".nim"
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") = proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static") =
if not dirExists "build": if not dirExists "build":
mkDir "build" mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims" # allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params var extra_params = params
for i in 2 ..< paramCount(): for i in 2 ..< (paramCount() - 1):
extra_params &= " " & paramStr(i) extra_params &= " " & paramStr(i)
if `type` == "static": if `type` == "static":
exec "nim c" & " --out:build/" & name & exec "nim c" & " --out:build/" & lib_name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " & " --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim" extra_params & " " & srcDir & "libwaku.nim"
else: else:
let lib_name = (when defined(windows): toDll(name) else: name & ".so") exec "nim c" & " --out:build/" & lib_name &
when defined(windows): " --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
exec "nim c" & " --out:build/" & lib_name & extra_params & " " & srcDir & "libwaku.nim"
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
proc buildMobileAndroid(srcDir = ".", params = "") = proc buildMobileAndroid(srcDir = ".", params = "") =
let cpu = getEnv("CPU") let cpu = getEnv("CPU")
@ -210,12 +204,12 @@ let chroniclesParams =
"--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE" "--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE"
task libwakuStatic, "Build the cbindings waku node library": task libwakuStatic, "Build the cbindings waku node library":
let name = "libwaku" let lib_name = paramStr(paramCount())
buildLibrary name, "library/", chroniclesParams, "static" buildLibrary lib_name, "library/", chroniclesParams, "static"
task libwakuDynamic, "Build the cbindings waku node library": task libwakuDynamic, "Build the cbindings waku node library":
let name = "libwaku" let lib_name = paramStr(paramCount())
buildLibrary name, "library/", chroniclesParams, "dynamic" buildLibrary lib_name, "library/", chroniclesParams, "dynamic"
### Mobile Android ### Mobile Android
task libWakuAndroid, "Build the mobile bindings for Android": task libWakuAndroid, "Build the mobile bindings for Android":

View File

@ -606,7 +606,7 @@ proc build*(
let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false) let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false)
let wakuFlags = CapabilitiesBitfield.init( let wakuFlags = CapabilitiesBitfield.init(
lightpush = lightPush, lightpush = lightPush and relay,
filter = filterServiceConf.isSome, filter = filterServiceConf.isSome,
store = storeServiceConf.isSome, store = storeServiceConf.isSome,
relay = relay, relay = relay,

View File

@ -368,8 +368,11 @@ proc setupProtocols(
# NOTE Must be mounted after relay # NOTE Must be mounted after relay
if conf.lightPush: if conf.lightPush:
try: try:
await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) (await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))).isOkOr:
await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH)) return err("failed to mount waku lightpush protocol: " & $error)
(await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))).isOkOr:
return err("failed to mount waku legacy lightpush protocol: " & $error)
except CatchableError: except CatchableError:
return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg()) return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg())

View File

@ -34,26 +34,27 @@ import
logScope: logScope:
topics = "waku node lightpush api" topics = "waku node lightpush api"
const MountWithoutRelayError* = "cannot mount lightpush because relay is not mounted"
## Waku lightpush ## Waku lightpush
proc mountLegacyLightPush*( proc mountLegacyLightPush*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} = ): Future[Result[void, string]] {.async.} =
info "mounting legacy light push" info "mounting legacy light push"
let pushHandler = if node.wakuRelay.isNil():
if node.wakuRelay.isNil: return err(MountWithoutRelayError)
info "mounting legacy lightpush without relay (nil)"
legacy_lightpush_protocol.getNilPushHandler() info "mounting legacy lightpush with relay"
let rlnPeer =
if node.wakuRlnRelay.isNil():
info "mounting legacy lightpush without rln-relay"
none(WakuRLNRelay)
else: else:
info "mounting legacy lightpush with relay" info "mounting legacy lightpush with rln-relay"
let rlnPeer = some(node.wakuRlnRelay)
if isNil(node.wakuRlnRelay): let pushHandler =
info "mounting legacy lightpush without rln-relay" legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
none(WakuRLNRelay)
else:
info "mounting legacy lightpush with rln-relay"
some(node.wakuRlnRelay)
legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
node.wakuLegacyLightPush = node.wakuLegacyLightPush =
WakuLegacyLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit)) WakuLegacyLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit))
@ -64,6 +65,9 @@ proc mountLegacyLightPush*(
node.switch.mount(node.wakuLegacyLightPush, protocolMatcher(WakuLegacyLightPushCodec)) node.switch.mount(node.wakuLegacyLightPush, protocolMatcher(WakuLegacyLightPushCodec))
info "legacy lightpush mounted successfully"
return ok()
proc mountLegacyLightPushClient*(node: WakuNode) = proc mountLegacyLightPushClient*(node: WakuNode) =
info "mounting legacy light push client" info "mounting legacy light push client"
@ -146,23 +150,21 @@ proc legacyLightpushPublish*(
proc mountLightPush*( proc mountLightPush*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} = ): Future[Result[void, string]] {.async.} =
info "mounting light push" info "mounting light push"
let pushHandler = if node.wakuRelay.isNil():
if node.wakuRelay.isNil(): return err(MountWithoutRelayError)
info "mounting lightpush v2 without relay (nil)"
lightpush_protocol.getNilPushHandler() info "mounting lightpush with relay"
let rlnPeer =
if node.wakuRlnRelay.isNil():
info "mounting lightpush without rln-relay"
none(WakuRLNRelay)
else: else:
info "mounting lightpush with relay" info "mounting lightpush with rln-relay"
let rlnPeer = some(node.wakuRlnRelay)
if isNil(node.wakuRlnRelay): let pushHandler = lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
info "mounting lightpush without rln-relay"
none(WakuRLNRelay)
else:
info "mounting lightpush with rln-relay"
some(node.wakuRlnRelay)
lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
node.wakuLightPush = WakuLightPush.new( node.wakuLightPush = WakuLightPush.new(
node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit) node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit)
@ -174,6 +176,9 @@ proc mountLightPush*(
node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec)) node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec))
info "lightpush mounted successfully"
return ok()
proc mountLightPushClient*(node: WakuNode) = proc mountLightPushClient*(node: WakuNode) =
info "mounting light push client" info "mounting light push client"

View File

@ -227,3 +227,17 @@ proc getPeersByCapability*(
): seq[RemotePeerInfo] = ): seq[RemotePeerInfo] =
return return
peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap)) peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap))
template forEnrPeers*(
peerStore: PeerStore,
peerId, peerConnectedness, peerOrigin, peerEnrRecord, body: untyped,
) =
let enrBook = peerStore[ENRBook]
let connBook = peerStore[ConnectionBook]
let sourceBook = peerStore[SourceBook]
for pid, enrRecord in tables.pairs(enrBook.book):
let peerId {.inject.} = pid
let peerConnectedness {.inject.} = connBook.book.getOrDefault(pid, NotConnected)
let peerOrigin {.inject.} = sourceBook.book.getOrDefault(pid, UnknownOrigin)
let peerEnrRecord {.inject.} = enrRecord
body

View File

@ -560,9 +560,6 @@ proc stop*(node: WakuNode) {.async.} =
if not node.wakuStoreTransfer.isNil(): if not node.wakuStoreTransfer.isNil():
node.wakuStoreTransfer.stop() node.wakuStoreTransfer.stop()
if not node.wakuPeerExchange.isNil() and not node.wakuPeerExchange.pxLoopHandle.isNil():
await node.wakuPeerExchange.pxLoopHandle.cancelAndWait()
if not node.wakuPeerExchangeClient.isNil() and if not node.wakuPeerExchangeClient.isNil() and
not node.wakuPeerExchangeClient.pxLoopHandle.isNil(): not node.wakuPeerExchangeClient.pxLoopHandle.isNil():
await node.wakuPeerExchangeClient.pxLoopHandle.cancelAndWait() await node.wakuPeerExchangeClient.pxLoopHandle.cancelAndWait()

View File

@ -57,7 +57,7 @@ proc getStoreMessagesV3*(
# Optional cursor fields # Optional cursor fields
cursor: string = "", # base64-encoded hash cursor: string = "", # base64-encoded hash
ascending: string = "", ascending: string = "",
pageSize: string = "", pageSize: string = "20", # default value is 20
): RestResponse[StoreQueryResponseHex] {. ): RestResponse[StoreQueryResponseHex] {.
rest, endpoint: "/store/v3/messages", meth: HttpMethod.MethodGet rest, endpoint: "/store/v3/messages", meth: HttpMethod.MethodGet
.} .}

View File

@ -129,6 +129,14 @@ proc createStoreQuery(
except CatchableError: except CatchableError:
return err("page size parsing error: " & getCurrentExceptionMsg()) return err("page size parsing error: " & getCurrentExceptionMsg())
# Enforce default value of page_size to 20
if parsedPagedSize.isNone():
parsedPagedSize = some(20.uint64)
# Enforce max value of page_size to 100
if parsedPagedSize.get() > 100:
parsedPagedSize = some(100.uint64)
return ok( return ok(
StoreQueryRequest( StoreQueryRequest(
includeData: parsedIncludeData, includeData: parsedIncludeData,

View File

@ -22,7 +22,6 @@ export WakuPeerExchangeCodec
declarePublicGauge waku_px_peers_received_unknown, declarePublicGauge waku_px_peers_received_unknown,
"number of previously unknown ENRs received via peer exchange" "number of previously unknown ENRs received via peer exchange"
declarePublicGauge waku_px_peers_cached, "number of peer exchange peer ENRs cached"
declarePublicCounter waku_px_errors, "number of peer exchange errors", ["type"] declarePublicCounter waku_px_errors, "number of peer exchange errors", ["type"]
declarePublicCounter waku_px_peers_sent, declarePublicCounter waku_px_peers_sent,
"number of ENRs sent to peer exchange requesters" "number of ENRs sent to peer exchange requesters"
@ -32,11 +31,9 @@ logScope:
type WakuPeerExchange* = ref object of LPProtocol type WakuPeerExchange* = ref object of LPProtocol
peerManager*: PeerManager peerManager*: PeerManager
enrCache*: seq[enr.Record]
cluster*: Option[uint16] cluster*: Option[uint16]
# todo: next step: ring buffer; future: implement cache satisfying https://rfc.vac.dev/spec/34/ # todo: next step: ring buffer; future: implement cache satisfying https://rfc.vac.dev/spec/34/
requestRateLimiter*: RequestRateLimiter requestRateLimiter*: RequestRateLimiter
pxLoopHandle*: Future[void]
proc respond( proc respond(
wpx: WakuPeerExchange, enrs: seq[enr.Record], conn: Connection wpx: WakuPeerExchange, enrs: seq[enr.Record], conn: Connection
@ -79,61 +76,50 @@ proc respondError(
return ok() return ok()
proc getEnrsFromCache( proc poolFilter*(
wpx: WakuPeerExchange, numPeers: uint64 cluster: Option[uint16], origin: PeerOrigin, enr: enr.Record
): seq[enr.Record] {.gcsafe.} = ): Result[void, string] =
if wpx.enrCache.len() == 0: if origin != Discv5:
info "peer exchange ENR cache is empty" trace "peer not from discv5", origin = $origin
return @[] return err("peer not from discv5: " & $origin)
if cluster.isSome() and enr.isClusterMismatched(cluster.get()):
# copy and shuffle trace "peer has mismatching cluster"
randomize() return err("peer has mismatching cluster")
var shuffledCache = wpx.enrCache return ok()
shuffledCache.shuffle()
# return numPeers or less if cache is smaller
return shuffledCache[0 ..< min(shuffledCache.len.int, numPeers.int)]
proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool =
if peer.origin != Discv5:
trace "peer not from discv5", peer = $peer, origin = $peer.origin
return false
proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): Result[void, string] =
if peer.enr.isNone(): if peer.enr.isNone():
info "peer has no ENR", peer = $peer info "peer has no ENR", peer = $peer
return false return err("peer has no ENR: " & $peer)
return poolFilter(cluster, peer.origin, peer.enr.get())
if cluster.isSome() and peer.enr.get().isClusterMismatched(cluster.get()): proc getEnrsFromStore(
info "peer has mismatching cluster", peer = $peer wpx: WakuPeerExchange, numPeers: uint64
return false ): seq[enr.Record] {.gcsafe.} =
# Reservoir sampling (Algorithm R)
return true var i = 0
let k = min(MaxPeersCacheSize, numPeers.int)
proc populateEnrCache(wpx: WakuPeerExchange) = let enrStoreLen = wpx.peerManager.switch.peerStore[ENRBook].len
# share only peers that i) are reachable ii) come from discv5 iii) share cluster var enrs = newSeqOfCap[enr.Record](min(k, enrStoreLen))
let withEnr = wpx.peerManager.switch.peerStore.getReachablePeers().filterIt( wpx.peerManager.switch.peerStore.forEnrPeers(
poolFilter(wpx.cluster, it) peerId, peerConnectedness, peerOrigin, peerEnrRecord
) ):
if peerConnectedness == CannotConnect:
# either what we have or max cache size debug "Could not retrieve ENR because cannot connect to peer",
var newEnrCache = newSeq[enr.Record](0) remotePeerId = peerId
for i in 0 ..< min(withEnr.len, MaxPeersCacheSize): continue
newEnrCache.add(withEnr[i].enr.get()) poolFilter(wpx.cluster, peerOrigin, peerEnrRecord).isOkOr:
debug "Could not get ENR because no peer matched pool", error = error
# swap cache for new continue
wpx.enrCache = newEnrCache if i < k:
trace "ENR cache populated" enrs.add(peerEnrRecord)
else:
proc updatePxEnrCache(wpx: WakuPeerExchange) {.async.} = # Add some randomness
# try more aggressively to fill the cache at startup let j = rand(i)
var attempts = 50 if j < k:
while wpx.enrCache.len < MaxPeersCacheSize and attempts > 0: enrs[j] = peerEnrRecord
attempts -= 1 inc(i)
wpx.populateEnrCache() return enrs
await sleepAsync(1.seconds)
heartbeat "Updating px enr cache", CacheRefreshInterval:
wpx.populateEnrCache()
proc initProtocolHandler(wpx: WakuPeerExchange) = proc initProtocolHandler(wpx: WakuPeerExchange) =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} = proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
@ -174,7 +160,8 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
error "Failed to respond with BAD_REQUEST:", error = $error error "Failed to respond with BAD_REQUEST:", error = $error
return return
let enrs = wpx.getEnrsFromCache(decBuf.request.numPeers) let enrs = wpx.getEnrsFromStore(decBuf.request.numPeers)
info "peer exchange request received" info "peer exchange request received"
trace "px enrs to respond", enrs = $enrs trace "px enrs to respond", enrs = $enrs
try: try:
@ -214,5 +201,4 @@ proc new*(
) )
wpx.initProtocolHandler() wpx.initProtocolHandler()
setServiceLimitMetric(WakuPeerExchangeCodec, rateLimitSetting) setServiceLimitMetric(WakuPeerExchangeCodec, rateLimitSetting)
asyncSpawn wpx.updatePxEnrCache()
return wpx return wpx

View File

@ -229,9 +229,20 @@ method register*(
var gasPrice: int var gasPrice: int
g.retryWrapper(gasPrice, "Failed to get gas price"): g.retryWrapper(gasPrice, "Failed to get gas price"):
int(await ethRpc.provider.eth_gasPrice()) * 2 let fetchedGasPrice = uint64(await ethRpc.provider.eth_gasPrice())
## Multiply by 2 to speed up the transaction
## Check for overflow when casting to int
if fetchedGasPrice > uint64(high(int) div 2):
warn "Gas price overflow detected, capping at maximum int value",
fetchedGasPrice = fetchedGasPrice, maxInt = high(int)
high(int)
else:
let calculatedGasPrice = int(fetchedGasPrice) * 2
debug "Gas price calculated",
fetchedGasPrice = fetchedGasPrice, gasPrice = calculatedGasPrice
calculatedGasPrice
let idCommitmentHex = identityCredential.idCommitment.inHex() let idCommitmentHex = identityCredential.idCommitment.inHex()
info "identityCredential idCommitmentHex", idCommitment = idCommitmentHex debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
let idCommitment = identityCredential.idCommitment.toUInt256() let idCommitment = identityCredential.idCommitment.toUInt256()
let idCommitmentsToErase: seq[UInt256] = @[] let idCommitmentsToErase: seq[UInt256] = @[]
info "registering the member", info "registering the member",
@ -248,11 +259,10 @@ method register*(
var tsReceipt: ReceiptObject var tsReceipt: ReceiptObject
g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"): g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
await ethRpc.getMinedTransactionReceipt(txHash) await ethRpc.getMinedTransactionReceipt(txHash)
info "registration transaction mined", txHash = txHash debug "registration transaction mined", txHash = txHash
g.registrationTxHash = some(txHash) g.registrationTxHash = some(txHash)
# the receipt topic holds the hash of signature of the raised events # the receipt topic holds the hash of signature of the raised events
# TODO: make this robust. search within the event list for the event debug "ts receipt", receipt = tsReceipt[]
info "ts receipt", receipt = tsReceipt[]
if tsReceipt.status.isNone(): if tsReceipt.status.isNone():
raise newException(ValueError, "Transaction failed: status is None") raise newException(ValueError, "Transaction failed: status is None")
@ -261,18 +271,27 @@ method register*(
ValueError, "Transaction failed with status: " & $tsReceipt.status.get() ValueError, "Transaction failed with status: " & $tsReceipt.status.get()
) )
## Extract MembershipRegistered event from transaction logs (third event) ## Search through all transaction logs to find the MembershipRegistered event
let thirdTopic = tsReceipt.logs[2].topics[0] let expectedEventSignature = cast[FixedBytes[32]](keccak.keccak256.digest(
info "third topic", thirdTopic = thirdTopic "MembershipRegistered(uint256,uint256,uint32)"
if thirdTopic != ).data)
cast[FixedBytes[32]](keccak.keccak256.digest(
"MembershipRegistered(uint256,uint256,uint32)"
).data):
raise newException(ValueError, "register: unexpected event signature")
## Parse MembershipRegistered event data: rateCommitment(256) || membershipRateLimit(256) || index(32) var membershipRegisteredLog: Option[LogObject]
let arguments = tsReceipt.logs[2].data for log in tsReceipt.logs:
info "tx log data", arguments = arguments if log.topics.len > 0 and log.topics[0] == expectedEventSignature:
membershipRegisteredLog = some(log)
break
if membershipRegisteredLog.isNone():
raise newException(
ValueError, "register: MembershipRegistered event not found in transaction logs"
)
let registrationLog = membershipRegisteredLog.get()
## Parse MembershipRegistered event data: idCommitment(256) || membershipRateLimit(256) || index(32)
let arguments = registrationLog.data
trace "registration transaction log data", arguments = arguments
let let
## Extract membership index from transaction log data (big endian) ## Extract membership index from transaction log data (big endian)
membershipIndex = UInt256.fromBytesBE(arguments[64 .. 95]) membershipIndex = UInt256.fromBytesBE(arguments[64 .. 95])