Merge remote-tracking branch 'origin/master' into feat/ios

This commit is contained in:
pablo 2025-12-22 14:50:08 +02:00
commit 4aa9a08d4a
No known key found for this signature in database
GPG Key ID: 78F35FCC60FDC63A
69 changed files with 3282 additions and 3250 deletions

View File

@ -76,6 +76,9 @@ jobs:
.git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Make update
run: make update
- name: Build binaries
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
@ -94,7 +97,7 @@ jobs:
matrix:
os: [ubuntu-22.04, macos-15]
runs-on: ${{ matrix.os }}
timeout-minutes: 90
timeout-minutes: 45
name: test-${{ matrix.os }}
steps:
@ -114,6 +117,9 @@ jobs:
.git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Make update
run: make update
- name: Run tests
run: |
postgres_enabled=0
@ -132,12 +138,12 @@ jobs:
build-docker-image:
needs: changes
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }}
uses: logos-messaging/nwaku/.github/workflows/container-image.yml@master
uses: logos-messaging/logos-messaging-nim/.github/workflows/container-image.yml@10dc3d3eb4b6a3d4313f7b2cc4a85a925e9ce039
secrets: inherit
nwaku-nwaku-interop-tests:
needs: build-docker-image
uses: logos-messaging/waku-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1
uses: logos-messaging/logos-messaging-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_STABLE
with:
node_nwaku: ${{ needs.build-docker-image.outputs.image }}

View File

@ -41,7 +41,7 @@ jobs:
env:
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
QUAY_USER: ${{ secrets.QUAY_USER }}
- name: Checkout code
if: ${{ steps.secrets.outcome == 'success' }}
uses: actions/checkout@v4
@ -65,6 +65,7 @@ jobs:
id: build
if: ${{ steps.secrets.outcome == 'success' }}
run: |
make update
make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" wakunode2

View File

@ -41,25 +41,84 @@ jobs:
.git/modules
key: ${{ runner.os }}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }}
- name: prep variables
- name: Get tag
id: version
run: |
# Use full tag, e.g., v0.37.0
echo "version=${GITHUB_REF_NAME}" >> $GITHUB_OUTPUT
- name: Prep variables
id: vars
run: |
NWAKU_ARTIFACT_NAME=$(echo "nwaku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
VERSION=${{ steps.version.outputs.version }}
echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
NWAKU_ARTIFACT_NAME=$(echo "waku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
echo "waku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
- name: Install dependencies
if [[ "${{ runner.os }}" == "Linux" ]]; then
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-${{runner.os}}-linux.deb" | tr "[:upper:]" "[:lower:]")
fi
if [[ "${{ runner.os }}" == "macOS" ]]; then
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-macos.tar.gz" | tr "[:upper:]" "[:lower:]")
fi
echo "libwaku=${LIBWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
- name: Install build dependencies
run: |
if [[ "${{ runner.os }}" == "Linux" ]]; then
sudo apt-get update && sudo apt-get install -y build-essential dpkg-dev
fi
- name: Build Waku artifacts
run: |
OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux")
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2
tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/
tar -cvzf ${{steps.vars.outputs.waku}} ./build/
- name: Upload asset
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false libwaku
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false STATIC=1 libwaku
- name: Create distributable libwaku package
run: |
VERSION=${{ steps.version.outputs.version }}
if [[ "${{ runner.os }}" == "Linux" ]]; then
rm -rf pkg
mkdir -p pkg/DEBIAN pkg/usr/local/lib pkg/usr/local/include
cp build/libwaku.so pkg/usr/local/lib/
cp build/libwaku.a pkg/usr/local/lib/
cp library/libwaku.h pkg/usr/local/include/
echo "Package: waku" >> pkg/DEBIAN/control
echo "Version: ${VERSION}" >> pkg/DEBIAN/control
echo "Priority: optional" >> pkg/DEBIAN/control
echo "Section: libs" >> pkg/DEBIAN/control
echo "Architecture: ${{matrix.arch}}" >> pkg/DEBIAN/control
echo "Maintainer: Waku Team <ivansete@status.im>" >> pkg/DEBIAN/control
echo "Description: Waku library" >> pkg/DEBIAN/control
dpkg-deb --build pkg ${{steps.vars.outputs.libwaku}}
fi
if [[ "${{ runner.os }}" == "macOS" ]]; then
tar -cvzf ${{steps.vars.outputs.libwaku}} ./build/libwaku.dylib ./build/libwaku.a ./library/libwaku.h
fi
- name: Upload waku artifact
uses: actions/upload-artifact@v4.4.0
with:
name: ${{steps.vars.outputs.nwaku}}
path: ${{steps.vars.outputs.nwaku}}
name: waku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
path: ${{ steps.vars.outputs.waku }}
if-no-files-found: error
- name: Upload libwaku artifact
uses: actions/upload-artifact@v4.4.0
with:
name: libwaku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
path: ${{ steps.vars.outputs.libwaku }}
if-no-files-found: error

5
.gitmodules vendored
View File

@ -184,3 +184,8 @@
url = https://github.com/logos-messaging/waku-rlnv2-contract.git
ignore = untracked
branch = master
[submodule "vendor/nim-ffi"]
path = vendor/nim-ffi
url = https://github.com/logos-messaging/nim-ffi/
ignore = untracked
branch = master

509
AGENTS.md Normal file
View File

@ -0,0 +1,509 @@
# AGENTS.md - AI Coding Context
This file provides essential context for LLMs assisting with Logos Messaging development.
## Project Identity
Logos Messaging is designed as a shared public network for generalized messaging, not application-specific infrastructure.
This project is a Nim implementation of a libp2p protocol suite for private, censorship-resistant P2P messaging. It targets resource-restricted devices and privacy-preserving communication.
Logos Messaging was formerly known as Waku. Waku-related terminology remains within the codebase for historical reasons.
### Design Philosophy
Key architectural decisions:
Resource-restricted first: Protocols differentiate between full nodes (relay) and light clients (filter, lightpush, store). Light clients can participate without maintaining full message history or relay capabilities. This explains the client/server split in protocol implementations.
Privacy through unlinkability: RLN (Rate Limiting Nullifier) provides DoS protection while preserving sender anonymity. Messages are routed through pubsub topics with automatic sharding across 8 shards. Code prioritizes metadata privacy alongside content encryption.
Scalability via sharding: The network uses automatic content-topic-based sharding to distribute traffic. This is why you'll see sharding logic throughout the codebase and why pubsub topic selection is protocol-level, not application-level.
See [documentation](https://docs.waku.org/learn/) for architectural details.
### Core Protocols
- Relay: Pub/sub message routing using GossipSub
- Store: Historical message retrieval and persistence
- Filter: Lightweight message filtering for resource-restricted clients
- Lightpush: Lightweight message publishing for clients
- Peer Exchange: Peer discovery mechanism
- RLN Relay: Rate limiting nullifier for spam protection
- Metadata: Cluster and shard metadata exchange between peers
- Mix: Mixnet protocol for enhanced privacy through onion routing
- Rendezvous: Alternative peer discovery mechanism
### Key Terminology
- ENR (Ethereum Node Record): Node identity and capability advertisement
- Multiaddr: libp2p addressing format (e.g., `/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2...`)
- PubsubTopic: Gossipsub topic for message routing (e.g., `/waku/2/default-waku/proto`)
- ContentTopic: Application-level message categorization (e.g., `/my-app/1/chat/proto`)
- Sharding: Partitioning network traffic across topics (static or auto-sharding)
- RLN (Rate Limiting Nullifier): Zero-knowledge proof system for spam prevention
### Specifications
All specs are at [rfc.vac.dev/waku](https://rfc.vac.dev/waku). RFCs use `WAKU2-XXX` format (not legacy `WAKU-XXX`).
## Architecture
### Protocol Module Pattern
Each protocol typically follows this structure:
```
waku_<protocol>/
├── protocol.nim # Main protocol type and handler logic
├── client.nim # Client-side API
├── rpc.nim # RPC message types
├── rpc_codec.nim # Protobuf encoding/decoding
├── common.nim # Shared types and constants
└── protocol_metrics.nim # Prometheus metrics
```
### WakuNode Architecture
- WakuNode (`waku/node/waku_node.nim`) is the central orchestrator
- Protocols are "mounted" onto the node's switch (libp2p component)
- PeerManager handles peer selection and connection management
- Switch provides libp2p transport, security, and multiplexing
Example protocol type definition:
```nim
type WakuFilter* = ref object of LPProtocol
subscriptions*: FilterSubscriptions
peerManager: PeerManager
messageCache: TimedCache[string]
```
## Development Essentials
### Build Requirements
- Nim 2.x (check `waku.nimble` for minimum version)
- Rust toolchain (required for RLN dependencies)
- Build system: Make with nimbus-build-system
### Build System
The project uses Makefile with nimbus-build-system (Status's Nim build framework):
```bash
# Initial build (updates submodules)
make wakunode2
# After git pull, update submodules
make update
# Build with custom flags
make wakunode2 NIMFLAGS="-d:chronicles_log_level=DEBUG"
```
Note: The build system uses `--mm:refc` memory management (automatically enforced). Only relevant if compiling outside the standard build system.
### Common Make Targets
```bash
make wakunode2 # Build main node binary
make test # Run all tests
make testcommon # Run common tests only
make libwakuStatic # Build static C library
make chat2 # Build chat example
make install-nph # Install git hook for auto-formatting
```
### Testing
```bash
# Run all tests
make test
# Run specific test file
make test tests/test_waku_enr.nim
# Run specific test case from file
make test tests/test_waku_enr.nim "check capabilities support"
# Build and run test separately (for development iteration)
make test tests/test_waku_enr.nim
```
Test structure uses `testutils/unittests`:
```nim
import testutils/unittests
suite "Waku ENR - Capabilities":
test "check capabilities support":
## Given
let bitfield: CapabilitiesBitfield = 0b0000_1101u8
## Then
check:
bitfield.supportsCapability(Capabilities.Relay)
not bitfield.supportsCapability(Capabilities.Store)
```
### Code Formatting
Mandatory: All code must be formatted with `nph` (vendored in `vendor/nph`)
```bash
# Format specific file
make nph/waku/waku_core.nim
# Install git pre-commit hook (auto-formats on commit)
make install-nph
```
The nph formatter handles all formatting details automatically, especially with the pre-commit hook installed. Focus on semantic correctness.
### Logging
Uses `chronicles` library with compile-time configuration:
```nim
import chronicles
logScope:
topics = "waku lightpush"
info "handling request", peerId = peerId, topic = pubsubTopic
error "request failed", error = msg
```
Compile with log level:
```bash
nim c -d:chronicles_log_level=TRACE myfile.nim
```
## Code Conventions
Common pitfalls to avoid:
- Always handle Result types explicitly
- Avoid global mutable state: Pass state through parameters
- Keep functions focused: Under 50 lines when possible
- Prefer compile-time checks (`static assert`) over runtime checks
### Naming
- Files/Directories: `snake_case` (e.g., `waku_lightpush`, `peer_manager`)
- Procedures: `camelCase` (e.g., `handleRequest`, `pushMessage`)
- Types: `PascalCase` (e.g., `WakuFilter`, `PubsubTopic`)
- Constants: `PascalCase` (e.g., `MaxContentTopicsPerRequest`)
- Constructors: `func init(T: type Xxx, params): T`
- For ref types: `func new(T: type Xxx, params): ref T`
- Exceptions: `XxxError` for CatchableError, `XxxDefect` for Defect
- ref object types: `XxxRef` suffix
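A minimal sketch of the constructor and error-type conventions above; the type names are illustrative and not taken from the codebase:
```nim
type
  RetentionPolicy = object
    maxDays: int

  RetentionPolicyError* = object of CatchableError # CatchableError -> XxxError

# Value types follow the `init` convention
func init(T: type RetentionPolicy, maxDays: int): T =
  T(maxDays: maxDays)

# Ref-typed results follow the `new` convention
func new(T: type RetentionPolicy, maxDays: int): ref T =
  (ref T)(maxDays: maxDays)

let policy = RetentionPolicy.init(maxDays = 30)
```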
### Imports Organization
Group imports: stdlib, external libs, internal modules:
```nim
import
std/[options, sequtils], # stdlib
results, chronicles, chronos, # external
libp2p/peerid
import
../node/peer_manager, # internal (separate import block)
../waku_core,
./common
```
### Async Programming
Uses chronos, not stdlib `asyncdispatch`:
```nim
proc handleRequest(
wl: WakuLightPush, peerId: PeerId
): Future[WakuLightPushResult] {.async.} =
let res = await wl.pushHandler(peerId, pubsubTopic, message)
return res
```
### Error Handling
The project uses both Result types and exceptions:
Result types from nim-results are used for protocol and API-level errors:
```nim
proc subscribe(
wf: WakuFilter, peerId: PeerID
): Future[FilterSubscribeResult] {.async.} =
if contentTopics.len > MaxContentTopicsPerRequest:
return err(FilterSubscribeError.badRequest("exceeds maximum"))
# Handle Result with isOkOr
(await wf.subscriptions.addSubscription(peerId, criteria)).isOkOr:
return err(FilterSubscribeError.serviceUnavailable(error))
ok()
```
Exceptions still used for:
- chronos async failures (CancelledError, etc.)
- Database/system errors
- Library interop
Most files start with `{.push raises: [].}`, which declares that the procs below raise no exceptions, so any exception-raising calls are wrapped in try/except blocks where needed.
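A minimal sketch of that pattern, assuming only the `results` dependency listed later in this file; the proc and error message are illustrative:
```nim
{.push raises: [].} # procs below are declared to raise nothing

import std/strutils
import results

proc parsePort(s: string): Result[int, string] =
  ## Isolate the exception-raising stdlib call and convert it to a Result.
  try:
    return ok(parseInt(s))
  except ValueError:
    return err("invalid port: " & s)
```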
### Pragma Usage
```nim
{.push raises: [].} # Declare that procs below raise no exceptions (at file top)
proc myProc(): Result[T, E] {.async.} = # Async proc
```
### Protocol Inheritance
Protocols inherit from libp2p's `LPProtocol`:
```nim
type WakuLightPush* = ref object of LPProtocol
rng*: ref rand.HmacDrbgContext
peerManager*: PeerManager
pushHandler*: PushMessageHandler
```
### Type Visibility
- Public exports use `*` suffix: `type WakuFilter* = ...`
- Fields without `*` are module-private
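A small, hypothetical illustration of the visibility rules above:
```nim
type PeerScoreBook* = object # exported type: part of the module's public API
  threshold*: float          # exported field, visible to importing modules
  scores: seq[float]         # no `*`: private to this module
```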
## Style Guide Essentials
This section summarizes key Nim style guidelines relevant to this project. Full guide: https://status-im.github.io/nim-style-guide/
### Language Features
Import and Export
- Use explicit import paths with std/ prefix for stdlib
- Group imports: stdlib, external, internal (separate blocks)
- Export modules whose types appear in public API
- Avoid include
Macros and Templates
- Avoid macros and templates - prefer simple constructs
- Avoid generating public API with macros
- Put logic in templates, use macros only for glue code
Object Construction
- Prefer Type(field: value) syntax
- Use Type.init(params) convention for constructors
- Default zero-initialization should be valid state
- Avoid using result variable for construction
ref object Types
- Avoid ref object unless needed for:
- Resource handles requiring reference semantics
- Shared ownership
- Reference-based data structures (trees, lists)
- Stable pointer for FFI
- Use explicit ref MyType where possible
- Name ref object types with Ref suffix: XxxRef
Memory Management
- Prefer stack-based and statically sized types in core code
- Use heap allocation in glue layers
- Avoid alloca
- For FFI: use create/dealloc or createShared/deallocShared
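A minimal sketch of the shared-heap guidance above for FFI, using only stdlib `createShared`/`allocShared0`/`deallocShared`; the wrapper type is illustrative:
```nim
type Buffer = object
  len: int
  data: ptr UncheckedArray[byte]

proc newBuffer(size: int): ptr Buffer =
  ## Allocation that can be handed across the FFI boundary (not GC-managed).
  let buf = createShared(Buffer)
  buf.len = size
  buf.data = cast[ptr UncheckedArray[byte]](allocShared0(size))
  return buf

proc freeBuffer(buf: ptr Buffer) =
  ## Every newBuffer call must be paired with freeBuffer.
  if buf != nil:
    if buf.data != nil:
      deallocShared(buf.data)
    deallocShared(buf)
```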
Variable Usage
- Use most restrictive of const, let, var (prefer const over let over var)
- Prefer expressions for initialization over var then assignment
- Avoid result variable - use explicit return or expression-based returns
Functions
- Prefer func over proc
- Avoid public (*) symbols not part of intended API
- Prefer openArray over seq for function parameters
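A small example of the two preferences above (`func` over `proc`, `openArray` parameters); the checksum itself is illustrative:
```nim
# `func` guarantees no side effects; openArray accepts seq and array arguments.
func xorChecksum(data: openArray[byte]): byte =
  var acc: byte = 0
  for b in data:
    acc = acc xor b
  return acc

let digest = xorChecksum(@[0x01'u8, 0x02, 0x03])
```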
Methods (runtime polymorphism)
- Avoid method keyword for dynamic dispatch
- Prefer manual vtable with proc closures for polymorphism
- Methods lack support for generics
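A minimal sketch of the manual-vtable alternative mentioned above; the `Transport` type and closures are illustrative:
```nim
type Transport = object
  ## Manual "vtable": closure fields instead of `method`-based dispatch.
  send: proc(data: seq[byte]) {.raises: [], gcsafe.}
  close: proc() {.raises: [], gcsafe.}

proc newLoggingTransport(): Transport =
  Transport(
    send: proc(data: seq[byte]) = echo("sending ", data.len, " bytes"),
    close: proc() = echo("transport closed")
  )

let t = newLoggingTransport()
t.send(@[1'u8, 2, 3])
```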
Miscellaneous
- Annotate callback proc types with {.raises: [], gcsafe.}
- Avoid explicit {.inline.} pragma
- Avoid converters
- Avoid finalizers
Type Guidelines
Binary Data
- Use byte for binary data
- Use seq[byte] for dynamic arrays
- Convert string to seq[byte] early if stdlib returns binary as string
Integers
- Prefer signed (int, int64) for counting, lengths, indexing
- Use unsigned with explicit size (uint8, uint64) for binary data, bit ops
- Avoid Natural
- Check ranges before converting to int
- Avoid casting pointers to int
- Avoid range types
Strings
- Use string for text
- Use seq[byte] for binary data instead of string
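A minimal example of converting early, as recommended above; real code may use a vendored helper (e.g. stew's `toBytes`) instead of this hand-rolled loop:
```nim
func toBytes(s: string): seq[byte] =
  ## Move binary data out of the string type as soon as it is received.
  var buf = newSeq[byte](s.len)
  for i, c in s:
    buf[i] = byte(c)
  return buf

let raw = toBytes("\x01\x02\xff") # binary blob that arrived typed as string
```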
### Error Handling
Philosophy
- Prefer Result, Opt for explicit error handling
- Use Exceptions only for legacy code compatibility
Result Types
- Use Result[T, E] for operations that can fail
- Use cstring for simple error messages: Result[T, cstring]
- Use enum for errors needing differentiation: Result[T, SomeErrorEnum]
- Use Opt[T] for simple optional values
- Annotate all modules: {.push raises: [].} at top
Exceptions (when unavoidable)
- Inherit from CatchableError, name XxxError
- Use Defect for panics/logic errors, name XxxDefect
- Annotate functions explicitly: {.raises: [SpecificError].}
- Catch specific error types, avoid catching CatchableError
- Use expression-based try blocks
- Isolate legacy exception code with try/except, convert to Result
Common Defect Sources
- Overflow in signed arithmetic
- Array/seq indexing with []
- Implicit range type conversions
Status Codes
- Avoid status code pattern
- Use Result instead
### Library Usage
Standard Library
- Use judiciously, prefer focused packages
- Prefer these replacements:
- async: chronos
- bitops: stew/bitops2
- endians: stew/endians2
- exceptions: results
- io: stew/io2
Results Library
- Use cstring errors for diagnostics without differentiation
- Use enum errors when caller needs to act on specific errors
- Use complex types when additional error context needed
- Use isOkOr pattern for chaining
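A minimal sketch of the enum-error guidance above, using the basic `results` API (`ok`/`err`/`isOk`/`error`); the error enum and query proc are illustrative:
```nim
import results

type StoreQueryError = enum
  sqeTooManyResults
  sqePeerDialFailure

proc runQuery(maxResults: int): Result[seq[string], StoreQueryError] =
  if maxResults > 100:
    return err(sqeTooManyResults)
  return ok(@["msg-1", "msg-2"])

let res = runQuery(maxResults = 10)
if res.isOk:
  echo "got ", res.get().len, " messages"
else:
  case res.error # enum errors let the caller branch on the exact failure
  of sqeTooManyResults: echo "narrow the query"
  of sqePeerDialFailure: echo "retry with another service peer"
```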
Wrappers (C/FFI)
- Prefer native Nim when available
- For C libraries: use {.compile.} to build from source
- Create xxx_abi.nim for raw ABI wrapper
- Avoid C++ libraries
Miscellaneous
- Print hex output in lowercase, accept both cases
### Common Pitfalls
- Defects lack tracking by {.raises.}
- nil ref causes runtime crashes
- result variable disables branch checking (see the sketch after this list)
- Exception hierarchy unclear between Nim versions
- Range types have compiler bugs
- Finalizers infect all instances of type
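A tiny illustration of the `result`-variable pitfall flagged above:
```nim
# With `result`, a forgotten branch silently returns the default value ("").
proc parity(n: int): string =
  if n mod 2 == 0:
    result = "even"
  # odd case forgotten: callers get "" at runtime

# Expression-based return: dropping the `else` would be a compile-time error.
proc parity2(n: int): string =
  if n mod 2 == 0: "even" else: "odd"

echo parity(3)  # ""
echo parity2(3) # "odd"
```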
## Common Workflows
### Adding a New Protocol
1. Create directory: `waku/waku_myprotocol/`
2. Define core files:
- `rpc.nim` - Message types
- `rpc_codec.nim` - Protobuf encoding
- `protocol.nim` - Protocol handler
- `client.nim` - Client API
- `common.nim` - Shared types
3. Define protocol type in `protocol.nim`:
```nim
type WakuMyProtocol* = ref object of LPProtocol
peerManager: PeerManager
# ... fields
```
4. Implement request handler
5. Mount in WakuNode (`waku/node/waku_node.nim`)
6. Add tests in `tests/waku_myprotocol/`
7. Export module via `waku/waku_myprotocol.nim`
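A minimal sketch of the re-export module in step 7, assuming the file layout from steps 1-2:
```nim
# waku/waku_myprotocol.nim (hypothetical): single import point for the protocol
import
  ./waku_myprotocol/common,
  ./waku_myprotocol/rpc,
  ./waku_myprotocol/rpc_codec,
  ./waku_myprotocol/protocol,
  ./waku_myprotocol/client

export common, rpc, rpc_codec, protocol, client
```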
### Adding a REST API Endpoint
1. Define handler in `waku/rest_api/endpoint/myprotocol/`
2. Implement endpoint following pattern:
```nim
proc installMyProtocolApiHandlers*(
router: var RestRouter, node: WakuNode
) =
router.api(MethodGet, "/waku/v2/myprotocol/endpoint") do () -> RestApiResponse:
# Implementation
return RestApiResponse.jsonResponse(data, status = Http200)
```
3. Register in `waku/rest_api/handlers.nim`
### Adding Database Migration
For message_store (SQLite):
1. Create `migrations/message_store/NNNNN_description.up.sql`
2. Create corresponding `.down.sql` for rollback
3. Increment version number sequentially
4. Test migration locally before committing
For PostgreSQL: add in `migrations/message_store_postgres/`
### Running Single Test During Development
```bash
# Build test binary
make test tests/waku_filter_v2/test_waku_client.nim
# Binary location
./build/tests/waku_filter_v2/test_waku_client.nim.bin
# Or combine
make test tests/waku_filter_v2/test_waku_client.nim "specific test name"
```
### Debugging with Chronicles
Set log level and filter topics:
```bash
nim c -r \
-d:chronicles_log_level=TRACE \
-d:chronicles_disabled_topics="eth,dnsdisc" \
tests/mytest.nim
```
## Key Constraints
### Vendor Directory
- Never edit files directly in vendor - its contents come from git submodules
- Always run `make update` after pulling changes
- Managed by `nimbus-build-system`
### Chronicles Performance
- Log levels are configured at compile time for performance
- Runtime filtering is available but should be used sparingly: `-d:chronicles_runtime_filtering=on`
- Default sinks are optimized for production
### Memory Management
- Uses `refc` (reference counting with cycle collection)
- Automatically enforced by the build system (hardcoded in `waku.nimble`)
- Do not override unless absolutely necessary, as it breaks compatibility
### RLN Dependencies
- RLN code requires a Rust toolchain, which is why some modules depend on the Rust-built `librln` library
- Pre-built `librln` libraries are checked into the repository
## Quick Reference
Language: Nim 2.x | License: MIT or Apache 2.0
### Important Files
- `Makefile` - Primary build interface
- `waku.nimble` - Package definition and build tasks (called via nimbus-build-system)
- `vendor/nimbus-build-system/` - Status's build framework
- `waku/node/waku_node.nim` - Core node implementation
- `apps/wakunode2/wakunode2.nim` - Main CLI application
- `waku/factory/waku_conf.nim` - Configuration types
- `library/libwaku.nim` - C bindings entry point
### Testing Entry Points
- `tests/all_tests_waku.nim` - All Waku protocol tests
- `tests/all_tests_wakunode2.nim` - Node application tests
- `tests/all_tests_common.nim` - Common utilities tests
### Key Dependencies
- `chronos` - Async framework
- `nim-results` - Result type for error handling
- `chronicles` - Logging
- `libp2p` - P2P networking
- `confutils` - CLI argument parsing
- `presto` - REST server
- `nimcrypto` - Cryptographic primitives
Note: For specific version requirements, check `waku.nimble`.

View File

@ -1,4 +1,10 @@
## v0.37.0 (2025-10-01)
## v0.37.1-beta (2025-12-10)
### Bug Fixes
- Remove ENR cache from peer exchange ([#3652](https://github.com/logos-messaging/logos-messaging-nim/pull/3652)) ([7920368a](https://github.com/logos-messaging/logos-messaging-nim/commit/7920368a36687cd5f12afa52d59866792d8457ca))
## v0.37.0-beta (2025-10-01)
### Notes

View File

@ -119,6 +119,10 @@ endif
##################
.PHONY: deps libbacktrace
FOUNDRY_VERSION := 1.5.0
PNPM_VERSION := 10.23.0
rustup:
ifeq (, $(shell which cargo))
# Install Rustup if it's not installed
@ -128,7 +132,7 @@ ifeq (, $(shell which cargo))
endif
rln-deps: rustup
./scripts/install_rln_tests_dependencies.sh
./scripts/install_rln_tests_dependencies.sh $(FOUNDRY_VERSION) $(PNPM_VERSION)
deps: | deps-common nat-libs waku.nims
@ -426,18 +430,27 @@ docker-liteprotocoltester-push:
.PHONY: cbindings cwaku_example libwaku
STATIC ?= 0
BUILD_COMMAND ?= libwakuDynamic
ifeq ($(detected_OS),Windows)
LIB_EXT_DYNAMIC = dll
LIB_EXT_STATIC = lib
else ifeq ($(detected_OS),Darwin)
LIB_EXT_DYNAMIC = dylib
LIB_EXT_STATIC = a
else ifeq ($(detected_OS),Linux)
LIB_EXT_DYNAMIC = so
LIB_EXT_STATIC = a
endif
LIB_EXT := $(LIB_EXT_DYNAMIC)
ifeq ($(STATIC), 1)
LIB_EXT = $(LIB_EXT_STATIC)
BUILD_COMMAND = libwakuStatic
endif
libwaku: | build deps librln
rm -f build/libwaku*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
else ifeq ($(detected_OS),Windows)
make -f scripts/libwaku_windows_setup.mk windows-setup
echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
endif
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
#####################
## Mobile Bindings ##
@ -594,4 +607,3 @@ release-notes:
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g'
# I could not get the tool to replace issue ids with links, so using sed for now,
# asked here: https://github.com/bvieira/sv4git/discussions/101

View File

@ -480,7 +480,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
if conf.lightpushnode != "":
let peerInfo = parsePeerInfo(conf.lightpushnode)
if peerInfo.isOk():
await mountLegacyLightPush(node)
(await node.mountLegacyLightPush()).isOkOr:
error "failed to mount legacy lightpush", error = error
quit(QuitFailure)
node.mountLegacyLightPushClient()
node.peerManager.addServicePeer(peerInfo.value, WakuLightpushCodec)
else:

View File

@ -19,283 +19,309 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
int callback_executed = 0;
void waitForCallback() {
pthread_mutex_lock(&mutex);
while (!callback_executed) {
pthread_cond_wait(&cond, &mutex);
}
callback_executed = 0;
pthread_mutex_unlock(&mutex);
void waitForCallback()
{
pthread_mutex_lock(&mutex);
while (!callback_executed)
{
pthread_cond_wait(&cond, &mutex);
}
callback_executed = 0;
pthread_mutex_unlock(&mutex);
}
#define WAKU_CALL(call) \
do { \
int ret = call; \
if (ret != 0) { \
printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \
exit(1); \
} \
waitForCallback(); \
} while (0)
#define WAKU_CALL(call) \
do \
{ \
int ret = call; \
if (ret != 0) \
{ \
printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \
exit(1); \
} \
waitForCallback(); \
} while (0)
struct ConfigNode {
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
int store;
char storeNode[2048];
char storeRetentionPolicy[64];
char storeDbUrl[256];
int storeVacuum;
int storeDbMigration;
int storeMaxNumDbConnections;
struct ConfigNode
{
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
int store;
char storeNode[2048];
char storeRetentionPolicy[64];
char storeDbUrl[256];
int storeVacuum;
int storeDbMigration;
int storeMaxNumDbConnections;
};
// libwaku Context
void* ctx;
void *ctx;
// For the case of C language we don't need to store a particular userData
void* userData = NULL;
void *userData = NULL;
// Arguments parsing
static char doc[] = "\nC example that shows how to use the waku library.";
static char args_doc[] = "";
static struct argp_option options[] = {
{ "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{ "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{ "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{ "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{ "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
{"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
to. (default: \"\") e.g. \"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""},
{ 0 }
};
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
static error_t parse_opt(int key, char *arg, struct argp_state *state)
{
struct ConfigNode *cfgNode = state->input;
switch (key) {
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
argp_usage(state);
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
struct ConfigNode *cfgNode = state->input;
switch (key)
{
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
argp_usage(state);
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
return 0;
}
void signal_cond() {
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
void signal_cond()
{
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0};
void event_handler(int callerRet, const char* msg, size_t len, void* userData) {
if (callerRet == RET_ERR) {
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK) {
printf("Receiving event: %s\n", msg);
}
void event_handler(int callerRet, const char *msg, size_t len, void *userData)
{
if (callerRet == RET_ERR)
{
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK)
{
printf("Receiving event: %s\n", msg);
}
signal_cond();
signal_cond();
}
void on_event_received(int callerRet, const char* msg, size_t len, void* userData) {
if (callerRet == RET_ERR) {
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK) {
printf("Receiving event: %s\n", msg);
}
void on_event_received(int callerRet, const char *msg, size_t len, void *userData)
{
if (callerRet == RET_ERR)
{
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK)
{
printf("Receiving event: %s\n", msg);
}
}
char* contentTopic = NULL;
void handle_content_topic(int callerRet, const char* msg, size_t len, void* userData) {
if (contentTopic != NULL) {
free(contentTopic);
}
char *contentTopic = NULL;
void handle_content_topic(int callerRet, const char *msg, size_t len, void *userData)
{
if (contentTopic != NULL)
{
free(contentTopic);
}
contentTopic = malloc(len * sizeof(char) + 1);
strcpy(contentTopic, msg);
signal_cond();
contentTopic = malloc(len * sizeof(char) + 1);
strcpy(contentTopic, msg);
signal_cond();
}
char* publishResponse = NULL;
void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userData) {
printf("Publish Ok: %s %lu\n", msg, len);
char *publishResponse = NULL;
void handle_publish_ok(int callerRet, const char *msg, size_t len, void *userData)
{
printf("Publish Ok: %s %lu\n", msg, len);
if (publishResponse != NULL) {
free(publishResponse);
}
if (publishResponse != NULL)
{
free(publishResponse);
}
publishResponse = malloc(len * sizeof(char) + 1);
strcpy(publishResponse, msg);
publishResponse = malloc(len * sizeof(char) + 1);
strcpy(publishResponse, msg);
}
#define MAX_MSG_SIZE 65535
void publish_message(const char* msg) {
char jsonWakuMsg[MAX_MSG_SIZE];
char *msgPayload = b64_encode(msg, strlen(msg));
void publish_message(const char *msg)
{
char jsonWakuMsg[MAX_MSG_SIZE];
char *msgPayload = b64_encode(msg, strlen(msg));
WAKU_CALL( waku_content_topic(ctx,
"appName",
1,
"contentTopicName",
"encoding",
handle_content_topic,
userData) );
snprintf(jsonWakuMsg,
MAX_MSG_SIZE,
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload, contentTopic);
WAKU_CALL(waku_content_topic(ctx,
handle_content_topic,
userData,
"appName",
1,
"contentTopicName",
"encoding"));
snprintf(jsonWakuMsg,
MAX_MSG_SIZE,
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload, contentTopic);
free(msgPayload);
free(msgPayload);
WAKU_CALL( waku_relay_publish(ctx,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/,
event_handler,
userData) );
WAKU_CALL(waku_relay_publish(ctx,
event_handler,
userData,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/));
}
void show_help_and_exit() {
printf("Wrong parameters\n");
exit(1);
void show_help_and_exit()
{
printf("Wrong parameters\n");
exit(1);
}
void print_default_pubsub_topic(int callerRet, const char* msg, size_t len, void* userData) {
printf("Default pubsub topic: %s\n", msg);
signal_cond();
void print_default_pubsub_topic(int callerRet, const char *msg, size_t len, void *userData)
{
printf("Default pubsub topic: %s\n", msg);
signal_cond();
}
void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) {
printf("Git Version: %s\n", msg);
signal_cond();
void print_waku_version(int callerRet, const char *msg, size_t len, void *userData)
{
printf("Git Version: %s\n", msg);
signal_cond();
}
// Beginning of UI program logic
enum PROGRAM_STATE {
MAIN_MENU,
SUBSCRIBE_TOPIC_MENU,
CONNECT_TO_OTHER_NODE_MENU,
PUBLISH_MESSAGE_MENU
enum PROGRAM_STATE
{
MAIN_MENU,
SUBSCRIBE_TOPIC_MENU,
CONNECT_TO_OTHER_NODE_MENU,
PUBLISH_MESSAGE_MENU
};
enum PROGRAM_STATE current_state = MAIN_MENU;
void show_main_menu() {
printf("\nPlease, select an option:\n");
printf("\t1.) Subscribe to topic\n");
printf("\t2.) Connect to other node\n");
printf("\t3.) Publish a message\n");
void show_main_menu()
{
printf("\nPlease, select an option:\n");
printf("\t1.) Subscribe to topic\n");
printf("\t2.) Connect to other node\n");
printf("\t3.) Publish a message\n");
}
void handle_user_input() {
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
if (numRead <= 0) {
return;
}
void handle_user_input()
{
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
if (numRead <= 0)
{
return;
}
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
{
printf("Indicate the Pubsubtopic to subscribe:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
{
printf("Indicate the Pubsubtopic to subscribe:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
WAKU_CALL( waku_relay_subscribe(ctx,
pubsubTopic,
event_handler,
userData) );
printf("The subscription went well\n");
WAKU_CALL(waku_relay_subscribe(ctx,
event_handler,
userData,
pubsubTopic));
printf("The subscription went well\n");
show_main_menu();
}
show_main_menu();
}
break;
case CONNECT_TO_OTHER_NODE_MENU:
// printf("Connecting to a node. Please indicate the peer Multiaddress:\n");
// printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
// char peerAddr[512];
// scanf("%511s", peerAddr);
// WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
show_main_menu();
break;
case CONNECT_TO_OTHER_NODE_MENU:
printf("Connecting to a node. Please indicate the peer Multiaddress:\n");
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
show_main_menu();
case PUBLISH_MESSAGE_MENU:
{
printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
publish_message(msg);
show_main_menu();
}
break;
case MAIN_MENU:
break;
case PUBLISH_MESSAGE_MENU:
{
printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
publish_message(msg);
show_main_menu();
}
break;
case MAIN_MENU:
break;
}
}
}
// End of UI program logic
int main(int argc, char** argv) {
struct ConfigNode cfgNode;
// default values
snprintf(cfgNode.host, 128, "0.0.0.0");
cfgNode.port = 60000;
cfgNode.relay = 1;
int main(int argc, char **argv)
{
struct ConfigNode cfgNode;
// default values
snprintf(cfgNode.host, 128, "0.0.0.0");
cfgNode.port = 60000;
cfgNode.relay = 1;
cfgNode.store = 0;
snprintf(cfgNode.storeNode, 2048, "");
snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000");
snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres");
cfgNode.storeVacuum = 0;
cfgNode.storeDbMigration = 0;
cfgNode.storeMaxNumDbConnections = 30;
cfgNode.store = 0;
snprintf(cfgNode.storeNode, 2048, "");
snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000");
snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres");
cfgNode.storeVacuum = 0;
cfgNode.storeDbMigration = 0;
cfgNode.storeMaxNumDbConnections = 30;
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode)
== ARGP_ERR_UNKNOWN) {
show_help_and_exit();
}
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN)
{
show_help_and_exit();
}
char jsonConfig[5000];
snprintf(jsonConfig, 5000, "{ \
char jsonConfig[5000];
snprintf(jsonConfig, 5000, "{ \
\"clusterId\": 16, \
\"shards\": [ 1, 32, 64, 128, 256 ], \
\"numShardsInNetwork\": 257, \
@ -313,54 +339,56 @@ int main(int argc, char** argv) {
\"discv5UdpPort\": 9999, \
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
cfgNode.port,
cfgNode.relay ? "true":"false",
cfgNode.store ? "true":"false",
cfgNode.storeDbUrl,
cfgNode.storeRetentionPolicy,
cfgNode.storeMaxNumDbConnections);
}",
cfgNode.host,
cfgNode.port,
cfgNode.relay ? "true" : "false",
cfgNode.store ? "true" : "false",
cfgNode.storeDbUrl,
cfgNode.storeRetentionPolicy,
cfgNode.storeMaxNumDbConnections);
ctx = waku_new(jsonConfig, event_handler, userData);
waitForCallback();
ctx = waku_new(jsonConfig, event_handler, userData);
waitForCallback();
WAKU_CALL( waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData) );
WAKU_CALL( waku_version(ctx, print_waku_version, userData) );
WAKU_CALL(waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData));
WAKU_CALL(waku_version(ctx, print_waku_version, userData));
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES" : "NO");
waku_set_event_callback(ctx, on_event_received, userData);
set_event_callback(ctx, on_event_received, userData);
waku_start(ctx, event_handler, userData);
waitForCallback();
waku_start(ctx, event_handler, userData);
waitForCallback();
WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) );
WAKU_CALL(waku_listen_addresses(ctx, event_handler, userData));
WAKU_CALL( waku_relay_subscribe(ctx,
"/waku/2/rs/0/0",
event_handler,
userData) );
WAKU_CALL(waku_relay_subscribe(ctx,
event_handler,
userData,
"/waku/2/rs/16/32"));
WAKU_CALL( waku_discv5_update_bootnodes(ctx,
"[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]",
event_handler,
userData) );
WAKU_CALL(waku_discv5_update_bootnodes(ctx,
event_handler,
userData,
"[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]"));
WAKU_CALL( waku_get_peerids_from_peerstore(ctx,
event_handler,
userData) );
WAKU_CALL(waku_get_peerids_from_peerstore(ctx,
event_handler,
userData));
show_main_menu();
while(1) {
handle_user_input();
show_main_menu();
while (1)
{
handle_user_input();
// Uncomment the following if need to test the metrics retrieval
// WAKU_CALL( waku_get_metrics(ctx,
// event_handler,
// userData) );
}
// Uncomment the following if need to test the metrics retrieval
// WAKU_CALL( waku_get_metrics(ctx,
// event_handler,
// userData) );
}
pthread_mutex_destroy(&mutex);
pthread_cond_destroy(&cond);
pthread_mutex_destroy(&mutex);
pthread_cond_destroy(&cond);
}

View File

@ -21,37 +21,43 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
int callback_executed = 0;
void waitForCallback() {
void waitForCallback()
{
pthread_mutex_lock(&mutex);
while (!callback_executed) {
while (!callback_executed)
{
pthread_cond_wait(&cond, &mutex);
}
callback_executed = 0;
pthread_mutex_unlock(&mutex);
}
void signal_cond() {
void signal_cond()
{
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
#define WAKU_CALL(call) \
do { \
int ret = call; \
if (ret != 0) { \
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
} \
waitForCallback(); \
} while (0)
#define WAKU_CALL(call) \
do \
{ \
int ret = call; \
if (ret != 0) \
{ \
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
} \
waitForCallback(); \
} while (0)
struct ConfigNode {
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
struct ConfigNode
{
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
};
// Arguments parsing
@ -59,70 +65,76 @@ static char doc[] = "\nC example that shows how to use the waku library.";
static char args_doc[] = "";
static struct argp_option options[] = {
{ "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{ "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{ "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{ "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{ "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
{"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
to. (default: \"\") e.g. \"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""},
{ 0 }
};
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
static error_t parse_opt(int key, char *arg, struct argp_state *state)
{
struct ConfigNode *cfgNode = (ConfigNode *) state->input;
switch (key) {
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
struct ConfigNode *cfgNode = (ConfigNode *)state->input;
switch (key)
{
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
argp_usage(state);
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
void event_handler(const char* msg, size_t len) {
void event_handler(const char *msg, size_t len)
{
printf("Receiving event: %s\n", msg);
}
void handle_error(const char* msg, size_t len) {
void handle_error(const char *msg, size_t len)
{
printf("handle_error: %s\n", msg);
exit(1);
}
template <class F>
auto cify(F&& f) {
static F fn = std::forward<F>(f);
return [](int callerRet, const char* msg, size_t len, void* userData) {
signal_cond();
return fn(msg, len);
};
auto cify(F &&f)
{
static F fn = std::forward<F>(f);
return [](int callerRet, const char *msg, size_t len, void *userData)
{
signal_cond();
return fn(msg, len);
};
}
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0};
// Beginning of UI program logic
enum PROGRAM_STATE {
enum PROGRAM_STATE
{
MAIN_MENU,
SUBSCRIBE_TOPIC_MENU,
CONNECT_TO_OTHER_NODE_MENU,
@ -131,18 +143,21 @@ enum PROGRAM_STATE {
enum PROGRAM_STATE current_state = MAIN_MENU;
void show_main_menu() {
void show_main_menu()
{
printf("\nPlease, select an option:\n");
printf("\t1.) Subscribe to topic\n");
printf("\t2.) Connect to other node\n");
printf("\t3.) Publish a message\n");
}
void handle_user_input(void* ctx) {
void handle_user_input(void *ctx)
{
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
if (numRead <= 0) {
if (numRead <= 0)
{
return;
}
@ -154,12 +169,11 @@ void handle_user_input(void* ctx) {
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
WAKU_CALL( waku_relay_subscribe(ctx,
pubsubTopic,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
WAKU_CALL(waku_relay_subscribe(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
pubsubTopic));
printf("The subscription went well\n");
show_main_menu();
@ -171,15 +185,14 @@ void handle_user_input(void* ctx) {
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
WAKU_CALL( waku_connect(ctx,
peerAddr,
10000 /* timeoutMs */,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr));
WAKU_CALL(waku_connect(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
peerAddr,
10000 /* timeoutMs */));
show_main_menu();
break;
break;
case PUBLISH_MESSAGE_MENU:
{
@ -193,28 +206,26 @@ void handle_user_input(void* ctx) {
std::string contentTopic;
waku_content_topic(ctx,
cify([&contentTopic](const char *msg, size_t len)
{ contentTopic = msg; }),
nullptr,
"appName",
1,
"contentTopicName",
"encoding",
cify([&contentTopic](const char* msg, size_t len) {
contentTopic = msg;
}),
nullptr);
1,
"contentTopicName",
"encoding");
snprintf(jsonWakuMsg,
2048,
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload.data(), contentTopic.c_str());
WAKU_CALL( waku_relay_publish(ctx,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
WAKU_CALL(waku_relay_publish(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/));
show_main_menu();
}
@ -227,12 +238,14 @@ void handle_user_input(void* ctx) {
// End of UI program logic
void show_help_and_exit() {
void show_help_and_exit()
{
printf("Wrong parameters\n");
exit(1);
}
int main(int argc, char** argv) {
int main(int argc, char **argv)
{
struct ConfigNode cfgNode;
// default values
snprintf(cfgNode.host, 128, "0.0.0.0");
@ -241,8 +254,8 @@ int main(int argc, char** argv) {
cfgNode.port = 60000;
cfgNode.relay = 1;
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode)
== ARGP_ERR_UNKNOWN) {
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN)
{
show_help_and_exit();
}
@ -260,72 +273,64 @@ int main(int argc, char** argv) {
\"discv5UdpPort\": 9999, \
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
cfgNode.port);
}",
cfgNode.host,
cfgNode.port);
void* ctx =
void *ctx =
waku_new(jsonConfig,
cify([](const char* msg, size_t len) {
std::cout << "waku_new feedback: " << msg << std::endl;
}
),
nullptr
);
cify([](const char *msg, size_t len)
{ std::cout << "waku_new feedback: " << msg << std::endl; }),
nullptr);
waitForCallback();
// example on how to retrieve a value from the `libwaku` callback.
std::string defaultPubsubTopic;
WAKU_CALL(
waku_default_pubsub_topic(
ctx,
cify([&defaultPubsubTopic](const char* msg, size_t len) {
defaultPubsubTopic = msg;
}
),
nullptr));
ctx,
cify([&defaultPubsubTopic](const char *msg, size_t len)
{ defaultPubsubTopic = msg; }),
nullptr));
std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl;
WAKU_CALL(waku_version(ctx,
cify([&](const char* msg, size_t len) {
std::cout << "Git Version: " << msg << std::endl;
}),
WAKU_CALL(waku_version(ctx,
cify([&](const char *msg, size_t len)
{ std::cout << "Git Version: " << msg << std::endl; }),
nullptr));
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES" : "NO");
std::string pubsubTopic;
WAKU_CALL(waku_pubsub_topic(ctx,
"example",
cify([&](const char* msg, size_t len) {
pubsubTopic = msg;
}),
nullptr));
WAKU_CALL(waku_pubsub_topic(ctx,
cify([&](const char *msg, size_t len)
{ pubsubTopic = msg; }),
nullptr,
"example"));
std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl;
waku_set_event_callback(ctx,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr);
set_event_callback(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr);
WAKU_CALL( waku_start(ctx,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr));
WAKU_CALL(waku_start(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr));
WAKU_CALL( waku_relay_subscribe(ctx,
defaultPubsubTopic.c_str(),
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
WAKU_CALL(waku_relay_subscribe(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
defaultPubsubTopic.c_str()));
show_main_menu();
while(1) {
while (1)
{
handle_user_input(ctx);
}
}

View File

@ -71,32 +71,32 @@ package main
static void* cGoWakuNew(const char* configJson, void* resp) {
// We pass NULL because we are not interested in retrieving data from this callback
void* ret = waku_new(configJson, (WakuCallBack) callback, resp);
void* ret = waku_new(configJson, (FFICallBack) callback, resp);
return ret;
}
static void cGoWakuStart(void* wakuCtx, void* resp) {
WAKU_CALL(waku_start(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_start(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuStop(void* wakuCtx, void* resp) {
WAKU_CALL(waku_stop(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_stop(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuDestroy(void* wakuCtx, void* resp) {
WAKU_CALL(waku_destroy(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_destroy(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuStartDiscV5(void* wakuCtx, void* resp) {
WAKU_CALL(waku_start_discv5(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_start_discv5(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuStopDiscV5(void* wakuCtx, void* resp) {
WAKU_CALL(waku_stop_discv5(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_stop_discv5(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuVersion(void* wakuCtx, void* resp) {
WAKU_CALL(waku_version(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_version(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuSetEventCallback(void* wakuCtx) {
@ -112,7 +112,7 @@ package main
// This technique is needed because cgo only allows to export Go functions and not methods.
waku_set_event_callback(wakuCtx, (WakuCallBack) globalEventCallback, wakuCtx);
set_event_callback(wakuCtx, (FFICallBack) globalEventCallback, wakuCtx);
}
static void cGoWakuContentTopic(void* wakuCtx,
@ -123,20 +123,21 @@ package main
void* resp) {
WAKU_CALL( waku_content_topic(wakuCtx,
(FFICallBack) callback,
resp,
appName,
appVersion,
contentTopicName,
encoding,
(WakuCallBack) callback,
resp) );
encoding
) );
}
static void cGoWakuPubsubTopic(void* wakuCtx, char* topicName, void* resp) {
WAKU_CALL( waku_pubsub_topic(wakuCtx, topicName, (WakuCallBack) callback, resp) );
WAKU_CALL( waku_pubsub_topic(wakuCtx, (FFICallBack) callback, resp, topicName) );
}
static void cGoWakuDefaultPubsubTopic(void* wakuCtx, void* resp) {
WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuRelayPublish(void* wakuCtx,
@ -146,34 +147,36 @@ package main
void* resp) {
WAKU_CALL (waku_relay_publish(wakuCtx,
(FFICallBack) callback,
resp,
pubSubTopic,
jsonWakuMessage,
timeoutMs,
(WakuCallBack) callback,
resp));
timeoutMs
));
}
static void cGoWakuRelaySubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
WAKU_CALL ( waku_relay_subscribe(wakuCtx,
pubSubTopic,
(WakuCallBack) callback,
resp) );
(FFICallBack) callback,
resp,
pubSubTopic) );
}
static void cGoWakuRelayUnsubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
WAKU_CALL ( waku_relay_unsubscribe(wakuCtx,
pubSubTopic,
(WakuCallBack) callback,
resp) );
(FFICallBack) callback,
resp,
pubSubTopic) );
}
static void cGoWakuConnect(void* wakuCtx, char* peerMultiAddr, int timeoutMs, void* resp) {
WAKU_CALL( waku_connect(wakuCtx,
(FFICallBack) callback,
resp,
peerMultiAddr,
timeoutMs,
(WakuCallBack) callback,
resp) );
timeoutMs
) );
}
static void cGoWakuDialPeerById(void* wakuCtx,
@ -183,42 +186,44 @@ package main
void* resp) {
WAKU_CALL( waku_dial_peer_by_id(wakuCtx,
(FFICallBack) callback,
resp,
peerId,
protocol,
timeoutMs,
(WakuCallBack) callback,
resp) );
timeoutMs
) );
}
static void cGoWakuDisconnectPeerById(void* wakuCtx, char* peerId, void* resp) {
WAKU_CALL( waku_disconnect_peer_by_id(wakuCtx,
peerId,
(WakuCallBack) callback,
resp) );
(FFICallBack) callback,
resp,
peerId
) );
}
static void cGoWakuListenAddresses(void* wakuCtx, void* resp) {
WAKU_CALL (waku_listen_addresses(wakuCtx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_listen_addresses(wakuCtx, (FFICallBack) callback, resp) );
}
static void cGoWakuGetMyENR(void* ctx, void* resp) {
WAKU_CALL (waku_get_my_enr(ctx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_get_my_enr(ctx, (FFICallBack) callback, resp) );
}
static void cGoWakuGetMyPeerId(void* ctx, void* resp) {
WAKU_CALL (waku_get_my_peerid(ctx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_get_my_peerid(ctx, (FFICallBack) callback, resp) );
}
static void cGoWakuListPeersInMesh(void* ctx, char* pubSubTopic, void* resp) {
WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, pubSubTopic, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, (FFICallBack) callback, resp, pubSubTopic) );
}
static void cGoWakuGetNumConnectedPeers(void* ctx, char* pubSubTopic, void* resp) {
WAKU_CALL (waku_relay_get_num_connected_peers(ctx, pubSubTopic, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_relay_get_num_connected_peers(ctx, (FFICallBack) callback, resp, pubSubTopic) );
}
static void cGoWakuGetPeerIdsFromPeerStore(void* wakuCtx, void* resp) {
WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (FFICallBack) callback, resp) );
}
static void cGoWakuLightpushPublish(void* wakuCtx,
@ -227,10 +232,11 @@ package main
void* resp) {
WAKU_CALL (waku_lightpush_publish(wakuCtx,
(FFICallBack) callback,
resp,
pubSubTopic,
jsonWakuMessage,
(WakuCallBack) callback,
resp));
jsonWakuMessage
));
}
static void cGoWakuStoreQuery(void* wakuCtx,
@ -240,11 +246,12 @@ package main
void* resp) {
WAKU_CALL (waku_store_query(wakuCtx,
(FFICallBack) callback,
resp,
jsonQuery,
peerAddr,
timeoutMs,
(WakuCallBack) callback,
resp));
timeoutMs
));
}
static void cGoWakuPeerExchangeQuery(void* wakuCtx,
@ -252,9 +259,10 @@ package main
void* resp) {
WAKU_CALL (waku_peer_exchange_request(wakuCtx,
numPeers,
(WakuCallBack) callback,
resp));
(FFICallBack) callback,
resp,
numPeers
));
}
static void cGoWakuGetPeerIdsByProtocol(void* wakuCtx,
@ -262,9 +270,10 @@ package main
void* resp) {
WAKU_CALL (waku_get_peerids_by_protocol(wakuCtx,
protocol,
(WakuCallBack) callback,
resp));
(FFICallBack) callback,
resp,
protocol
));
}
*/

View File

@ -102,8 +102,8 @@ print("Waku Relay enabled: {}".format(args.relay))
# Set the event callback
callback = callback_type(handle_event) # This line is important so that the callback is not gc'ed
libwaku.waku_set_event_callback.argtypes = [callback_type, ctypes.c_void_p]
libwaku.waku_set_event_callback(callback, ctypes.c_void_p(0))
libwaku.set_event_callback.argtypes = [callback_type, ctypes.c_void_p]
libwaku.set_event_callback(callback, ctypes.c_void_p(0))
# Start the node
libwaku.waku_start.argtypes = [ctypes.c_void_p,
@ -117,32 +117,32 @@ libwaku.waku_start(ctx,
# Subscribe to the default pubsub topic
libwaku.waku_relay_subscribe.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
callback_type,
ctypes.c_void_p]
ctypes.c_void_p,
ctypes.c_char_p]
libwaku.waku_relay_subscribe(ctx,
default_pubsub_topic.encode('utf-8'),
callback_type(
#onErrCb
lambda ret, msg, len:
print("Error calling waku_relay_subscribe: %s" %
msg.decode('utf-8'))
),
ctypes.c_void_p(0))
ctypes.c_void_p(0),
default_pubsub_topic.encode('utf-8'))
libwaku.waku_connect.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
callback_type,
ctypes.c_void_p]
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int]
libwaku.waku_connect(ctx,
args.peer.encode('utf-8'),
10000,
# onErrCb
callback_type(
lambda ret, msg, len:
print("Error calling waku_connect: %s" % msg.decode('utf-8'))),
ctypes.c_void_p(0))
ctypes.c_void_p(0),
args.peer.encode('utf-8'),
10000)
# app = Flask(__name__)
# @app.route("/")

View File

@ -27,7 +27,7 @@ public:
void initialize(const QString& jsonConfig, WakuCallBack event_handler, void* userData) {
ctx = waku_new(jsonConfig.toUtf8().constData(), WakuCallBack(event_handler), userData);
waku_set_event_callback(ctx, on_event_received, userData);
set_event_callback(ctx, on_event_received, userData);
qDebug() << "Waku context initialized, ready to start.";
}

View File

@ -3,22 +3,22 @@ use std::ffi::CString;
use std::os::raw::{c_char, c_int, c_void};
use std::{slice, thread, time};
pub type WakuCallback = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void);
pub type FFICallBack = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void);
extern "C" {
pub fn waku_new(
config_json: *const u8,
cb: WakuCallback,
cb: FFICallBack,
user_data: *const c_void,
) -> *mut c_void;
pub fn waku_version(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int;
pub fn waku_version(ctx: *const c_void, cb: FFICallBack, user_data: *const c_void) -> c_int;
pub fn waku_start(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int;
pub fn waku_start(ctx: *const c_void, cb: FFICallBack, user_data: *const c_void) -> c_int;
pub fn waku_default_pubsub_topic(
ctx: *mut c_void,
cb: WakuCallback,
cb: FFICallBack,
user_data: *const c_void,
) -> *mut c_void;
}
@ -40,7 +40,7 @@ pub unsafe extern "C" fn trampoline<C>(
closure(return_val, &buffer_utf8);
}
pub fn get_trampoline<C>(_closure: &C) -> WakuCallback
pub fn get_trampoline<C>(_closure: &C) -> FFICallBack
where
C: FnMut(i32, &str),
{

View File

@ -1,42 +0,0 @@
## Can be shared safely between threads
type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int]
proc alloc*(str: cstring): cstring =
# Byte allocation from the given address.
# There should be the corresponding manual deallocation with deallocShared !
if str.isNil():
var ret = cast[cstring](allocShared(1)) # Allocate memory for the null terminator
ret[0] = '\0' # Set the null terminator
return ret
let ret = cast[cstring](allocShared(len(str) + 1))
copyMem(ret, str, len(str) + 1)
return ret
proc alloc*(str: string): cstring =
## Byte allocation from the given address.
## There should be the corresponding manual deallocation with deallocShared !
var ret = cast[cstring](allocShared(str.len + 1))
let s = cast[seq[char]](str)
for i in 0 ..< str.len:
ret[i] = s[i]
ret[str.len] = '\0'
return ret
proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] =
let data = allocShared(sizeof(T) * s.len)
if s.len != 0:
copyMem(data, unsafeAddr s[0], s.len)
return (cast[ptr UncheckedArray[T]](data), s.len)
proc deallocSharedSeq*[T](s: var SharedSeq[T]) =
deallocShared(s.data)
s.len = 0
proc toSeq*[T](s: SharedSeq[T]): seq[T] =
## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required
## as seq[T] is a GC managed type.
var ret = newSeq[T]()
for i in 0 ..< s.len:
ret.add(s.data[i])
return ret

library/declare_lib.nim Normal file
View File

@ -0,0 +1,10 @@
import ffi
import waku/factory/waku
declareLibrary("waku")
proc set_event_callback(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.dynlib, exportc, cdecl.} =
ctx[].eventCallback = cast[pointer](callback)
ctx[].eventUserData = userData
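The new declare_lib.nim keeps only the hand-written set_event_callback entry point, which stores the callback and user data on the FFI context; presumably declareLibrary("waku") provides the rest of the plumbing. A hedged C sketch of registering an asynchronous event handler through the renamed function (libwaku.h assumed):

#include <stdio.h>
#include "libwaku.h"

// Events (relay messages, connection changes, ...) arrive as JSON strings.
static void on_event(int callerRet, const char *msg, size_t len, void *userData) {
    printf("event: %.*s\n", (int)len, msg);
}

void register_event_handler(void *ctx) {
    // Formerly waku_set_event_callback; the handler must outlive the node.
    set_event_callback(ctx, on_event, NULL);
}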

View File

@ -1,9 +0,0 @@
import system, std/json, ./json_base_event
type JsonWakuNotRespondingEvent* = ref object of JsonEvent
proc new*(T: type JsonWakuNotRespondingEvent): T =
return JsonWakuNotRespondingEvent(eventType: "waku_not_responding")
method `$`*(event: JsonWakuNotRespondingEvent): string =
$(%*event)

View File

@ -1,30 +0,0 @@
################################################################################
### Exported types
type WakuCallBack* = proc(
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].}
const RET_OK*: cint = 0
const RET_ERR*: cint = 1
const RET_MISSING_CALLBACK*: cint = 2
### End of exported types
################################################################################
################################################################################
### FFI utils
template foreignThreadGc*(body: untyped) =
when declared(setupForeignThreadGc):
setupForeignThreadGc()
body
when declared(tearDownForeignThreadGc):
tearDownForeignThreadGc()
type onDone* = proc()
### End of FFI utils
################################################################################

View File

@ -0,0 +1,49 @@
import std/json
import
chronicles,
chronos,
results,
eth/p2p/discoveryv5/enr,
strutils,
libp2p/peerid,
metrics,
ffi
import waku/factory/waku, waku/node/waku_node, waku/node/health_monitor, library/declare_lib
proc getMultiaddresses(node: WakuNode): seq[string] =
return node.info().listenAddresses
proc getMetrics(): string =
{.gcsafe.}:
return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module
proc waku_version(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok(WakuNodeVersionString)
proc waku_listen_addresses(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a comma-separated string of the listen addresses
return ok(ctx.myLib[].node.getMultiaddresses().join(","))
proc waku_get_my_enr(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok(ctx.myLib[].node.enr.toURI())
proc waku_get_my_peerid(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok($ctx.myLib[].node.peerId())
proc waku_get_metrics(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok(getMetrics())
proc waku_is_online(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok($ctx.myLib[].healthMonitor.onlineMonitor.amIOnline())
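These {.ffi.} getters return their payload through the callback rather than the C return value, which only signals RET_OK/RET_ERR. A small C sketch of consuming two of them (libwaku.h assumed; the userData pointer is only used here as a label):

#include <stdio.h>
#include "libwaku.h"

static void print_result(int callerRet, const char *msg, size_t len, void *userData) {
    if (callerRet == RET_OK)
        printf("%s: %.*s\n", (const char *)userData, (int)len, msg);
    else
        fprintf(stderr, "%s failed: %.*s\n", (const char *)userData, (int)len, msg);
}

void dump_node_info(void *ctx) {
    waku_version(ctx, print_result, (void *)"version");
    // Delivered as a comma-separated multiaddress string, as documented above.
    waku_listen_addresses(ctx, print_result, (void *)"listen addresses");
}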

View File

@ -0,0 +1,96 @@
import std/json
import chronos, chronicles, results, strutils, libp2p/multiaddress, ffi
import
waku/factory/waku,
waku/discovery/waku_dnsdisc,
waku/discovery/waku_discv5,
waku/waku_core/peers,
waku/node/waku_node,
waku/node/kernel_api,
library/declare_lib
proc retrieveBootstrapNodes(
enrTreeUrl: string, ipDnsServer: string
): Future[Result[seq[string], string]] {.async.} =
let dnsNameServers = @[parseIpAddress(ipDnsServer)]
let discoveredPeers: seq[RemotePeerInfo] = (
await retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers)
).valueOr:
return err("failed discovering peers from DNS: " & $error)
var multiAddresses = newSeq[string]()
for discPeer in discoveredPeers:
for address in discPeer.addrs:
multiAddresses.add($address & "/p2p/" & $discPeer)
return ok(multiAddresses)
proc updateDiscv5BootstrapNodes(nodes: string, waku: Waku): Result[void, string] =
waku.wakuDiscv5.updateBootstrapRecords(nodes).isOkOr:
return err("error in updateDiscv5BootstrapNodes: " & $error)
return ok()
proc performPeerExchangeRequestTo*(
numPeers: uint64, waku: Waku
): Future[Result[int, string]] {.async.} =
let numPeersRecv = (await waku.node.fetchPeerExchangePeers(numPeers)).valueOr:
return err($error)
return ok(numPeersRecv)
proc waku_discv5_update_bootnodes(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
bootnodes: cstring,
) {.ffi.} =
## Updates the bootnode list used for discovering new peers via DiscoveryV5
## bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
updateDiscv5BootstrapNodes($bootnodes, ctx.myLib[]).isOkOr:
error "UPDATE_DISCV5_BOOTSTRAP_NODES failed", error = error
return err($error)
return ok("discovery request processed correctly")
proc waku_dns_discovery(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
enrTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
) {.ffi.} =
let nodes = (await retrieveBootstrapNodes($enrTreeUrl, $nameDnsServer)).valueOr:
error "GET_BOOTSTRAP_NODES failed", error = error
return err($error)
## returns a comma-separated string of bootstrap nodes' multiaddresses
return ok(nodes.join(","))
proc waku_start_discv5(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
(await ctx.myLib[].wakuDiscv5.start()).isOkOr:
error "START_DISCV5 failed", error = error
return err("error starting discv5: " & $error)
return ok("discv5 started correctly")
proc waku_stop_discv5(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
await ctx.myLib[].wakuDiscv5.stop()
return ok("discv5 stopped correctly")
proc waku_peer_exchange_request(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
numPeers: uint64,
) {.ffi.} =
let numValidPeers = (await performPeerExchangeRequestTo(numPeers, ctx.myLib[])).valueOr:
error "waku_peer_exchange_request failed", error = error
return err("failed peer exchange: " & $error)
return ok($numValidPeers)
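A hedged C sketch of the discovery entry points above: waku_dns_discovery hands back a comma-separated multiaddress list and waku_peer_exchange_request reports how many peers were obtained. The ENR tree URL and DNS server below are placeholders, not values taken from this change:

#include <stdio.h>
#include "libwaku.h"

static void on_discovery(int callerRet, const char *msg, size_t len, void *userData) {
    printf("[%s] ret=%d %.*s\n", (const char *)userData, callerRet, (int)len, msg);
}

void discover_peers(void *ctx) {
    // Comma-separated multiaddresses of the discovered bootstrap nodes.
    waku_dns_discovery(ctx, on_discovery, (void *)"dns-discovery",
                       "enrtree://EXAMPLE@nodes.example.org", "8.8.8.8", 5000);
    // Ask for up to 10 extra peers via Peer Exchange; the reply is the peer count.
    waku_peer_exchange_request(ctx, on_discovery, (void *)"peer-exchange", 10);
}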

View File

@ -1,43 +1,14 @@
import std/[options, json, strutils, net]
import chronos, chronicles, results, confutils, confutils/std/net
import chronos, chronicles, results, confutils, confutils/std/net, ffi
import
waku/node/peer_manager/peer_manager,
tools/confutils/cli_args,
waku/factory/waku,
waku/factory/node_factory,
waku/factory/networks_config,
waku/factory/app_callbacks,
waku/rest_api/endpoint/builder
import
../../alloc
type NodeLifecycleMsgType* = enum
CREATE_NODE
START_NODE
STOP_NODE
type NodeLifecycleRequest* = object
operation: NodeLifecycleMsgType
configJson: cstring ## Only used in 'CREATE_NODE' operation
appCallbacks: AppCallbacks
proc createShared*(
T: type NodeLifecycleRequest,
op: NodeLifecycleMsgType,
configJson: cstring = "",
appCallbacks: AppCallbacks = nil,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].appCallbacks = appCallbacks
ret[].configJson = configJson.alloc()
return ret
proc destroyShared(self: ptr NodeLifecycleRequest) =
deallocShared(self[].configJson)
deallocShared(self)
waku/rest_api/endpoint/builder,
library/declare_lib
proc createWaku(
configJson: cstring, appCallbacks: AppCallbacks = nil
@ -87,26 +58,30 @@ proc createWaku(
return ok(wakuRes)
proc process*(
self: ptr NodeLifecycleRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of CREATE_NODE:
waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr:
error "CREATE_NODE failed", error = error
registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]):
proc(
configJson: cstring, appCallbacks: AppCallbacks
): Future[Result[string, string]] {.async.} =
ctx.myLib[] = (await createWaku(configJson, cast[AppCallbacks](appCallbacks))).valueOr:
error "CreateNodeRequest failed", error = error
return err($error)
of START_NODE:
(await waku.startWaku()).isOkOr:
error "START_NODE failed", error = error
return err($error)
of STOP_NODE:
try:
await waku[].stop()
except Exception:
error "STOP_NODE failed", error = getCurrentExceptionMsg()
return err(getCurrentExceptionMsg())
return ok("")
proc waku_start(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
(await startWaku(ctx[].myLib)).isOkOr:
error "START_NODE failed", error = error
return err("failed to start: " & $error)
return ok("")
proc waku_stop(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
try:
await ctx.myLib[].stop()
except Exception as exc:
error "STOP_NODE failed", error = exc.msg
return err("failed to stop: " & exc.msg)
return ok("")

View File

@ -0,0 +1,123 @@
import std/[sequtils, strutils, tables]
import chronicles, chronos, results, options, json, ffi
import waku/factory/waku, waku/node/waku_node, waku/node/peer_manager, ../declare_lib
type PeerInfo = object
protocols: seq[string]
addresses: seq[string]
proc waku_get_peerids_from_peerstore(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a comma-separated string of peerIDs
let peerIDs =
ctx.myLib[].node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",")
return ok(peerIDs)
proc waku_connect(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerMultiAddr: cstring,
timeoutMs: cuint,
) {.ffi.} =
let peers = ($peerMultiAddr).split(",").mapIt(strip(it))
await ctx.myLib[].node.connectToNodes(peers, source = "static")
return ok("")
proc waku_disconnect_peer_by_id(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer, peerId: cstring
) {.ffi.} =
let pId = PeerId.init($peerId).valueOr:
error "DISCONNECT_PEER_BY_ID failed", error = $error
return err($error)
await ctx.myLib[].node.peerManager.disconnectNode(pId)
return ok("")
proc waku_disconnect_all_peers(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
await ctx.myLib[].node.peerManager.disconnectAllPeers()
return ok("")
proc waku_dial_peer(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerMultiAddr: cstring,
protocol: cstring,
timeoutMs: cuint,
) {.ffi.} =
let remotePeerInfo = parsePeerInfo($peerMultiAddr).valueOr:
error "DIAL_PEER failed", error = $error
return err($error)
let conn = await ctx.myLib[].node.peerManager.dialPeer(remotePeerInfo, $protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER failed", error = msg, peerId = $remotePeerInfo.peerId
return err(msg)
return ok("")
proc waku_dial_peer_by_id(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerId: cstring,
protocol: cstring,
timeoutMs: cuint,
) {.ffi.} =
let pId = PeerId.init($peerId).valueOr:
error "DIAL_PEER_BY_ID failed", error = $error
return err($error)
let conn = await ctx.myLib[].node.peerManager.dialPeer(pId, $protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER_BY_ID failed", error = msg, peerId = $peerId
return err(msg)
return ok("")
proc waku_get_connected_peers_info(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a JSON string mapping peerIDs to objects with protocols and addresses
var peersMap = initTable[string, PeerInfo]()
let peers = ctx.myLib[].node.peerManager.switch.peerStore.peers().filterIt(
it.connectedness == Connected
)
# Build a map of peer IDs to peer info objects
for peer in peers:
let peerIdStr = $peer.peerId
peersMap[peerIdStr] =
PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it))
# Convert the map to JSON string
let jsonObj = %*peersMap
let jsonStr = $jsonObj
return ok(jsonStr)
proc waku_get_connected_peers(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a comma-separated string of peerIDs
let
(inPeerIds, outPeerIds) = ctx.myLib[].node.peerManager.connectedPeers()
connectedPeerids = concat(inPeerIds, outPeerIds)
return ok(connectedPeerids.mapIt($it).join(","))
proc waku_get_peerids_by_protocol(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
protocol: cstring,
) {.ffi.} =
## returns a comma-separated string of peerIDs that mount the given protocol
let connectedPeers = ctx.myLib[].node.peerManager.switch.peerStore
.peers($protocol)
.filterIt(it.connectedness == Connected)
.mapIt($it.peerId)
.join(",")
return ok(connectedPeers)
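A short C sketch of the peer-management calls above, assuming libwaku.h: waku_connect accepts one or more comma-separated multiaddresses (the address below is a placeholder), and waku_get_connected_peers returns the connected peer IDs as a comma-separated string:

#include <stdio.h>
#include "libwaku.h"

static void on_peer_resp(int callerRet, const char *msg, size_t len, void *userData) {
    printf("[%s] ret=%d %.*s\n", (const char *)userData, callerRet, (int)len, msg);
}

void connect_and_list(void *ctx) {
    // Placeholder multiaddress; several addresses may be passed comma-separated.
    waku_connect(ctx, on_peer_resp, (void *)"connect",
                 "/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2HAm...", 10000);
    waku_get_connected_peers(ctx, on_peer_resp, (void *)"connected-peers");
}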

View File

@ -0,0 +1,43 @@
import std/[json, strutils]
import chronos, results, ffi
import libp2p/[protocols/ping, switch, multiaddress, multicodec]
import waku/[factory/waku, waku_core/peers, node/waku_node], library/declare_lib
proc waku_ping_peer(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerAddr: cstring,
timeoutMs: cuint,
) {.ffi.} =
let peerInfo = peers.parsePeerInfo(($peerAddr).split(",")).valueOr:
return err("PingRequest failed to parse peer addr: " & $error)
let timeout = chronos.milliseconds(timeoutMs)
proc ping(): Future[Result[Duration, string]] {.async, gcsafe.} =
try:
let conn =
await ctx.myLib[].node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
defer:
await conn.close()
let pingRTT = await ctx.myLib[].node.libp2pPing.ping(conn)
if pingRTT == 0.nanos:
return err("could not ping peer: rtt-0")
return ok(pingRTT)
except CatchableError as exc:
return err("could not ping peer: " & exc.msg)
let pingFuture = ping()
let pingRTT: Duration =
if timeout == chronos.milliseconds(0): # No timeout expected
(await pingFuture).valueOr:
return err("ping failed, no timeout expected: " & error)
else:
let timedOut = not (await pingFuture.withTimeout(timeout))
if timedOut:
return err("ping timed out")
pingFuture.read().valueOr:
return err("failed to read ping future: " & error)
return ok($(pingRTT.nanos))
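A C sketch of the ping call, assuming libwaku.h; the peer address is supplied by the caller and the reported value is the round-trip time in nanoseconds, per the proc above:

#include <stdio.h>
#include "libwaku.h"

static void on_ping(int callerRet, const char *msg, size_t len, void *userData) {
    printf("ping ret=%d rtt_ns=%.*s\n", callerRet, (int)len, msg);
}

void ping_example(void *ctx, const char *peerMultiAddr) {
    // A timeout of 0 means "no timeout", per waku_ping_peer above.
    waku_ping_peer(ctx, on_ping, NULL, peerMultiAddr, 3000);
}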

View File

@ -0,0 +1,109 @@
import options, std/[strutils, sequtils]
import chronicles, chronos, results, ffi
import
waku/waku_filter_v2/client,
waku/waku_core/message/message,
waku/factory/waku,
waku/waku_relay,
waku/waku_filter_v2/common,
waku/waku_core/subscription/push_handler,
waku/node/peer_manager/peer_manager,
waku/node/waku_node,
waku/node/kernel_api,
waku/waku_core/topics/pubsub_topic,
waku/waku_core/topics/content_topic,
library/events/json_message_event,
library/declare_lib
const FilterOpTimeout = 5.seconds
proc checkFilterClientMounted(waku: Waku): Result[string, string] =
if waku.node.wakuFilterClient.isNil():
let errorMsg = "wakuFilterClient is not mounted"
error "fail filter process", error = errorMsg
return err(errorMsg)
return ok("")
proc waku_filter_subscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
contentTopics: cstring,
) {.ffi.} =
proc onReceivedMessage(ctx: ptr FFIContext): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
checkFilterClientMounted(ctx.myLib[]).isOkOr:
return err($error)
var filterPushEventCallback = FilterPushHandler(onReceivedMessage(ctx))
ctx.myLib[].node.wakuFilterClient.registerPushHandler(filterPushEventCallback)
let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg = "could not find peer with WakuFilterSubscribeCodec when subscribing"
error "fail filter subscribe", error = errorMsg
return err(errorMsg)
let subFut = ctx.myLib[].node.filterSubscribe(
some(PubsubTopic($pubsubTopic)),
($contentTopics).split(",").mapIt(ContentTopic(it)),
peer,
)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter subscription timed out"
error "fail filter unsubscribe", error = errorMsg
return err(errorMsg)
return ok("")
proc waku_filter_unsubscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
contentTopics: cstring,
) {.ffi.} =
checkFilterClientMounted(ctx.myLib[]).isOkOr:
return err($error)
let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing"
error "fail filter process", error = errorMsg
return err(errorMsg)
let subFut = ctx.myLib[].node.filterUnsubscribe(
some(PubsubTopic($pubsubTopic)),
($contentTopics).split(",").mapIt(ContentTopic(it)),
peer,
)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription timed out"
error "fail filter unsubscribe", error = errorMsg
return err(errorMsg)
return ok("")
proc waku_filter_unsubscribe_all(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
checkFilterClientMounted(ctx.myLib[]).isOkOr:
return err($error)
let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing all"
error "fail filter unsubscribe all", error = errorMsg
return err(errorMsg)
let unsubFut = ctx.myLib[].node.filterUnsubscribeAll(peer)
if not await unsubFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription all timed out"
error "fail filter unsubscribe all", error = errorMsg
return err(errorMsg)
return ok("")

View File

@ -0,0 +1,51 @@
import options, std/[json, strformat]
import chronicles, chronos, results, ffi
import
waku/waku_core/message/message,
waku/waku_core/codecs,
waku/factory/waku,
waku/waku_core/message,
waku/waku_core/topics/pubsub_topic,
waku/waku_lightpush_legacy/client,
waku/node/peer_manager/peer_manager,
library/events/json_message_event,
library/declare_lib
proc waku_lightpush_publish(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
) {.ffi.} =
if ctx.myLib[].node.wakuLightpushClient.isNil():
let errorMsg = "LightpushRequest waku.node.wakuLightpushClient is nil"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError as exc:
return err(fmt"Error parsing json message: {exc.msg}")
let msg = json_message_event.toWakuMessage(jsonMessage).valueOr:
return err("Problem building the WakuMessage: " & $error)
let peerOpt = ctx.myLib[].node.peerManager.selectPeer(WakuLightPushCodec)
if peerOpt.isNone():
let errorMsg = "failed to lightpublish message, no suitable remote peers"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
let msgHashHex = (
await ctx.myLib[].node.wakuLegacyLightpushClient.publish(
$pubsubTopic, msg, peer = peerOpt.get()
)
).valueOr:
error "PUBLISH failed", error = error
return err($error)
return ok(msgHashHex)
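A C sketch of publishing via lightpush, assuming libwaku.h. The JSON field names (base64 "payload", "contentTopic") follow the JSON message shape used elsewhere in these bindings and should be treated as an assumption, since the schema itself is not part of this change:

#include <stdio.h>
#include "libwaku.h"

static void on_publish(int callerRet, const char *msg, size_t len, void *userData) {
    // On success the callback receives the message hash in hex.
    printf("lightpush ret=%d %.*s\n", callerRet, (int)len, msg);
}

void lightpush_example(void *ctx, const char *pubSubTopic) {
    const char *jsonMsg =
        "{\"payload\":\"SGVsbG8gV2FrdQ==\",\"contentTopic\":\"/my-app/1/chat/proto\"}";
    waku_lightpush_publish(ctx, on_publish, NULL, pubSubTopic, jsonMsg);
}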

View File

@ -0,0 +1,171 @@
import std/[net, sequtils, strutils, json], strformat
import chronicles, chronos, stew/byteutils, results, ffi
import
waku/waku_core/message/message,
waku/factory/[validator_signed, waku],
tools/confutils/cli_args,
waku/waku_core/message,
waku/waku_core/topics/pubsub_topic,
waku/waku_core/topics,
waku/node/kernel_api/relay,
waku/waku_relay/protocol,
waku/node/peer_manager,
library/events/json_message_event,
library/declare_lib
proc waku_relay_get_peers_in_mesh(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
let meshPeers = ctx.myLib[].node.wakuRelay.getPeersInMesh($pubsubTopic).valueOr:
error "LIST_MESH_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(meshPeers.mapIt($it).join(","))
proc waku_relay_get_num_peers_in_mesh(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
let numPeersInMesh = ctx.myLib[].node.wakuRelay.getNumPeersInMesh($pubsubTopic).valueOr:
error "NUM_MESH_PEERS failed", error = error
return err($error)
return ok($numPeersInMesh)
proc waku_relay_get_connected_peers(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
## Returns the list of all peers connected to a specific pubsub topic
let connPeers = ctx.myLib[].node.wakuRelay.getConnectedPeers($pubsubTopic).valueOr:
error "LIST_CONNECTED_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(connPeers.mapIt($it).join(","))
proc waku_relay_get_num_connected_peers(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
let numConnPeers = ctx.myLib[].node.wakuRelay.getNumConnectedPeers($pubsubTopic).valueOr:
error "NUM_CONNECTED_PEERS failed", error = error
return err($error)
return ok($numConnPeers)
proc waku_relay_add_protected_shard(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
clusterId: cint,
shardId: cint,
publicKey: cstring,
) {.ffi.} =
## Protects a shard with a public key
try:
let relayShard = RelayShard(clusterId: uint16(clusterId), shardId: uint16(shardId))
let protectedShard = ProtectedShard.parseCmdArg($relayShard & ":" & $publicKey)
ctx.myLib[].node.wakuRelay.addSignedShardsValidator(
@[protectedShard], uint16(clusterId)
)
except ValueError as exc:
return err("ERROR in waku_relay_add_protected_shard: " & exc.msg)
return ok("")
proc waku_relay_subscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
echo "Subscribing to topic: " & $pubSubTopic & " ..."
proc onReceivedMessage(ctx: ptr FFIContext[Waku]): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
var cb = onReceivedMessage(ctx)
ctx.myLib[].node.subscribe(
(kind: SubscriptionKind.PubsubSub, topic: $pubsubTopic),
handler = WakuRelayHandler(cb),
).isOkOr:
error "SUBSCRIBE failed", error = error
return err($error)
return ok("")
proc waku_relay_unsubscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
ctx.myLib[].node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $pubsubTopic)).isOkOr:
error "UNSUBSCRIBE failed", error = error
return err($error)
return ok("")
proc waku_relay_publish(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
timeoutMs: cuint,
) {.ffi.} =
var
# https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms
jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError as exc:
return err(fmt"Error parsing json message: {exc.msg}")
let msg = json_message_event.toWakuMessage(jsonMessage).valueOr:
return err("Problem building the WakuMessage: " & $error)
(await ctx.myLib[].node.wakuRelay.publish($pubsubTopic, msg)).isOkOr:
error "PUBLISH failed", error = error
return err($error)
let msgHash = computeMessageHash($pubSubTopic, msg).to0xHex
return ok(msgHash)
proc waku_default_pubsub_topic(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic
return ok(DefaultPubsubTopic)
proc waku_content_topic(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
appName: cstring,
appVersion: cuint,
contentTopicName: cstring,
encoding: cstring,
) {.ffi.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding
return ok(fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}")
proc waku_pubsub_topic(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
topicName: cstring,
) {.ffi.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding
return ok(fmt"/waku/2/{$topicName}")

View File

@ -1,28 +1,16 @@
import std/[json, sugar, strutils, options]
import chronos, chronicles, results, stew/byteutils
import chronos, chronicles, results, stew/byteutils, ffi
import
../../../../waku/factory/waku,
../../../alloc,
../../../utils,
../../../../waku/waku_core/peers,
../../../../waku/waku_core/time,
../../../../waku/waku_core/message/digest,
../../../../waku/waku_store/common,
../../../../waku/waku_store/client,
../../../../waku/common/paging
waku/factory/waku,
library/utils,
waku/waku_core/peers,
waku/waku_core/message/digest,
waku/waku_store/common,
waku/waku_store/client,
waku/common/paging,
library/declare_lib
type StoreReqType* = enum
REMOTE_QUERY ## to perform a query to another Store node
type StoreRequest* = object
operation: StoreReqType
jsonQuery: cstring
peerAddr: cstring
timeoutMs: cint
func fromJsonNode(
T: type StoreRequest, jsonContent: JsonNode
): Result[StoreQueryRequest, string] =
func fromJsonNode(jsonContent: JsonNode): Result[StoreQueryRequest, string] =
var contentTopics: seq[string]
if jsonContent.contains("contentTopics"):
contentTopics = collect(newSeq):
@ -78,54 +66,29 @@ func fromJsonNode(
)
)
proc createShared*(
T: type StoreRequest,
op: StoreReqType,
proc waku_store_query(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
jsonQuery: cstring,
peerAddr: cstring,
timeoutMs: cint,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].timeoutMs = timeoutMs
ret[].jsonQuery = jsonQuery.alloc()
ret[].peerAddr = peerAddr.alloc()
return ret
proc destroyShared(self: ptr StoreRequest) =
deallocShared(self[].jsonQuery)
deallocShared(self[].peerAddr)
deallocShared(self)
proc process_remote_query(
self: ptr StoreRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
) {.ffi.} =
let jsonContentRes = catch:
parseJson($self[].jsonQuery)
parseJson($jsonQuery)
if jsonContentRes.isErr():
return err("StoreRequest failed parsing store request: " & jsonContentRes.error.msg)
let storeQueryRequest = ?StoreRequest.fromJsonNode(jsonContentRes.get())
let storeQueryRequest = ?fromJsonNode(jsonContentRes.get())
let peer = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr:
let peer = peers.parsePeerInfo(($peerAddr).split(",")).valueOr:
return err("StoreRequest failed to parse peer addr: " & $error)
let queryResponse = (await waku.node.wakuStoreClient.query(storeQueryRequest, peer)).valueOr:
let queryResponse = (
await ctx.myLib[].node.wakuStoreClient.query(storeQueryRequest, peer)
).valueOr:
return err("StoreRequest failed store query: " & $error)
let res = $(%*(queryResponse.toHex()))
return ok(res) ## returning the response in json format
proc process*(
self: ptr StoreRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
deallocShared(self)
case self.operation
of REMOTE_QUERY:
return await self.process_remote_query(waku)
error "store request not handled at all"
return err("store request not handled at all")

View File

@ -10,241 +10,242 @@
#include <stdint.h>
// The possible returned values for the functions that return int
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2
#ifdef __cplusplus
extern "C" {
extern "C"
{
#endif
typedef void (*WakuCallBack) (int callerRet, const char* msg, size_t len, void* userData);
typedef void (*FFICallBack)(int callerRet, const char *msg, size_t len, void *userData);
// Creates a new instance of the waku node.
// Sets up the waku node from the given configuration.
// Returns a pointer to the Context needed by the rest of the API functions.
void* waku_new(
const char* configJson,
WakuCallBack callback,
void* userData);
// Creates a new instance of the waku node.
// Sets up the waku node from the given configuration.
// Returns a pointer to the Context needed by the rest of the API functions.
void *waku_new(
const char *configJson,
FFICallBack callback,
void *userData);
int waku_start(void* ctx,
WakuCallBack callback,
void* userData);
int waku_start(void *ctx,
FFICallBack callback,
void *userData);
int waku_stop(void* ctx,
WakuCallBack callback,
void* userData);
int waku_stop(void *ctx,
FFICallBack callback,
void *userData);
// Destroys an instance of a waku node created with waku_new
int waku_destroy(void* ctx,
WakuCallBack callback,
void* userData);
// Destroys an instance of a waku node created with waku_new
int waku_destroy(void *ctx,
FFICallBack callback,
void *userData);
int waku_version(void* ctx,
WakuCallBack callback,
void* userData);
int waku_version(void *ctx,
FFICallBack callback,
void *userData);
// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
void waku_set_event_callback(void* ctx,
WakuCallBack callback,
void* userData);
// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
void set_event_callback(void *ctx,
FFICallBack callback,
void *userData);
int waku_content_topic(void* ctx,
const char* appName,
unsigned int appVersion,
const char* contentTopicName,
const char* encoding,
WakuCallBack callback,
void* userData);
int waku_content_topic(void *ctx,
FFICallBack callback,
void *userData,
const char *appName,
unsigned int appVersion,
const char *contentTopicName,
const char *encoding);
int waku_pubsub_topic(void* ctx,
const char* topicName,
WakuCallBack callback,
void* userData);
int waku_pubsub_topic(void *ctx,
FFICallBack callback,
void *userData,
const char *topicName);
int waku_default_pubsub_topic(void* ctx,
WakuCallBack callback,
void* userData);
int waku_default_pubsub_topic(void *ctx,
FFICallBack callback,
void *userData);
int waku_relay_publish(void* ctx,
const char* pubSubTopic,
const char* jsonWakuMessage,
unsigned int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_relay_publish(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *jsonWakuMessage,
unsigned int timeoutMs);
int waku_lightpush_publish(void* ctx,
const char* pubSubTopic,
const char* jsonWakuMessage,
WakuCallBack callback,
void* userData);
int waku_lightpush_publish(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *jsonWakuMessage);
int waku_relay_subscribe(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_subscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_add_protected_shard(void* ctx,
int clusterId,
int shardId,
char* publicKey,
WakuCallBack callback,
void* userData);
int waku_relay_add_protected_shard(void *ctx,
FFICallBack callback,
void *userData,
int clusterId,
int shardId,
char *publicKey);
int waku_relay_unsubscribe(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_unsubscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_filter_subscribe(void* ctx,
const char* pubSubTopic,
const char* contentTopics,
WakuCallBack callback,
void* userData);
int waku_filter_subscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *contentTopics);
int waku_filter_unsubscribe(void* ctx,
const char* pubSubTopic,
const char* contentTopics,
WakuCallBack callback,
void* userData);
int waku_filter_unsubscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *contentTopics);
int waku_filter_unsubscribe_all(void* ctx,
WakuCallBack callback,
void* userData);
int waku_filter_unsubscribe_all(void *ctx,
FFICallBack callback,
void *userData);
int waku_relay_get_num_connected_peers(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_num_connected_peers(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_get_connected_peers(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_connected_peers(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_get_num_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_num_peers_in_mesh(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_get_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_peers_in_mesh(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_store_query(void* ctx,
const char* jsonQuery,
const char* peerAddr,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_store_query(void *ctx,
FFICallBack callback,
void *userData,
const char *jsonQuery,
const char *peerAddr,
int timeoutMs);
int waku_connect(void* ctx,
const char* peerMultiAddr,
unsigned int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_connect(void *ctx,
FFICallBack callback,
void *userData,
const char *peerMultiAddr,
unsigned int timeoutMs);
int waku_disconnect_peer_by_id(void* ctx,
const char* peerId,
WakuCallBack callback,
void* userData);
int waku_disconnect_peer_by_id(void *ctx,
FFICallBack callback,
void *userData,
const char *peerId);
int waku_disconnect_all_peers(void* ctx,
WakuCallBack callback,
void* userData);
int waku_disconnect_all_peers(void *ctx,
FFICallBack callback,
void *userData);
int waku_dial_peer(void* ctx,
const char* peerMultiAddr,
const char* protocol,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_dial_peer(void *ctx,
FFICallBack callback,
void *userData,
const char *peerMultiAddr,
const char *protocol,
int timeoutMs);
int waku_dial_peer_by_id(void* ctx,
const char* peerId,
const char* protocol,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_dial_peer_by_id(void *ctx,
FFICallBack callback,
void *userData,
const char *peerId,
const char *protocol,
int timeoutMs);
int waku_get_peerids_from_peerstore(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_peerids_from_peerstore(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_connected_peers_info(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_connected_peers_info(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_peerids_by_protocol(void* ctx,
const char* protocol,
WakuCallBack callback,
void* userData);
int waku_get_peerids_by_protocol(void *ctx,
FFICallBack callback,
void *userData,
const char *protocol);
int waku_listen_addresses(void* ctx,
WakuCallBack callback,
void* userData);
int waku_listen_addresses(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_connected_peers(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_connected_peers(void *ctx,
FFICallBack callback,
void *userData);
// Returns a list of multiaddress given a url to a DNS discoverable ENR tree
// Parameters
// char* entTreeUrl: URL containing a discoverable ENR tree
// char* nameDnsServer: The nameserver to resolve the ENR tree url.
// int timeoutMs: Timeout value in milliseconds to execute the call.
int waku_dns_discovery(void* ctx,
const char* entTreeUrl,
const char* nameDnsServer,
int timeoutMs,
WakuCallBack callback,
void* userData);
// Returns a list of multiaddress given a url to a DNS discoverable ENR tree
// Parameters
// char* entTreeUrl: URL containing a discoverable ENR tree
// char* nameDnsServer: The nameserver to resolve the ENR tree url.
// int timeoutMs: Timeout value in milliseconds to execute the call.
int waku_dns_discovery(void *ctx,
FFICallBack callback,
void *userData,
const char *entTreeUrl,
const char *nameDnsServer,
int timeoutMs);
// Updates the bootnode list used for discovering new peers via DiscoveryV5
// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
int waku_discv5_update_bootnodes(void* ctx,
char* bootnodes,
WakuCallBack callback,
void* userData);
// Updates the bootnode list used for discovering new peers via DiscoveryV5
// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
int waku_discv5_update_bootnodes(void *ctx,
FFICallBack callback,
void *userData,
char *bootnodes);
int waku_start_discv5(void* ctx,
WakuCallBack callback,
void* userData);
int waku_start_discv5(void *ctx,
FFICallBack callback,
void *userData);
int waku_stop_discv5(void* ctx,
WakuCallBack callback,
void* userData);
int waku_stop_discv5(void *ctx,
FFICallBack callback,
void *userData);
// Retrieves the ENR information
int waku_get_my_enr(void* ctx,
WakuCallBack callback,
void* userData);
// Retrieves the ENR information
int waku_get_my_enr(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_my_peerid(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_my_peerid(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_metrics(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_metrics(void *ctx,
FFICallBack callback,
void *userData);
int waku_peer_exchange_request(void* ctx,
int numPeers,
WakuCallBack callback,
void* userData);
int waku_peer_exchange_request(void *ctx,
FFICallBack callback,
void *userData,
int numPeers);
int waku_ping_peer(void* ctx,
const char* peerAddr,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_ping_peer(void *ctx,
FFICallBack callback,
void *userData,
const char *peerAddr,
int timeoutMs);
int waku_is_online(void* ctx,
WakuCallBack callback,
void* userData);
int waku_is_online(void *ctx,
FFICallBack callback,
void *userData);
#ifdef __cplusplus
}
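Putting the reordered header together, a hedged end-to-end C sketch of the node lifecycle; the configuration JSON and the pubsub topic are placeholders, since the accepted config keys and topic layout are defined elsewhere:

#include <stdio.h>
#include "libwaku.h"   // assumed header name

static void on_resp(int callerRet, const char *msg, size_t len, void *userData) {
    printf("[%s] ret=%d %.*s\n", (const char *)userData, callerRet, (int)len, msg);
}

static void on_event(int callerRet, const char *msg, size_t len, void *userData) {
    printf("event: %.*s\n", (int)len, msg);
}

int main(void) {
    // Placeholder configuration; the accepted keys come from the node config, not this header.
    void *ctx = waku_new("{\"relay\": true}", on_resp, (void *)"new");
    if (ctx == NULL)
        return 1;

    set_event_callback(ctx, on_event, NULL);
    waku_start(ctx, on_resp, (void *)"start");
    waku_relay_subscribe(ctx, on_resp, (void *)"subscribe", "/waku/2/rs/16/32");  // placeholder topic

    // ... application work ...

    waku_stop(ctx, on_resp, (void *)"stop");
    waku_destroy(ctx, on_resp, (void *)"destroy");
    return 0;
}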

View File

@ -1,107 +1,35 @@
{.pragma: exported, exportc, cdecl, raises: [].}
{.pragma: callback, cdecl, raises: [], gcsafe.}
{.passc: "-fPIC".}
when defined(linux):
{.passl: "-Wl,-soname,libwaku.so".}
import std/[json, atomics, strformat, options, atomics]
import chronicles, chronos, chronos/threadsync
import std/[atomics, options, atomics, macros]
import chronicles, chronos, chronos/threadsync, ffi
import
waku/common/base64,
waku/waku_core/message/message,
waku/node/waku_node,
waku/node/peer_manager,
waku/waku_core/topics/pubsub_topic,
waku/waku_core/subscription/push_handler,
waku/waku_relay,
./events/json_message_event,
./waku_context,
./waku_thread_requests/requests/node_lifecycle_request,
./waku_thread_requests/requests/peer_manager_request,
./waku_thread_requests/requests/protocols/relay_request,
./waku_thread_requests/requests/protocols/store_request,
./waku_thread_requests/requests/protocols/lightpush_request,
./waku_thread_requests/requests/protocols/filter_request,
./waku_thread_requests/requests/debug_node_request,
./waku_thread_requests/requests/discovery_request,
./waku_thread_requests/requests/ping_request,
./waku_thread_requests/waku_thread_request,
./alloc,
./ffi_types,
../waku/factory/app_callbacks
./events/json_topic_health_change_event,
./events/json_connection_change_event,
../waku/factory/app_callbacks,
waku/factory/waku,
waku/node/waku_node,
./declare_lib
################################################################################
### Wrapper around the waku node
################################################################################
################################################################################
### Not-exported components
template checkLibwakuParams*(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
) =
if not isNil(ctx):
ctx[].userData = userData
if isNil(callback):
return RET_MISSING_CALLBACK
proc handleRequest(
ctx: ptr WakuContext,
requestType: RequestType,
content: pointer,
callback: WakuCallBack,
userData: pointer,
): cint =
waku_context.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
return RET_OK
### End of not-exported components
################################################################################
################################################################################
### Library setup
# Every Nim library must have this function called - the name is derived from
# the `--nimMainPrefix` command line option
proc libwakuNimMain() {.importc.}
# To control when the library has been initialized
var initialized: Atomic[bool]
if defined(android):
# Redirect chronicles to Android System logs
when compiles(defaultChroniclesStream.outputs[0].writer):
defaultChroniclesStream.outputs[0].writer = proc(
logLevel: LogLevel, msg: LogOutputStr
) {.raises: [].} =
echo logLevel, msg
proc initializeLibrary() {.exported.} =
if not initialized.exchange(true):
## Every Nim library needs to call `<yourprefix>NimMain` once exactly, to initialize the Nim runtime.
## Being `<yourprefix>` the value given in the optional compilation flag --nimMainPrefix:yourprefix
libwakuNimMain()
when declared(setupForeignThreadGc):
setupForeignThreadGc()
when declared(nimGC_setStackBottom):
var locals {.volatile, noinit.}: pointer
locals = addr(locals)
nimGC_setStackBottom(locals)
### End of library setup
################################################################################
## Include different APIs, i.e. all procs with {.ffi.} pragma
include
./kernel_api/peer_manager_api,
./kernel_api/discovery_api,
./kernel_api/node_lifecycle_api,
./kernel_api/debug_node_api,
./kernel_api/ping_api,
./kernel_api/protocols/relay_api,
./kernel_api/protocols/store_api,
./kernel_api/protocols/lightpush_api,
./kernel_api/protocols/filter_api
################################################################################
### Exported procs
proc waku_new(
configJson: cstring, callback: WakuCallback, userData: pointer
configJson: cstring, callback: FFICallback, userData: pointer
): pointer {.dynlib, exportc, cdecl.} =
initializeLibrary()
@ -111,41 +39,50 @@ proc waku_new(
return nil
## Create the Waku thread that will keep waiting for req from the main thread.
var ctx = waku_context.createWakuContext().valueOr:
let msg = "Error in createWakuContext: " & $error
var ctx = ffi.createFFIContext[Waku]().valueOr:
let msg = "Error in createFFIContext: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil
ctx.userData = userData
proc onReceivedMessage(ctx: ptr FFIContext): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
proc onTopicHealthChange(ctx: ptr FFIContext): TopicHealthChangeHandler =
return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
callEventCallback(ctx, "onTopicHealthChange"):
$JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)
proc onConnectionChange(ctx: ptr FFIContext): ConnectionChangeHandler =
return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
callEventCallback(ctx, "onConnectionChange"):
$JsonConnectionChangeEvent.new($peerId, peerEvent)
let appCallbacks = AppCallbacks(
relayHandler: onReceivedMessage(ctx),
topicHealthChangeHandler: onTopicHealthChange(ctx),
connectionChangeHandler: onConnectionChange(ctx),
)
let retCode = handleRequest(
ctx,
RequestType.LIFECYCLE,
NodeLifecycleRequest.createShared(
NodeLifecycleMsgType.CREATE_NODE, configJson, appCallbacks
),
callback,
userData,
)
if retCode == RET_ERR:
ffi.sendRequestToFFIThread(
ctx, CreateNodeRequest.ffiNewReq(callback, userData, configJson, appCallbacks)
).isOkOr:
let msg = "error in sendRequestToFFIThread: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil
return ctx
proc waku_destroy(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
checkParams(ctx, callback, userData)
waku_context.destroyWakuContext(ctx).isOkOr:
ffi.destroyFFIContext(ctx).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
@ -155,699 +92,5 @@ proc waku_destroy(
return RET_OK
proc waku_version(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
callback(
RET_OK,
cast[ptr cchar](WakuNodeVersionString),
cast[csize_t](len(WakuNodeVersionString)),
userData,
)
return RET_OK
proc waku_set_event_callback(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
) {.dynlib, exportc.} =
initializeLibrary()
ctx[].eventCallback = cast[pointer](callback)
ctx[].eventUserData = userData
proc waku_content_topic(
ctx: ptr WakuContext,
appName: cstring,
appVersion: cuint,
contentTopicName: cstring,
encoding: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let contentTopic = fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}"
callback(
RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData
)
return RET_OK
proc waku_pubsub_topic(
ctx: ptr WakuContext, topicName: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc, cdecl.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let outPubsubTopic = fmt"/waku/2/{$topicName}"
callback(
RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData
)
return RET_OK
proc waku_default_pubsub_topic(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
callback(
RET_OK,
cast[ptr cchar](DefaultPubsubTopic),
cast[csize_t](len(DefaultPubsubTopic)),
userData,
)
return RET_OK
proc waku_relay_publish(
ctx: ptr WakuContext,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc, cdecl.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}"
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
let wakuMessage = jsonMessage.toWakuMessage().valueOr:
let msg = "Problem building the WakuMessage: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.PUBLISH, pubSubTopic, nil, wakuMessage),
callback,
userData,
)
proc waku_start(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.LIFECYCLE,
NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE),
callback,
userData,
)
proc waku_stop(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.LIFECYCLE,
NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP_NODE),
callback,
userData,
)
proc waku_relay_subscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
var cb = onReceivedMessage(ctx)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)),
callback,
userData,
)
proc waku_relay_add_protected_shard(
ctx: ptr WakuContext,
clusterId: cint,
shardId: cint,
publicKey: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(
RelayMsgType.ADD_PROTECTED_SHARD,
clusterId = clusterId,
shardId = shardId,
publicKey = publicKey,
),
callback,
userData,
)
proc waku_relay_unsubscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(
RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx))
),
callback,
userData,
)
proc waku_relay_get_num_connected_peers(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_connected_peers(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_num_peers_in_mesh(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_peers_in_mesh(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_filter_subscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
contentTopics: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.FILTER,
FilterRequest.createShared(
FilterMsgType.SUBSCRIBE,
pubSubTopic,
contentTopics,
FilterPushHandler(onReceivedMessage(ctx)),
),
callback,
userData,
)
proc waku_filter_unsubscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
contentTopics: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.FILTER,
FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE, pubSubTopic, contentTopics),
callback,
userData,
)
proc waku_filter_unsubscribe_all(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.FILTER,
FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE_ALL),
callback,
userData,
)
proc waku_lightpush_publish(
ctx: ptr WakuContext,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}"
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
let wakuMessage = jsonMessage.toWakuMessage().valueOr:
let msg = "Problem building the WakuMessage: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
handleRequest(
ctx,
RequestType.LIGHTPUSH,
LightpushRequest.createShared(LightpushMsgType.PUBLISH, pubSubTopic, wakuMessage),
callback,
userData,
)
proc waku_connect(
ctx: ptr WakuContext,
peerMultiAddr: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
PeerManagementMsgType.CONNECT_TO, $peerMultiAddr, chronos.milliseconds(timeoutMs)
),
callback,
userData,
)
proc waku_disconnect_peer_by_id(
ctx: ptr WakuContext, peerId: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.DISCONNECT_PEER_BY_ID, peerId = $peerId
),
callback,
userData,
)
proc waku_disconnect_all_peers(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(op = PeerManagementMsgType.DISCONNECT_ALL_PEERS),
callback,
userData,
)
proc waku_dial_peer(
ctx: ptr WakuContext,
peerMultiAddr: cstring,
protocol: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.DIAL_PEER,
peerMultiAddr = $peerMultiAddr,
protocol = $protocol,
),
callback,
userData,
)
proc waku_dial_peer_by_id(
ctx: ptr WakuContext,
peerId: cstring,
protocol: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.DIAL_PEER_BY_ID, peerId = $peerId, protocol = $protocol
),
callback,
userData,
)
proc waku_get_peerids_from_peerstore(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(PeerManagementMsgType.GET_ALL_PEER_IDS),
callback,
userData,
)
proc waku_get_connected_peers_info(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS_INFO),
callback,
userData,
)
proc waku_get_connected_peers(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS),
callback,
userData,
)
proc waku_get_peerids_by_protocol(
ctx: ptr WakuContext, protocol: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.GET_PEER_IDS_BY_PROTOCOL, protocol = $protocol
),
callback,
userData,
)
proc waku_store_query(
ctx: ptr WakuContext,
jsonQuery: cstring,
peerAddr: cstring,
timeoutMs: cint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
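## Sends a Store query to the given peer.
## jsonQuery - JSON-encoded store query
## timeoutMs - query timeout in milliseconds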
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.STORE,
StoreRequest.createShared(StoreReqType.REMOTE_QUERY, jsonQuery, peerAddr, timeoutMs),
callback,
userData,
)
proc waku_listen_addresses(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_LISTENING_ADDRESSES),
callback,
userData,
)
proc waku_dns_discovery(
ctx: ptr WakuContext,
enrTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
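## Retrieves bootstrap nodes through DNS discovery.
## enrTreeUrl - URL of the ENR tree to resolve
## nameDnsServer - IP address of the DNS server used for the resolution
## On success, returns a comma-separated string of the discovered multiaddresses.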
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createRetrieveBootstrapNodesRequest(
DiscoveryMsgType.GET_BOOTSTRAP_NODES, enrTreeUrl, nameDnsServer, timeoutMs
),
callback,
userData,
)
proc waku_discv5_update_bootnodes(
ctx: ptr WakuContext, bootnodes: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
## Updates the bootnode list used for discovering new peers via DiscoveryV5
## bootnodes - JSON array containing the bootnode ENRs, e.g. `["enr:...", "enr:..."]`
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createUpdateBootstrapNodesRequest(
DiscoveryMsgType.UPDATE_DISCV5_BOOTSTRAP_NODES, bootnodes
),
callback,
userData,
)
proc waku_get_my_enr(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_ENR),
callback,
userData,
)
proc waku_get_my_peerid(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_PEER_ID),
callback,
userData,
)
proc waku_get_metrics(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_METRICS),
callback,
userData,
)
proc waku_start_discv5(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createDiscV5StartRequest(),
callback,
userData,
)
proc waku_stop_discv5(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createDiscV5StopRequest(),
callback,
userData,
)
proc waku_peer_exchange_request(
ctx: ptr WakuContext, numPeers: uint64, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
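## Requests numPeers peers from a Peer Exchange service peer.
## On success, returns the number of peers actually received.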
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createPeerExchangeRequest(numPeers),
callback,
userData,
)
proc waku_ping_peer(
ctx: ptr WakuContext,
peerAddr: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
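## Pings the given peer and, on success, returns the round-trip time in nanoseconds.
## A timeoutMs of 0 disables the timeout.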
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PING,
PingRequest.createShared(peerAddr, chronos.milliseconds(timeoutMs)),
callback,
userData,
)
proc waku_is_online(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_ONLINE_STATE),
callback,
userData,
)
### End of exported procs
################################################################################

View File

@ -1,223 +0,0 @@
{.pragma: exported, exportc, cdecl, raises: [].}
{.pragma: callback, cdecl, raises: [], gcsafe.}
{.passc: "-fPIC".}
import std/[options, atomics, os, net, locks]
import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results
import
waku/common/logging,
waku/factory/waku,
waku/node/peer_manager,
waku/waku_relay/[protocol, topic_health],
waku/waku_core/[topics/pubsub_topic, message],
./waku_thread_requests/[waku_thread_request, requests/debug_node_request],
./ffi_types,
./events/[
json_message_event, json_topic_health_change_event, json_connection_change_event,
json_waku_not_responding_event,
]
type WakuContext* = object
wakuThread: Thread[(ptr WakuContext)]
watchdogThread: Thread[(ptr WakuContext)]
# monitors the Waku thread and notifies the Waku SDK consumer if it hangs
lock: Lock
reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest]
reqSignal: ThreadSignalPtr
# to inform the Waku Thread (a.k.a. TWT) that a new request has been sent
reqReceivedSignal: ThreadSignalPtr
# to inform the main thread that the request has been received by the TWT
userData*: pointer
eventCallback*: pointer
eventUserdata*: pointer
running: Atomic[bool] # To control when the threads are running
const git_version* {.strdefine.} = "n/a"
const versionString = "version / git commit hash: " & waku.git_version
template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) =
if isNil(ctx[].eventCallback):
error eventName & " - eventCallback is nil"
return
foreignThreadGc:
try:
let event = body
cast[WakuCallBack](ctx[].eventCallback)(
RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
)
except Exception, CatchableError:
let msg =
"Exception " & eventName & " when calling 'eventCallBack': " &
getCurrentExceptionMsg()
cast[WakuCallBack](ctx[].eventCallback)(
RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
)
proc onConnectionChange*(ctx: ptr WakuContext): ConnectionChangeHandler =
return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
callEventCallback(ctx, "onConnectionChange"):
$JsonConnectionChangeEvent.new($peerId, peerEvent)
proc onReceivedMessage*(ctx: ptr WakuContext): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
proc onTopicHealthChange*(ctx: ptr WakuContext): TopicHealthChangeHandler =
return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
callEventCallback(ctx, "onTopicHealthChange"):
$JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)
proc onWakuNotResponding*(ctx: ptr WakuContext) =
callEventCallback(ctx, "onWakuNotResponsive"):
$JsonWakuNotRespondingEvent.new()
proc sendRequestToWakuThread*(
ctx: ptr WakuContext,
reqType: RequestType,
reqContent: pointer,
callback: WakuCallBack,
userData: pointer,
timeout = InfiniteDuration,
): Result[void, string] =
ctx.lock.acquire()
# This lock is only necessary while we use an SPSC channel and while the signalling
# between threads assumes that there are no concurrent requests.
# Rearchitecting the signalling and migrating to a multi-producer channel would allow us
# to receive requests concurrently and spare us the need for locks.
defer:
ctx.lock.release()
let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData)
## Sending the request
let sentOk = ctx.reqChannel.trySend(req)
if not sentOk:
deallocShared(req)
return err("Couldn't send a request to the waku thread: " & $req[])
let fireSync = ctx.reqSignal.fireSync().valueOr:
deallocShared(req)
return err("failed fireSync: " & $error)
if not fireSync:
deallocShared(req)
return err("Couldn't fireSync in time")
## wait until the Waku Thread properly received the request
ctx.reqReceivedSignal.waitSync(timeout).isOkOr:
deallocShared(req)
return err("Couldn't receive reqReceivedSignal signal")
## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the
## process proc. See the 'waku_thread_request.nim' module for more details.
ok()
proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} =
## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs.
let watchdogRun = proc(ctx: ptr WakuContext) {.async.} =
const WatchdogStartDelay = 10.seconds
const WatchdogTimeinterval = 1.seconds
const WakuNotRespondingTimeout = 3.seconds
# Give time for the node to be created and up before sending watchdog requests
await sleepAsync(WatchdogStartDelay)
while true:
await sleepAsync(WatchdogTimeinterval)
if ctx.running.load == false:
info "Watchdog thread exiting because WakuContext is not running"
break
let wakuCallback = proc(
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].} =
discard ## Don't do anything. Just respecting the callback signature.
const nilUserData = nil
trace "Sending watchdog request to Waku thread"
sendRequestToWakuThread(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED),
wakuCallback,
nilUserData,
WakuNotRespondingTimeout,
).isOkOr:
error "Failed to send watchdog request to Waku thread", error = $error
onWakuNotResponding(ctx)
waitFor watchdogRun(ctx)
proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} =
## Waku thread that attends library user requests (stop, connect_to, etc.)
logging.setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT)
let wakuRun = proc(ctx: ptr WakuContext) {.async.} =
var waku: Waku
while true:
await ctx.reqSignal.wait()
if ctx.running.load == false:
break
## Trying to get a request from the libwaku requestor thread
var request: ptr WakuThreadRequest
let recvOk = ctx.reqChannel.tryRecv(request)
if not recvOk:
error "waku thread could not receive a request"
continue
## Handle the request
asyncSpawn WakuThreadRequest.process(request, addr waku)
ctx.reqReceivedSignal.fireSync().isOkOr:
error "could not fireSync back to requester thread", error = error
waitFor wakuRun(ctx)
proc createWakuContext*(): Result[ptr WakuContext, string] =
## This proc is called from the main thread and it creates
## the Waku working thread.
var ctx = createShared(WakuContext, 1)
ctx.reqSignal = ThreadSignalPtr.new().valueOr:
return err("couldn't create reqSignal ThreadSignalPtr")
ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr:
return err("couldn't create reqReceivedSignal ThreadSignalPtr")
ctx.lock.initLock()
ctx.running.store(true)
try:
createThread(ctx.wakuThread, wakuThreadBody, ctx)
except ValueError, ResourceExhaustedError:
freeShared(ctx)
return err("failed to create the Waku thread: " & getCurrentExceptionMsg())
try:
createThread(ctx.watchdogThread, watchdogThreadBody, ctx)
except ValueError, ResourceExhaustedError:
freeShared(ctx)
return err("failed to create the watchdog thread: " & getCurrentExceptionMsg())
return ok(ctx)
proc destroyWakuContext*(ctx: ptr WakuContext): Result[void, string] =
ctx.running.store(false)
let signaledOnTime = ctx.reqSignal.fireSync().valueOr:
return err("error in destroyWakuContext: " & $error)
if not signaledOnTime:
return err("failed to signal reqSignal on time in destroyWakuContext")
joinThread(ctx.wakuThread)
joinThread(ctx.watchdogThread)
ctx.lock.deinitLock()
?ctx.reqSignal.close()
?ctx.reqReceivedSignal.close()
freeShared(ctx)
return ok()

View File

@ -1,63 +0,0 @@
import std/json
import
chronicles,
chronos,
results,
eth/p2p/discoveryv5/enr,
strutils,
libp2p/peerid,
metrics
import
../../../waku/factory/waku,
../../../waku/node/waku_node,
../../../waku/node/health_monitor
type DebugNodeMsgType* = enum
RETRIEVE_LISTENING_ADDRESSES
RETRIEVE_MY_ENR
RETRIEVE_MY_PEER_ID
RETRIEVE_METRICS
RETRIEVE_ONLINE_STATE
CHECK_WAKU_NOT_BLOCKED
type DebugNodeRequest* = object
operation: DebugNodeMsgType
proc createShared*(T: type DebugNodeRequest, op: DebugNodeMsgType): ptr type T =
var ret = createShared(T)
ret[].operation = op
return ret
proc destroyShared(self: ptr DebugNodeRequest) =
deallocShared(self)
proc getMultiaddresses(node: WakuNode): seq[string] =
return node.info().listenAddresses
proc getMetrics(): string =
{.gcsafe.}:
return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module
proc process*(
self: ptr DebugNodeRequest, waku: Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of RETRIEVE_LISTENING_ADDRESSES:
## returns a comma-separated string of the listen addresses
return ok(waku.node.getMultiaddresses().join(","))
of RETRIEVE_MY_ENR:
return ok(waku.node.enr.toURI())
of RETRIEVE_MY_PEER_ID:
return ok($waku.node.peerId())
of RETRIEVE_METRICS:
return ok(getMetrics())
of RETRIEVE_ONLINE_STATE:
return ok($waku.healthMonitor.onlineMonitor.amIOnline())
of CHECK_WAKU_NOT_BLOCKED:
return ok("waku thread is not blocked")
error "unsupported operation in DebugNodeRequest"
return err("unsupported operation in DebugNodeRequest")

View File

@ -1,151 +0,0 @@
import std/json
import chronos, chronicles, results, strutils, libp2p/multiaddress
import
../../../waku/factory/waku,
../../../waku/discovery/waku_dnsdisc,
../../../waku/discovery/waku_discv5,
../../../waku/waku_core/peers,
../../../waku/node/waku_node,
../../../waku/node/kernel_api,
../../alloc
type DiscoveryMsgType* = enum
GET_BOOTSTRAP_NODES
UPDATE_DISCV5_BOOTSTRAP_NODES
START_DISCV5
STOP_DISCV5
PEER_EXCHANGE
type DiscoveryRequest* = object
operation: DiscoveryMsgType
## used in GET_BOOTSTRAP_NODES
enrTreeUrl: cstring
nameDnsServer: cstring
timeoutMs: cint
## used in UPDATE_DISCV5_BOOTSTRAP_NODES
nodes: cstring
## used in PEER_EXCHANGE
numPeers: uint64
proc createShared(
T: type DiscoveryRequest,
op: DiscoveryMsgType,
enrTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
nodes: cstring,
numPeers: uint64,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].enrTreeUrl = enrTreeUrl.alloc()
ret[].nameDnsServer = nameDnsServer.alloc()
ret[].timeoutMs = timeoutMs
ret[].nodes = nodes.alloc()
ret[].numPeers = numPeers
return ret
proc createRetrieveBootstrapNodesRequest*(
T: type DiscoveryRequest,
op: DiscoveryMsgType,
enrTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
): ptr type T =
return T.createShared(op, enrTreeUrl, nameDnsServer, timeoutMs, "", 0)
proc createUpdateBootstrapNodesRequest*(
T: type DiscoveryRequest, op: DiscoveryMsgType, nodes: cstring
): ptr type T =
return T.createShared(op, "", "", 0, nodes, 0)
proc createDiscV5StartRequest*(T: type DiscoveryRequest): ptr type T =
return T.createShared(START_DISCV5, "", "", 0, "", 0)
proc createDiscV5StopRequest*(T: type DiscoveryRequest): ptr type T =
return T.createShared(STOP_DISCV5, "", "", 0, "", 0)
proc createPeerExchangeRequest*(
T: type DiscoveryRequest, numPeers: uint64
): ptr type T =
return T.createShared(PEER_EXCHANGE, "", "", 0, "", numPeers)
proc destroyShared(self: ptr DiscoveryRequest) =
deallocShared(self[].enrTreeUrl)
deallocShared(self[].nameDnsServer)
deallocShared(self[].nodes)
deallocShared(self)
proc retrieveBootstrapNodes(
enrTreeUrl: string, ipDnsServer: string
): Future[Result[seq[string], string]] {.async.} =
let dnsNameServers = @[parseIpAddress(ipDnsServer)]
let discoveredPeers: seq[RemotePeerInfo] = (
await retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers)
).valueOr:
return err("failed discovering peers from DNS: " & $error)
var multiAddresses = newSeq[string]()
for discPeer in discoveredPeers:
for address in discPeer.addrs:
multiAddresses.add($address & "/p2p/" & $discPeer)
return ok(multiAddresses)
proc updateDiscv5BootstrapNodes(nodes: string, waku: ptr Waku): Result[void, string] =
waku.wakuDiscv5.updateBootstrapRecords(nodes).isOkOr:
return err("error in updateDiscv5BootstrapNodes: " & $error)
return ok()
proc performPeerExchangeRequestTo(
numPeers: uint64, waku: ptr Waku
): Future[Result[int, string]] {.async.} =
let numPeersRecv = (await waku.node.fetchPeerExchangePeers(numPeers)).valueOr:
return err($error)
return ok(numPeersRecv)
proc process*(
self: ptr DiscoveryRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of START_DISCV5:
let res = await waku.wakuDiscv5.start()
res.isOkOr:
error "START_DISCV5 failed", error = error
return err($error)
return ok("discv5 started correctly")
of STOP_DISCV5:
await waku.wakuDiscv5.stop()
return ok("discv5 stopped correctly")
of GET_BOOTSTRAP_NODES:
let nodes = (
await retrieveBootstrapNodes($self[].enrTreeUrl, $self[].nameDnsServer)
).valueOr:
error "GET_BOOTSTRAP_NODES failed", error = error
return err($error)
## returns a comma-separated string of bootstrap nodes' multiaddresses
return ok(nodes.join(","))
of UPDATE_DISCV5_BOOTSTRAP_NODES:
updateDiscv5BootstrapNodes($self[].nodes, waku).isOkOr:
error "UPDATE_DISCV5_BOOTSTRAP_NODES failed", error = error
return err($error)
return ok("discovery request processed correctly")
of PEER_EXCHANGE:
let numValidPeers = (await performPeerExchangeRequestTo(self[].numPeers, waku)).valueOr:
error "PEER_EXCHANGE failed", error = error
return err($error)
return ok($numValidPeers)
error "discovery request not handled"
return err("discovery request not handled")

View File

@ -1,135 +0,0 @@
import std/[sequtils, strutils, tables]
import chronicles, chronos, results, options, json
import
../../../waku/factory/waku,
../../../waku/node/waku_node,
../../alloc,
../../../waku/node/peer_manager
type PeerManagementMsgType* {.pure.} = enum
CONNECT_TO
GET_ALL_PEER_IDS
GET_CONNECTED_PEERS_INFO
GET_PEER_IDS_BY_PROTOCOL
DISCONNECT_PEER_BY_ID
DISCONNECT_ALL_PEERS
DIAL_PEER
DIAL_PEER_BY_ID
GET_CONNECTED_PEERS
type PeerManagementRequest* = object
operation: PeerManagementMsgType
peerMultiAddr: cstring
dialTimeout: Duration
protocol: cstring
peerId: cstring
type PeerInfo = object
protocols: seq[string]
addresses: seq[string]
proc createShared*(
T: type PeerManagementRequest,
op: PeerManagementMsgType,
peerMultiAddr = "",
dialTimeout = chronos.milliseconds(0), ## arbitrary Duration as not all ops need dialTimeout
peerId = "",
protocol = "",
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].peerMultiAddr = peerMultiAddr.alloc()
ret[].peerId = peerId.alloc()
ret[].protocol = protocol.alloc()
ret[].dialTimeout = dialTimeout
return ret
proc destroyShared(self: ptr PeerManagementRequest) =
if not isNil(self[].peerMultiAddr):
deallocShared(self[].peerMultiAddr)
if not isNil(self[].peerId):
deallocShared(self[].peerId)
if not isNil(self[].protocol):
deallocShared(self[].protocol)
deallocShared(self)
proc process*(
self: ptr PeerManagementRequest, waku: Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of CONNECT_TO:
let peers = ($self[].peerMultiAddr).split(",").mapIt(strip(it))
await waku.node.connectToNodes(peers, source = "static")
return ok("")
of GET_ALL_PEER_IDS:
## returns a comma-separated string of peerIDs
let peerIDs =
waku.node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",")
return ok(peerIDs)
of GET_CONNECTED_PEERS_INFO:
## returns a JSON string mapping peerIDs to objects with protocols and addresses
var peersMap = initTable[string, PeerInfo]()
let peers = waku.node.peerManager.switch.peerStore.peers().filterIt(
it.connectedness == Connected
)
# Build a map of peer IDs to peer info objects
for peer in peers:
let peerIdStr = $peer.peerId
peersMap[peerIdStr] =
PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it))
# Convert the map to JSON string
let jsonObj = %*peersMap
let jsonStr = $jsonObj
return ok(jsonStr)
of GET_PEER_IDS_BY_PROTOCOL:
## returns a comma-separated string of peerIDs that mount the given protocol
let connectedPeers = waku.node.peerManager.switch.peerStore
.peers($self[].protocol)
.filterIt(it.connectedness == Connected)
.mapIt($it.peerId)
.join(",")
return ok(connectedPeers)
of DISCONNECT_PEER_BY_ID:
let peerId = PeerId.init($self[].peerId).valueOr:
error "DISCONNECT_PEER_BY_ID failed", error = $error
return err($error)
await waku.node.peerManager.disconnectNode(peerId)
return ok("")
of DISCONNECT_ALL_PEERS:
await waku.node.peerManager.disconnectAllPeers()
return ok("")
of DIAL_PEER:
let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr:
error "DIAL_PEER failed", error = $error
return err($error)
let conn = await waku.node.peerManager.dialPeer(remotePeerInfo, $self[].protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER failed", error = msg, peerId = $remotePeerInfo.peerId
return err(msg)
of DIAL_PEER_BY_ID:
let peerId = PeerId.init($self[].peerId).valueOr:
error "DIAL_PEER_BY_ID failed", error = $error
return err($error)
let conn = await waku.node.peerManager.dialPeer(peerId, $self[].protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER_BY_ID failed", error = msg, peerId = $peerId
return err(msg)
of GET_CONNECTED_PEERS:
## returns a comma-separated string of peerIDs
let
(inPeerIds, outPeerIds) = waku.node.peerManager.connectedPeers()
connectedPeerids = concat(inPeerIds, outPeerIds)
return ok(connectedPeerids.mapIt($it).join(","))
return ok("")

View File

@ -1,54 +0,0 @@
import std/[json, strutils]
import chronos, results
import libp2p/[protocols/ping, switch, multiaddress, multicodec]
import ../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../alloc
type PingRequest* = object
peerAddr: cstring
timeout: Duration
proc createShared*(
T: type PingRequest, peerAddr: cstring, timeout: Duration
): ptr type T =
var ret = createShared(T)
ret[].peerAddr = peerAddr.alloc()
ret[].timeout = timeout
return ret
proc destroyShared(self: ptr PingRequest) =
deallocShared(self[].peerAddr)
deallocShared(self)
proc process*(
self: ptr PingRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
let peerInfo = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr:
return err("PingRequest failed to parse peer addr: " & $error)
proc ping(): Future[Result[Duration, string]] {.async, gcsafe.} =
try:
let conn = await waku.node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
defer:
await conn.close()
let pingRTT = await waku.node.libp2pPing.ping(conn)
if pingRTT == 0.nanos:
return err("could not ping peer: rtt-0")
return ok(pingRTT)
except CatchableError:
return err("could not ping peer: " & getCurrentExceptionMsg())
let pingFuture = ping()
let pingRTT: Duration =
if self[].timeout == chronos.milliseconds(0): # No timeout expected
?(await pingFuture)
else:
let timedOut = not (await pingFuture.withTimeout(self[].timeout))
if timedOut:
return err("ping timed out")
?(pingFuture.read())
ok($(pingRTT.nanos))

View File

@ -1,106 +0,0 @@
import options, std/[strutils, sequtils]
import chronicles, chronos, results
import
../../../../waku/waku_filter_v2/client,
../../../../waku/waku_core/message/message,
../../../../waku/factory/waku,
../../../../waku/waku_filter_v2/common,
../../../../waku/waku_core/subscription/push_handler,
../../../../waku/node/peer_manager/peer_manager,
../../../../waku/node/waku_node,
../../../../waku/node/kernel_api,
../../../../waku/waku_core/topics/pubsub_topic,
../../../../waku/waku_core/topics/content_topic,
../../../alloc
type FilterMsgType* = enum
SUBSCRIBE
UNSUBSCRIBE
UNSUBSCRIBE_ALL
type FilterRequest* = object
operation: FilterMsgType
pubsubTopic: cstring
contentTopics: cstring ## comma-separated list of content-topics
filterPushEventCallback: FilterPushHandler ## handles incoming filter pushed msgs
proc createShared*(
T: type FilterRequest,
op: FilterMsgType,
pubsubTopic: cstring = "",
contentTopics: cstring = "",
filterPushEventCallback: FilterPushHandler = nil,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].pubsubTopic = pubsubTopic.alloc()
ret[].contentTopics = contentTopics.alloc()
ret[].filterPushEventCallback = filterPushEventCallback
return ret
proc destroyShared(self: ptr FilterRequest) =
deallocShared(self[].pubsubTopic)
deallocShared(self[].contentTopics)
deallocShared(self)
proc process*(
self: ptr FilterRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
const FilterOpTimeout = 5.seconds
if waku.node.wakuFilterClient.isNil():
let errorMsg = "FilterRequest waku.node.wakuFilterClient is nil"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
case self.operation
of SUBSCRIBE:
waku.node.wakuFilterClient.registerPushHandler(self.filterPushEventCallback)
let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when subscribing"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
let pubsubTopic = some(PubsubTopic($self[].pubsubTopic))
let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it))
let subFut = waku.node.filterSubscribe(pubsubTopic, contentTopics, peer)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter subscription timed out"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
of UNSUBSCRIBE:
let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
let pubsubTopic = some(PubsubTopic($self[].pubsubTopic))
let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it))
let subFut = waku.node.filterUnsubscribe(pubsubTopic, contentTopics, peer)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription timed out"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
of UNSUBSCRIBE_ALL:
let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing all"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
let unsubFut = waku.node.filterUnsubscribeAll(peer)
if not await unsubFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription all timed out"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
return ok("")

View File

@ -1,109 +0,0 @@
import options
import chronicles, chronos, results
import
../../../../waku/waku_core/message/message,
../../../../waku/waku_core/codecs,
../../../../waku/factory/waku,
../../../../waku/waku_core/message,
../../../../waku/waku_core/time, # Timestamp
../../../../waku/waku_core/topics/pubsub_topic,
../../../../waku/waku_lightpush_legacy/client,
../../../../waku/waku_lightpush_legacy/common,
../../../../waku/node/peer_manager/peer_manager,
../../../alloc
type LightpushMsgType* = enum
PUBLISH
type ThreadSafeWakuMessage* = object
payload: SharedSeq[byte]
contentTopic: cstring
meta: SharedSeq[byte]
version: uint32
timestamp: Timestamp
ephemeral: bool
when defined(rln):
proof: SharedSeq[byte]
type LightpushRequest* = object
operation: LightpushMsgType
pubsubTopic: cstring
message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests
proc createShared*(
T: type LightpushRequest,
op: LightpushMsgType,
pubsubTopic: cstring,
m = WakuMessage(),
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].pubsubTopic = pubsubTopic.alloc()
ret[].message = ThreadSafeWakuMessage(
payload: allocSharedSeq(m.payload),
contentTopic: m.contentTopic.alloc(),
meta: allocSharedSeq(m.meta),
version: m.version,
timestamp: m.timestamp,
ephemeral: m.ephemeral,
)
when defined(rln):
ret[].message.proof = allocSharedSeq(m.proof)
return ret
proc destroyShared(self: ptr LightpushRequest) =
deallocSharedSeq(self[].message.payload)
deallocShared(self[].message.contentTopic)
deallocSharedSeq(self[].message.meta)
when defined(rln):
deallocSharedSeq(self[].message.proof)
deallocShared(self)
proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage =
var wakuMessage = WakuMessage()
wakuMessage.payload = m.payload.toSeq()
wakuMessage.contentTopic = $m.contentTopic
wakuMessage.meta = m.meta.toSeq()
wakuMessage.version = m.version
wakuMessage.timestamp = m.timestamp
wakuMessage.ephemeral = m.ephemeral
when defined(rln):
wakuMessage.proof = m.proof
return wakuMessage
proc process*(
self: ptr LightpushRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of PUBLISH:
let msg = self.message.toWakuMessage()
let pubsubTopic = $self.pubsubTopic
if waku.node.wakuLightpushClient.isNil():
let errorMsg = "LightpushRequest waku.node.wakuLightpushClient is nil"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
let peerOpt = waku.node.peerManager.selectPeer(WakuLightPushCodec)
if peerOpt.isNone():
let errorMsg = "failed to lightpublish message, no suitable remote peers"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
let msgHashHex = (
await waku.node.wakuLegacyLightpushClient.publish(
pubsubTopic, msg, peer = peerOpt.get()
)
).valueOr:
error "PUBLISH failed", error = error
return err($error)
return ok(msgHashHex)

View File

@ -1,168 +0,0 @@
import std/[net, sequtils, strutils]
import chronicles, chronos, stew/byteutils, results
import
waku/waku_core/message/message,
waku/factory/[validator_signed, waku],
tools/confutils/cli_args,
waku/waku_node,
waku/waku_core/message,
waku/waku_core/time, # Timestamp
waku/waku_core/topics/pubsub_topic,
waku/waku_core/topics,
waku/waku_relay/protocol,
waku/node/peer_manager
import
../../../alloc
type RelayMsgType* = enum
SUBSCRIBE
UNSUBSCRIBE
PUBLISH
NUM_CONNECTED_PEERS
LIST_CONNECTED_PEERS
## to return the list of all connected peers for a specific pubsub topic
NUM_MESH_PEERS
LIST_MESH_PEERS
## to return the list of only the peers that form the mesh for a particular pubsub topic
ADD_PROTECTED_SHARD ## Protects a shard with a public key
type ThreadSafeWakuMessage* = object
payload: SharedSeq[byte]
contentTopic: cstring
meta: SharedSeq[byte]
version: uint32
timestamp: Timestamp
ephemeral: bool
when defined(rln):
proof: SharedSeq[byte]
type RelayRequest* = object
operation: RelayMsgType
pubsubTopic: cstring
relayEventCallback: WakuRelayHandler # not used in 'PUBLISH' requests
message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests
clusterId: cint # only used in 'ADD_PROTECTED_SHARD' requests
shardId: cint # only used in 'ADD_PROTECTED_SHARD' requests
publicKey: cstring # only used in 'ADD_PROTECTED_SHARD' requests
proc createShared*(
T: type RelayRequest,
op: RelayMsgType,
pubsubTopic: cstring = nil,
relayEventCallback: WakuRelayHandler = nil,
m = WakuMessage(),
clusterId: cint = 0,
shardId: cint = 0,
publicKey: cstring = nil,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].pubsubTopic = pubsubTopic.alloc()
ret[].clusterId = clusterId
ret[].shardId = shardId
ret[].publicKey = publicKey.alloc()
ret[].relayEventCallback = relayEventCallback
ret[].message = ThreadSafeWakuMessage(
payload: allocSharedSeq(m.payload),
contentTopic: m.contentTopic.alloc(),
meta: allocSharedSeq(m.meta),
version: m.version,
timestamp: m.timestamp,
ephemeral: m.ephemeral,
)
when defined(rln):
ret[].message.proof = allocSharedSeq(m.proof)
return ret
proc destroyShared(self: ptr RelayRequest) =
deallocSharedSeq(self[].message.payload)
deallocShared(self[].message.contentTopic)
deallocSharedSeq(self[].message.meta)
when defined(rln):
deallocSharedSeq(self[].message.proof)
deallocShared(self[].pubsubTopic)
deallocShared(self[].publicKey)
deallocShared(self)
proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage =
var wakuMessage = WakuMessage()
wakuMessage.payload = m.payload.toSeq()
wakuMessage.contentTopic = $m.contentTopic
wakuMessage.meta = m.meta.toSeq()
wakuMessage.version = m.version
wakuMessage.timestamp = m.timestamp
wakuMessage.ephemeral = m.ephemeral
when defined(rln):
wakuMessage.proof = m.proof
return wakuMessage
proc process*(
self: ptr RelayRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
if waku.node.wakuRelay.isNil():
return err("Operation not supported without Waku Relay enabled.")
case self.operation
of SUBSCRIBE:
waku.node.subscribe(
(kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic),
handler = self.relayEventCallback,
).isOkOr:
error "SUBSCRIBE failed", error
return err($error)
of UNSUBSCRIBE:
waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr:
error "UNSUBSCRIBE failed", error
return err($error)
of PUBLISH:
let msg = self.message.toWakuMessage()
let pubsubTopic = $self.pubsubTopic
(await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
error "PUBLISH failed", error
return err($error)
let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex
return ok(msgHash)
of NUM_CONNECTED_PEERS:
let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr:
error "NUM_CONNECTED_PEERS failed", error
return err($error)
return ok($numConnPeers)
of LIST_CONNECTED_PEERS:
let connPeers = waku.node.wakuRelay.getConnectedPeers($self.pubsubTopic).valueOr:
error "LIST_CONNECTED_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(connPeers.mapIt($it).join(","))
of NUM_MESH_PEERS:
let numPeersInMesh = waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr:
error "NUM_MESH_PEERS failed", error = error
return err($error)
return ok($numPeersInMesh)
of LIST_MESH_PEERS:
let meshPeers = waku.node.wakuRelay.getPeersInMesh($self.pubsubTopic).valueOr:
error "LIST_MESH_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(meshPeers.mapIt($it).join(","))
of ADD_PROTECTED_SHARD:
try:
let relayShard =
RelayShard(clusterId: uint16(self.clusterId), shardId: uint16(self.shardId))
let protectedShard =
ProtectedShard.parseCmdArg($relayShard & ":" & $self.publicKey)
waku.node.wakuRelay.addSignedShardsValidator(
@[protectedShard], uint16(self.clusterId)
)
except ValueError:
return err(getCurrentExceptionMsg())
return ok("")

View File

@ -1,104 +0,0 @@
## This file contains the base request type used for inter-thread messaging.
## Requests are created by the main thread and processed by
## the Waku Thread.
import std/json, results
import chronos, chronos/threadsync
import
../../waku/factory/waku,
../ffi_types,
./requests/node_lifecycle_request,
./requests/peer_manager_request,
./requests/protocols/relay_request,
./requests/protocols/store_request,
./requests/protocols/lightpush_request,
./requests/protocols/filter_request,
./requests/debug_node_request,
./requests/discovery_request,
./requests/ping_request
type RequestType* {.pure.} = enum
LIFECYCLE
PEER_MANAGER
PING
RELAY
STORE
DEBUG
DISCOVERY
LIGHTPUSH
FILTER
type WakuThreadRequest* = object
reqType: RequestType
reqContent: pointer
callback: WakuCallBack
userData: pointer
proc createShared*(
T: type WakuThreadRequest,
reqType: RequestType,
reqContent: pointer,
callback: WakuCallBack,
userData: pointer,
): ptr type T =
var ret = createShared(T)
ret[].reqType = reqType
ret[].reqContent = reqContent
ret[].callback = callback
ret[].userData = userData
return ret
proc handleRes[T: string | void](
res: Result[T, string], request: ptr WakuThreadRequest
) =
## Handles the Result responses, which can either be Result[string, string] or
## Result[void, string].
defer:
deallocShared(request)
if res.isErr():
foreignThreadGc:
let msg = "libwaku error: handleRes fireSyncRes error: " & $res.error
request[].callback(
RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
)
return
foreignThreadGc:
var msg: cstring = ""
when T is string:
msg = res.get().cstring()
request[].callback(
RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
)
return
proc process*(
T: type WakuThreadRequest, request: ptr WakuThreadRequest, waku: ptr Waku
) {.async.} =
let retFut =
case request[].reqType
of LIFECYCLE:
cast[ptr NodeLifecycleRequest](request[].reqContent).process(waku)
of PEER_MANAGER:
cast[ptr PeerManagementRequest](request[].reqContent).process(waku[])
of PING:
cast[ptr PingRequest](request[].reqContent).process(waku)
of RELAY:
cast[ptr RelayRequest](request[].reqContent).process(waku)
of STORE:
cast[ptr StoreRequest](request[].reqContent).process(waku)
of DEBUG:
cast[ptr DebugNodeRequest](request[].reqContent).process(waku[])
of DISCOVERY:
cast[ptr DiscoveryRequest](request[].reqContent).process(waku)
of LIGHTPUSH:
cast[ptr LightpushRequest](request[].reqContent).process(waku)
of FILTER:
cast[ptr FilterRequest](request[].reqContent).process(waku)
handleRes(await retFut, request)
proc `$`*(self: WakuThreadRequest): string =
return $self.reqType

View File

@ -2,14 +2,51 @@
# Install Anvil
if ! command -v anvil &> /dev/null; then
REQUIRED_FOUNDRY_VERSION="$1"
if command -v anvil &> /dev/null; then
# Foundry is already installed; check the current version.
CURRENT_FOUNDRY_VERSION=$(anvil --version 2>/dev/null | awk '{print $2}')
if [ -n "$CURRENT_FOUNDRY_VERSION" ]; then
# Compare CURRENT_FOUNDRY_VERSION < REQUIRED_FOUNDRY_VERSION using sort -V
lower_version=$(printf '%s\n%s\n' "$CURRENT_FOUNDRY_VERSION" "$REQUIRED_FOUNDRY_VERSION" | sort -V | head -n1)
if [ "$lower_version" != "$REQUIRED_FOUNDRY_VERSION" ]; then
echo "Anvil is already installed with version $CURRENT_FOUNDRY_VERSION, which is older than the required $REQUIRED_FOUNDRY_VERSION. Please update Foundry manually if needed."
fi
fi
else
BASE_DIR="${XDG_CONFIG_HOME:-$HOME}"
FOUNDRY_DIR="${FOUNDRY_DIR:-"$BASE_DIR/.foundry"}"
FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin"
echo "Installing Foundry..."
curl -L https://foundry.paradigm.xyz | bash
# Print the path where foundryup is expected to be installed
echo "foundryup_path: $FOUNDRY_BIN_DIR"
# run foundryup
$FOUNDRY_BIN_DIR/foundryup
# Add Foundry to PATH for this script session
export PATH="$FOUNDRY_BIN_DIR:$PATH"
# Verify foundryup is available
if ! command -v foundryup >/dev/null 2>&1; then
echo "Error: foundryup installation failed or not found in $FOUNDRY_BIN_DIR"
exit 1
fi
# Run foundryup to install the required version
if [ -n "$REQUIRED_FOUNDRY_VERSION" ]; then
echo "Installing Foundry tools version $REQUIRED_FOUNDRY_VERSION..."
foundryup --install "$REQUIRED_FOUNDRY_VERSION"
else
echo "Installing latest Foundry tools..."
foundryup
fi
# Verify anvil was installed
if ! command -v anvil >/dev/null 2>&1; then
echo "Error: anvil installation failed"
exit 1
fi
echo "Anvil successfully installed: $(anvil --version)"
fi

View File

@ -1,8 +1,37 @@
#!/usr/bin/env bash
# Install pnpm
if ! command -v pnpm &> /dev/null; then
echo "pnpm is not installed, installing it now..."
npm i pnpm --global
REQUIRED_PNPM_VERSION="$1"
if command -v pnpm &> /dev/null; then
# pnpm is already installed; check the current version.
CURRENT_PNPM_VERSION=$(pnpm --version 2>/dev/null)
if [ -n "$CURRENT_PNPM_VERSION" ]; then
# Compare CURRENT_PNPM_VERSION < REQUIRED_PNPM_VERSION using sort -V
lower_version=$(printf '%s\n%s\n' "$CURRENT_PNPM_VERSION" "$REQUIRED_PNPM_VERSION" | sort -V | head -n1)
if [ "$lower_version" != "$REQUIRED_PNPM_VERSION" ]; then
echo "pnpm is already installed with version $CURRENT_PNPM_VERSION, which is older than the required $REQUIRED_PNPM_VERSION. Please update pnpm manually if needed."
fi
fi
else
# Install pnpm using npm
if [ -n "$REQUIRED_PNPM_VERSION" ]; then
echo "Installing pnpm version $REQUIRED_PNPM_VERSION..."
npm install -g pnpm@$REQUIRED_PNPM_VERSION
else
echo "Installing latest pnpm..."
npm install -g pnpm
fi
# Verify pnpm was installed
if ! command -v pnpm >/dev/null 2>&1; then
echo "Error: pnpm installation failed"
exit 1
fi
echo "pnpm successfully installed: $(pnpm --version)"
fi

View File

@ -1,7 +1,9 @@
#!/usr/bin/env bash
# Install Anvil
./scripts/install_anvil.sh
FOUNDRY_VERSION="$1"
./scripts/install_anvil.sh "$FOUNDRY_VERSION"
#Install pnpm
./scripts/install_pnpm.sh
# Install pnpm
PNPM_VERSION="$2"
./scripts/install_pnpm.sh "$PNPM_VERSION"

View File

@ -6,6 +6,10 @@ import std/strutils
import waku/common/broker/request_broker
## ---------------------------------------------------------------------------
## Async-mode brokers + tests
## ---------------------------------------------------------------------------
RequestBroker:
type SimpleResponse = object
value*: string
@ -31,11 +35,14 @@ RequestBroker:
suffix: string
): Future[Result[DualResponse, string]] {.async.}
RequestBroker:
RequestBroker(async):
type ImplicitResponse = ref object
note*: string
suite "RequestBroker macro":
static:
doAssert typeof(SimpleResponse.request()) is Future[Result[SimpleResponse, string]]
suite "RequestBroker macro (async mode)":
test "serves zero-argument providers":
check SimpleResponse
.setProvider(
@ -52,7 +59,7 @@ suite "RequestBroker macro":
test "zero-argument request errors when unset":
let res = waitFor SimpleResponse.request()
check res.isErr
check res.isErr()
check res.error.contains("no zero-arg provider")
test "serves input-based providers":
@ -78,7 +85,6 @@ suite "RequestBroker macro":
.setProvider(
proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} =
raise newException(ValueError, "simulated failure")
ok(KeyedResponse(key: key, payload: ""))
)
.isOk()
@ -90,7 +96,7 @@ suite "RequestBroker macro":
test "input request errors when unset":
let res = waitFor KeyedResponse.request("foo", 2)
check res.isErr
check res.isErr()
check res.error.contains("input signature")
test "supports both provider types simultaneously":
@ -109,11 +115,11 @@ suite "RequestBroker macro":
.isOk()
let noInput = waitFor DualResponse.request()
check noInput.isOk
check noInput.isOk()
check noInput.value.note == "base"
let withInput = waitFor DualResponse.request("-extra")
check withInput.isOk
check withInput.isOk()
check withInput.value.note == "base-extra"
check withInput.value.count == 6
@ -129,7 +135,7 @@ suite "RequestBroker macro":
DualResponse.clearProvider()
let res = waitFor DualResponse.request()
check res.isErr
check res.isErr()
test "implicit zero-argument provider works by default":
check ImplicitResponse
@ -140,14 +146,14 @@ suite "RequestBroker macro":
.isOk()
let res = waitFor ImplicitResponse.request()
check res.isOk
check res.isOk()
ImplicitResponse.clearProvider()
check res.value.note == "auto"
test "implicit zero-argument request errors when unset":
let res = waitFor ImplicitResponse.request()
check res.isErr
check res.isErr()
check res.error.contains("no zero-arg provider")
test "no provider override":
@ -171,7 +177,7 @@ suite "RequestBroker macro":
check DualResponse.setProvider(overrideProc).isErr()
let noInput = waitFor DualResponse.request()
check noInput.isOk
check noInput.isOk()
check noInput.value.note == "base"
let stillResponse = waitFor DualResponse.request(" still works")
@ -191,8 +197,306 @@ suite "RequestBroker macro":
check DualResponse.setProvider(overrideProc).isOk()
let nowSuccWithOverride = waitFor DualResponse.request()
check nowSuccWithOverride.isOk
check nowSuccWithOverride.isOk()
check nowSuccWithOverride.value.note == "something else"
check nowSuccWithOverride.value.count == 1
DualResponse.clearProvider()
## ---------------------------------------------------------------------------
## Sync-mode brokers + tests
## ---------------------------------------------------------------------------
RequestBroker(sync):
type SimpleResponseSync = object
value*: string
proc signatureFetch*(): Result[SimpleResponseSync, string]
RequestBroker(sync):
type KeyedResponseSync = object
key*: string
payload*: string
proc signatureFetchWithKey*(
key: string, subKey: int
): Result[KeyedResponseSync, string]
RequestBroker(sync):
type DualResponseSync = object
note*: string
count*: int
proc signatureNoInput*(): Result[DualResponseSync, string]
proc signatureWithInput*(suffix: string): Result[DualResponseSync, string]
RequestBroker(sync):
type ImplicitResponseSync = ref object
note*: string
static:
doAssert typeof(SimpleResponseSync.request()) is Result[SimpleResponseSync, string]
doAssert not (
typeof(SimpleResponseSync.request()) is Future[Result[SimpleResponseSync, string]]
)
doAssert typeof(KeyedResponseSync.request("topic", 1)) is
Result[KeyedResponseSync, string]
suite "RequestBroker macro (sync mode)":
test "serves zero-argument providers (sync)":
check SimpleResponseSync
.setProvider(
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "hi"))
)
.isOk()
let res = SimpleResponseSync.request()
check res.isOk()
check res.value.value == "hi"
SimpleResponseSync.clearProvider()
test "zero-argument request errors when unset (sync)":
let res = SimpleResponseSync.request()
check res.isErr()
check res.error.contains("no zero-arg provider")
test "serves input-based providers (sync)":
var seen: seq[string] = @[]
check KeyedResponseSync
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
seen.add(key)
ok(KeyedResponseSync(key: key, payload: key & "-payload+" & $subKey))
)
.isOk()
let res = KeyedResponseSync.request("topic", 1)
check res.isOk()
check res.value.key == "topic"
check res.value.payload == "topic-payload+1"
check seen == @["topic"]
KeyedResponseSync.clearProvider()
test "catches provider exception (sync)":
check KeyedResponseSync
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
let res = KeyedResponseSync.request("neglected", 11)
check res.isErr()
check res.error.contains("simulated failure")
KeyedResponseSync.clearProvider()
test "input request errors when unset (sync)":
let res = KeyedResponseSync.request("foo", 2)
check res.isErr()
check res.error.contains("input signature")
test "supports both provider types simultaneously (sync)":
check DualResponseSync
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base", count: 1))
)
.isOk()
check DualResponseSync
.setProvider(
proc(suffix: string): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base" & suffix, count: suffix.len))
)
.isOk()
let noInput = DualResponseSync.request()
check noInput.isOk()
check noInput.value.note == "base"
let withInput = DualResponseSync.request("-extra")
check withInput.isOk()
check withInput.value.note == "base-extra"
check withInput.value.count == 6
DualResponseSync.clearProvider()
test "clearProvider resets both entries (sync)":
check DualResponseSync
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "temp", count: 0))
)
.isOk()
DualResponseSync.clearProvider()
let res = DualResponseSync.request()
check res.isErr()
test "implicit zero-argument provider works by default (sync)":
check ImplicitResponseSync
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
ok(ImplicitResponseSync(note: "auto"))
)
.isOk()
let res = ImplicitResponseSync.request()
check res.isOk()
ImplicitResponseSync.clearProvider()
check res.value.note == "auto"
test "implicit zero-argument request errors when unset (sync)":
let res = ImplicitResponseSync.request()
check res.isErr()
check res.error.contains("no zero-arg provider")
test "implicit zero-argument provider raises error (sync)":
check ImplicitResponseSync
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
let res = ImplicitResponseSync.request()
check res.isErr()
check res.error.contains("simulated failure")
ImplicitResponseSync.clearProvider()
## ---------------------------------------------------------------------------
## POD / external type brokers + tests (distinct/alias behavior)
## ---------------------------------------------------------------------------
type ExternalDefinedTypeAsync = object
label*: string
type ExternalDefinedTypeSync = object
label*: string
type ExternalDefinedTypeShared = object
label*: string
RequestBroker:
type PodResponse = int
proc signatureFetch*(): Future[Result[PodResponse, string]] {.async.}
RequestBroker:
type ExternalAliasedResponse = ExternalDefinedTypeAsync
proc signatureFetch*(): Future[Result[ExternalAliasedResponse, string]] {.async.}
RequestBroker(sync):
type ExternalAliasedResponseSync = ExternalDefinedTypeSync
proc signatureFetch*(): Result[ExternalAliasedResponseSync, string]
RequestBroker(sync):
type DistinctStringResponseA = distinct string
RequestBroker(sync):
type DistinctStringResponseB = distinct string
RequestBroker(sync):
type ExternalDistinctResponseA = distinct ExternalDefinedTypeShared
RequestBroker(sync):
type ExternalDistinctResponseB = distinct ExternalDefinedTypeShared
suite "RequestBroker macro (POD/external types)":
test "supports non-object response types (async)":
check PodResponse
.setProvider(
proc(): Future[Result[PodResponse, string]] {.async.} =
ok(PodResponse(123))
)
.isOk()
let res = waitFor PodResponse.request()
check res.isOk()
check int(res.value) == 123
PodResponse.clearProvider()
test "supports aliased external types (async)":
check ExternalAliasedResponse
.setProvider(
proc(): Future[Result[ExternalAliasedResponse, string]] {.async.} =
ok(ExternalAliasedResponse(ExternalDefinedTypeAsync(label: "ext")))
)
.isOk()
let res = waitFor ExternalAliasedResponse.request()
check res.isOk()
check ExternalDefinedTypeAsync(res.value).label == "ext"
ExternalAliasedResponse.clearProvider()
test "supports aliased external types (sync)":
check ExternalAliasedResponseSync
.setProvider(
proc(): Result[ExternalAliasedResponseSync, string] =
ok(ExternalAliasedResponseSync(ExternalDefinedTypeSync(label: "ext")))
)
.isOk()
let res = ExternalAliasedResponseSync.request()
check res.isOk()
check ExternalDefinedTypeSync(res.value).label == "ext"
ExternalAliasedResponseSync.clearProvider()
test "distinct response types avoid overload ambiguity (sync)":
check DistinctStringResponseA
.setProvider(
proc(): Result[DistinctStringResponseA, string] =
ok(DistinctStringResponseA("a"))
)
.isOk()
check DistinctStringResponseB
.setProvider(
proc(): Result[DistinctStringResponseB, string] =
ok(DistinctStringResponseB("b"))
)
.isOk()
check ExternalDistinctResponseA
.setProvider(
proc(): Result[ExternalDistinctResponseA, string] =
ok(ExternalDistinctResponseA(ExternalDefinedTypeShared(label: "ea")))
)
.isOk()
check ExternalDistinctResponseB
.setProvider(
proc(): Result[ExternalDistinctResponseB, string] =
ok(ExternalDistinctResponseB(ExternalDefinedTypeShared(label: "eb")))
)
.isOk()
let resA = DistinctStringResponseA.request()
let resB = DistinctStringResponseB.request()
check resA.isOk()
check resB.isOk()
check string(resA.value) == "a"
check string(resB.value) == "b"
let resEA = ExternalDistinctResponseA.request()
let resEB = ExternalDistinctResponseB.request()
check resEA.isOk()
check resEB.isOk()
check ExternalDefinedTypeShared(resEA.value).label == "ea"
check ExternalDefinedTypeShared(resEB.value).label == "eb"
DistinctStringResponseA.clearProvider()
DistinctStringResponseB.clearProvider()
ExternalDistinctResponseA.clearProvider()
ExternalDistinctResponseB.clearProvider()

View File

@ -13,6 +13,7 @@ import
node/peer_manager,
node/waku_node,
node/kernel_api,
node/kernel_api/lightpush,
waku_lightpush_legacy,
waku_lightpush_legacy/common,
waku_lightpush_legacy/protocol_metrics,
@ -56,7 +57,7 @@ suite "Waku Legacy Lightpush - End To End":
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountLegacyLightpush() # without rln-relay
check (await server.mountLegacyLightpush()).isOk() # without rln-relay
client.mountLegacyLightpushClient()
serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
@ -135,8 +136,8 @@ suite "RLN Proofs as a Lightpush Service":
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
anvilProc = runAnvil()
manager = waitFor setupOnchainGroupManager()
anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager(deployContracts = false)
# mount rln-relay
let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
@ -147,7 +148,7 @@ suite "RLN Proofs as a Lightpush Service":
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig)
await server.mountLegacyLightPush()
check (await server.mountLegacyLightPush()).isOk()
client.mountLegacyLightPushClient()
let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager)
@ -213,7 +214,7 @@ suite "Waku Legacy Lightpush message delivery":
assert false, "Failed to mount relay"
(await bridgeNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await bridgeNode.mountLegacyLightPush()
check (await bridgeNode.mountLegacyLightPush()).isOk()
lightNode.mountLegacyLightPushClient()
discard await lightNode.peerManager.dialPeer(
@ -249,3 +250,19 @@ suite "Waku Legacy Lightpush message delivery":
## Cleanup
await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop())
suite "Waku Legacy Lightpush mounting behavior":
asyncTest "fails to mount when relay is not mounted":
## Given a node without Relay mounted
let
key = generateSecp256k1Key()
node = newTestWakuNode(key, parseIpAddress("0.0.0.0"), Port(0))
# Do not mount Relay on purpose
check node.wakuRelay.isNil()
## Then mounting Legacy Lightpush must fail
let res = await node.mountLegacyLightPush()
check:
res.isErr()
res.error == MountWithoutRelayError

View File

@ -13,6 +13,7 @@ import
node/peer_manager,
node/waku_node,
node/kernel_api,
node/kernel_api/lightpush,
waku_lightpush,
waku_rln_relay,
],
@ -55,7 +56,7 @@ suite "Waku Lightpush - End To End":
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountLightpush() # without rln-relay
check (await server.mountLightpush()).isOk() # without rln-relay
client.mountLightpushClient()
serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
@ -135,8 +136,8 @@ suite "RLN Proofs as a Lightpush Service":
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
anvilProc = runAnvil()
manager = waitFor setupOnchainGroupManager()
anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager(deployContracts = false)
# mount rln-relay
let wakuRlnConfig = getWakuRlnConfig(manager = manager, index = MembershipIndex(1))
@ -147,7 +148,7 @@ suite "RLN Proofs as a Lightpush Service":
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig)
await server.mountLightPush()
check (await server.mountLightPush()).isOk()
client.mountLightPushClient()
let manager1 = cast[OnchainGroupManager](server.wakuRlnRelay.groupManager)
@ -213,7 +214,7 @@ suite "Waku Lightpush message delivery":
assert false, "Failed to mount relay"
(await bridgeNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await bridgeNode.mountLightPush()
check (await bridgeNode.mountLightPush()).isOk()
lightNode.mountLightPushClient()
discard await lightNode.peerManager.dialPeer(
@ -251,3 +252,19 @@ suite "Waku Lightpush message delivery":
## Cleanup
await allFutures(lightNode.stop(), bridgeNode.stop(), destNode.stop())
suite "Waku Lightpush mounting behavior":
asyncTest "fails to mount when relay is not mounted":
## Given a node without Relay mounted
let
key = generateSecp256k1Key()
node = newTestWakuNode(key, parseIpAddress("0.0.0.0"), Port(0))
# Do not mount Relay on purpose
check node.wakuRelay.isNil()
## Then mounting Lightpush must fail
let res = await node.mountLightPush()
check:
res.isErr()
res.error == MountWithoutRelayError

View File

@ -66,15 +66,17 @@ suite "Waku Peer Exchange":
suite "fetchPeerExchangePeers":
var node2 {.threadvar.}: WakuNode
var node3 {.threadvar.}: WakuNode
asyncSetup:
node = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort)
node2 = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort)
node3 = newTestWakuNode(generateSecp256k1Key(), bindIp, bindPort)
await allFutures(node.start(), node2.start())
await allFutures(node.start(), node2.start(), node3.start())
asyncTeardown:
await allFutures(node.stop(), node2.stop())
await allFutures(node.stop(), node2.stop(), node3.stop())
asyncTest "Node fetches without mounting peer exchange":
# When a node, without peer exchange mounted, fetches peers
@ -104,12 +106,10 @@ suite "Waku Peer Exchange":
await allFutures([node.mountPeerExchangeClient(), node2.mountPeerExchange()])
check node.peerManager.switch.peerStore.peers.len == 0
# Mock that we discovered a node (to avoid running discv5)
var enr = enr.Record()
assert enr.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
), "Failed to parse ENR"
node2.wakuPeerExchange.enrCache.add(enr)
# Simulate node2 discovering node3 via Discv5
var rpInfo = node3.peerInfo.toRemotePeerInfo()
rpInfo.enr = some(node3.enr)
node2.peerManager.addPeer(rpInfo, PeerOrigin.Discv5)
# Set node2 as service peer (default one) for px protocol
node.peerManager.addServicePeer(
@ -121,10 +121,8 @@ suite "Waku Peer Exchange":
check res.tryGet() == 1
# Check that the peer ended up in the peerstore
let rpInfo = enr.toRemotePeerInfo.get()
check:
node.peerManager.switch.peerStore.peers.anyIt(it.peerId == rpInfo.peerId)
node.peerManager.switch.peerStore.peers.anyIt(it.addrs == rpInfo.addrs)
suite "setPeerExchangePeer":
var node2 {.threadvar.}: WakuNode

View File

@ -282,7 +282,7 @@ suite "Sharding":
asyncTest "lightpush":
# Given a connected server and client subscribed to the same pubsub topic
client.mountLegacyLightPushClient()
await server.mountLightpush()
check (await server.mountLightpush()).isOk()
let
topic = "/waku/2/rs/0/1"
@ -405,7 +405,7 @@ suite "Sharding":
asyncTest "lightpush (automatic sharding filtering)":
# Given a connected server and client using the same content topic (with two different formats)
client.mountLegacyLightPushClient()
await server.mountLightpush()
check (await server.mountLightpush()).isOk()
let
contentTopicShort = "/toychat/2/huilong/proto"
@ -563,7 +563,7 @@ suite "Sharding":
asyncTest "lightpush - exclusion (automatic sharding filtering)":
# Given a connected server and client using different content topics
client.mountLegacyLightPushClient()
await server.mountLightpush()
check (await server.mountLightpush()).isOk()
let
contentTopic1 = "/toychat/2/huilong/proto"
@ -874,7 +874,7 @@ suite "Sharding":
asyncTest "Waku LightPush Sharding (Static Sharding)":
# Given a connected server and client using two different pubsub topics
client.mountLegacyLightPushClient()
await server.mountLightpush()
check (await server.mountLightpush()).isOk()
# Given a connected server and client subscribed to multiple pubsub topics
let

View File

@ -142,9 +142,13 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node4 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange
await allFutures([node1.start(), node2.start()])
await allFutures([node1.start(), node2.start(), node3.start(), node4.start()])
await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()])
# Create connection
@ -154,18 +158,15 @@ suite "Waku Peer Exchange":
require:
connOpt.isSome
# Create some enr and add to peer exchange (simulating disv5)
var enr1, enr2 = enr.Record()
check enr1.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
)
check enr2.fromUri(
"enr:-Iu4QGJllOWlviPIh_SGR-VVm55nhnBIU5L-s3ran7ARz_4oDdtJPtUs3Bc5aqZHCiPQX6qzNYF2ARHER0JPX97TFbEBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQP3ULycvday4EkvtVu0VqbBdmOkbfVLJx8fPe0lE_dRkIN0Y3CC6mCFd2FrdTIB"
)
# Simulate node1 discovering node3 via Discv5
var info3 = node3.peerInfo.toRemotePeerInfo()
info3.enr = some(node3.enr)
node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
# Mock that we have discovered these enrs
node1.wakuPeerExchange.enrCache.add(enr1)
node1.wakuPeerExchange.enrCache.add(enr2)
# Simulate node1 discovering node4 via Discv5
var info4 = node4.peerInfo.toRemotePeerInfo()
info4.enr = some(node4.enr)
node1.peerManager.addPeer(info4, PeerOrigin.Discv5)
# Request 2 peer from px. Test all request variants
let response1 = await node2.wakuPeerExchangeClient.request(2)
@ -185,12 +186,12 @@ suite "Waku Peer Exchange":
response3.get().peerInfos.len == 2
# Since it can return duplicates, test that at least one of the enrs is in the response
response1.get().peerInfos.anyIt(it.enr == enr1.raw) or
response1.get().peerInfos.anyIt(it.enr == enr2.raw)
response2.get().peerInfos.anyIt(it.enr == enr1.raw) or
response2.get().peerInfos.anyIt(it.enr == enr2.raw)
response3.get().peerInfos.anyIt(it.enr == enr1.raw) or
response3.get().peerInfos.anyIt(it.enr == enr2.raw)
response1.get().peerInfos.anyIt(it.enr == node3.enr.raw) or
response1.get().peerInfos.anyIt(it.enr == node4.enr.raw)
response2.get().peerInfos.anyIt(it.enr == node3.enr.raw) or
response2.get().peerInfos.anyIt(it.enr == node4.enr.raw)
response3.get().peerInfos.anyIt(it.enr == node3.enr.raw) or
response3.get().peerInfos.anyIt(it.enr == node4.enr.raw)
asyncTest "Request fails gracefully":
let
@ -265,8 +266,8 @@ suite "Waku Peer Exchange":
peerInfo2.origin = PeerOrigin.Discv5
check:
not poolFilter(cluster, peerInfo1)
poolFilter(cluster, peerInfo2)
poolFilter(cluster, peerInfo1).isErr()
poolFilter(cluster, peerInfo2).isOk()
asyncTest "Request 0 peers, with 1 peer in PeerExchange":
# Given two valid nodes with PeerExchange
@ -275,9 +276,11 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange
await allFutures([node1.start(), node2.start()])
await allFutures([node1.start(), node2.start(), node3.start()])
await allFutures([node1.mountPeerExchange(), node2.mountPeerExchangeClient()])
# Connect the nodes
@ -286,12 +289,10 @@ suite "Waku Peer Exchange":
)
assert dialResponse.isSome
# Mock that we have discovered one enr
var record = enr.Record()
check record.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
)
node1.wakuPeerExchange.enrCache.add(record)
# Simulate node1 discovering node3 via Discv5
var info3 = node3.peerInfo.toRemotePeerInfo()
info3.enr = some(node3.enr)
node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
# When requesting 0 peers
let response = await node2.wakuPeerExchangeClient.request(0)
@ -312,13 +313,6 @@ suite "Waku Peer Exchange":
await allFutures([node1.start(), node2.start()])
await allFutures([node1.mountPeerExchangeClient(), node2.mountPeerExchange()])
# Mock that we have discovered one enr
var record = enr.Record()
check record.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
)
node2.wakuPeerExchange.enrCache.add(record)
# When making any request with an invalid peer info
var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo()
remotePeerInfo2.peerId.data.add(255.byte)
@ -362,17 +356,17 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange
await allFutures([node1.start(), node2.start()])
await allFutures([node1.start(), node2.start(), node3.start()])
await allFutures([node1.mountPeerExchange(), node2.mountPeerExchange()])
# Mock that we have discovered these enrs
var enr1 = enr.Record()
check enr1.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
)
node1.wakuPeerExchange.enrCache.add(enr1)
# Simulate node1 discovering node3 via Discv5
var info3 = node3.peerInfo.toRemotePeerInfo()
info3.enr = some(node3.enr)
node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
# Create connection
let connOpt = await node2.peerManager.dialPeer(
@ -396,7 +390,7 @@ suite "Waku Peer Exchange":
check:
decodedBuff.get().response.status_code == PeerExchangeResponseStatusCode.SUCCESS
decodedBuff.get().response.peerInfos.len == 1
decodedBuff.get().response.peerInfos[0].enr == enr1.raw
decodedBuff.get().response.peerInfos[0].enr == node3.enr.raw
asyncTest "RateLimit as expected":
let
@ -404,9 +398,11 @@ suite "Waku Peer Exchange":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
node3 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
# Start and mount peer exchange
await allFutures([node1.start(), node2.start()])
await allFutures([node1.start(), node2.start(), node3.start()])
await allFutures(
[
node1.mountPeerExchange(rateLimit = (1, 150.milliseconds)),
@ -414,6 +410,11 @@ suite "Waku Peer Exchange":
]
)
# Simulate node1 discovering node3 via Discv5
var info3 = node3.peerInfo.toRemotePeerInfo()
info3.enr = some(node3.enr)
node1.peerManager.addPeer(info3, PeerOrigin.Discv5)
# Create connection
let connOpt = await node2.peerManager.dialPeer(
node1.switch.peerInfo.toRemotePeerInfo(), WakuPeerExchangeCodec
@ -421,19 +422,6 @@ suite "Waku Peer Exchange":
require:
connOpt.isSome
# Create some enr and add to peer exchange (simulating disv5)
var enr1, enr2 = enr.Record()
check enr1.fromUri(
"enr:-Iu4QGNuTvNRulF3A4Kb9YHiIXLr0z_CpvWkWjWKU-o95zUPR_In02AWek4nsSk7G_-YDcaT4bDRPzt5JIWvFqkXSNcBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQKp9VzU2FAh7fwOwSpg1M_Ekz4zzl0Fpbg6po2ZwgVwQYN0Y3CC6mCFd2FrdTIB"
)
check enr2.fromUri(
"enr:-Iu4QGJllOWlviPIh_SGR-VVm55nhnBIU5L-s3ran7ARz_4oDdtJPtUs3Bc5aqZHCiPQX6qzNYF2ARHER0JPX97TFbEBgmlkgnY0gmlwhE0WsGeJc2VjcDI1NmsxoQP3ULycvday4EkvtVu0VqbBdmOkbfVLJx8fPe0lE_dRkIN0Y3CC6mCFd2FrdTIB"
)
# Mock that we have discovered these enrs
node1.wakuPeerExchange.enrCache.add(enr1)
node1.wakuPeerExchange.enrCache.add(enr2)
await sleepAsync(150.milliseconds)
# Request 2 peer from px. Test all request variants

View File

@ -0,0 +1,29 @@
{.used.}
{.push raises: [].}
import std/[options, os], results, testutils/unittests, chronos, web3
import
waku/[
waku_rln_relay,
waku_rln_relay/conversion_utils,
waku_rln_relay/group_manager/on_chain/group_manager,
],
./utils_onchain
suite "Token and RLN Contract Deployment":
test "anvil should dump state to file on exit":
# git will ignore this file. If the contract has been updated and the state file needs to be regenerated, this file can be compressed and renamed to replace the one in the repo (tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json.gz)
let testStateFile = some("tests/waku_rln_relay/anvil_state/anvil_state.ignore.json")
let anvilProc = runAnvil(stateFile = testStateFile, dumpStateOnExit = true)
let manager = waitFor setupOnchainGroupManager(deployContracts = true)
stopAnvil(anvilProc)
check:
fileExists(testStateFile.get())
# The test should still pass even if the compression fails
compressGzipFile(testStateFile.get(), testStateFile.get() & ".gz").isOkOr:
error "Failed to compress state file", error = error

View File

@ -33,8 +33,8 @@ suite "Onchain group manager":
var manager {.threadVar.}: OnchainGroupManager
setup:
anvilProc = runAnvil()
manager = waitFor setupOnchainGroupManager()
anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown:
stopAnvil(anvilProc)

View File

@ -27,8 +27,8 @@ suite "Waku rln relay":
var manager {.threadVar.}: OnchainGroupManager
setup:
anvilProc = runAnvil()
manager = waitFor setupOnchainGroupManager()
anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown:
stopAnvil(anvilProc)
@ -70,53 +70,6 @@ suite "Waku rln relay":
info "the generated identity credential: ", idCredential
test "hash Nim Wrappers":
# create an RLN instance
let rlnInstance = createRLNInstanceWrapper()
require:
rlnInstance.isOk()
# prepare the input
let
msg = "Hello".toBytes()
hashInput = encodeLengthPrefix(msg)
hashInputBuffer = toBuffer(hashInput)
# prepare other inputs to the hash function
let outputBuffer = default(Buffer)
let hashSuccess = sha256(unsafeAddr hashInputBuffer, unsafeAddr outputBuffer, true)
require:
hashSuccess
let outputArr = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
check:
"1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" ==
outputArr.inHex()
let
hashOutput = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
hashOutputHex = hashOutput.toHex()
info "hash output", hashOutputHex
test "sha256 hash utils":
# create an RLN instance
let rlnInstance = createRLNInstanceWrapper()
require:
rlnInstance.isOk()
let rln = rlnInstance.get()
# prepare the input
let msg = "Hello".toBytes()
let hashRes = sha256(msg)
check:
hashRes.isOk()
"1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" ==
hashRes.get().inHex()
test "poseidon hash utils":
# create an RLN instance
let rlnInstance = createRLNInstanceWrapper()

View File

@ -30,8 +30,8 @@ procSuite "WakuNode - RLN relay":
var manager {.threadVar.}: OnchainGroupManager
setup:
anvilProc = runAnvil()
manager = waitFor setupOnchainGroupManager()
anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown:
stopAnvil(anvilProc)

View File

@ -3,7 +3,7 @@
{.push raises: [].}
import
std/[options, os, osproc, deques, streams, strutils, tempfiles, strformat],
std/[options, os, osproc, streams, strutils, strformat],
results,
stew/byteutils,
testutils/unittests,
@ -14,7 +14,6 @@ import
web3/conversions,
web3/eth_api_types,
json_rpc/rpcclient,
json,
libp2p/crypto/crypto,
eth/keys,
results
@ -24,25 +23,19 @@ import
waku_rln_relay,
waku_rln_relay/protocol_types,
waku_rln_relay/constants,
waku_rln_relay/contract,
waku_rln_relay/rln,
],
../testlib/common,
./utils
../testlib/common
const CHAIN_ID* = 1234'u256
template skip0xPrefix(hexStr: string): int =
## Returns the index of the first meaningful char in `hexStr` by skipping
## "0x" prefix
if hexStr.len > 1 and hexStr[0] == '0' and hexStr[1] in {'x', 'X'}: 2 else: 0
func strip0xPrefix(s: string): string =
let prefixLen = skip0xPrefix(s)
if prefixLen != 0:
s[prefixLen .. ^1]
else:
s
# Path to the file which Anvil loads at startup to initialize the chain with pre-deployed contracts and an account funded with tokens and approved for spending
const DEFAULT_ANVIL_STATE_PATH* =
"tests/waku_rln_relay/anvil_state/state-deployed-contracts-mint-and-approved.json.gz"
# The contract address of the TestStableToken used for the RLN Membership registration fee
const TOKEN_ADDRESS* = "0x5FbDB2315678afecb367f032d93F642f64180aa3"
# The contract address used to interact with the WakuRLNV2 contract via the proxy
const WAKU_RLNV2_PROXY_ADDRESS* = "0x5fc8d32690cc91d4c39d9d3abcbd16989f875707"
proc generateCredentials*(): IdentityCredential =
let credRes = membershipKeyGen()
@ -106,7 +99,7 @@ proc sendMintCall(
recipientAddress: Address,
amountTokens: UInt256,
recipientBalanceBeforeExpectedTokens: Option[UInt256] = none(UInt256),
): Future[TxHash] {.async.} =
): Future[void] {.async.} =
let doBalanceAssert = recipientBalanceBeforeExpectedTokens.isSome()
if doBalanceAssert:
@ -142,7 +135,7 @@ proc sendMintCall(
tx.data = Opt.some(byteutils.hexToSeqByte(mintCallData))
trace "Sending mint call"
let txHash = await web3.send(tx)
discard await web3.send(tx)
let balanceOfSelector = "0x70a08231"
let balanceCallData = balanceOfSelector & paddedAddress
@ -157,8 +150,6 @@ proc sendMintCall(
assert balanceAfterMint == balanceAfterExpectedTokens,
fmt"Balance is {balanceAfterMint} after transfer but expected {balanceAfterExpectedTokens}"
return txHash
# Check how many tokens a spender (the RLN contract) is allowed to spend on behalf of the owner (account which wishes to register a membership)
proc checkTokenAllowance(
web3: Web3, tokenAddress: Address, owner: Address, spender: Address
@ -487,20 +478,64 @@ proc getAnvilPath*(): string =
anvilPath = joinPath(anvilPath, ".foundry/bin/anvil")
return $anvilPath
proc decompressGzipFile*(
compressedPath: string, targetPath: string
): Result[void, string] =
## Decompress a gzipped file using the gunzip command-line utility
let cmd = fmt"gunzip -c {compressedPath} > {targetPath}"
try:
let (output, exitCode) = execCmdEx(cmd)
if exitCode != 0:
return err(
"Failed to decompress '" & compressedPath & "' to '" & targetPath & "': " &
output
)
except OSError as e:
return err("Failed to execute gunzip command: " & e.msg)
except IOError as e:
return err("Failed to execute gunzip command: " & e.msg)
ok()
proc compressGzipFile*(sourcePath: string, targetPath: string): Result[void, string] =
## Compress a file with gzip using the gzip command-line utility
let cmd = fmt"gzip -c {sourcePath} > {targetPath}"
try:
let (output, exitCode) = execCmdEx(cmd)
if exitCode != 0:
return err(
"Failed to compress '" & sourcePath & "' to '" & targetPath & "': " & output
)
except OSError as e:
return err("Failed to execute gzip command: " & e.msg)
except IOError as e:
return err("Failed to execute gzip command: " & e.msg)
ok()
# Runs Anvil daemon
proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
proc runAnvil*(
port: int = 8540,
chainId: string = "1234",
stateFile: Option[string] = none(string),
dumpStateOnExit: bool = false,
): Process =
# Passed options are
# --port Port to listen on.
# --gas-limit Sets the block gas limit in WEI.
# --balance The default account balance, specified in ether.
# --chain-id Chain ID of the network.
# --load-state Initialize the chain from a previously saved state snapshot (read-only)
# --dump-state Dump the state on exit to the given file (write-only)
# See anvil documentation https://book.getfoundry.sh/reference/anvil/ for more details
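# Typical invocations, mirroring the tests in this PR (paths are illustrative):
#   runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))                          # load the cached state read-only
#   runAnvil(stateFile = some("anvil_state.ignore.json"), dumpStateOnExit = true)  # regenerate the cached state on exit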
try:
let anvilPath = getAnvilPath()
info "Anvil path", anvilPath
let runAnvil = startProcess(
anvilPath,
args = [
var args =
@[
"--port",
$port,
"--gas-limit",
@ -509,9 +544,54 @@ proc runAnvil*(port: int = 8540, chainId: string = "1234"): Process =
"1000000000",
"--chain-id",
$chainId,
],
options = {poUsePath, poStdErrToStdOut},
)
]
# Add state file argument if provided
if stateFile.isSome():
var statePath = stateFile.get()
info "State file parameter provided",
statePath = statePath,
dumpStateOnExit = dumpStateOnExit,
absolutePath = absolutePath(statePath)
# Check if the file is gzip compressed and handle decompression
if statePath.endsWith(".gz"):
let decompressedPath = statePath[0 .. ^4] # Remove .gz extension
debug "Gzip compressed state file detected",
compressedPath = statePath, decompressedPath = decompressedPath
if not fileExists(decompressedPath):
decompressGzipFile(statePath, decompressedPath).isOkOr:
error "Failed to decompress state file", error = error
return nil
statePath = decompressedPath
if dumpStateOnExit:
# Ensure the directory exists
let stateDir = parentDir(statePath)
if not dirExists(stateDir):
createDir(stateDir)
# Fresh deployment: start clean and dump state on exit
args.add("--dump-state")
args.add(statePath)
debug "Anvil configured to dump state on exit", path = statePath
else:
# Using cache: only load state, don't overwrite it (preserves clean cached state)
if fileExists(statePath):
args.add("--load-state")
args.add(statePath)
debug "Anvil configured to load state file (read-only)", path = statePath
else:
warn "State file does not exist, anvil will start fresh",
path = statePath, absolutePath = absolutePath(statePath)
else:
info "No state file provided, anvil will start fresh without state persistence"
info "Starting anvil with arguments", args = args.join(" ")
let runAnvil =
startProcess(anvilPath, args = args, options = {poUsePath, poStdErrToStdOut})
let anvilPID = runAnvil.processID
# We read stdout from Anvil to see when daemon is ready
@ -549,7 +629,14 @@ proc stopAnvil*(runAnvil: Process) {.used.} =
# Send termination signals
when not defined(windows):
discard execCmdEx(fmt"kill -TERM {anvilPID}")
discard execCmdEx(fmt"kill -9 {anvilPID}")
# Wait for graceful shutdown to allow state dumping
sleep(200)
# Only force kill if process is still running
let checkResult = execCmdEx(fmt"kill -0 {anvilPID} 2>/dev/null")
if checkResult.exitCode == 0:
info "Anvil process still running after TERM signal, sending KILL",
anvilPID = anvilPID
discard execCmdEx(fmt"kill -9 {anvilPID}")
else:
discard execCmdEx(fmt"taskkill /F /PID {anvilPID}")
@ -560,52 +647,100 @@ proc stopAnvil*(runAnvil: Process) {.used.} =
info "Error stopping Anvil daemon", anvilPID = anvilPID, error = e.msg
proc setupOnchainGroupManager*(
ethClientUrl: string = EthClient, amountEth: UInt256 = 10.u256
ethClientUrl: string = EthClient,
amountEth: UInt256 = 10.u256,
deployContracts: bool = true,
): Future[OnchainGroupManager] {.async.} =
## Setup an onchain group manager for testing
## If deployContracts is false, it assumes that the Anvil testnet already has the required contracts deployed, which significantly speeds up test runs.
## To run Anvil with a cached state file containing pre-deployed contracts, see the runAnvil documentation.
##
## To generate/update the cached state file:
## 1. Call runAnvil with stateFile and dumpStateOnExit=true
## 2. Run setupOnchainGroupManager with deployContracts=true to deploy contracts
## 3. The state will be saved to the specified file when anvil exits
## 4. Commit this file to git
##
## To use cached state:
## 1. Call runAnvil with stateFile and dumpStateOnExit=false
## 2. Anvil loads state in read-only mode (won't overwrite the cached file)
## 3. Call setupOnchainGroupManager with deployContracts=false
## 4. Tests run fast using pre-deployed contracts (see the sketch below)
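##
## A minimal sketch of the cached-state path, mirroring the `setup`/`teardown`
## blocks used by the test suites in this PR:
##
## ```nim
## setup:
##   anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
##   manager = waitFor setupOnchainGroupManager(deployContracts = false)
## teardown:
##   stopAnvil(anvilProc)
## ```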
let rlnInstanceRes = createRlnInstance()
check:
rlnInstanceRes.isOk()
let rlnInstance = rlnInstanceRes.get()
# connect to the eth client
let web3 = await newWeb3(ethClientUrl)
let accounts = await web3.provider.eth_accounts()
web3.defaultAccount = accounts[1]
let (privateKey, acc) = createEthAccount(web3)
var privateKey: keys.PrivateKey
var acc: Address
var testTokenAddress: Address
var contractAddress: Address
# we just need to fund the default account
# the send procedure returns a tx hash that we don't use, hence discard
discard await sendEthTransfer(
web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256)
)
if not deployContracts:
info "Using contract addresses from constants"
let testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy test token contract: " & $error
return
testTokenAddress = Address(hexToByteArray[20](TOKEN_ADDRESS))
contractAddress = Address(hexToByteArray[20](WAKU_RLNV2_PROXY_ADDRESS))
# mint the token from the generated account
discard await sendMintCall(
web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256), some(0.u256)
)
(privateKey, acc) = createEthAccount(web3)
let contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy RLN contract: " & $error
return
# Fund the test account
discard await sendEthTransfer(web3, web3.defaultAccount, acc, ethToWei(1000.u256))
# If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens
let tokenApprovalResult = await approveTokenAllowanceAndVerify(
web3,
acc,
privateKey,
testTokenAddress,
contractAddress,
ethToWei(200.u256),
some(0.u256),
)
# Mint tokens to the test account
await sendMintCall(
web3, web3.defaultAccount, testTokenAddress, acc, ethToWei(1000.u256)
)
assert tokenApprovalResult.isOk, tokenApprovalResult.error()
# Approve the contract to spend tokens
let tokenApprovalResult = await approveTokenAllowanceAndVerify(
web3, acc, privateKey, testTokenAddress, contractAddress, ethToWei(200.u256)
)
assert tokenApprovalResult.isOk(), tokenApprovalResult.error
else:
info "Performing Token and RLN contracts deployment"
(privateKey, acc) = createEthAccount(web3)
# fund the generated account from the default account
discard await sendEthTransfer(
web3, web3.defaultAccount, acc, ethToWei(1000.u256), some(0.u256)
)
testTokenAddress = (await deployTestToken(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy test token contract: " & $error
return
# mint tokens to the generated account
await sendMintCall(
web3,
web3.defaultAccount,
testTokenAddress,
acc,
ethToWei(1000.u256),
some(0.u256),
)
contractAddress = (await executeForgeContractDeployScripts(privateKey, acc, web3)).valueOr:
assert false, "Failed to deploy RLN contract: " & $error
return
# If the generated account wishes to register a membership, it needs to approve the contract to spend its tokens
let tokenApprovalResult = await approveTokenAllowanceAndVerify(
web3,
acc,
privateKey,
testTokenAddress,
contractAddress,
ethToWei(200.u256),
some(0.u256),
)
assert tokenApprovalResult.isOk(), tokenApprovalResult.error
let manager = OnchainGroupManager(
ethClientUrls: @[ethClientUrl],

View File

@ -41,8 +41,8 @@ suite "Waku v2 REST API - health":
var manager {.threadVar.}: OnchainGroupManager
setup:
anvilProc = runAnvil()
manager = waitFor setupOnchainGroupManager()
anvilProc = runAnvil(stateFile = some(DEFAULT_ANVIL_STATE_PATH))
manager = waitFor setupOnchainGroupManager(deployContracts = false)
teardown:
stopAnvil(anvilProc)

View File

@ -61,7 +61,7 @@ proc init(
assert false, "Failed to mount relay: " & $error
(await testSetup.serviceNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay: " & $error
await testSetup.serviceNode.mountLightPush(rateLimit)
check (await testSetup.serviceNode.mountLightPush(rateLimit)).isOk()
testSetup.pushNode.mountLightPushClient()
testSetup.serviceNode.peerManager.addServicePeer(

View File

@ -61,7 +61,7 @@ proc init(
assert false, "Failed to mount relay"
(await testSetup.serviceNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await testSetup.serviceNode.mountLegacyLightPush(rateLimit)
check (await testSetup.serviceNode.mountLegacyLightPush(rateLimit)).isOk()
testSetup.pushNode.mountLegacyLightPushClient()
testSetup.serviceNode.peerManager.addServicePeer(

vendor/nim-ffi vendored Submodule

@ -0,0 +1 @@
Subproject commit d7a5492121aad190cf549436836e2fa42e34ff9b

@ -1 +1 @@
Subproject commit 900d4f95e0e618bdeb4c241f7a4b6347df6bb950
Subproject commit 8a338f354481e8a3f3d64a72e38fad4c62e32dcd

View File

@ -30,7 +30,8 @@ requires "nim >= 2.2.4",
"regex",
"results",
"db_connector",
"minilru"
"minilru",
"ffi"
### Helper functions
proc buildModule(filePath, params = "", lang = "c"): bool =
@ -61,27 +62,21 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " &
srcDir & name & ".nim"
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static") =
if not dirExists "build":
mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params
for i in 2 ..< paramCount():
for i in 2 ..< (paramCount() - 1):
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
else:
let lib_name = (when defined(windows): toDll(name) else: name & ".so")
when defined(windows):
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
proc buildMobileAndroid(srcDir = ".", params = "") =
let cpu = getEnv("CPU")
@ -206,12 +201,12 @@ let chroniclesParams =
"--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE"
task libwakuStatic, "Build the cbindings waku node library":
let name = "libwaku"
buildLibrary name, "library/", chroniclesParams, "static"
let lib_name = paramStr(paramCount())
buildLibrary lib_name, "library/", chroniclesParams, "static"
task libwakuDynamic, "Build the cbindings waku node library":
let name = "libwaku"
buildLibrary name, "library/", chroniclesParams, "dynamic"
let lib_name = paramStr(paramCount())
buildLibrary lib_name, "library/", chroniclesParams, "dynamic"
### Mobile Android
task libWakuAndroid, "Build the mobile bindings for Android":

View File

@ -6,8 +6,15 @@
## Worth considering using it in a single provider, many requester scenario.
##
## Provides a declarative way to define an immutable value type together with a
## thread-local broker that can register an asynchronous provider, dispatch typed
## requests and clear provider.
## thread-local broker that can register an asynchronous or synchronous provider,
## dispatch typed requests and clear the provider.
##
## Prefer the `sync` mode of RequestBroker when you need to provide simple value(s)
## and no long-running async operation is involved.
## Typically it acts as an accessor for the local state of a generic setting.
##
## The `async` mode is better suited when the requested data may involve a long IO operation
## or action.
##
## Usage:
## Declare your desired request type inside a `RequestBroker` macro, add any number of fields.
@ -24,6 +31,56 @@
## proc signature*(arg1: ArgType, arg2: AnotherArgType): Future[Result[TypeName, string]]
##
## ```
##
## Sync mode (no `async` / `Future`) can be generated with:
##
## ```nim
## RequestBroker(sync):
## type TypeName = object
## field1*: FieldType
##
## proc signature*(): Result[TypeName, string]
## proc signature*(arg1: ArgType): Result[TypeName, string]
## ```
##
## Note: When the request type is declared as a native type / alias / externally-defined
## type (i.e. not an inline `object` / `ref object` definition), RequestBroker
## will wrap it in `distinct` automatically unless you already used `distinct`.
## This avoids overload ambiguity when multiple brokers share the same
## underlying base type (Nim overload resolution does not consider return type).
##
## This means that for non-object request types you typically:
## - construct values with an explicit cast/constructor, e.g. `MyType("x")`
## - unwrap with a cast when needed, e.g. `string(myVal)` or `BaseType(myVal)`
##
## Example (native response type):
## ```nim
## RequestBroker(sync):
## type MyCount = int # exported as: `distinct int`
##
## MyCount.setProvider(proc(): Result[MyCount, string] = ok(MyCount(42)))
## let res = MyCount.request()
## if res.isOk():
## let raw = int(res.get())
## ```
##
## Example (externally-defined type):
## ```nim
## type External = object
## label*: string
##
## RequestBroker:
## type MyExternal = External # exported as: `distinct External`
##
## MyExternal.setProvider(
## proc(): Future[Result[MyExternal, string]] {.async.} =
## ok(MyExternal(External(label: "hi")))
## )
## let res = await MyExternal.request()
## if res.isOk():
## let base = External(res.get())
## echo base.label
## ```
## The 'TypeName' object defines the requestable data (it can also be seen as a request for an action with a return value).
## The 'signature' proc defines the provider(s) signature, that is enforced at compile time.
## One signature can be with no arguments, another with any number of arguments - where the input arguments are
@ -31,12 +88,12 @@
##
## After this, you can register a provider anywhere in your code with
## `TypeName.setProvider(...)`, which returns error if already having a provider.
## Providers are async procs or lambdas that take no arguments and return a Future[Result[TypeName, string]].
## Providers are async procs/lambdas in default mode and sync procs in sync mode.
## Only one provider can be registered at a time per signature type (zero arg and/or multi arg).
##
## Requests can be made from anywhere with no direct dependency on the provider by
## calling `TypeName.request()` - with arguments respecting the signature(s).
## This will asynchronously call the registered provider and return a Future[Result[TypeName, string]].
## In async mode, this returns a Future[Result[TypeName, string]]. In sync mode, it returns Result[TypeName, string].
##
## Whenever you no longer want to process requests (or the object instance that provides them goes out of scope),
## you can remove it from the broker with `TypeName.clearProvider()`.
@ -49,10 +106,10 @@
## text*: string
##
## ## Define the request and provider signature, that is enforced at compile time.
## proc signature*(): Future[Result[Greeting, string]]
## proc signature*(): Future[Result[Greeting, string]] {.async.}
##
## ## Also possible to define signature with arbitrary input arguments.
## proc signature*(lang: string): Future[Result[Greeting, string]]
## proc signature*(lang: string): Future[Result[Greeting, string]] {.async.}
##
## ...
## Greeting.setProvider(
@ -60,6 +117,23 @@
## ok(Greeting(text: "hello"))
## )
## let res = await Greeting.request()
##
##
## ...
## # using native type as response for a synchronous request.
## RequestBroker(sync):
## type NeedThatInfo = string
##
## ...
## NeedThatInfo.setProvider(
## proc(): Result[NeedThatInfo, string] =
## ok("this is the info you wanted")
## )
## let res = NeedThatInfo.request().valueOr:
## echo "not ok due to: " & error
## NeedThatInfo(":-(")
##
## echo string(res)
## ```
## If no `signature` proc is declared, a zero-argument form is generated
## automatically, so the caller only needs to provide the type definition.
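##
## A minimal sketch of that shorthand (hypothetical `Ping` type, default async mode):
##
## ```nim
## RequestBroker:
##   type Ping = object
##     note*: string
##
## Ping.setProvider(
##   proc(): Future[Result[Ping, string]] {.async.} =
##     ok(Ping(note: "pong"))
## )
## let res = await Ping.request()
## ```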
@ -77,7 +151,11 @@ proc errorFuture[T](message: string): Future[Result[T, string]] {.inline.} =
fut.complete(err(Result[T, string], message))
fut
proc isReturnTypeValid(returnType, typeIdent: NimNode): bool =
type RequestBrokerMode = enum
rbAsync
rbSync
proc isAsyncReturnTypeValid(returnType, typeIdent: NimNode): bool =
## Accept Future[Result[TypeIdent, string]] as the contract.
if returnType.kind != nnkBracketExpr or returnType.len != 2:
return false
@ -92,6 +170,23 @@ proc isReturnTypeValid(returnType, typeIdent: NimNode): bool =
return false
inner[2].kind == nnkIdent and inner[2].eqIdent("string")
proc isSyncReturnTypeValid(returnType, typeIdent: NimNode): bool =
## Accept Result[TypeIdent, string] as the contract.
if returnType.kind != nnkBracketExpr or returnType.len != 3:
return false
if returnType[0].kind != nnkIdent or not returnType[0].eqIdent("Result"):
return false
if returnType[1].kind != nnkIdent or not returnType[1].eqIdent($typeIdent):
return false
returnType[2].kind == nnkIdent and returnType[2].eqIdent("string")
proc isReturnTypeValid(returnType, typeIdent: NimNode, mode: RequestBrokerMode): bool =
case mode
of rbAsync:
isAsyncReturnTypeValid(returnType, typeIdent)
of rbSync:
isSyncReturnTypeValid(returnType, typeIdent)
proc cloneParams(params: seq[NimNode]): seq[NimNode] =
## Deep copy parameter definitions so they can be inserted in multiple places.
result = @[]
@ -109,73 +204,122 @@ proc collectParamNames(params: seq[NimNode]): seq[NimNode] =
continue
result.add(ident($nameNode))
proc makeProcType(returnType: NimNode, params: seq[NimNode]): NimNode =
proc makeProcType(
returnType: NimNode, params: seq[NimNode], mode: RequestBrokerMode
): NimNode =
var formal = newTree(nnkFormalParams)
formal.add(returnType)
for param in params:
formal.add(param)
let pragmas = newTree(nnkPragma, ident("async"))
newTree(nnkProcTy, formal, pragmas)
case mode
of rbAsync:
let pragmas = newTree(nnkPragma, ident("async"))
newTree(nnkProcTy, formal, pragmas)
of rbSync:
let raisesPragma = newTree(
nnkExprColonExpr, ident("raises"), newTree(nnkBracket, ident("CatchableError"))
)
let pragmas = newTree(nnkPragma, raisesPragma, ident("gcsafe"))
newTree(nnkProcTy, formal, pragmas)
macro RequestBroker*(body: untyped): untyped =
proc parseMode(modeNode: NimNode): RequestBrokerMode =
## Parses the mode selector for the 2-argument macro overload.
## Supported spellings: `sync` / `async` (case-insensitive).
let raw = ($modeNode).strip().toLowerAscii()
case raw
of "sync":
rbSync
of "async":
rbAsync
else:
error("RequestBroker mode must be `sync` or `async` (default is async)", modeNode)
proc ensureDistinctType(rhs: NimNode): NimNode =
## For PODs / aliases / externally-defined types, wrap in `distinct` unless
## it's already distinct.
if rhs.kind == nnkDistinctTy:
return copyNimTree(rhs)
newTree(nnkDistinctTy, copyNimTree(rhs))
proc generateRequestBroker(body: NimNode, mode: RequestBrokerMode): NimNode =
when defined(requestBrokerDebug):
echo body.treeRepr
echo "RequestBroker mode: ", $mode
var typeIdent: NimNode = nil
var objectDef: NimNode = nil
var isRefObject = false
for stmt in body:
if stmt.kind == nnkTypeSection:
for def in stmt:
if def.kind != nnkTypeDef:
continue
if not typeIdent.isNil():
error("Only one type may be declared inside RequestBroker", def)
typeIdent = baseTypeIdent(def[0])
let rhs = def[2]
var objectType: NimNode
## Support inline object types (fields are auto-exported)
## AND non-object types / aliases (e.g. `string`, `int`, `OtherType`).
case rhs.kind
of nnkObjectTy:
objectType = rhs
let recList = rhs[2]
if recList.kind != nnkRecList:
error("RequestBroker object must declare a standard field list", rhs)
var exportedRecList = newTree(nnkRecList)
for field in recList:
case field.kind
of nnkIdentDefs:
ensureFieldDef(field)
var cloned = copyNimTree(field)
for i in 0 ..< cloned.len - 2:
cloned[i] = exportIdentNode(cloned[i])
exportedRecList.add(cloned)
of nnkEmpty:
discard
else:
error(
"RequestBroker object definition only supports simple field declarations",
field,
)
objectDef = newTree(
nnkObjectTy, copyNimTree(rhs[0]), copyNimTree(rhs[1]), exportedRecList
)
of nnkRefTy:
isRefObject = true
if rhs.len != 1 or rhs[0].kind != nnkObjectTy:
error(
"RequestBroker ref object must wrap a concrete object definition", rhs
if rhs.len != 1:
error("RequestBroker ref type must have a single base", rhs)
if rhs[0].kind == nnkObjectTy:
let obj = rhs[0]
let recList = obj[2]
if recList.kind != nnkRecList:
error("RequestBroker object must declare a standard field list", obj)
var exportedRecList = newTree(nnkRecList)
for field in recList:
case field.kind
of nnkIdentDefs:
ensureFieldDef(field)
var cloned = copyNimTree(field)
for i in 0 ..< cloned.len - 2:
cloned[i] = exportIdentNode(cloned[i])
exportedRecList.add(cloned)
of nnkEmpty:
discard
else:
error(
"RequestBroker object definition only supports simple field declarations",
field,
)
let exportedObjectType = newTree(
nnkObjectTy, copyNimTree(obj[0]), copyNimTree(obj[1]), exportedRecList
)
objectType = rhs[0]
else:
continue
if not typeIdent.isNil():
error("Only one object type may be declared inside RequestBroker", def)
typeIdent = baseTypeIdent(def[0])
let recList = objectType[2]
if recList.kind != nnkRecList:
error("RequestBroker object must declare a standard field list", objectType)
var exportedRecList = newTree(nnkRecList)
for field in recList:
case field.kind
of nnkIdentDefs:
ensureFieldDef(field)
var cloned = copyNimTree(field)
for i in 0 ..< cloned.len - 2:
cloned[i] = exportIdentNode(cloned[i])
exportedRecList.add(cloned)
of nnkEmpty:
discard
objectDef = newTree(nnkRefTy, exportedObjectType)
else:
error(
"RequestBroker object definition only supports simple field declarations",
field,
)
let exportedObjectType = newTree(
nnkObjectTy,
copyNimTree(objectType[0]),
copyNimTree(objectType[1]),
exportedRecList,
)
if isRefObject:
objectDef = newTree(nnkRefTy, exportedObjectType)
## `ref SomeType` (SomeType can be defined elsewhere)
objectDef = ensureDistinctType(rhs)
else:
objectDef = exportedObjectType
## Non-object type / alias (e.g. `string`, `int`, `SomeExternalType`).
objectDef = ensureDistinctType(rhs)
if typeIdent.isNil():
error("RequestBroker body must declare exactly one object type", body)
error("RequestBroker body must declare exactly one type", body)
when defined(requestBrokerDebug):
echo "RequestBroker generating type: ", $typeIdent
@ -183,7 +327,6 @@ macro RequestBroker*(body: untyped): untyped =
let exportedTypeIdent = postfix(copyNimTree(typeIdent), "*")
let typeDisplayName = sanitizeIdentName(typeIdent)
let typeNameLit = newLit(typeDisplayName)
let isRefObjectLit = newLit(isRefObject)
var zeroArgSig: NimNode = nil
var zeroArgProviderName: NimNode = nil
var zeroArgFieldName: NimNode = nil
@ -211,10 +354,14 @@ macro RequestBroker*(body: untyped): untyped =
if params.len == 0:
error("Signature must declare a return type", stmt)
let returnType = params[0]
if not isReturnTypeValid(returnType, typeIdent):
error(
"Signature must return Future[Result[`" & $typeIdent & "`, string]]", stmt
)
if not isReturnTypeValid(returnType, typeIdent, mode):
case mode
of rbAsync:
error(
"Signature must return Future[Result[`" & $typeIdent & "`, string]]", stmt
)
of rbSync:
error("Signature must return Result[`" & $typeIdent & "`, string]", stmt)
let paramCount = params.len - 1
if paramCount == 0:
if zeroArgSig != nil:
@ -258,14 +405,20 @@ macro RequestBroker*(body: untyped): untyped =
var typeSection = newTree(nnkTypeSection)
typeSection.add(newTree(nnkTypeDef, exportedTypeIdent, newEmptyNode(), objectDef))
let returnType = quote:
Future[Result[`typeIdent`, string]]
let returnType =
case mode
of rbAsync:
quote:
Future[Result[`typeIdent`, string]]
of rbSync:
quote:
Result[`typeIdent`, string]
if not zeroArgSig.isNil():
let procType = makeProcType(returnType, @[])
let procType = makeProcType(returnType, @[], mode)
typeSection.add(newTree(nnkTypeDef, zeroArgProviderName, newEmptyNode(), procType))
if not argSig.isNil():
let procType = makeProcType(returnType, cloneParams(argParams))
let procType = makeProcType(returnType, cloneParams(argParams), mode)
typeSection.add(newTree(nnkTypeDef, argProviderName, newEmptyNode(), procType))
var brokerRecList = newTree(nnkRecList)
@ -316,33 +469,69 @@ macro RequestBroker*(body: untyped): untyped =
quote do:
`accessProcIdent`().`zeroArgFieldName` = nil
)
result.add(
quote do:
proc request*(
_: typedesc[`typeIdent`]
): Future[Result[`typeIdent`, string]] {.async: (raises: []).} =
let provider = `accessProcIdent`().`zeroArgFieldName`
if provider.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered"
)
let catchedRes = catch:
await provider()
case mode
of rbAsync:
result.add(
quote do:
proc request*(
_: typedesc[`typeIdent`]
): Future[Result[`typeIdent`, string]] {.async: (raises: []).} =
let provider = `accessProcIdent`().`zeroArgFieldName`
if provider.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered"
)
let catchedRes = catch:
await provider()
if catchedRes.isErr():
return err("Request failed:" & catchedRes.error.msg)
if catchedRes.isErr():
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " &
catchedRes.error.msg
)
let providerRes = catchedRes.get()
when `isRefObjectLit`:
let providerRes = catchedRes.get()
if providerRes.isOk():
let resultValue = providerRes.get()
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
)
of rbSync:
result.add(
quote do:
proc request*(
_: typedesc[`typeIdent`]
): Result[`typeIdent`, string] {.gcsafe, raises: [].} =
let provider = `accessProcIdent`().`zeroArgFieldName`
if provider.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered"
)
var providerRes: Result[`typeIdent`, string]
try:
providerRes = provider()
except CatchableError as e:
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " &
e.msg
)
if providerRes.isOk():
let resultValue = providerRes.get()
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
if not argSig.isNil():
result.add(
quote do:
@ -363,10 +552,7 @@ macro RequestBroker*(body: untyped): untyped =
let argNameIdents = collectParamNames(requestParamDefs)
let providerSym = genSym(nskLet, "provider")
var formalParams = newTree(nnkFormalParams)
formalParams.add(
quote do:
Future[Result[`typeIdent`, string]]
)
formalParams.add(copyNimTree(returnType))
formalParams.add(
newTree(
nnkIdentDefs,
@ -378,8 +564,14 @@ macro RequestBroker*(body: untyped): untyped =
for paramDef in requestParamDefs:
formalParams.add(paramDef)
let requestPragmas = quote:
{.async: (raises: []), gcsafe.}
let requestPragmas =
case mode
of rbAsync:
quote:
{.async: (raises: []).}
of rbSync:
quote:
{.gcsafe, raises: [].}
var providerCall = newCall(providerSym)
for argName in argNameIdents:
providerCall.add(argName)
@ -396,23 +588,49 @@ macro RequestBroker*(body: untyped): untyped =
"): no provider registered for input signature"
)
)
requestBody.add(
quote do:
let catchedRes = catch:
await `providerCall`
if catchedRes.isErr():
return err("Request failed:" & catchedRes.error.msg)
let providerRes = catchedRes.get()
when `isRefObjectLit`:
case mode
of rbAsync:
requestBody.add(
quote do:
let catchedRes = catch:
await `providerCall`
if catchedRes.isErr():
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " &
catchedRes.error.msg
)
let providerRes = catchedRes.get()
if providerRes.isOk():
let resultValue = providerRes.get()
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
of rbSync:
requestBody.add(
quote do:
var providerRes: Result[`typeIdent`, string]
try:
providerRes = `providerCall`
except CatchableError as e:
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " & e.msg
)
if providerRes.isOk():
let resultValue = providerRes.get()
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
# requestBody.add(providerCall)
result.add(
newTree(
@ -436,3 +654,17 @@ macro RequestBroker*(body: untyped): untyped =
when defined(requestBrokerDebug):
echo result.repr
return result
macro RequestBroker*(body: untyped): untyped =
## Default (async) mode.
generateRequestBroker(body, rbAsync)
macro RequestBroker*(mode: untyped, body: untyped): untyped =
## Explicit mode selector.
## Example:
## RequestBroker(sync):
## type Foo = object
## proc signature*(): Result[Foo, string]
generateRequestBroker(body, parseMode(mode))

View File

@ -606,7 +606,7 @@ proc build*(
let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false)
let wakuFlags = CapabilitiesBitfield.init(
lightpush = lightPush,
lightpush = lightPush and relay,
filter = filterServiceConf.isSome,
store = storeServiceConf.isSome,
relay = relay,

View File

@ -368,8 +368,11 @@ proc setupProtocols(
# NOTE Must be mounted after relay
if conf.lightPush:
try:
await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))
await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))
(await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))).isOkOr:
return err("failed to mount waku lightpush protocol: " & $error)
(await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))).isOkOr:
return err("failed to mount waku legacy lightpush protocol: " & $error)
except CatchableError:
return err("failed to mount waku lightpush protocol: " & getCurrentExceptionMsg())

View File

@ -13,7 +13,6 @@ import
libp2p/services/autorelayservice,
libp2p/services/hpservice,
libp2p/peerid,
libp2p/discovery/discoverymngr,
libp2p/discovery/rendezvousinterface,
eth/keys,
eth/p2p/discoveryv5/enr,
@ -63,7 +62,6 @@ type Waku* = ref object
dynamicBootstrapNodes*: seq[RemotePeerInfo]
dnsRetryLoopHandle: Future[void]
networkConnLoopHandle: Future[void]
discoveryMngr: DiscoveryManager
node*: WakuNode

View File

@ -34,26 +34,27 @@ import
logScope:
topics = "waku node lightpush api"
const MountWithoutRelayError* = "cannot mount lightpush because relay is not mounted"
## Waku lightpush
proc mountLegacyLightPush*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
): Future[Result[void, string]] {.async.} =
info "mounting legacy light push"
let pushHandler =
if node.wakuRelay.isNil:
info "mounting legacy lightpush without relay (nil)"
legacy_lightpush_protocol.getNilPushHandler()
if node.wakuRelay.isNil():
return err(MountWithoutRelayError)
info "mounting legacy lightpush with relay"
let rlnPeer =
if node.wakuRlnRelay.isNil():
info "mounting legacy lightpush without rln-relay"
none(WakuRLNRelay)
else:
info "mounting legacy lightpush with relay"
let rlnPeer =
if isNil(node.wakuRlnRelay):
info "mounting legacy lightpush without rln-relay"
none(WakuRLNRelay)
else:
info "mounting legacy lightpush with rln-relay"
some(node.wakuRlnRelay)
legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
info "mounting legacy lightpush with rln-relay"
some(node.wakuRlnRelay)
let pushHandler =
legacy_lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
node.wakuLegacyLightPush =
WakuLegacyLightPush.new(node.peerManager, node.rng, pushHandler, some(rateLimit))
@ -64,6 +65,9 @@ proc mountLegacyLightPush*(
node.switch.mount(node.wakuLegacyLightPush, protocolMatcher(WakuLegacyLightPushCodec))
info "legacy lightpush mounted successfully"
return ok()
proc mountLegacyLightPushClient*(node: WakuNode) =
info "mounting legacy light push client"
@ -146,23 +150,21 @@ proc legacyLightpushPublish*(
proc mountLightPush*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
): Future[Result[void, string]] {.async.} =
info "mounting light push"
let pushHandler =
if node.wakuRelay.isNil():
info "mounting lightpush v2 without relay (nil)"
lightpush_protocol.getNilPushHandler()
if node.wakuRelay.isNil():
return err(MountWithoutRelayError)
info "mounting lightpush with relay"
let rlnPeer =
if node.wakuRlnRelay.isNil():
info "mounting lightpush without rln-relay"
none(WakuRLNRelay)
else:
info "mounting lightpush with relay"
let rlnPeer =
if isNil(node.wakuRlnRelay):
info "mounting lightpush without rln-relay"
none(WakuRLNRelay)
else:
info "mounting lightpush with rln-relay"
some(node.wakuRlnRelay)
lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
info "mounting lightpush with rln-relay"
some(node.wakuRlnRelay)
let pushHandler = lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)
node.wakuLightPush = WakuLightPush.new(
node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit)
@ -174,6 +176,9 @@ proc mountLightPush*(
node.switch.mount(node.wakuLightPush, protocolMatcher(WakuLightPushCodec))
info "lightpush mounted successfully"
return ok()
proc mountLightPushClient*(node: WakuNode) =
info "mounting light push client"

View File

@ -227,3 +227,17 @@ proc getPeersByCapability*(
): seq[RemotePeerInfo] =
return
peerStore.peers.filterIt(it.enr.isSome() and it.enr.get().supportsCapability(cap))
template forEnrPeers*(
peerStore: PeerStore,
peerId, peerConnectedness, peerOrigin, peerEnrRecord, body: untyped,
) =
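## Iterates over every peer in the peer store that has an ENR, injecting
## `peerId`, `peerConnectedness`, `peerOrigin` and `peerEnrRecord` into `body`.
## Connectedness and origin fall back to NotConnected / UnknownOrigin when the
## corresponding books have no entry for the peer.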
let enrBook = peerStore[ENRBook]
let connBook = peerStore[ConnectionBook]
let sourceBook = peerStore[SourceBook]
for pid, enrRecord in tables.pairs(enrBook.book):
let peerId {.inject.} = pid
let peerConnectedness {.inject.} = connBook.book.getOrDefault(pid, NotConnected)
let peerOrigin {.inject.} = sourceBook.book.getOrDefault(pid, UnknownOrigin)
let peerEnrRecord {.inject.} = enrRecord
body

View File

@ -525,9 +525,6 @@ proc stop*(node: WakuNode) {.async.} =
if not node.wakuStoreTransfer.isNil():
node.wakuStoreTransfer.stop()
if not node.wakuPeerExchange.isNil() and not node.wakuPeerExchange.pxLoopHandle.isNil():
await node.wakuPeerExchange.pxLoopHandle.cancelAndWait()
if not node.wakuPeerExchangeClient.isNil() and
not node.wakuPeerExchangeClient.pxLoopHandle.isNil():
await node.wakuPeerExchangeClient.pxLoopHandle.cancelAndWait()

View File

@ -22,7 +22,6 @@ export WakuPeerExchangeCodec
declarePublicGauge waku_px_peers_received_unknown,
"number of previously unknown ENRs received via peer exchange"
declarePublicGauge waku_px_peers_cached, "number of peer exchange peer ENRs cached"
declarePublicCounter waku_px_errors, "number of peer exchange errors", ["type"]
declarePublicCounter waku_px_peers_sent,
"number of ENRs sent to peer exchange requesters"
@ -32,11 +31,9 @@ logScope:
type WakuPeerExchange* = ref object of LPProtocol
peerManager*: PeerManager
enrCache*: seq[enr.Record]
cluster*: Option[uint16]
# todo: next step: ring buffer; future: implement cache satisfying https://rfc.vac.dev/spec/34/
requestRateLimiter*: RequestRateLimiter
pxLoopHandle*: Future[void]
proc respond(
wpx: WakuPeerExchange, enrs: seq[enr.Record], conn: Connection
@ -79,61 +76,50 @@ proc respondError(
return ok()
proc getEnrsFromCache(
wpx: WakuPeerExchange, numPeers: uint64
): seq[enr.Record] {.gcsafe.} =
if wpx.enrCache.len() == 0:
info "peer exchange ENR cache is empty"
return @[]
# copy and shuffle
randomize()
var shuffledCache = wpx.enrCache
shuffledCache.shuffle()
# return numPeers or less if cache is smaller
return shuffledCache[0 ..< min(shuffledCache.len.int, numPeers.int)]
proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): bool =
if peer.origin != Discv5:
trace "peer not from discv5", peer = $peer, origin = $peer.origin
return false
proc poolFilter*(
cluster: Option[uint16], origin: PeerOrigin, enr: enr.Record
): Result[void, string] =
if origin != Discv5:
trace "peer not from discv5", origin = $origin
return err("peer not from discv5: " & $origin)
if cluster.isSome() and enr.isClusterMismatched(cluster.get()):
trace "peer has mismatching cluster"
return err("peer has mismatching cluster")
return ok()
proc poolFilter*(cluster: Option[uint16], peer: RemotePeerInfo): Result[void, string] =
if peer.enr.isNone():
info "peer has no ENR", peer = $peer
return false
return err("peer has no ENR: " & $peer)
return poolFilter(cluster, peer.origin, peer.enr.get())
if cluster.isSome() and peer.enr.get().isClusterMismatched(cluster.get()):
info "peer has mismatching cluster", peer = $peer
return false
return true
proc populateEnrCache(wpx: WakuPeerExchange) =
# share only peers that i) are reachable ii) come from discv5 iii) share cluster
let withEnr = wpx.peerManager.switch.peerStore.getReachablePeers().filterIt(
poolFilter(wpx.cluster, it)
)
# either what we have or max cache size
var newEnrCache = newSeq[enr.Record](0)
for i in 0 ..< min(withEnr.len, MaxPeersCacheSize):
newEnrCache.add(withEnr[i].enr.get())
# swap cache for new
wpx.enrCache = newEnrCache
trace "ENR cache populated"
proc updatePxEnrCache(wpx: WakuPeerExchange) {.async.} =
# try more aggressively to fill the cache at startup
var attempts = 50
while wpx.enrCache.len < MaxPeersCacheSize and attempts > 0:
attempts -= 1
wpx.populateEnrCache()
await sleepAsync(1.seconds)
heartbeat "Updating px enr cache", CacheRefreshInterval:
wpx.populateEnrCache()
proc getEnrsFromStore(
wpx: WakuPeerExchange, numPeers: uint64
): seq[enr.Record] {.gcsafe.} =
# Reservoir sampling (Algorithm R)
var i = 0
let k = min(MaxPeersCacheSize, numPeers.int)
let enrStoreLen = wpx.peerManager.switch.peerStore[ENRBook].len
var enrs = newSeqOfCap[enr.Record](min(k, enrStoreLen))
wpx.peerManager.switch.peerStore.forEnrPeers(
peerId, peerConnectedness, peerOrigin, peerEnrRecord
):
if peerConnectedness == CannotConnect:
debug "Could not retrieve ENR because cannot connect to peer",
remotePeerId = peerId
continue
poolFilter(wpx.cluster, peerOrigin, peerEnrRecord).isOkOr:
debug "Could not get ENR because no peer matched pool", error = error
continue
if i < k:
enrs.add(peerEnrRecord)
else:
# Replace a random reservoir slot with probability k/(i+1) (Algorithm R)
let j = rand(i)
if j < k:
enrs[j] = peerEnrRecord
inc(i)
return enrs
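The loop above is reservoir sampling (Algorithm R): the first k eligible ENRs fill the reservoir, and each later eligible ENR replaces a random slot with probability k/(i+1), so every eligible record is equally likely to appear in the response. A self-contained sketch of the same idea over plain values, assuming std/random's global RNG (illustrative only, not part of the change):

import std/random

proc reservoirSample[T](items: openArray[T], k: int): seq[T] =
  ## Algorithm R: uniform sample of up to k items in a single pass.
  ## Call randomize() once at startup for non-deterministic results.
  var sample = newSeqOfCap[T](min(k, items.len))
  for i, item in items:
    if i < k:
      sample.add(item)
    else:
      let j = rand(i) # uniform in 0 .. i
      if j < k:
        sample[j] = item
  return sample

# e.g. reservoirSample([1, 2, 3, 4, 5], 2) returns two uniformly chosen values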
proc initProtocolHandler(wpx: WakuPeerExchange) =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
@ -174,7 +160,8 @@ proc initProtocolHandler(wpx: WakuPeerExchange) =
error "Failed to respond with BAD_REQUEST:", error = $error
return
let enrs = wpx.getEnrsFromCache(decBuf.request.numPeers)
let enrs = wpx.getEnrsFromStore(decBuf.request.numPeers)
info "peer exchange request received"
trace "px enrs to respond", enrs = $enrs
try:
@ -214,5 +201,4 @@ proc new*(
)
wpx.initProtocolHandler()
setServiceLimitMetric(WakuPeerExchangeCodec, rateLimitSetting)
asyncSpawn wpx.updatePxEnrCache()
return wpx

View File

@ -229,9 +229,20 @@ method register*(
var gasPrice: int
g.retryWrapper(gasPrice, "Failed to get gas price"):
int(await ethRpc.provider.eth_gasPrice()) * 2
let fetchedGasPrice = uint64(await ethRpc.provider.eth_gasPrice())
## Multiply by 2 to speed up the transaction
## Check for overflow when casting to int
if fetchedGasPrice > uint64(high(int) div 2):
warn "Gas price overflow detected, capping at maximum int value",
fetchedGasPrice = fetchedGasPrice, maxInt = high(int)
high(int)
else:
let calculatedGasPrice = int(fetchedGasPrice) * 2
debug "Gas price calculated",
fetchedGasPrice = fetchedGasPrice, gasPrice = calculatedGasPrice
calculatedGasPrice
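# Worked example of the cap above (assuming a 64-bit int): high(int) div 2 is
# 4_611_686_018_427_387_903, so any fetched price at or below that value
# doubles without overflow; anything larger is clamped to high(int).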
let idCommitmentHex = identityCredential.idCommitment.inHex()
info "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
debug "identityCredential idCommitmentHex", idCommitment = idCommitmentHex
let idCommitment = identityCredential.idCommitment.toUInt256()
let idCommitmentsToErase: seq[UInt256] = @[]
info "registering the member",
@ -248,11 +259,10 @@ method register*(
var tsReceipt: ReceiptObject
g.retryWrapper(tsReceipt, "Failed to get the transaction receipt"):
await ethRpc.getMinedTransactionReceipt(txHash)
info "registration transaction mined", txHash = txHash
debug "registration transaction mined", txHash = txHash
g.registrationTxHash = some(txHash)
# the receipt topic holds the hash of the signature of the raised events
# TODO: make this robust. search within the event list for the event
info "ts receipt", receipt = tsReceipt[]
debug "ts receipt", receipt = tsReceipt[]
if tsReceipt.status.isNone():
raise newException(ValueError, "Transaction failed: status is None")
@ -261,18 +271,27 @@ method register*(
ValueError, "Transaction failed with status: " & $tsReceipt.status.get()
)
## Extract MembershipRegistered event from transaction logs (third event)
let thirdTopic = tsReceipt.logs[2].topics[0]
info "third topic", thirdTopic = thirdTopic
if thirdTopic !=
cast[FixedBytes[32]](keccak.keccak256.digest(
"MembershipRegistered(uint256,uint256,uint32)"
).data):
raise newException(ValueError, "register: unexpected event signature")
## Search through all transaction logs to find the MembershipRegistered event
let expectedEventSignature = cast[FixedBytes[32]](keccak.keccak256.digest(
"MembershipRegistered(uint256,uint256,uint32)"
).data)
## Parse MembershipRegistered event data: rateCommitment(256) || membershipRateLimit(256) || index(32)
let arguments = tsReceipt.logs[2].data
info "tx log data", arguments = arguments
var membershipRegisteredLog: Option[LogObject]
for log in tsReceipt.logs:
if log.topics.len > 0 and log.topics[0] == expectedEventSignature:
membershipRegisteredLog = some(log)
break
if membershipRegisteredLog.isNone():
raise newException(
ValueError, "register: MembershipRegistered event not found in transaction logs"
)
let registrationLog = membershipRegisteredLog.get()
## Parse MembershipRegistered event data: idCommitment(256) || membershipRateLimit(256) || index(32)
let arguments = registrationLog.data
trace "registration transaction log data", arguments = arguments
let
## Extract membership index from transaction log data (big endian)
membershipIndex = UInt256.fromBytesBE(arguments[64 .. 95])
@ -360,7 +379,7 @@ method generateProof*(
let x = keccak.keccak256.digest(data)
let extNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr:
let extNullifier = generateExternalNullifier(epoch, rlnIdentifier).valueOr:
return err("Failed to compute external nullifier: " & error)
let witness = RLNWitnessInput(
@ -438,10 +457,9 @@ method verifyProof*(
var normalizedProof = proof
normalizedProof.externalNullifier = poseidon(
@[@(proof.epoch), @(proof.rlnIdentifier)]
).valueOr:
let externalNullifier = generateExternalNullifier(proof.epoch, proof.rlnIdentifier).valueOr:
return err("Failed to compute external nullifier: " & error)
normalizedProof.externalNullifier = externalNullifier
let proofBytes = serialize(normalizedProof, input)
let proofBuffer = proofBytes.toBuffer()

View File

@ -6,7 +6,8 @@ import
stew/[arrayops, byteutils, endians2],
stint,
results,
std/[sequtils, strutils, tables]
std/[sequtils, strutils, tables],
nimcrypto/keccak as keccak
import ./rln_interface, ../conversion_utils, ../protocol_types, ../protocol_metrics
import ../../waku_core, ../../waku_keystore
@ -119,24 +120,6 @@ proc createRLNInstance*(): RLNResult =
res = createRLNInstanceLocal()
return res
proc sha256*(data: openArray[byte]): RlnRelayResult[MerkleNode] =
## a thin layer on top of the Nim wrapper of the sha256 hasher
var lenPrefData = encodeLengthPrefix(data)
var
hashInputBuffer = lenPrefData.toBuffer()
outputBuffer: Buffer # will hold the hash output
trace "sha256 hash input buffer length", bufflen = hashInputBuffer.len
let hashSuccess = sha256(addr hashInputBuffer, addr outputBuffer, true)
# check whether the hash call is done successfully
if not hashSuccess:
return err("error in sha256 hash")
let output = cast[ptr MerkleNode](outputBuffer.`ptr`)[]
return ok(output)
proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] =
## a thin layer on top of the Nim wrapper of the poseidon hasher
var inputBytes = serialize(data)
@ -180,9 +163,18 @@ proc toLeaves*(rateCommitments: seq[RateCommitment]): RlnRelayResult[seq[seq[byt
leaves.add(leaf)
return ok(leaves)
proc generateExternalNullifier*(
epoch: Epoch, rlnIdentifier: RlnIdentifier
): RlnRelayResult[ExternalNullifier] =
let epochHash = keccak.keccak256.digest(@(epoch))
let rlnIdentifierHash = keccak.keccak256.digest(@(rlnIdentifier))
let externalNullifier = poseidon(@[@(epochHash), @(rlnIdentifierHash)]).valueOr:
return err("Failed to compute external nullifier: " & error)
return ok(externalNullifier)
proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
let externalNullifier = poseidon(@[@(proof.epoch), @(proof.rlnIdentifier)]).valueOr:
return err("could not construct the external nullifier")
let externalNullifier = generateExternalNullifier(proof.epoch, proof.rlnIdentifier).valueOr:
return err("Failed to compute external nullifier: " & error)
return ok(
ProofMetadata(
nullifier: proof.nullifier,