Merge branch 'master' into release/v0.38

This commit is contained in:
Darshan 2026-02-19 16:28:32 +05:30 committed by GitHub
commit 12fbdfa668
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
58 changed files with 3202 additions and 267 deletions

View File

@ -10,7 +10,7 @@ assignees: ''
<!--
Add appropriate release number to title!
For detailed info on the release process refer to https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md
For detailed info on the release process refer to https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md
-->
### Items to complete
@ -21,23 +21,29 @@ All items below are to be completed by the owner of the given release.
- [ ] Assign release candidate tag to the release branch HEAD (e.g. `v0.X.0-beta-rc.0`, `v0.X.0-beta-rc.1`, ... `v0.X.0-beta-rc.N`).
- [ ] Generate and edit release notes in CHANGELOG.md.
- [ ] **Waku test and fleets validation**
- [ ] Ensure all the unit tests (specifically logos-messaging-js tests) are green against the release candidate.
- [ ] Deploy the release candidate to `waku.test` only through [deploy-waku-test job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it).
- After completion, disable [deployment job](https://ci.infra.status.im/job/nim-waku/) so that its version is not updated on every merge to master.
- Verify the deployed version at https://fleets.waku.org/.
- Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
- [ ] Analyze Kibana logs from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`.
- Most relevant logs are `(fleet: "waku.test" AND message: "SIGSEGV")`.
- [ ] Enable again the `waku.test` fleet to resume auto-deployment of the latest `master` commit.
- [ ] **Validation of release candidate**
- [ ] **Automated testing**
- [ ] Ensure all the unit tests (specifically logos-messaging-js tests) are green against the release candidate.
- [ ] **Waku fleet testing**
- [ ] Deploy the release candidate to `waku.test` through [deploy-waku-test job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-test/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it).
- After completion, disable fleet so that daily CI does not override your release candidate.
- Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image.
- Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
- [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`.
- Set time range to "Last 30 days" (or since last release).
- Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`.
- Document any crashes or errors found.
- [ ] If `waku.test` validation is successful, deploy to `waku.sandbox` using the [deploy-waku-sandbox job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
- [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. Most probably, if there are no crashes or errors in `waku.test`, there will be no crashes or errors in `waku.sandbox`.
- [ ] Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit.
- [ ] **Proceed with release**
- [ ] Assign a final release tag (`v0.X.0-beta`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0-beta-rc.N`) and submit a PR from the release branch to `master`.
- [ ] Update [nwaku-compose](https://github.com/logos-messaging/nwaku-compose) and [waku-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release.
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/logos-messaging/waku-rust-bindings) and make sure all examples and tests work.
- [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/logos-messaging/waku-go-bindings) and make sure all tests work.
- [ ] Create GitHub release (https://github.com/logos-messaging/nwaku/releases).
- [ ] Update [logos-delivery-compose](https://github.com/logos-messaging/logos-delivery-compose) and [logos-delivery-simulator](https://github.com/logos-messaging/logos-delivery-simulator) according to the new release.
- [ ] Bump logos-delivery dependency in [logos-delivery-rust-bindings](https://github.com/logos-messaging/logos-delivery-rust-bindings) and make sure all examples and tests work.
- [ ] Bump logos-delivery dependency in [logos-delivery-go-bindings](https://github.com/logos-messaging/logos-delivery-go-bindings) and make sure all tests work.
- [ ] Create GitHub release (https://github.com/logos-messaging/logos-delivery/releases).
- [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available.
- [ ] **Promote release to fleets**
@ -47,10 +53,11 @@ All items below are to be completed by the owner of the given release.
### Links
- [Release process](https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/logos-messaging/nwaku/blob/master/CHANGELOG.md)
- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md)
- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64)
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
- [Fleets](https://fleets.waku.org/)
- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab)
- [Kibana](https://kibana.infra.status.im/app/)

View File

@ -10,7 +10,7 @@ assignees: ''
<!--
Add appropriate release number to title!
For detailed info on the release process refer to https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md
For detailed info on the release process refer to https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md
-->
### Items to complete
@ -25,40 +25,46 @@ All items below are to be completed by the owner of the given release.
- [ ] **Automated testing**
- [ ] Ensure all the unit tests (specifically logos-messaging-js tests) are green against the release candidate.
- [ ] Ask Vac-QA and Vac-DST to perform the available tests against the release candidate.
- [ ] Vac-DST (an additional report is needed; see [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f))
- [ ] **Waku fleet testing**
- [ ] Deploy the release candidate to `waku.test` and `waku.sandbox` fleets.
- Start the [deployment job](https://ci.infra.status.im/job/nim-waku/) for both fleets and wait for it to finish (Jenkins access required; ask the infra team if you don't have it).
- After completion, disable [deployment job](https://ci.infra.status.im/job/nim-waku/) so that its version is not updated on every merge to `master`.
- Verify the deployed version at https://fleets.waku.org/.
- [ ] Deploy the release candidate to `waku.test` fleet.
- Start the [deployment job](https://ci.infra.status.im/job/nim-waku/) and wait for it to finish (Jenkins access required; ask the infra team if you don't have it).
- After completion, disable fleet so that daily CI does not override your release candidate.
- Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image.
- Confirm the container image exists on [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
- [ ] Search _Kibana_ logs from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test` and `waku.sandbox`.
- Most relevant logs are `(fleet: "waku.test" AND message: "SIGSEGV")` OR `(fleet: "waku.sandbox" AND message: "SIGSEGV")`.
- [ ] Enable again the `waku.test` fleet to resume auto-deployment of the latest `master` commit.
- [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`.
- Set time range to "Last 30 days" (or since last release).
- Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`.
- Document any crashes or errors found.
- [ ] If `waku.test` validation is successful, deploy to `waku.sandbox` using the same [deployment job](https://ci.infra.status.im/job/nim-waku/).
- [ ] Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. Most probably, if there are no crashes or errors in `waku.test`, there will be no crashes or errors in `waku.sandbox`.
- [ ] Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit.
- [ ] **Status fleet testing**
- [ ] Deploy release candidate to `status.staging`
- [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue.
- [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client.
- 1:1 Chats with each other
- Send and receive messages in a community
- Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store
- [ ] Perform checks based on _end user impact_
- [ ] Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues on their Discord server or in the [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (this is not a blocking point.)
- [ ] Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested
- [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`
- [ ] Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC.
- [ ] **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities.
- [ ] **QA and DST testing**
- [ ] Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams.
- [ ] Vac-DST: An additional report is needed ([see this example](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f)). Inform DST team about what are the expectations for this rc. For example, if we expect higher or lower bandwidth consumption.
- [ ] **Status fleet testing**
- [ ] Deploy release candidate to `status.staging`
- [ ] Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue.
- [ ] Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client.
- 1:1 Chats with each other
- Send and receive messages in a community
- Close one instance, send messages with second instance, reopen first instance and confirm messages sent while offline are retrieved from store
- [ ] Perform checks based on _end user impact_
- [ ] Inform other (Waku and Status) CCs to point their instances to `status.staging` for a few days. Ping Status colleagues on their Discord server or in the [Status community](https://status.app/c/G3kAAMSQtb05kog3aGbr3kiaxN4tF5xy4BAGEkkLwILk2z3GcoYlm5hSJXGn7J3laft-tnTwDWmYJ18dP_3bgX96dqr_8E3qKAvxDf3NrrCMUBp4R9EYkQez9XSM4486mXoC3mIln2zc-TNdvjdfL9eHVZ-mGgs=#zQ3shZeEJqTC1xhGUjxuS4rtHSrhJ8vUYp64v6qWkLpvdy9L9) (this is not a blocking point.)
- [ ] Ask Status-QA to perform sanity checks (as described above) and checks based on _end user impact_; specify the version being tested
- [ ] Ask Status-QA or infra to run the automated Status e2e tests against `status.staging`
- [ ] Get other CCs' sign-off: they should comment on this PR, e.g., "Used the app for a week, no problem." If problems are reported, resolve them and create a new RC.
- [ ] **Get Status-QA sign-off**, ensuring that the `status.test` update will not disturb ongoing activities.
- [ ] **Proceed with release**
- [ ] Assign a final release tag (`v0.X.0`) to the same commit that contains the validated release-candidate tag (e.g. `v0.X.0`).
- [ ] Update [nwaku-compose](https://github.com/logos-messaging/nwaku-compose) and [waku-simulator](https://github.com/logos-messaging/waku-simulator) according to the new release.
- [ ] Bump nwaku dependency in [waku-rust-bindings](https://github.com/logos-messaging/waku-rust-bindings) and make sure all examples and tests work.
- [ ] Bump nwaku dependency in [waku-go-bindings](https://github.com/logos-messaging/waku-go-bindings) and make sure all tests work.
- [ ] Create GitHub release (https://github.com/logos-messaging/nwaku/releases).
- [ ] Update [logos-delivery-compose](https://github.com/logos-messaging/logos-delivery-compose) and [logos-delivery-simulator](https://github.com/logos-messaging/logos-delivery-simulator) according to the new release.
- [ ] Bump logos-delivery dependency in [logos-delivery-rust-bindings](https://github.com/logos-messaging/logos-delivery-rust-bindings) and make sure all examples and tests work.
- [ ] Bump logos-delivery dependency in [logos-delivery-go-bindings](https://github.com/logos-messaging/logos-delivery-go-bindings) and make sure all tests work.
- [ ] Create GitHub release (https://github.com/logos-messaging/logos-delivery/releases).
- [ ] Submit a PR to merge the release branch back to `master`. Make sure you use the option "Merge pull request (Create a merge commit)" to perform the merge. Ping repo admin if this option is not available.
- [ ] **Promote release to fleets**
@ -67,10 +73,11 @@ All items below are to be completed by the owner of the given release.
### Links
- [Release process](https://github.com/logos-messaging/nwaku/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/logos-messaging/nwaku/blob/master/CHANGELOG.md)
- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md)
- [Fleet ownership](https://www.notion.so/Fleet-Ownership-7532aad8896d46599abac3c274189741?pvs=4#d2d2f0fe4b3c429fbd860a1d64f89a64)
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
- [Fleets](https://fleets.waku.org/)
- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab)
- [Kibana](https://kibana.infra.status.im/app/)

View File

@ -1,4 +1,4 @@
name: Daily logos-messaging-nim CI
name: Daily logos-delivery CI
on:
schedule:
@ -72,7 +72,7 @@ jobs:
{\"name\": \"Status\", \"value\": \"$STATUS\", \"inline\": true}
],
\"url\": \"$RUN_URL\",
\"footer\": {\"text\": \"Daily logos-messaging-nim CI\"}
\"footer\": {\"text\": \"Daily logos-delivery CI\"}
}]
}" \
"$DISCORD_WEBHOOK_URL"

View File

@ -138,12 +138,12 @@ jobs:
build-docker-image:
needs: changes
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }}
uses: logos-messaging/logos-messaging-nim/.github/workflows/container-image.yml@10dc3d3eb4b6a3d4313f7b2cc4a85a925e9ce039
uses: logos-messaging/logos-delivery/.github/workflows/container-image.yml@10dc3d3eb4b6a3d4313f7b2cc4a85a925e9ce039
secrets: inherit
nwaku-nwaku-interop-tests:
needs: build-docker-image
uses: logos-messaging/logos-messaging-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_STABLE
uses: logos-messaging/logos-delivery-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_STABLE
with:
node_nwaku: ${{ needs.build-docker-image.outputs.image }}
@ -151,14 +151,14 @@ jobs:
js-waku-node:
needs: build-docker-image
uses: logos-messaging/logos-messaging-js/.github/workflows/test-node.yml@master
uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master
with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node
js-waku-node-optional:
needs: build-docker-image
uses: logos-messaging/logos-messaging-js/.github/workflows/test-node.yml@master
uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master
with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node-optional

View File

@ -91,14 +91,14 @@ jobs:
build-docker-image:
needs: tag-name
uses: logos-messaging/nwaku/.github/workflows/container-image.yml@master
uses: logos-messaging/logos-delivery/.github/workflows/container-image.yml@master
with:
image_tag: ${{ needs.tag-name.outputs.tag }}
secrets: inherit
js-waku-node:
needs: build-docker-image
uses: logos-messaging/logos-messaging-js/.github/workflows/test-node.yml@master
uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master
with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node
@ -106,7 +106,7 @@ jobs:
js-waku-node-optional:
needs: build-docker-image
uses: logos-messaging/logos-messaging-js/.github/workflows/test-node.yml@master
uses: logos-messaging/logos-delivery-js/.github/workflows/test-node.yml@master
with:
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
test_type: node-optional
@ -150,7 +150,7 @@ jobs:
-u $(id -u) \
docker.io/wakuorg/sv4git:latest \
release-notes ${RELEASE_NOTES_TAG} --previous $(git tag -l --sort -creatordate | grep -e "^v[0-9]*\.[0-9]*\.[0-9]*$") |\
sed -E 's@#([0-9]+)@[#\1](https://github.com/logos-messaging/nwaku/issues/\1)@g' > release_notes.md
sed -E 's@#([0-9]+)@[#\1](https://github.com/logos-messaging/logos-delivery/issues/\1)@g' > release_notes.md
sed -i "s/^## .*/Generated at $(date)/" release_notes.md

View File

@ -7,7 +7,7 @@ ARG NIM_COMMIT
ARG LOG_LEVEL=TRACE
# Get build tools and required header files
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq libbsd-dev
WORKDIR /app
COPY . .
@ -24,7 +24,6 @@ RUN make -j$(nproc) deps QUICK_AND_DIRTY_COMPILER=1 ${NIM_COMMIT}
# Build the final node binary
RUN make -j$(nproc) ${NIM_COMMIT} $MAKE_TARGET LOG_LEVEL=${LOG_LEVEL} NIMFLAGS="${NIMFLAGS}"
# REFERENCE IMAGE as BASE for specialized PRODUCTION IMAGES----------------------------------------
FROM alpine:3.18 AS base_lpt
@ -44,8 +43,8 @@ RUN apk add --no-cache libgcc libpq-dev \
wget \
iproute2 \
python3 \
jq
jq \
libstdc++
COPY --from=nim-build /app/build/lightpush_publisher_mix /usr/bin/
RUN chmod +x /usr/bin/lightpush_publisher_mix

View File

@ -434,10 +434,11 @@ docker-liteprotocoltester-push:
################
## C Bindings ##
################
.PHONY: cbindings cwaku_example libwaku
.PHONY: cbindings cwaku_example libwaku liblogosdelivery liblogosdelivery_example
STATIC ?= 0
BUILD_COMMAND ?= libwakuDynamic
LIBWAKU_BUILD_COMMAND ?= libwakuDynamic
LIBLOGOSDELIVERY_BUILD_COMMAND ?= liblogosdeliveryDynamic
ifeq ($(detected_OS),Windows)
LIB_EXT_DYNAMIC = dll
@ -453,11 +454,40 @@ endif
LIB_EXT := $(LIB_EXT_DYNAMIC)
ifeq ($(STATIC), 1)
LIB_EXT = $(LIB_EXT_STATIC)
BUILD_COMMAND = libwakuStatic
LIBWAKU_BUILD_COMMAND = libwakuStatic
LIBLOGOSDELIVERY_BUILD_COMMAND = liblogosdeliveryStatic
endif
libwaku: | build deps librln
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(LIBWAKU_BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
liblogosdelivery: | build deps librln
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(LIBLOGOSDELIVERY_BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
logosdelivery_example: | build liblogosdelivery
@echo -e $(BUILD_MSG) "build/$@"
ifeq ($(detected_OS),Darwin)
gcc -o build/$@ \
liblogosdelivery/examples/logosdelivery_example.c \
-I./liblogosdelivery \
-L./build \
-llogosdelivery \
-Wl,-rpath,./build
else ifeq ($(detected_OS),Linux)
gcc -o build/$@ \
liblogosdelivery/examples/logosdelivery_example.c \
-I./liblogosdelivery \
-L./build \
-llogosdelivery \
-Wl,-rpath,'$$ORIGIN'
else ifeq ($(detected_OS),Windows)
gcc -o build/$@.exe \
liblogosdelivery/examples/logosdelivery_example.c \
-I./liblogosdelivery \
-L./build \
-llogosdelivery \
-lws2_32
endif
#####################
## Mobile Bindings ##

View File

@ -2,7 +2,7 @@
## Introduction
The logos-messaging-nim, a.k.a. lmn or nwaku, repository implements a set of libp2p protocols aimed to bring
This repository implements a set of libp2p protocols aimed to bring
private communications.
- Nim implementation of [these specs](https://github.com/vacp2p/rfc-index/tree/main/waku).

View File

@ -30,6 +30,7 @@ import
protobuf/minprotobuf, # message serialisation/deserialisation from and to protobufs
nameresolving/dnsresolver,
protocols/mix/curve25519,
protocols/mix/mix_protocol,
] # define DNS resolution
import
waku/[
@ -38,6 +39,7 @@ import
waku_lightpush/rpc,
waku_enr,
discovery/waku_dnsdisc,
discovery/waku_kademlia,
waku_node,
node/waku_metrics,
node/peer_manager,
@ -453,14 +455,48 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
(await node.mountMix(conf.clusterId, mixPrivKey, conf.mixnodes)).isOkOr:
error "failed to mount waku mix protocol: ", error = $error
quit(QuitFailure)
await node.mountRendezvousClient(conf.clusterId)
# Setup extended kademlia discovery if bootstrap nodes are provided
if conf.kadBootstrapNodes.len > 0:
var kadBootstrapPeers: seq[(PeerId, seq[MultiAddress])]
for nodeStr in conf.kadBootstrapNodes:
let (peerId, ma) = parseFullAddress(nodeStr).valueOr:
error "Failed to parse kademlia bootstrap node", node = nodeStr, error = error
continue
kadBootstrapPeers.add((peerId, @[ma]))
if kadBootstrapPeers.len > 0:
node.wakuKademlia = WakuKademlia.new(
node.switch,
ExtendedKademliaDiscoveryParams(
bootstrapNodes: kadBootstrapPeers,
mixPubKey: some(mixPubKey),
advertiseMix: false,
),
node.peerManager,
getMixNodePoolSize = proc(): int {.gcsafe, raises: [].} =
if node.wakuMix.isNil():
0
else:
node.getMixNodePoolSize(),
isNodeStarted = proc(): bool {.gcsafe, raises: [].} =
node.started,
).valueOr:
error "failed to setup kademlia discovery", error = error
quit(QuitFailure)
#await node.mountRendezvousClient(conf.clusterId)
await node.start()
node.peerManager.start()
if not node.wakuKademlia.isNil():
(await node.wakuKademlia.start(minMixPeers = MinMixNodePoolSize)).isOkOr:
error "failed to start kademlia discovery", error = error
quit(QuitFailure)
await node.mountLibp2pPing()
await node.mountPeerExchangeClient()
#await node.mountPeerExchangeClient()
let pubsubTopic = conf.getPubsubTopic(node, conf.contentTopic)
echo "pubsub topic is: " & pubsubTopic
let nick = await readNick(transp)
@ -601,11 +637,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
node, pubsubTopic, conf.contentTopic, servicePeerInfo, false
)
echo "waiting for mix nodes to be discovered..."
while true:
if node.getMixNodePoolSize() >= MinMixNodePoolSize:
break
discard await node.fetchPeerExchangePeers()
await sleepAsync(1000)
while node.getMixNodePoolSize() < MinMixNodePoolSize:
info "waiting for mix nodes to be discovered",

View File

@ -203,13 +203,13 @@ type
fleet* {.
desc:
"Select the fleet to connect to. This sets the DNS discovery URL to the selected fleet.",
defaultValue: Fleet.test,
defaultValue: Fleet.none,
name: "fleet"
.}: Fleet
contentTopic* {.
desc: "Content topic for chat messages.",
defaultValue: "/toy-chat-mix/2/huilong/proto",
defaultValue: "/toy-chat/2/baixa-chiado/proto",
name: "content-topic"
.}: string
@ -228,7 +228,14 @@ type
desc: "WebSocket Secure Support.",
defaultValue: false,
name: "websocket-secure-support"
.}: bool ## rln-relay configuration
.}: bool
## Kademlia Discovery config
kadBootstrapNodes* {.
desc:
"Peer multiaddr for kademlia discovery bootstrap node (must include /p2p/<peerID>). Argument may be repeated.",
name: "kad-bootstrap-node"
.}: seq[string]
proc parseCmdArg*(T: type MixNodePubInfo, p: string): T =
let elements = p.split(":")

View File

@ -7,7 +7,7 @@ ARG NIM_COMMIT
ARG LOG_LEVEL=TRACE
# Get build tools and required header files
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq
RUN apk add --no-cache bash git build-base openssl-dev linux-headers curl jq libbsd-dev
WORKDIR /app
COPY . .
@ -43,7 +43,8 @@ EXPOSE 30303 60000 8545
RUN apk add --no-cache libgcc libpq-dev \
wget \
iproute2 \
python3
python3 \
libstdc++
COPY --from=nim-build /app/build/liteprotocoltester /usr/bin/
RUN chmod +x /usr/bin/liteprotocoltester

View File

@ -20,7 +20,7 @@ For more context, see https://trunkbaseddevelopment.com/branch-for-release/
- **Full release**: follow the entire [Release process](#release-process--step-by-step).
- **Beta release**: skip just `6a` and `6c` steps from [Release process](#release-process--step-by-step).
- **Beta release**: skip just `6c` and `6d` steps from [Release process](#release-process--step-by-step).
- Choose the appropriate release process based on the release type:
- [Full Release](../../.github/ISSUE_TEMPLATE/prepare_full_release.md)
@ -70,20 +70,26 @@ For more context, see https://trunkbaseddevelopment.com/branch-for-release/
6a. **Automated testing**
- Ensure all the unit tests (specifically js-waku tests) are green against the release candidate.
- Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams.
> We need an additional report like [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f) specifically from the DST team.
6b. **Waku fleet testing**
- Start job on `waku.sandbox` and `waku.test` [Deployment job](https://ci.infra.status.im/job/nim-waku/), wait for completion of the job. If it fails, then debug it.
- After completion, disable [deployment job](https://ci.infra.status.im/job/nim-waku/) so that its version is not updated on every merge to `master`.
- Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate version.
- Start job on `waku.test` [Deployment job](https://ci.infra.status.im/job/nim-waku/), wait for completion of the job. If it fails, then debug it.
- After completion, disable the fleet so that daily CI does not override your release candidate.
- Verify at https://fleets.waku.org/ that the fleet is locked to the release candidate image.
- Check if the image is created at [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab).
- Search _Kibana_ logs from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test` and `waku.sandbox`.
- Most relevant logs are `(fleet: "waku.test" AND message: "SIGSEGV")` OR `(fleet: "waku.sandbox" AND message: "SIGSEGV")`.
- Search [Kibana logs](https://kibana.infra.status.im/app/discover) from the previous month (since the last release was deployed) for possible crashes or errors in `waku.test`.
- Set time range to "Last 30 days" (or since last release).
- Most relevant search query: `(fleet: "waku.test" AND message: "SIGSEGV")`, `(fleet: "waku.test" AND message: "exception")`, `(fleet: "waku.test" AND message: "error")`.
- Document any crashes or errors found.
- If `waku.test` validation is successful, deploy to `waku.sandbox` using the same [Deployment job](https://ci.infra.status.im/job/nim-waku/).
- Search [Kibana logs](https://kibana.infra.status.im/app/discover) for `waku.sandbox`: `(fleet: "waku.sandbox" AND message: "SIGSEGV")`, `(fleet: "waku.sandbox" AND message: "exception")`, `(fleet: "waku.sandbox" AND message: "error")`. Most probably, if there are no crashes or errors in `waku.test`, there will be no crashes or errors in `waku.sandbox`.
- Enable the `waku.test` fleet again to resume auto-deployment of the latest `master` commit.
6c. **Status fleet testing**
6c. **QA and DST testing**
- Ask Vac-QA and Vac-DST to run their available tests against the release candidate; share all release candidates with both teams.
> We need an additional report like [this](https://www.notion.so/DST-Reports-1228f96fb65c80729cd1d98a7496fe6f) specifically from the DST team. Inform DST team about what are the expectations for this rc. For example, if we expect higher or lower bandwidth consumption.
6d. **Status fleet testing**
- Deploy release candidate to `status.staging`
- Perform [sanity check](https://www.notion.so/How-to-test-Nwaku-on-Status-12c6e4b9bf06420ca868bd199129b425) and log results as comments in this issue.
- Connect 2 instances to `status.staging` fleet, one in relay mode, the other one in light client.
@ -120,10 +126,10 @@ We also need to merge the release branch back into master as a final step.
2. Deploy the release image to [Dockerhub](https://hub.docker.com/r/wakuorg/nwaku) by triggering [the manual Jenkins deployment job](https://ci.infra.status.im/job/nim-waku/job/docker-manual/).
> Ensure the following build parameters are set:
> - `MAKE_TARGET`: `wakunode2`
> - `IMAGE_TAG`: the release tag (e.g. `v0.36.0`)
> - `IMAGE_TAG`: the release tag (e.g. `v0.38.0`)
> - `IMAGE_NAME`: `wakuorg/nwaku`
> - `NIMFLAGS`: `--colors:off -d:disableMarchNative -d:chronicles_colors:none -d:postgres`
> - `GIT_REF` the release tag (e.g. `v0.36.0`)
> - `GIT_REF` the release tag (e.g. `v0.38.0`)
### Performing a patch release
@ -154,4 +160,5 @@ We also need to merge the release branch back into master as a final step.
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
- [Fleets](https://fleets.waku.org/)
- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab)
- [Harbor](https://harbor.status.im/harbor/projects/9/repositories/nwaku/artifacts-tab)
- [Kibana](https://kibana.infra.status.im/app/)

View File

@ -71,6 +71,13 @@
zerokitRln = zerokit.packages.${system}.rln;
};
liblogosdelivery = pkgs.callPackage ./nix/default.nix {
inherit stableSystems;
src = self;
targets = ["liblogosdelivery"];
zerokitRln = zerokit.packages.${system}.rln;
};
default = libwaku;
});

123
liblogosdelivery/BUILD.md Normal file
View File

@ -0,0 +1,123 @@
# Building liblogosdelivery and Examples
## Prerequisites
- Nim 2.x compiler
- Rust toolchain (for RLN dependencies)
- GCC or Clang compiler
- Make
## Building the Library
### Dynamic Library
```bash
make liblogosdelivery
```
This creates `build/liblogosdelivery.dylib` (macOS) or `build/liblogosdelivery.so` (Linux).
### Static Library
```bash
make liblogosdelivery STATIC=1
```
This creates `build/liblogosdelivery.a`.
## Building Examples
### liblogosdelivery Example
Compile the C example that demonstrates all library features:
```bash
# Using Make (recommended)
make liblogosdelivery_example
## Running Examples
```bash
./build/liblogosdelivery_example
```
The example will:
1. Create a Logos Messaging node
2. Register event callbacks for message events
3. Start the node
4. Subscribe to a content topic
5. Send a message
6. Show message delivery events (sent, propagated, or error)
7. Unsubscribe and cleanup
## Build Artifacts
After building, you'll have:
```
build/
├── liblogosdelivery.dylib # Dynamic library (34MB)
├── liblogosdelivery.dylib.dSYM/ # Debug symbols
└── liblogosdelivery_example # Compiled example (34KB)
```
## Library Headers
The main header file is:
- `liblogosdelivery/liblogosdelivery.h` - C API declarations
## Troubleshooting
### Library not found at runtime
If you get "library not found" errors when running the example:
**macOS:**
```bash
export DYLD_LIBRARY_PATH=/path/to/build:$DYLD_LIBRARY_PATH
./build/liblogosdelivery_example
```
**Linux:**
```bash
export LD_LIBRARY_PATH=/path/to/build:$LD_LIBRARY_PATH
./build/liblogosdelivery_example
```
## Cross-Compilation
For cross-compilation, you need to:
1. Build the Nim library for the target platform
2. Use the appropriate cross-compiler
3. Link against the target platform's liblogosdelivery
Example for Linux from macOS:
```bash
# Build library for Linux (requires Docker or cross-compilation setup)
# Then compile with cross-compiler
```
## Integration with Your Project
### CMake
```cmake
find_library(LOGOSDELIVERY_LIBRARY NAMES logosdelivery PATHS ${PROJECT_SOURCE_DIR}/build)
include_directories(${PROJECT_SOURCE_DIR}/liblogosdelivery)
target_link_libraries(your_target ${LOGOSDELIVERY_LIBRARY})
```
### Makefile
```makefile
CFLAGS += -I/path/to/liblogosdelivery
LDFLAGS += -L/path/to/build -llogosdelivery -Wl,-rpath,/path/to/build
your_program: your_program.c
$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
```
## API Documentation
See:
- [liblogosdelivery.h](liblogosdelivery/liblogosdelivery.h) - API function declarations
- [MESSAGE_EVENTS.md](liblogosdelivery/MESSAGE_EVENTS.md) - Message event handling guide

View File

@ -0,0 +1,148 @@
# Message Event Handling in LMAPI
## Overview
The liblogosdelivery library emits three types of message delivery events that clients can listen to by registering an event callback using `logosdelivery_set_event_callback()`.
## Event Types
### 1. message_sent
Emitted when a message is successfully accepted by the send service and queued for delivery.
**JSON Structure:**
```json
{
"eventType": "message_sent",
"requestId": "unique-request-id",
"messageHash": "0x..."
}
```
**Fields:**
- `eventType`: Always "message_sent"
- `requestId`: Request ID returned from the send operation
- `messageHash`: Hash of the message that was sent
### 2. message_propagated
Emitted when a message has been successfully propagated to neighboring nodes on the network.
**JSON Structure:**
```json
{
"eventType": "message_propagated",
"requestId": "unique-request-id",
"messageHash": "0x..."
}
```
**Fields:**
- `eventType`: Always "message_propagated"
- `requestId`: Request ID from the send operation
- `messageHash`: Hash of the message that was propagated
### 3. message_error
Emitted when an error occurs during message sending or propagation.
**JSON Structure:**
```json
{
"eventType": "message_error",
"requestId": "unique-request-id",
"messageHash": "0x...",
"error": "error description"
}
```
**Fields:**
- `eventType`: Always "message_error"
- `requestId`: Request ID from the send operation
- `messageHash`: Hash of the message that failed
- `error`: Description of what went wrong
## Usage
### 1. Define an Event Callback
```c
void event_callback(int ret, const char *msg, size_t len, void *userData) {
if (ret != RET_OK || msg == NULL || len == 0) {
return;
}
// Parse the JSON message
// Extract eventType field
// Handle based on event type
if (eventType == "message_sent") {
// Handle message sent
} else if (eventType == "message_propagated") {
// Handle message propagated
} else if (eventType == "message_error") {
// Handle message error
}
}
```
### 2. Register the Callback
```c
void *ctx = logosdelivery_create_node(config, callback, userData);
logosdelivery_set_event_callback(ctx, event_callback, NULL);
```
### 3. Start the Node
Once the node is started, events will be delivered to your callback:
```c
logosdelivery_start_node(ctx, callback, userData);
```
## Event Flow
For a typical successful message send:
1. **send** → Returns request ID
2. **message_sent** → Message accepted and queued
3. **message_propagated** → Message delivered to peers
For a failed message send:
1. **send** → Returns request ID
2. **message_sent** → Message accepted and queued
3. **message_error** → Delivery failed with error description
## Important Notes
1. **Thread Safety**: The event callback is invoked from the FFI worker thread. Ensure your callback is thread-safe if it accesses shared state.
2. **Non-Blocking**: Keep the callback fast and non-blocking. Do not perform long-running operations in the callback.
3. **JSON Parsing**: The example uses a simple string-based parser. For production, use a proper JSON library like:
- [cJSON](https://github.com/DaveGamble/cJSON)
- [json-c](https://github.com/json-c/json-c)
- [Jansson](https://github.com/akheron/jansson)
4. **Memory Management**: The message buffer is owned by the library. Copy any data you need to retain.
5. **Event Order**: Events are delivered in the order they occur, but timing depends on network conditions.
## Example Implementation
See `examples/liblogosdelivery_example.c` for a complete working example that:
- Registers an event callback
- Sends a message
- Receives and prints all three event types
- Properly parses the JSON event structure
## Debugging Events
To see all events during development:
```c
void debug_event_callback(int ret, const char *msg, size_t len, void *userData) {
printf("Event received: %.*s\n", (int)len, msg);
}
```
This will print the raw JSON for all events, helping you understand the event structure.

262
liblogosdelivery/README.md Normal file
View File

@ -0,0 +1,262 @@
# Logos Messaging API (LMAPI) Library
A C FFI library providing a simplified interface to Logos Messaging functionality.
## Overview
This library wraps the high-level API functions from `waku/api/api.nim` and exposes them via a C FFI interface, making them accessible from C, C++, and other languages that support C FFI.
## API Functions
### Node Lifecycle
#### `logosdelivery_create_node`
Creates a new instance of the node from the given configuration JSON.
```c
void *logosdelivery_create_node(
const char *configJson,
FFICallBack callback,
void *userData
);
```
**Parameters:**
- `configJson`: JSON string containing node configuration
- `callback`: Callback function to receive the result
- `userData`: User data passed to the callback
**Returns:** Pointer to the context needed by other API functions, or NULL on error.
**Example configuration JSON:**
```json
{
"mode": "Core",
"clusterId": 1,
"entryNodes": [
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
],
"networkingConfig": {
"listenIpv4": "0.0.0.0",
"p2pTcpPort": 60000,
"discv5UdpPort": 9000
}
}
```
#### `logosdelivery_start_node`
Starts the node.
```c
int logosdelivery_start_node(
void *ctx,
FFICallBack callback,
void *userData
);
```
#### `logosdelivery_stop_node`
Stops the node.
```c
int logosdelivery_stop_node(
void *ctx,
FFICallBack callback,
void *userData
);
```
#### `logosdelivery_destroy`
Destroys a node instance and frees resources.
```c
int logosdelivery_destroy(
void *ctx,
FFICallBack callback,
void *userData
);
```
### Messaging
#### `logosdelivery_subscribe`
Subscribe to a content topic to receive messages.
```c
int logosdelivery_subscribe(
void *ctx,
FFICallBack callback,
void *userData,
const char *contentTopic
);
```
**Parameters:**
- `ctx`: Context pointer from `logosdelivery_create_node`
- `callback`: Callback function to receive the result
- `userData`: User data passed to the callback
- `contentTopic`: Content topic string (e.g., "/myapp/1/chat/proto")
#### `logosdelivery_unsubscribe`
Unsubscribe from a content topic.
```c
int logosdelivery_unsubscribe(
void *ctx,
FFICallBack callback,
void *userData,
const char *contentTopic
);
```
#### `logosdelivery_send`
Send a message.
```c
int logosdelivery_send(
void *ctx,
FFICallBack callback,
void *userData,
const char *messageJson
);
```
**Parameters:**
- `messageJson`: JSON string containing the message
**Example message JSON:**
```json
{
"contentTopic": "/myapp/1/chat/proto",
"payload": "SGVsbG8gV29ybGQ=",
"ephemeral": false
}
```
Note: The `payload` field should be base64-encoded.
**Returns:** Request ID in the callback message that can be used to track message delivery.
### Events
#### `logosdelivery_set_event_callback`
Sets a callback that will be invoked whenever an event occurs (e.g., message received).
```c
void logosdelivery_set_event_callback(
void *ctx,
FFICallBack callback,
void *userData
);
```
**Important:** The callback should be fast, non-blocking, and thread-safe.
## Building
The library follows the same build system as the main Logos Messaging project.
### Build the library
```bash
make liblogosdeliveryStatic # Build static library
# or
make liblogosdeliveryDynamic # Build dynamic library
```
## Return Codes
All functions that return `int` use the following return codes:
- `RET_OK` (0): Success
- `RET_ERR` (1): Error
- `RET_MISSING_CALLBACK` (2): Missing callback function
## Callback Function
All API functions use the following callback signature:
```c
typedef void (*FFICallBack)(
int callerRet,
const char *msg,
size_t len,
void *userData
);
```
**Parameters:**
- `callerRet`: Return code (RET_OK, RET_ERR, etc.)
- `msg`: Response message (may be empty for success)
- `len`: Length of the message
- `userData`: User data passed in the original call
## Example Usage
```c
#include "liblogosdelivery.h"
#include <stdio.h>
void callback(int ret, const char *msg, size_t len, void *userData) {
if (ret == RET_OK) {
printf("Success: %.*s\n", (int)len, msg);
} else {
printf("Error: %.*s\n", (int)len, msg);
}
}
int main() {
const char *config = "{"
"\"mode\": \"Core\","
"\"clusterId\": 1"
"}";
// Create node
void *ctx = logosdelivery_create_node(config, callback, NULL);
if (ctx == NULL) {
return 1;
}
// Start node
logosdelivery_start_node(ctx, callback, NULL);
// Subscribe to a topic
logosdelivery_subscribe(ctx, callback, NULL, "/myapp/1/chat/proto");
// Send a message
const char *msg = "{"
"\"contentTopic\": \"/myapp/1/chat/proto\","
"\"payload\": \"SGVsbG8gV29ybGQ=\","
"\"ephemeral\": false"
"}";
logosdelivery_send(ctx, callback, NULL, msg);
// Clean up
logosdelivery_stop_node(ctx, callback, NULL);
logosdelivery_destroy(ctx, callback, NULL);
return 0;
}
```
## Architecture
The library is structured as follows:
- `liblogosdelivery.h`: C header file with function declarations
- `liblogosdelivery.nim`: Main library entry point
- `declare_lib.nim`: Library declaration and initialization
- `lmapi/node_api.nim`: Node lifecycle API implementation
- `lmapi/messaging_api.nim`: Subscribe/send API implementation
The library uses the nim-ffi framework for FFI infrastructure, which handles:
- Thread-safe request processing
- Async operation management
- Memory management between C and Nim
- Callback marshaling
## See Also
- Main API documentation: `waku/api/api.nim`
- Original libwaku library: `library/libwaku.nim`
- nim-ffi framework: `vendor/nim-ffi/`

View File

@ -0,0 +1,24 @@
import ffi
import waku/factory/waku
declareLibrary("logosdelivery")
template requireInitializedNode*(
    ctx: ptr FFIContext[Waku], opName: string, onError: untyped
) =
  ## Guard used at the top of the FFI procs: runs `onError` when the context
  ## is nil or the node has not been created yet. A string `errMsg` prefixed
  ## with `opName` is injected into `onError`'s scope; `onError` is expected
  ## to return/raise, otherwise execution falls through to the caller's body.
  if isNil(ctx):
    let errMsg {.inject.} = opName & " failed: invalid context"
    onError
  elif isNil(ctx.myLib) or isNil(ctx.myLib[]):
    let errMsg {.inject.} = opName & " failed: node is not initialized"
    onError
proc logosdelivery_set_event_callback(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.dynlib, exportc, cdecl.} =
  ## C entry point: stores the client-supplied event callback and its user
  ## data in the FFI context so library events can be forwarded to it.
  if isNil(ctx):
    # No callback/context to report through here, so fall back to echo.
    echo "error: invalid context in logosdelivery_set_event_callback"
    return
  ctx[].eventCallback = cast[pointer](callback)
  ctx[].eventUserData = userData

View File

@ -0,0 +1,193 @@
#include "../liblogosdelivery.h"
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
static int create_node_ok = -1;
// Helper function to extract a JSON string field value
// Very basic parser - for production use a proper JSON library
// Extract the string value of "field" from a flat JSON object into buffer.
// Very basic parser - for production use a proper JSON library.
// Improvements over the naive `"field":"` scan:
//   - tolerates optional whitespace around the colon ("field": "value"),
//   - keeps searching when the matched text is not actually a key (e.g. the
//     field name appears inside an earlier string value).
// The value is copied into buffer, truncated to fit if necessary, and always
// NUL-terminated. Returns buffer on success, NULL when the field is absent
// or malformed.
const char* extract_json_field(const char *json, const char *field, char *buffer, size_t bufSize) {
    if (json == NULL || field == NULL || buffer == NULL || bufSize == 0) {
        return NULL;
    }
    char key[256];
    snprintf(key, sizeof(key), "\"%s\"", field);

    const char *pos = json;
    for (;;) {
        pos = strstr(pos, key);
        if (!pos) {
            return NULL;
        }
        pos += strlen(key);
        const char *p = pos;
        while (*p == ' ' || *p == '\t') {
            p++;
        }
        if (*p == ':') {
            pos = p + 1;
            break;
        }
        // Matched text was not a key (appeared inside a value); keep looking.
    }
    while (*pos == ' ' || *pos == '\t') {
        pos++;
    }
    if (*pos != '"') {
        return NULL;
    }
    pos++;
    const char *end = strchr(pos, '"');
    if (!end) {
        return NULL;
    }
    size_t len = (size_t)(end - pos);
    if (len >= bufSize) {
        len = bufSize - 1; // silent truncation, matching original behavior
    }
    memcpy(buffer, pos, len);
    buffer[len] = '\0';
    return buffer;
}
// Event callback that handles message events
void event_callback(int ret, const char *msg, size_t len, void *userData) {
if (ret != RET_OK || msg == NULL || len == 0) {
return;
}
// Create null-terminated string for easier parsing
char *eventJson = malloc(len + 1);
if (!eventJson) {
return;
}
memcpy(eventJson, msg, len);
eventJson[len] = '\0';
// Extract eventType
char eventType[64];
if (!extract_json_field(eventJson, "eventType", eventType, sizeof(eventType))) {
free(eventJson);
return;
}
// Handle different event types
if (strcmp(eventType, "message_sent") == 0) {
char requestId[128];
char messageHash[128];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
printf("📤 [EVENT] Message sent - RequestID: %s, Hash: %s\n", requestId, messageHash);
} else if (strcmp(eventType, "message_error") == 0) {
char requestId[128];
char messageHash[128];
char error[256];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
extract_json_field(eventJson, "error", error, sizeof(error));
printf("❌ [EVENT] Message error - RequestID: %s, Hash: %s, Error: %s\n",
requestId, messageHash, error);
} else if (strcmp(eventType, "message_propagated") == 0) {
char requestId[128];
char messageHash[128];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
printf("✅ [EVENT] Message propagated - RequestID: %s, Hash: %s\n", requestId, messageHash);
} else {
printf(" [EVENT] Unknown event type: %s\n", eventType);
}
free(eventJson);
}
// Simple callback that prints results
void simple_callback(int ret, const char *msg, size_t len, void *userData) {
const char *operation = (const char *)userData;
if (operation != NULL && strcmp(operation, "create_node") == 0) {
create_node_ok = (ret == RET_OK) ? 1 : 0;
}
if (ret == RET_OK) {
if (len > 0) {
printf("[%s] Success: %.*s\n", operation, (int)len, msg);
} else {
printf("[%s] Success\n", operation);
}
} else {
printf("[%s] Error: %.*s\n", operation, (int)len, msg);
}
}
// End-to-end example: create -> register event callback -> start ->
// subscribe -> send -> wait for delivery events -> unsubscribe -> stop ->
// destroy. All API calls report asynchronously through simple_callback;
// the sleep() calls are a crude way to wait for each callback before the
// next step (fine for a demo, not for production code).
int main() {
    printf("=== Logos Messaging API (LMAPI) Example ===\n\n");
    // Configuration JSON for creating a node
    const char *config = "{"
        "\"logLevel\": \"DEBUG\","
        // "\"mode\": \"Edge\","
        "\"mode\": \"Core\","
        "\"protocolsConfig\": {"
        "\"entryNodes\": [\"/dns4/node-01.do-ams3.misc.logos-chat.status.im/tcp/30303/p2p/16Uiu2HAkxoqUTud5LUPQBRmkeL2xP4iKx2kaABYXomQRgmLUgf78\"],"
        "\"clusterId\": 42,"
        "\"autoShardingConfig\": {"
        "\"numShardsInCluster\": 8"
        "}"
        "},"
        "\"networkingConfig\": {"
        "\"listenIpv4\": \"0.0.0.0\","
        "\"p2pTcpPort\": 60000,"
        "\"discv5UdpPort\": 9000"
        "}"
        "}";
    printf("1. Creating node...\n");
    void *ctx = logosdelivery_create_node(config, simple_callback, (void *)"create_node");
    if (ctx == NULL) {
        printf("Failed to create node\n");
        return 1;
    }
    // Wait a bit for the callback
    sleep(1);
    // simple_callback recorded the create_node outcome in this global.
    if (create_node_ok != 1) {
        printf("Create node failed, stopping example early.\n");
        logosdelivery_destroy(ctx, simple_callback, (void *)"destroy");
        return 1;
    }
    // Register for events before starting so early events are not missed.
    printf("\n2. Setting up event callback...\n");
    logosdelivery_set_event_callback(ctx, event_callback, NULL);
    printf("Event callback registered for message events\n");
    printf("\n3. Starting node...\n");
    logosdelivery_start_node(ctx, simple_callback, (void *)"start_node");
    // Wait for node to start
    sleep(2);
    printf("\n4. Subscribing to content topic...\n");
    const char *contentTopic = "/example/1/chat/proto";
    logosdelivery_subscribe(ctx, simple_callback, (void *)"subscribe", contentTopic);
    // Wait for subscription
    sleep(1);
    printf("\n5. Sending a message...\n");
    printf("Watch for message events (sent, propagated, or error):\n");
    // Create base64-encoded payload: "Hello, Logos Messaging!"
    const char *message = "{"
        "\"contentTopic\": \"/example/1/chat/proto\","
        "\"payload\": \"SGVsbG8sIExvZ29zIE1lc3NhZ2luZyE=\","
        "\"ephemeral\": false"
        "}";
    logosdelivery_send(ctx, simple_callback, (void *)"send", message);
    // Wait for message events to arrive
    printf("Waiting for message delivery events...\n");
    sleep(60);
    printf("\n6. Unsubscribing from content topic...\n");
    logosdelivery_unsubscribe(ctx, simple_callback, (void *)"unsubscribe", contentTopic);
    sleep(1);
    printf("\n7. Stopping node...\n");
    logosdelivery_stop_node(ctx, simple_callback, (void *)"stop_node");
    sleep(1);
    printf("\n8. Destroying context...\n");
    logosdelivery_destroy(ctx, simple_callback, (void *)"destroy");
    printf("\n=== Example completed ===\n");
    return 0;
}

View File

@ -0,0 +1,27 @@
import std/[json, macros]

type JsonEvent*[T] = ref object
  # Event-type tag emitted first in the serialized JSON.
  eventType*: string
  # Arbitrary payload object whose fields are flattened into the same JSON.
  payload*: T

macro toFlatJson*(event: JsonEvent): JsonNode =
  ## Serializes JsonEvent[T] to flat JSON with eventType first,
  ## followed by all fields from T's payload
  result = quote:
    var jsonObj = newJObject()
    jsonObj["eventType"] = %`event`.eventType
    # Serialize payload fields into the same object (flattening)
    let payloadJson = %`event`.payload
    for key, val in payloadJson.pairs:
      jsonObj[key] = val
    jsonObj

proc `$`*[T](event: JsonEvent[T]): string =
  ## Renders the event as its flat JSON string form (see `toFlatJson`).
  $toFlatJson(event)

proc newJsonEvent*[T](eventType: string, payload: T): JsonEvent[T] =
  ## Creates a new JsonEvent with the given eventType and payload.
  ## The payload's fields will be flattened into the JSON output.
  JsonEvent[T](eventType: eventType, payload: payload)

View File

@ -0,0 +1,82 @@
// Generated manually and inspired by libwaku.h
// Header file for Logos Messaging API (LMAPI) library
#pragma once
#ifndef __liblogosdelivery__
#define __liblogosdelivery__

#include <stddef.h>
#include <stdint.h>

// The possible returned values for the functions that return int
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2

#ifdef __cplusplus
extern "C"
{
#endif

// Result/event callback. callerRet is one of the RET_* codes above; msg is a
// buffer of len bytes owned by the library (copy any data you need to
// retain). userData is the pointer supplied at the original call.
typedef void (*FFICallBack)(int callerRet, const char *msg, size_t len, void *userData);

// Creates a new instance of the node from the given configuration JSON.
// Returns a pointer to the Context needed by the rest of the API functions.
// Configuration should be in JSON format following the NodeConfig structure.
void *logosdelivery_create_node(
    const char *configJson,
    FFICallBack callback,
    void *userData);

// Starts the node.
int logosdelivery_start_node(void *ctx,
                             FFICallBack callback,
                             void *userData);

// Stops the node.
int logosdelivery_stop_node(void *ctx,
                            FFICallBack callback,
                            void *userData);

// Destroys an instance of a node created with logosdelivery_create_node
int logosdelivery_destroy(void *ctx,
                          FFICallBack callback,
                          void *userData);

// Subscribe to a content topic.
// contentTopic: string representing the content topic (e.g., "/myapp/1/chat/proto")
int logosdelivery_subscribe(void *ctx,
                            FFICallBack callback,
                            void *userData,
                            const char *contentTopic);

// Unsubscribe from a content topic.
int logosdelivery_unsubscribe(void *ctx,
                              FFICallBack callback,
                              void *userData,
                              const char *contentTopic);

// Send a message.
// messageJson: JSON string with the following structure:
// {
//   "contentTopic": "/myapp/1/chat/proto",
//   "payload": "base64-encoded-payload",
//   "ephemeral": false
// }
// Returns a request ID that can be used to track the message delivery.
int logosdelivery_send(void *ctx,
                       FFICallBack callback,
                       void *userData,
                       const char *messageJson);

// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
void logosdelivery_set_event_callback(void *ctx,
                                      FFICallBack callback,
                                      void *userData);

#ifdef __cplusplus
}
#endif

#endif /* __liblogosdelivery__ */

View File

@ -0,0 +1,29 @@
import std/[atomics, options]
import chronicles, chronos, chronos/threadsync, ffi
import waku/factory/waku, waku/node/waku_node, ./declare_lib
################################################################################
## Include different APIs, i.e. all procs with {.ffi.} pragma
include ./logos_delivery_api/node_api, ./logos_delivery_api/messaging_api
################################################################################
### Exported procs
proc logosdelivery_destroy(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
): cint {.dynlib, exportc, cdecl.} =
  ## Destroys a context created with logosdelivery_create_node and frees its
  ## FFI resources. Invokes `callback` exactly once: RET_ERR with an error
  ## message on failure, RET_OK with an empty payload on success.
  initializeLibrary()
  checkParams(ctx, callback, userData)
  ffi.destroyFFIContext(ctx).isOkOr:
    let msg = "liblogosdelivery error: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return RET_ERR
  ## always need to invoke the callback although we don't retrieve value to the caller
  callback(RET_OK, nil, 0, userData)
  return RET_OK
# ### End of exported procs
# ################################################################################

View File

@ -0,0 +1,91 @@
import std/[json]
import chronos, results, ffi
import stew/byteutils
import
waku/common/base64,
waku/factory/waku,
waku/waku_core/topics/content_topic,
waku/api/[api, types],
../declare_lib
proc logosdelivery_subscribe(
    ctx: ptr FFIContext[Waku],
    callback: FFICallBack,
    userData: pointer,
    contentTopicStr: cstring,
) {.ffi.} =
  ## Subscribes the node to the given content topic. The result is delivered
  ## asynchronously through the FFI callback (empty string on success).
  requireInitializedNode(ctx, "Subscribe"):
    return err(errMsg)
  # ContentTopic is just a string type alias
  let contentTopic = ContentTopic($contentTopicStr)
  (await api.subscribe(ctx.myLib[], contentTopic)).isOkOr:
    let errMsg = $error
    return err("Subscribe failed: " & errMsg)
  return ok("")
proc logosdelivery_unsubscribe(
    ctx: ptr FFIContext[Waku],
    callback: FFICallBack,
    userData: pointer,
    contentTopicStr: cstring,
) {.ffi.} =
  ## Unsubscribes the node from the given content topic; result is delivered
  ## through the FFI callback (empty string on success).
  ## NOTE(review): unlike subscribe, this calls api.unsubscribe without
  ## `await` — presumably it is synchronous; confirm against the api module.
  requireInitializedNode(ctx, "Unsubscribe"):
    return err(errMsg)
  # ContentTopic is just a string type alias
  let contentTopic = ContentTopic($contentTopicStr)
  api.unsubscribe(ctx.myLib[], contentTopic).isOkOr:
    let errMsg = $error
    return err("Unsubscribe failed: " & errMsg)
  return ok("")
proc logosdelivery_send(
    ctx: ptr FFIContext[Waku],
    callback: FFICallBack,
    userData: pointer,
    messageJson: cstring,
) {.ffi.} =
  ## Sends a message described by `messageJson`:
  ##   { "contentTopic": "/app/1/chat/proto",
  ##     "payload": "<base64>",
  ##     "ephemeral": false }
  ## On success the request id is delivered through the callback so the
  ## caller can correlate subsequent message_sent / message_propagated /
  ## message_error events.
  requireInitializedNode(ctx, "Send"):
    return err(errMsg)
  ## Parse the message JSON and send the message
  var jsonNode: JsonNode
  try:
    jsonNode = parseJson($messageJson)
  except CatchableError as e:
    # Catch only recoverable errors (JsonParsingError etc.). The previous
    # bare `except Exception` also swallowed Defects, which signal
    # programmer bugs and should not be turned into an error string.
    return err("Failed to parse message JSON: " & e.msg)
  # Extract content topic
  if not jsonNode.hasKey("contentTopic"):
    return err("Missing contentTopic field")
  # ContentTopic is just a string type alias
  let contentTopic = ContentTopic(jsonNode["contentTopic"].getStr())
  # Extract payload (expect base64 encoded string)
  if not jsonNode.hasKey("payload"):
    return err("Missing payload field")
  let payloadStr = jsonNode["payload"].getStr()
  let payload = base64.decode(Base64String(payloadStr)).valueOr:
    return err("invalid payload format: " & error)
  # Extract ephemeral flag (defaults to false when absent)
  let ephemeral = jsonNode.getOrDefault("ephemeral").getBool(false)
  # Create message envelope
  let envelope = MessageEnvelope.init(
    contentTopic = contentTopic, payload = payload, ephemeral = ephemeral
  )
  # Send the message
  let requestId = (await api.send(ctx.myLib[], envelope)).valueOr:
    let errMsg = $error
    return err("Send failed: " & errMsg)
  return ok($requestId)

View File

@ -0,0 +1,111 @@
import std/json
import chronos, results, ffi
import
waku/factory/waku,
waku/node/waku_node,
waku/api/[api, api_conf, types],
waku/events/message_events,
../declare_lib,
../json_event
# Add JSON serialization for RequestId
proc `%`*(id: RequestId): JsonNode =
  ## Serializes a RequestId via its string form so events embedding it can be
  ## rendered with std/json's `%` operator (used by json_event flattening).
  %($id)
registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]):
  proc(configJson: cstring): Future[Result[string, string]] {.async.} =
    ## Handler run on the FFI worker thread for CreateNodeRequest: decodes
    ## the JSON node configuration and stores the created node in ctx.myLib[].
    ## Parse the JSON configuration and create a node
    let nodeConfig =
      try:
        decodeNodeConfigFromJson($configJson)
      except SerializationError as e:
        return err("Failed to parse config JSON: " & e.msg)
    # Create the node
    ctx.myLib[] = (await api.createNode(nodeConfig)).valueOr:
      let errMsg = $error
      chronicles.error "CreateNodeRequest failed", err = errMsg
      return err(errMsg)
    return ok("")
proc logosdelivery_create_node(
    configJson: cstring, callback: FFICallback, userData: pointer
): pointer {.dynlib, exportc, cdecl.} =
  ## C entry point: allocates the FFI context and queues a CreateNodeRequest
  ## on the worker thread. Returns the context pointer immediately; the
  ## creation result arrives asynchronously via `callback`. Returns nil when
  ## the callback is missing or setup fails (callback gets RET_ERR where
  ## possible).
  initializeLibrary()
  if isNil(callback):
    # The callback is the error channel, and it is what's missing — echo is
    # the only way to report this.
    echo "error: missing callback in logosdelivery_create_node"
    return nil
  var ctx = ffi.createFFIContext[Waku]().valueOr:
    let msg = "Error in createFFIContext: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return nil
  ctx.userData = userData
  # NOTE(review): on this failure path ctx is not destroyed before returning
  # nil — looks like a context leak; confirm whether destroyFFIContext should
  # be called here.
  ffi.sendRequestToFFIThread(
    ctx, CreateNodeRequest.ffiNewReq(callback, userData, configJson)
  ).isOkOr:
    let msg = "error in sendRequestToFFIThread: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return nil
  return ctx
proc logosdelivery_start_node(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
  ## Registers listeners for the outgoing message events (sent / error /
  ## propagated), forwarding each as flat JSON to the client's registered
  ## event callback, then starts the Waku node.
  requireInitializedNode(ctx, "START_NODE"):
    return err(errMsg)
  # setting up outgoing event listeners
  # NOTE(review): if a later `listen` call fails, listeners registered
  # earlier in this proc are left in place on the error return — confirm
  # whether that partial state is acceptable (stop_node does drop them all).
  let sentListener = MessageSentEvent.listen(
    ctx.myLib[].brokerCtx,
    proc(event: MessageSentEvent) {.async: (raises: []).} =
      callEventCallback(ctx, "onMessageSent"):
        $newJsonEvent("message_sent", event),
  ).valueOr:
    chronicles.error "MessageSentEvent.listen failed", err = $error
    return err("MessageSentEvent.listen failed: " & $error)
  let errorListener = MessageErrorEvent.listen(
    ctx.myLib[].brokerCtx,
    proc(event: MessageErrorEvent) {.async: (raises: []).} =
      callEventCallback(ctx, "onMessageError"):
        $newJsonEvent("message_error", event),
  ).valueOr:
    chronicles.error "MessageErrorEvent.listen failed", err = $error
    return err("MessageErrorEvent.listen failed: " & $error)
  let propagatedListener = MessagePropagatedEvent.listen(
    ctx.myLib[].brokerCtx,
    proc(event: MessagePropagatedEvent) {.async: (raises: []).} =
      callEventCallback(ctx, "onMessagePropagated"):
        $newJsonEvent("message_propagated", event),
  ).valueOr:
    chronicles.error "MessagePropagatedEvent.listen failed", err = $error
    return err("MessagePropagatedEvent.listen failed: " & $error)
  (await startWaku(addr ctx.myLib[])).isOkOr:
    let errMsg = $error
    chronicles.error "START_NODE failed", err = errMsg
    return err("failed to start: " & errMsg)
  return ok("")
proc logosdelivery_stop_node(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
  ## Drops all message-event listeners registered by logosdelivery_start_node
  ## and stops the node. Result is reported through the FFI callback.
  requireInitializedNode(ctx, "STOP_NODE"):
    return err(errMsg)
  MessageErrorEvent.dropAllListeners(ctx.myLib[].brokerCtx)
  MessageSentEvent.dropAllListeners(ctx.myLib[].brokerCtx)
  MessagePropagatedEvent.dropAllListeners(ctx.myLib[].brokerCtx)
  (await ctx.myLib[].stop()).isOkOr:
    let errMsg = $error
    chronicles.error "STOP_NODE failed", err = errMsg
    return err("failed to stop: " & errMsg)
  return ok("")

27
liblogosdelivery/nim.cfg Normal file
View File

@ -0,0 +1,27 @@
# Nim configuration for liblogosdelivery
# Ensure correct compiler configuration
--gc:
refc
--threads:
on
# Include paths
--path:
"../vendor/nim-ffi"
--path:
"../"
# Optimization and debugging
--opt:
speed
--debugger:
native
# Export symbols for dynamic library
--app:
lib
--noMain
# Enable FFI macro features when needed for debugging
# --define:ffiDumpMacros

View File

@ -23,6 +23,10 @@ let
tools = pkgs.callPackage ./tools.nix {};
version = tools.findKeyValue "^version = \"([a-f0-9.-]+)\"$" ../waku.nimble;
revision = lib.substring 0 8 (src.rev or src.dirtyRev or "00000000");
copyLibwaku = lib.elem "libwaku" targets;
copyLiblogosdelivery = lib.elem "liblogosdelivery" targets;
copyWakunode2 = lib.elem "wakunode2" targets;
hasKnownInstallTarget = copyLibwaku || copyLiblogosdelivery || copyWakunode2;
in stdenv.mkDerivation {
pname = "logos-messaging-nim";
@ -46,8 +50,8 @@ in stdenv.mkDerivation {
];
# Environment variables required for Android builds
ANDROID_SDK_ROOT="${pkgs.androidPkgs.sdk}";
ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}";
ANDROID_SDK_ROOT = "${pkgs.androidPkgs.sdk}";
ANDROID_NDK_HOME = "${pkgs.androidPkgs.ndk}";
NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
XDG_CACHE_HOME = "/tmp";
@ -61,6 +65,15 @@ in stdenv.mkDerivation {
configurePhase = ''
patchShebangs . vendor/nimbus-build-system > /dev/null
# build_nim.sh guards "rm -rf dist/checksums" with NIX_BUILD_TOP != "/build",
# but on macOS the nix sandbox uses /private/tmp/... so the check fails and
# dist/checksums (provided via preBuild) gets deleted. Fix the check to skip
# the removal whenever NIX_BUILD_TOP is set (i.e. any nix build).
substituteInPlace vendor/nimbus-build-system/scripts/build_nim.sh \
--replace 'if [[ "''${NIX_BUILD_TOP}" != "/build" ]]; then' \
'if [[ -z "''${NIX_BUILD_TOP}" ]]; then'
make nimbus-build-system-paths
make nimbus-build-system-nimble-dir
'';
@ -91,11 +104,39 @@ in stdenv.mkDerivation {
'' else ''
mkdir -p $out/bin $out/include
# Copy library files
cp build/* $out/bin/ 2>/dev/null || true
# Copy artifacts from build directory (created by Make during buildPhase)
# Note: build/ is in the source tree, not result/ (which is a post-build symlink)
if [ -d build ]; then
${lib.optionalString copyLibwaku ''
cp build/libwaku.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true
''}
# Copy the header file
cp library/libwaku.h $out/include/
${lib.optionalString copyLiblogosdelivery ''
cp build/liblogosdelivery.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true
''}
${lib.optionalString copyWakunode2 ''
cp build/wakunode2 $out/bin/ 2>/dev/null || true
''}
${lib.optionalString (!hasKnownInstallTarget) ''
cp build/lib*.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true
''}
fi
# Copy header files
${lib.optionalString copyLibwaku ''
cp library/libwaku.h $out/include/ 2>/dev/null || true
''}
${lib.optionalString copyLiblogosdelivery ''
cp liblogosdelivery/liblogosdelivery.h $out/include/ 2>/dev/null || true
''}
${lib.optionalString (!hasKnownInstallTarget) ''
cp library/libwaku.h $out/include/ 2>/dev/null || true
cp liblogosdelivery/liblogosdelivery.h $out/include/ 2>/dev/null || true
''}
'';
meta = with pkgs.lib; {

247
nix/submodules.json Normal file
View File

@ -0,0 +1,247 @@
[
{
"path": "vendor/db_connector",
"url": "https://github.com/nim-lang/db_connector.git",
"rev": "74aef399e5c232f95c9fc5c987cebac846f09d62"
}
,
{
"path": "vendor/dnsclient.nim",
"url": "https://github.com/ba0f3/dnsclient.nim.git",
"rev": "23214235d4784d24aceed99bbfe153379ea557c8"
}
,
{
"path": "vendor/nim-bearssl",
"url": "https://github.com/status-im/nim-bearssl.git",
"rev": "11e798b62b8e6beabe958e048e9e24c7e0f9ee63"
}
,
{
"path": "vendor/nim-chronicles",
"url": "https://github.com/status-im/nim-chronicles.git",
"rev": "54f5b726025e8c7385e3a6529d3aa27454c6e6ff"
}
,
{
"path": "vendor/nim-chronos",
"url": "https://github.com/status-im/nim-chronos.git",
"rev": "85af4db764ecd3573c4704139560df3943216cf1"
}
,
{
"path": "vendor/nim-confutils",
"url": "https://github.com/status-im/nim-confutils.git",
"rev": "e214b3992a31acece6a9aada7d0a1ad37c928f3b"
}
,
{
"path": "vendor/nim-dnsdisc",
"url": "https://github.com/status-im/nim-dnsdisc.git",
"rev": "b71d029f4da4ec56974d54c04518bada00e1b623"
}
,
{
"path": "vendor/nim-eth",
"url": "https://github.com/status-im/nim-eth.git",
"rev": "d9135e6c3c5d6d819afdfb566aa8d958756b73a8"
}
,
{
"path": "vendor/nim-faststreams",
"url": "https://github.com/status-im/nim-faststreams.git",
"rev": "c3ac3f639ed1d62f59d3077d376a29c63ac9750c"
}
,
{
"path": "vendor/nim-ffi",
"url": "https://github.com/logos-messaging/nim-ffi",
"rev": "06111de155253b34e47ed2aaed1d61d08d62cc1b"
}
,
{
"path": "vendor/nim-http-utils",
"url": "https://github.com/status-im/nim-http-utils.git",
"rev": "79cbab1460f4c0cdde2084589d017c43a3d7b4f1"
}
,
{
"path": "vendor/nim-json-rpc",
"url": "https://github.com/status-im/nim-json-rpc.git",
"rev": "9665c265035f49f5ff94bbffdeadde68e19d6221"
}
,
{
"path": "vendor/nim-json-serialization",
"url": "https://github.com/status-im/nim-json-serialization.git",
"rev": "b65fd6a7e64c864dabe40e7dfd6c7d07db0014ac"
}
,
{
"path": "vendor/nim-jwt",
"url": "https://github.com/vacp2p/nim-jwt.git",
"rev": "18f8378de52b241f321c1f9ea905456e89b95c6f"
}
,
{
"path": "vendor/nim-libbacktrace",
"url": "https://github.com/status-im/nim-libbacktrace.git",
"rev": "d8bd4ce5c46bb6d2f984f6b3f3d7380897d95ecb"
}
,
{
"path": "vendor/nim-libp2p",
"url": "https://github.com/vacp2p/nim-libp2p.git",
"rev": "eb7e6ff89889e41b57515f891ba82986c54809fb"
}
,
{
"path": "vendor/nim-lsquic",
"url": "https://github.com/vacp2p/nim-lsquic",
"rev": "f3fe33462601ea34eb2e8e9c357c92e61f8d121b"
}
,
{
"path": "vendor/nim-metrics",
"url": "https://github.com/status-im/nim-metrics.git",
"rev": "ecf64c6078d1276d3b7d9b3d931fbdb70004db11"
}
,
{
"path": "vendor/nim-minilru",
"url": "https://github.com/status-im/nim-minilru.git",
"rev": "0c4b2bce959591f0a862e9b541ba43c6d0cf3476"
}
,
{
"path": "vendor/nim-nat-traversal",
"url": "https://github.com/status-im/nim-nat-traversal.git",
"rev": "860e18c37667b5dd005b94c63264560c35d88004"
}
,
{
"path": "vendor/nim-presto",
"url": "https://github.com/status-im/nim-presto.git",
"rev": "92b1c7ff141e6920e1f8a98a14c35c1fa098e3be"
}
,
{
"path": "vendor/nim-regex",
"url": "https://github.com/nitely/nim-regex.git",
"rev": "4593305ed1e49731fc75af1dc572dd2559aad19c"
}
,
{
"path": "vendor/nim-results",
"url": "https://github.com/arnetheduck/nim-results.git",
"rev": "df8113dda4c2d74d460a8fa98252b0b771bf1f27"
}
,
{
"path": "vendor/nim-secp256k1",
"url": "https://github.com/status-im/nim-secp256k1.git",
"rev": "9dd3df62124aae79d564da636bb22627c53c7676"
}
,
{
"path": "vendor/nim-serialization",
"url": "https://github.com/status-im/nim-serialization.git",
"rev": "6f525d5447d97256750ca7856faead03e562ed20"
}
,
{
"path": "vendor/nim-sqlite3-abi",
"url": "https://github.com/arnetheduck/nim-sqlite3-abi.git",
"rev": "bdf01cf4236fb40788f0733466cdf6708783cbac"
}
,
{
"path": "vendor/nim-stew",
"url": "https://github.com/status-im/nim-stew.git",
"rev": "e5740014961438610d336cd81706582dbf2c96f0"
}
,
{
"path": "vendor/nim-stint",
"url": "https://github.com/status-im/nim-stint.git",
"rev": "470b7892561b5179ab20bd389a69217d6213fe58"
}
,
{
"path": "vendor/nim-taskpools",
"url": "https://github.com/status-im/nim-taskpools.git",
"rev": "9e8ccc754631ac55ac2fd495e167e74e86293edb"
}
,
{
"path": "vendor/nim-testutils",
"url": "https://github.com/status-im/nim-testutils.git",
"rev": "94d68e796c045d5b37cabc6be32d7bfa168f8857"
}
,
{
"path": "vendor/nim-toml-serialization",
"url": "https://github.com/status-im/nim-toml-serialization.git",
"rev": "fea85b27f0badcf617033ca1bc05444b5fd8aa7a"
}
,
{
"path": "vendor/nim-unicodedb",
"url": "https://github.com/nitely/nim-unicodedb.git",
"rev": "66f2458710dc641dd4640368f9483c8a0ec70561"
}
,
{
"path": "vendor/nim-unittest2",
"url": "https://github.com/status-im/nim-unittest2.git",
"rev": "8b51e99b4a57fcfb31689230e75595f024543024"
}
,
{
"path": "vendor/nim-web3",
"url": "https://github.com/status-im/nim-web3.git",
"rev": "81ee8ce479d86acb73be7c4f365328e238d9b4a3"
}
,
{
"path": "vendor/nim-websock",
"url": "https://github.com/status-im/nim-websock.git",
"rev": "ebe308a79a7b440a11dfbe74f352be86a3883508"
}
,
{
"path": "vendor/nim-zlib",
"url": "https://github.com/status-im/nim-zlib.git",
"rev": "daa8723fd32299d4ca621c837430c29a5a11e19a"
}
,
{
"path": "vendor/nimbus-build-system",
"url": "https://github.com/status-im/nimbus-build-system.git",
"rev": "e6c2c9da39c2d368d9cf420ac22692e99715d22c"
}
,
{
"path": "vendor/nimcrypto",
"url": "https://github.com/cheatfate/nimcrypto.git",
"rev": "721fb99ee099b632eb86dfad1f0d96ee87583774"
}
,
{
"path": "vendor/nph",
"url": "https://github.com/arnetheduck/nph.git",
"rev": "c6e03162dc2820d3088660f644818d7040e95791"
}
,
{
"path": "vendor/waku-rlnv2-contract",
"url": "https://github.com/logos-messaging/waku-rlnv2-contract.git",
"rev": "8a338f354481e8a3f3d64a72e38fad4c62e32dcd"
}
,
{
"path": "vendor/zerokit",
"url": "https://github.com/vacp2p/zerokit.git",
"rev": "70c79fbc989d4f87d9352b2f4bddcb60ebe55b19"
}
]

View File

@ -0,0 +1,82 @@
#!/usr/bin/env bash
# Generates nix/submodules.json from .gitmodules and git ls-tree.
# This allows Nix to fetch all git submodules without requiring
# locally initialized submodules or the '?submodules=1' URI flag.
#
# Usage: ./scripts/generate_nix_submodules.sh
#
# Run this script after:
#   - Adding/removing submodules
#   - Updating submodule commits (e.g. after 'make update')
#   - Any change to .gitmodules
#
# Compatible with macOS bash 3.x (no associative arrays).

set -euo pipefail

REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
OUTPUT="${REPO_ROOT}/nix/submodules.json"

cd "$REPO_ROOT"

# Two tab-separated scratch files stand in for associative arrays:
#   TMP_URLS: <path>\t<url>   (parsed from .gitmodules)
#   TMP_REVS: <path>\t<hash>  (pinned commits from git ls-tree)
TMP_URLS=$(mktemp)
TMP_REVS=$(mktemp)
trap 'rm -f "$TMP_URLS" "$TMP_REVS"' EXIT

# Parse .gitmodules: extract (path, url) pairs.
# Relies on each 'path = ' line preceding its matching 'url = ' line,
# which is how 'git submodule add' writes the file.
current_path=""
while IFS= read -r line; do
  case "$line" in
    *"path = "*)
      current_path="${line#*path = }"
      ;;
    *"url = "*)
      if [ -n "$current_path" ]; then
        url="${line#*url = }"
        url="${url%/}"  # normalize: drop a trailing slash
        printf '%s\t%s\n' "$current_path" "$url" >> "$TMP_URLS"
        current_path=""
      fi
      ;;
  esac
done < .gitmodules

# Get pinned commit hashes from the git tree. Submodules appear as
# 'commit' entries: "<mode> SP commit SP <hash> TAB <path>".
# Let 'read' split the fields instead of spawning four awk processes
# per line (submodule paths contain no whitespace).
git ls-tree HEAD vendor/ | while read -r _mode obj_type obj_hash obj_path; do
  if [ "$obj_type" = "commit" ]; then
    obj_path="${obj_path%/}"
    printf '%s\t%s\n' "$obj_path" "$obj_hash" >> "$TMP_REVS"
  fi
done

# Generate JSON by joining urls and revs on path.
printf '[\n' > "$OUTPUT"
first=true
sort "$TMP_URLS" | while IFS="$(printf '\t')" read -r path url; do
  # Exact-match lookup on the first tab-separated field. A regex grep
  # here would be wrong twice over: the separator written above is a
  # tab (not a space), and unescaped metacharacters in paths such as
  # "vendor/dnsclient.nim" would match unintended entries.
  rev=$(awk -F '\t' -v p="$path" '$1 == p { print $2; exit }' "$TMP_REVS")
  if [ -z "$rev" ]; then
    echo "WARNING: No commit hash found for submodule '$path', skipping" >&2
    continue
  fi
  if [ "$first" = true ]; then
    first=false
  else
    printf '  ,\n' >> "$OUTPUT"
  fi
  printf '  {\n    "path": "%s",\n    "url": "%s",\n    "rev": "%s"\n  }\n' \
    "$path" "$url" "$rev" >> "$OUTPUT"
done
printf ']\n' >> "$OUTPUT"

# grep -c prints 0 AND exits non-zero when nothing matches; the old
# '|| echo 0' therefore appended a second line to $count. Only the
# exit status needs suppressing (set -e).
count=$(grep -c '"path"' "$OUTPUT" || true)
echo "Generated $OUTPUT with $count submodule entries"

View File

@ -1,16 +1,17 @@
log-level = "INFO"
log-level = "TRACE"
relay = true
mix = true
filter = true
store = false
store = true
lightpush = true
max-connections = 150
peer-exchange = true
peer-exchange = false
metrics-logging = false
cluster-id = 2
discv5-discovery = true
discv5-discovery = false
discv5-udp-port = 9000
discv5-enr-auto-update = true
enable-kad-discovery = true
rest = true
rest-admin = true
ports-shift = 1
@ -19,7 +20,9 @@ shard = [0]
agent-string = "nwaku-mix"
nodekey = "f98e3fba96c32e8d1967d460f1b79457380e1a895f7971cecc8528abe733781a"
mixkey = "a87db88246ec0eedda347b9b643864bee3d6933eb15ba41e6d58cb678d813258"
rendezvous = true
rendezvous = false
listen-address = "127.0.0.1"
nat = "extip:127.0.0.1"
ext-multiaddr = ["/ip4/127.0.0.1/tcp/60001"]
ext-multiaddr-only = true
ip-colocation-limit=0

View File

@ -1,17 +1,18 @@
log-level = "INFO"
log-level = "TRACE"
relay = true
mix = true
filter = true
store = false
lightpush = true
max-connections = 150
peer-exchange = true
peer-exchange = false
metrics-logging = false
cluster-id = 2
discv5-discovery = true
discv5-discovery = false
discv5-udp-port = 9001
discv5-enr-auto-update = true
discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"]
kad-bootstrap-node = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"]
rest = true
rest-admin = true
ports-shift = 2
@ -20,8 +21,10 @@ shard = [0]
agent-string = "nwaku-mix"
nodekey = "09e9d134331953357bd38bbfce8edb377f4b6308b4f3bfbe85c610497053d684"
mixkey = "c86029e02c05a7e25182974b519d0d52fcbafeca6fe191fbb64857fb05be1a53"
rendezvous = true
rendezvous = false
listen-address = "127.0.0.1"
nat = "extip:127.0.0.1"
ext-multiaddr = ["/ip4/127.0.0.1/tcp/60002"]
ext-multiaddr-only = true
ip-colocation-limit=0
#staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"]

View File

@ -1,17 +1,18 @@
log-level = "INFO"
log-level = "TRACE"
relay = true
mix = true
filter = true
store = false
lightpush = true
max-connections = 150
peer-exchange = true
peer-exchange = false
metrics-logging = false
cluster-id = 2
discv5-discovery = true
discv5-discovery = false
discv5-udp-port = 9002
discv5-enr-auto-update = true
discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"]
kad-bootstrap-node = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"]
rest = false
rest-admin = false
ports-shift = 3
@ -20,8 +21,10 @@ shard = [0]
agent-string = "nwaku-mix"
nodekey = "ed54db994682e857d77cd6fb81be697382dc43aa5cd78e16b0ec8098549f860e"
mixkey = "b858ac16bbb551c4b2973313b1c8c8f7ea469fca03f1608d200bbf58d388ec7f"
rendezvous = true
rendezvous = false
listen-address = "127.0.0.1"
nat = "extip:127.0.0.1"
ext-multiaddr = ["/ip4/127.0.0.1/tcp/60003"]
ext-multiaddr-only = true
ip-colocation-limit=0
#staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"]

View File

@ -1,17 +1,18 @@
log-level = "INFO"
log-level = "TRACE"
relay = true
mix = true
filter = true
store = false
lightpush = true
max-connections = 150
peer-exchange = true
peer-exchange = false
metrics-logging = false
cluster-id = 2
discv5-discovery = true
discv5-discovery = false
discv5-udp-port = 9003
discv5-enr-auto-update = true
discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"]
kad-bootstrap-node = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"]
rest = false
rest-admin = false
ports-shift = 4
@ -20,8 +21,10 @@ shard = [0]
agent-string = "nwaku-mix"
nodekey = "42f96f29f2d6670938b0864aced65a332dcf5774103b4c44ec4d0ea4ef3c47d6"
mixkey = "d8bd379bb394b0f22dd236d63af9f1a9bc45266beffc3fbbe19e8b6575f2535b"
rendezvous = true
rendezvous = false
listen-address = "127.0.0.1"
nat = "extip:127.0.0.1"
ext-multiaddr = ["/ip4/127.0.0.1/tcp/60004"]
ext-multiaddr-only = true
ip-colocation-limit=0
#staticnode = ["/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o","/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"]

View File

@ -1,17 +1,18 @@
log-level = "INFO"
log-level = "TRACE"
relay = true
mix = true
filter = true
store = false
lightpush = true
max-connections = 150
peer-exchange = true
peer-exchange = false
metrics-logging = false
cluster-id = 2
discv5-discovery = true
discv5-discovery = false
discv5-udp-port = 9004
discv5-enr-auto-update = true
discv5-bootstrap-node = ["enr:-LG4QBaAbcA921hmu3IrreLqGZ4y3VWCjBCgNN9mpX9vqkkbSrM3HJHZTXnb5iVXgc5pPtDhWLxkB6F3yY25hSwMezkEgmlkgnY0gmlwhH8AAAGKbXVsdGlhZGRyc4oACATAqEQ-BuphgnJzhQACAQAAiXNlY3AyNTZrMaEDpEW1UlUGHRJg6g_zGuCddKWmIUBGZCQX13xGfh9J6KiDdGNwguphg3VkcIIjKYV3YWt1Mg0"]
kad-bootstrap-node = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"]
rest = false
rest-admin = false
ports-shift = 5
@ -20,8 +21,10 @@ shard = [0]
agent-string = "nwaku-mix"
nodekey = "3ce887b3c34b7a92dd2868af33941ed1dbec4893b054572cd5078da09dd923d4"
mixkey = "780fff09e51e98df574e266bf3266ec6a3a1ddfcf7da826a349a29c137009d49"
rendezvous = true
rendezvous = false
listen-address = "127.0.0.1"
nat = "extip:127.0.0.1"
ext-multiaddr = ["/ip4/127.0.0.1/tcp/60005"]
ext-multiaddr-only = true
ip-colocation-limit=0
#staticnode = ["/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o", "/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA","/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f","/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF"]

View File

@ -1,2 +1,2 @@
../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE
../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE --kad-bootstrap-node="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"
#--mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" --mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" --mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" --mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f"

View File

@ -1 +1 @@
../../build/wakunode2 --config-file="config.toml"
../../build/wakunode2 --config-file="config.toml" 2>&1 | tee mix_node.log

View File

@ -1 +1 @@
../../build/wakunode2 --config-file="config1.toml"
../../build/wakunode2 --config-file="config1.toml" 2>&1 | tee mix_node1.log

View File

@ -1 +1 @@
../../build/wakunode2 --config-file="config2.toml"
../../build/wakunode2 --config-file="config2.toml" 2>&1 | tee mix_node2.log

View File

@ -1 +1 @@
../../build/wakunode2 --config-file="config3.toml"
../../build/wakunode2 --config-file="config3.toml" 2>&1 | tee mix_node3.log

View File

@ -1 +1 @@
../../build/wakunode2 --config-file="config4.toml"
../../build/wakunode2 --config-file="config4.toml" 2>&1 | tee mix_node4.log

View File

@ -1,7 +1,9 @@
{.used.}
import std/options, results, stint, testutils/unittests
import json_serialization
import waku/api/api_conf, waku/factory/waku_conf, waku/factory/networks_config
import waku/common/logging
suite "LibWaku Conf - toWakuConf":
test "Minimal configuration":
@ -298,3 +300,709 @@ suite "LibWaku Conf - toWakuConf":
check:
wakuConf.staticNodes.len == 1
wakuConf.staticNodes[0] == entryNodes[1]
suite "NodeConfig JSON - complete format":
  test "Full NodeConfig from complete JSON with field validation":
    ## Given: a JSON document that specifies every NodeConfig field.
    let rawJson =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": ["enrtree://TREE@nodes.example.com"],
          "staticStoreNodes": ["/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"],
          "clusterId": 10,
          "autoShardingConfig": {
            "numShardsInCluster": 4
          },
          "messageValidation": {
            "maxMessageSize": "100 KiB",
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "192.168.1.1",
          "p2pTcpPort": 7000,
          "discv5UdpPort": 7001
        },
        "ethRpcEndpoints": ["http://localhost:8545"],
        "p2pReliability": true,
        "logLevel": "WARN",
        "logFormat": "TEXT"
      }
      """

    ## When
    let decodedConf = decodeNodeConfigFromJson(rawJson)

    ## Then: every top-level scalar field matches the JSON input.
    check:
      decodedConf.mode == WakuMode.Core
      decodedConf.ethRpcEndpoints == @["http://localhost:8545"]
      decodedConf.p2pReliability == true
      decodedConf.logLevel == LogLevel.WARN
      decodedConf.logFormat == LogFormat.TEXT

    # Networking sub-object.
    check:
      decodedConf.networkingConfig.listenIpv4 == "192.168.1.1"
      decodedConf.networkingConfig.p2pTcpPort == 7000
      decodedConf.networkingConfig.discv5UdpPort == 7001

    # Protocols sub-object, including the nested sharding/validation parts.
    let protocols = decodedConf.protocolsConfig
    check:
      protocols.entryNodes == @["enrtree://TREE@nodes.example.com"]
      protocols.staticStoreNodes ==
        @[
          "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
        ]
      protocols.clusterId == 10
      protocols.autoShardingConfig.numShardsInCluster == 4
      protocols.messageValidation.maxMessageSize == "100 KiB"
      protocols.messageValidation.rlnConfig.isNone()

  test "Full NodeConfig with RlnConfig present":
    ## Given: a config whose messageValidation carries a non-null rlnConfig.
    let rawJson =
      """
      {
        "mode": "Edge",
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0x1234567890ABCDEF1234567890ABCDEF12345678",
              "chainId": 5,
              "epochSizeSec": 600
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let decodedConf = decodeNodeConfigFromJson(rawJson)

    ## Then: the RLN sub-config is present and fully populated.
    check decodedConf.mode == WakuMode.Edge
    let validation = decodedConf.protocolsConfig.messageValidation
    check:
      validation.maxMessageSize == "150 KiB"
      validation.rlnConfig.isSome()
    let rlnCfg = validation.rlnConfig.get()
    check:
      rlnCfg.contractAddress == "0x1234567890ABCDEF1234567890ABCDEF12345678"
      rlnCfg.chainId == 5'u
      rlnCfg.epochSizeSec == 600'u64

  test "Round-trip encode/decode preserves all fields":
    ## Given: a fully-populated NodeConfig built in code.
    let source = NodeConfig.init(
      mode = Edge,
      protocolsConfig = ProtocolsConfig.init(
        entryNodes = @["enrtree://TREE@example.com"],
        staticStoreNodes =
          @[
            "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
          ],
        clusterId = 42,
        autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16),
        messageValidation = MessageValidation(
          maxMessageSize: "256 KiB",
          rlnConfig: some(
            RlnConfig(
              contractAddress: "0xAABBCCDDEEFF00112233445566778899AABBCCDD",
              chainId: 137,
              epochSizeSec: 300,
            )
          ),
        ),
      ),
      networkingConfig =
        NetworkingConfig(listenIpv4: "10.0.0.1", p2pTcpPort: 9090, discv5UdpPort: 9091),
      ethRpcEndpoints = @["https://rpc.example.com"],
      p2pReliability = true,
      logLevel = LogLevel.DEBUG,
      logFormat = LogFormat.JSON,
    )

    ## When: encode to JSON and decode back.
    let restored = decodeNodeConfigFromJson(Json.encode(source))

    ## Then: every field survives the round trip.
    check:
      restored.mode == source.mode
      restored.ethRpcEndpoints == source.ethRpcEndpoints
      restored.p2pReliability == source.p2pReliability
      restored.logLevel == source.logLevel
      restored.logFormat == source.logFormat
      restored.networkingConfig.listenIpv4 == source.networkingConfig.listenIpv4
      restored.networkingConfig.p2pTcpPort == source.networkingConfig.p2pTcpPort
      restored.networkingConfig.discv5UdpPort == source.networkingConfig.discv5UdpPort
      restored.protocolsConfig.entryNodes == source.protocolsConfig.entryNodes
      restored.protocolsConfig.staticStoreNodes ==
        source.protocolsConfig.staticStoreNodes
      restored.protocolsConfig.clusterId == source.protocolsConfig.clusterId
      restored.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        source.protocolsConfig.autoShardingConfig.numShardsInCluster
      restored.protocolsConfig.messageValidation.maxMessageSize ==
        source.protocolsConfig.messageValidation.maxMessageSize
      restored.protocolsConfig.messageValidation.rlnConfig.isSome()

    # Compare the optional RLN payloads field by field.
    let restoredRln = restored.protocolsConfig.messageValidation.rlnConfig.get()
    let sourceRln = source.protocolsConfig.messageValidation.rlnConfig.get()
    check:
      restoredRln.contractAddress == sourceRln.contractAddress
      restoredRln.chainId == sourceRln.chainId
      restoredRln.epochSizeSec == sourceRln.epochSizeSec
suite "NodeConfig JSON - partial format with defaults":
  test "Minimal NodeConfig - empty object uses all defaults":
    ## Decoding "{}" must yield exactly the same values as NodeConfig.init().
    let parsed = decodeNodeConfigFromJson("{}")
    let baseline = NodeConfig.init()

    # Compare field by field against the programmatic defaults.
    check:
      parsed.mode == baseline.mode
      parsed.ethRpcEndpoints == baseline.ethRpcEndpoints
      parsed.p2pReliability == baseline.p2pReliability
      parsed.logLevel == baseline.logLevel
      parsed.logFormat == baseline.logFormat
      parsed.networkingConfig.listenIpv4 == baseline.networkingConfig.listenIpv4
      parsed.networkingConfig.p2pTcpPort == baseline.networkingConfig.p2pTcpPort
      parsed.networkingConfig.discv5UdpPort ==
        baseline.networkingConfig.discv5UdpPort
      parsed.protocolsConfig.entryNodes == baseline.protocolsConfig.entryNodes
      parsed.protocolsConfig.staticStoreNodes ==
        baseline.protocolsConfig.staticStoreNodes
      parsed.protocolsConfig.clusterId == baseline.protocolsConfig.clusterId
      parsed.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        baseline.protocolsConfig.autoShardingConfig.numShardsInCluster
      parsed.protocolsConfig.messageValidation.maxMessageSize ==
        baseline.protocolsConfig.messageValidation.maxMessageSize
      parsed.protocolsConfig.messageValidation.rlnConfig.isSome() ==
        baseline.protocolsConfig.messageValidation.rlnConfig.isSome()

  test "Minimal NodeConfig keeps network preset defaults":
    ## An empty document must inherit The Waku Network preset values.
    let parsed = decodeNodeConfigFromJson("{}")
    check:
      parsed.protocolsConfig.entryNodes == TheWakuNetworkPreset.entryNodes
      parsed.protocolsConfig.messageValidation.rlnConfig.isSome()

  test "NodeConfig with only mode specified":
    ## Only "mode" is supplied; everything else falls back to defaults.
    let parsed = decodeNodeConfigFromJson("""{"mode": "Edge"}""")
    check:
      parsed.mode == WakuMode.Edge
      # Remaining fields get defaults.
      parsed.logLevel == LogLevel.INFO
      parsed.logFormat == LogFormat.TEXT
      parsed.p2pReliability == false
      parsed.ethRpcEndpoints == newSeq[string]()

  test "ProtocolsConfig partial - optional fields get defaults":
    ## Only entryNodes and clusterId are provided inside protocolsConfig.
    let rawJson =
      """
      {
        "protocolsConfig": {
          "entryNodes": ["enrtree://X@y.com"],
          "clusterId": 5
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    let parsed = decodeNodeConfigFromJson(rawJson)

    # Supplied fields are honoured; omitted ones use module defaults.
    check:
      parsed.protocolsConfig.entryNodes == @["enrtree://X@y.com"]
      parsed.protocolsConfig.clusterId == 5
      parsed.protocolsConfig.staticStoreNodes == newSeq[string]()
      parsed.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        DefaultAutoShardingConfig.numShardsInCluster
      parsed.protocolsConfig.messageValidation.maxMessageSize ==
        DefaultMessageValidation.maxMessageSize
      parsed.protocolsConfig.messageValidation.rlnConfig.isNone()

  test "MessageValidation partial - rlnConfig omitted defaults to none":
    ## Omitting rlnConfig entirely must behave like an explicit null.
    let rawJson =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "200 KiB"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    let parsed = decodeNodeConfigFromJson(rawJson)
    check:
      parsed.protocolsConfig.messageValidation.maxMessageSize == "200 KiB"
      parsed.protocolsConfig.messageValidation.rlnConfig.isNone()

  test "logLevel and logFormat omitted use defaults":
    ## Logging fields are optional; absent keys map to INFO / TEXT.
    let rawJson =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    let parsed = decodeNodeConfigFromJson(rawJson)
    check:
      parsed.logLevel == LogLevel.INFO
      parsed.logFormat == LogFormat.TEXT
suite "NodeConfig JSON - unsupported fields raise errors":
  ## Strict decoding: any JSON key that does not map to a known field must
  ## be rejected with a SerializationError rather than silently ignored.
  ## `expect` (std/unittest) asserts that the body raises the given
  ## exception type, replacing the repeated var-flag/try/except boilerplate
  ## of the original tests with the idiomatic one-liner.

  test "Unknown field at NodeConfig level raises":
    let jsonStr =
      """
      {
        "mode": "Core",
        "unknownTopLevel": true
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Typo in NodeConfig field name raises":
    # "modes" instead of "mode" must not be accepted.
    let jsonStr =
      """
      {
        "modes": "Core"
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in ProtocolsConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "futureField": "something"
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in NetworkingConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000,
          "futureNetworkField": "value"
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in MessageValidation raises":
    # "maxMesssageSize" is a deliberate typo next to the valid key.
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "maxMesssageSize": "typo"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in RlnConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0xABCDEF1234567890ABCDEF1234567890ABCDEF12",
              "chainId": 1,
              "epochSizeSec": 600,
              "unknownRlnField": true
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in AutoShardingConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "autoShardingConfig": {
            "numShardsInCluster": 8,
            "shardPrefix": "extra"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)
suite "NodeConfig JSON - missing required fields":
  ## Each sub-object has mandatory keys; omitting any of them must make
  ## decoding fail with a SerializationError. `expect` (std/unittest)
  ## replaces the original var-flag/try/except/check boilerplate.

  test "Missing 'entryNodes' in ProtocolsConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'clusterId' in ProtocolsConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": []
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing required fields in NetworkingConfig":
    # Only listenIpv4 is given; the two port fields are absent.
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0"
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'numShardsInCluster' in AutoShardingConfig":
    # An explicitly empty autoShardingConfig object is not acceptable.
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "autoShardingConfig": {}
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing required fields in RlnConfig":
    # rlnConfig with only contractAddress — chainId/epochSizeSec absent.
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0xABCD"
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'maxMessageSize' in MessageValidation":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)
suite "NodeConfig JSON - invalid values":
  ## Malformed values — bad enum names, wrong scalar types, broken JSON —
  ## must all surface as SerializationError. `expect` (std/unittest)
  ## replaces the original var-flag/try/except/check boilerplate.

  test "Invalid enum value for mode":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{"mode": "InvalidMode"}""")

  test "Invalid enum value for logLevel":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{"logLevel": "SUPERVERBOSE"}""")

  test "Wrong type for clusterId (string instead of number)":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": "not-a-number"
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Completely invalid JSON syntax":
    # Parse errors (not just schema errors) must also raise.
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{ not valid json at all }""")
suite "NodeConfig JSON -> WakuConf integration":
  test "Decoded config translates to valid WakuConf":
    ## Given: a realistic, fully-populated JSON configuration.
    let rawJson =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": [
            "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
          ],
          "staticStoreNodes": [
            "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
          ],
          "clusterId": 55,
          "autoShardingConfig": {
            "numShardsInCluster": 6
          },
          "messageValidation": {
            "maxMessageSize": "256 KiB",
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        },
        "ethRpcEndpoints": ["http://localhost:8545"],
        "p2pReliability": true,
        "logLevel": "INFO",
        "logFormat": "TEXT"
      }
      """

    ## When: decode the JSON, then translate into the internal WakuConf.
    let parsed = decodeNodeConfigFromJson(rawJson)
    let translationRes = toWakuConf(parsed)

    ## Then: translation succeeds and the result passes self-validation.
    require translationRes.isOk()
    let translated = translationRes.get()
    require translated.validate().isOk()

    # Spot-check that key values propagated through the translation,
    # including the "256 KiB" size string becoming a byte count.
    check:
      translated.clusterId == 55
      translated.shardingConf.numShardsInCluster == 6
      translated.maxMessageSizeBytes == 256'u64 * 1024'u64
      translated.staticNodes.len == 1
      translated.p2pReliability == true

View File

@ -621,6 +621,20 @@ with the drawback of consuming some more bandwidth.""",
name: "mixnode"
.}: seq[MixNodePubInfo]
# Kademlia Discovery config
enableKadDiscovery* {.
desc:
"Enable extended kademlia discovery. Can be enabled without bootstrap nodes for the first node in the network.",
defaultValue: false,
name: "enable-kad-discovery"
.}: bool
kadBootstrapNodes* {.
desc:
"Peer multiaddr for kademlia discovery bootstrap node (must include /p2p/<peerID>). Argument may be repeated.",
name: "kad-bootstrap-node"
.}: seq[string]
## websocket config
websocketSupport* {.
desc: "Enable websocket: true|false",
@ -1057,4 +1071,7 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.rateLimitConf.withRateLimits(n.rateLimits)
b.kademliaDiscoveryConf.withEnabled(n.enableKadDiscovery)
b.kademliaDiscoveryConf.withBootstrapNodes(n.kadBootstrapNodes)
return b.build()

2
vendor/nim-libp2p vendored

@ -1 +1 @@
Subproject commit ca48c3718246bb411ff0e354a70cb82d9a28de0d
Subproject commit ff8d51857b4b79a68468e7bcc27b2026cca02996

View File

@ -24,7 +24,7 @@ requires "nim >= 2.2.4",
"stew",
"stint",
"metrics",
"libp2p >= 1.14.3",
"libp2p >= 1.15.0",
"web3",
"presto",
"regex",
@ -64,7 +64,7 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " &
srcDir & name & ".nim"
proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static") =
proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static", srcFile = "libwaku.nim", mainPrefix = "libwaku") =
if not dirExists "build":
mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
@ -73,12 +73,12 @@ proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
else:
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
proc buildMobileAndroid(srcDir = ".", params = "") =
let cpu = getEnv("CPU")
@ -400,3 +400,11 @@ task libWakuIOS, "Build the mobile bindings for iOS":
let srcDir = "./library"
let extraParams = "-d:chronicles_log_level=ERROR"
buildMobileIOS srcDir, extraParams
task liblogosdeliveryStatic, "Build the liblogosdelivery (Logos Messaging Delivery API) static library":
let lib_name = paramStr(paramCount())
buildLibrary lib_name, "liblogosdelivery/", chroniclesParams, "static", "liblogosdelivery.nim", "liblogosdelivery"
task liblogosdeliveryDynamic, "Build the liblogosdelivery (Logos Messaging Delivery API) dynamic library":
let lib_name = paramStr(paramCount())
buildLibrary lib_name, "liblogosdelivery/", chroniclesParams, "dynamic", "liblogosdelivery.nim", "liblogosdelivery"

View File

@ -4,6 +4,7 @@ import waku/factory/waku
import waku/[requests/health_requests, waku_core, waku_node]
import waku/node/delivery_service/send_service
import waku/node/delivery_service/subscription_service
import libp2p/peerid
import ./[api_conf, types]
logScope:

View File

@ -1,14 +1,18 @@
import std/[net, options]
import results
import json_serialization, json_serialization/std/options as json_options
import
waku/common/utils/parse_size_units,
waku/common/logging,
waku/factory/waku_conf,
waku/factory/conf_builder/conf_builder,
waku/factory/networks_config,
./entry_nodes
export json_serialization, json_options
type AutoShardingConfig* {.requiresInit.} = object
numShardsInCluster*: uint16
@ -87,6 +91,8 @@ type NodeConfig* {.requiresInit.} = object
networkingConfig: NetworkingConfig
ethRpcEndpoints: seq[string]
p2pReliability: bool
logLevel: LogLevel
logFormat: LogFormat
proc init*(
T: typedesc[NodeConfig],
@ -95,6 +101,8 @@ proc init*(
networkingConfig: NetworkingConfig = DefaultNetworkingConfig,
ethRpcEndpoints: seq[string] = @[],
p2pReliability: bool = false,
logLevel: LogLevel = LogLevel.INFO,
logFormat: LogFormat = LogFormat.TEXT,
): T =
return T(
mode: mode,
@ -102,11 +110,57 @@ proc init*(
networkingConfig: networkingConfig,
ethRpcEndpoints: ethRpcEndpoints,
p2pReliability: p2pReliability,
logLevel: logLevel,
logFormat: logFormat,
)
# -- Getters for ProtocolsConfig (private fields) - used for testing --

proc entryNodes*(c: ProtocolsConfig): seq[string] =
  ## Read-only accessor for the private `entryNodes` field.
  c.entryNodes

proc staticStoreNodes*(c: ProtocolsConfig): seq[string] =
  ## Read-only accessor for the private `staticStoreNodes` field.
  c.staticStoreNodes

proc clusterId*(c: ProtocolsConfig): uint16 =
  ## Read-only accessor for the private `clusterId` field.
  c.clusterId

proc autoShardingConfig*(c: ProtocolsConfig): AutoShardingConfig =
  ## Read-only accessor for the private `autoShardingConfig` field.
  c.autoShardingConfig

proc messageValidation*(c: ProtocolsConfig): MessageValidation =
  ## Read-only accessor for the private `messageValidation` field.
  c.messageValidation
# -- Getters for NodeConfig (private fields) - used for testing --

proc mode*(c: NodeConfig): WakuMode =
  ## Read-only accessor for the private `mode` field.
  c.mode

proc protocolsConfig*(c: NodeConfig): ProtocolsConfig =
  ## Read-only accessor for the private `protocolsConfig` field.
  c.protocolsConfig

proc networkingConfig*(c: NodeConfig): NetworkingConfig =
  ## Read-only accessor for the private `networkingConfig` field.
  c.networkingConfig

proc ethRpcEndpoints*(c: NodeConfig): seq[string] =
  ## Read-only accessor for the private `ethRpcEndpoints` field.
  c.ethRpcEndpoints

proc p2pReliability*(c: NodeConfig): bool =
  ## Read-only accessor for the private `p2pReliability` field.
  c.p2pReliability

proc logLevel*(c: NodeConfig): LogLevel =
  ## Read-only accessor for the private `logLevel` field.
  c.logLevel

proc logFormat*(c: NodeConfig): LogFormat =
  ## Read-only accessor for the private `logFormat` field.
  c.logFormat
proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
var b = WakuConfBuilder.init()
# Apply log configuration
b.withLogLevel(nodeConfig.logLevel)
b.withLogFormat(nodeConfig.logFormat)
# Apply networking configuration
let networkingConfig = nodeConfig.networkingConfig
let ip = parseIpAddress(networkingConfig.listenIpv4)
@ -214,3 +268,260 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
return err("Failed to validate configuration: " & error)
return ok(wakuConf)
# ---- JSON serialization (writeValue / readValue) ----
# ---------- AutoShardingConfig ----------
proc writeValue*(w: var JsonWriter, val: AutoShardingConfig) {.raises: [IOError].} =
  ## Serializes an AutoShardingConfig as a JSON object with a single
  ## 'numShardsInCluster' field.
  w.beginRecord()
  w.writeField("numShardsInCluster", val.numShardsInCluster)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var AutoShardingConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses an AutoShardingConfig JSON object.
  ## The single field 'numShardsInCluster' is required; any other key is
  ## rejected with an unexpected-field error.
  var shards = none(uint16)
  for key in readObjectFields(r):
    if key == "numShardsInCluster":
      shards = some(r.readValue(uint16))
    else:
      r.raiseUnexpectedField(key, "AutoShardingConfig")
  if shards.isNone():
    r.raiseUnexpectedValue("Missing required field 'numShardsInCluster'")
  val = AutoShardingConfig(numShardsInCluster: shards.get())
# ---------- RlnConfig ----------
proc writeValue*(w: var JsonWriter, val: RlnConfig) {.raises: [IOError].} =
  ## Serializes an RlnConfig as a JSON object with its three fields.
  w.beginRecord()
  w.writeField("contractAddress", val.contractAddress)
  w.writeField("chainId", val.chainId)
  w.writeField("epochSizeSec", val.epochSizeSec)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var RlnConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses an RlnConfig JSON object.
  ## All three fields ('contractAddress', 'chainId', 'epochSizeSec') are
  ## required; unknown keys are rejected.
  var
    contractAddress: Option[string]
    chainId: Option[uint]
    epochSizeSec: Option[uint64]
  for fieldName in readObjectFields(r):
    case fieldName
    of "contractAddress":
      contractAddress = some(r.readValue(string))
    of "chainId":
      chainId = some(r.readValue(uint))
    of "epochSizeSec":
      epochSizeSec = some(r.readValue(uint64))
    else:
      # Strict parsing: unexpected keys are an error, not ignored.
      r.raiseUnexpectedField(fieldName, "RlnConfig")
  if contractAddress.isNone():
    r.raiseUnexpectedValue("Missing required field 'contractAddress'")
  if chainId.isNone():
    r.raiseUnexpectedValue("Missing required field 'chainId'")
  if epochSizeSec.isNone():
    r.raiseUnexpectedValue("Missing required field 'epochSizeSec'")
  val = RlnConfig(
    contractAddress: contractAddress.get(),
    chainId: chainId.get(),
    epochSizeSec: epochSizeSec.get(),
  )
# ---------- NetworkingConfig ----------
proc writeValue*(w: var JsonWriter, val: NetworkingConfig) {.raises: [IOError].} =
  ## Serializes a NetworkingConfig as a JSON object with its three fields.
  w.beginRecord()
  w.writeField("listenIpv4", val.listenIpv4)
  w.writeField("p2pTcpPort", val.p2pTcpPort)
  w.writeField("discv5UdpPort", val.discv5UdpPort)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var NetworkingConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses a NetworkingConfig JSON object.
  ## All three fields ('listenIpv4', 'p2pTcpPort', 'discv5UdpPort') are
  ## required; unknown keys are rejected.
  var
    ip = none(string)
    tcpPort = none(uint16)
    udpPort = none(uint16)
  for key in readObjectFields(r):
    case key
    of "listenIpv4":
      ip = some(r.readValue(string))
    of "p2pTcpPort":
      tcpPort = some(r.readValue(uint16))
    of "discv5UdpPort":
      udpPort = some(r.readValue(uint16))
    else:
      r.raiseUnexpectedField(key, "NetworkingConfig")
  if ip.isNone():
    r.raiseUnexpectedValue("Missing required field 'listenIpv4'")
  if tcpPort.isNone():
    r.raiseUnexpectedValue("Missing required field 'p2pTcpPort'")
  if udpPort.isNone():
    r.raiseUnexpectedValue("Missing required field 'discv5UdpPort'")
  val = NetworkingConfig(
    listenIpv4: ip.get(), p2pTcpPort: tcpPort.get(), discv5UdpPort: udpPort.get()
  )
# ---------- MessageValidation ----------
proc writeValue*(w: var JsonWriter, val: MessageValidation) {.raises: [IOError].} =
  ## Serializes a MessageValidation as a JSON object; an absent rlnConfig
  ## is written via the Option serializer.
  w.beginRecord()
  w.writeField("maxMessageSize", val.maxMessageSize)
  w.writeField("rlnConfig", val.rlnConfig)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var MessageValidation
) {.raises: [SerializationError, IOError].} =
  ## Parses a MessageValidation JSON object.
  ## 'maxMessageSize' is required; 'rlnConfig' is optional and may be null.
  var
    maxMessageSize: Option[string]
    # Nested Option distinguishes "key absent" (outer none) from
    # "key present but null" (outer some, inner none).
    rlnConfig: Option[Option[RlnConfig]]
  for fieldName in readObjectFields(r):
    case fieldName
    of "maxMessageSize":
      maxMessageSize = some(r.readValue(string))
    of "rlnConfig":
      rlnConfig = some(r.readValue(Option[RlnConfig]))
    else:
      # Strict parsing: unexpected keys are an error, not ignored.
      r.raiseUnexpectedField(fieldName, "MessageValidation")
  if maxMessageSize.isNone():
    r.raiseUnexpectedValue("Missing required field 'maxMessageSize'")
  # Absent and null rlnConfig both collapse to none(RlnConfig) here.
  val = MessageValidation(
    maxMessageSize: maxMessageSize.get(), rlnConfig: rlnConfig.get(none(RlnConfig))
  )
# ---------- ProtocolsConfig ----------
proc writeValue*(w: var JsonWriter, val: ProtocolsConfig) {.raises: [IOError].} =
  ## Serializes a ProtocolsConfig as a JSON object; nested configs use
  ## their own writeValue overloads.
  w.beginRecord()
  w.writeField("entryNodes", val.entryNodes)
  w.writeField("staticStoreNodes", val.staticStoreNodes)
  w.writeField("clusterId", val.clusterId)
  w.writeField("autoShardingConfig", val.autoShardingConfig)
  w.writeField("messageValidation", val.messageValidation)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var ProtocolsConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses a ProtocolsConfig JSON object.
  ## 'entryNodes' and 'clusterId' are required; 'staticStoreNodes',
  ## 'autoShardingConfig' and 'messageValidation' fall back to defaults
  ## when absent. Unknown keys are rejected.
  var
    entryNodes: Option[seq[string]]
    staticStoreNodes: Option[seq[string]]
    clusterId: Option[uint16]
    autoShardingConfig: Option[AutoShardingConfig]
    messageValidation: Option[MessageValidation]
  for fieldName in readObjectFields(r):
    case fieldName
    of "entryNodes":
      entryNodes = some(r.readValue(seq[string]))
    of "staticStoreNodes":
      staticStoreNodes = some(r.readValue(seq[string]))
    of "clusterId":
      clusterId = some(r.readValue(uint16))
    of "autoShardingConfig":
      autoShardingConfig = some(r.readValue(AutoShardingConfig))
    of "messageValidation":
      messageValidation = some(r.readValue(MessageValidation))
    else:
      # Strict parsing: unexpected keys are an error, not ignored.
      r.raiseUnexpectedField(fieldName, "ProtocolsConfig")
  if entryNodes.isNone():
    r.raiseUnexpectedValue("Missing required field 'entryNodes'")
  if clusterId.isNone():
    r.raiseUnexpectedValue("Missing required field 'clusterId'")
  # Optional fields default to an empty list / the module-level defaults.
  val = ProtocolsConfig.init(
    entryNodes = entryNodes.get(),
    staticStoreNodes = staticStoreNodes.get(@[]),
    clusterId = clusterId.get(),
    autoShardingConfig = autoShardingConfig.get(DefaultAutoShardingConfig),
    messageValidation = messageValidation.get(DefaultMessageValidation),
  )
# ---------- NodeConfig ----------
proc writeValue*(w: var JsonWriter, val: NodeConfig) {.raises: [IOError].} =
  ## Serializes a NodeConfig as a JSON object; nested configs use their
  ## own writeValue overloads.
  w.beginRecord()
  w.writeField("mode", val.mode)
  w.writeField("protocolsConfig", val.protocolsConfig)
  w.writeField("networkingConfig", val.networkingConfig)
  w.writeField("ethRpcEndpoints", val.ethRpcEndpoints)
  w.writeField("p2pReliability", val.p2pReliability)
  w.writeField("logLevel", val.logLevel)
  w.writeField("logFormat", val.logFormat)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var NodeConfig
) {.raises: [SerializationError, IOError].} =
  ## Parses a NodeConfig JSON object.
  ## Every field is optional; missing fields take the same defaults as
  ## NodeConfig.init (Core mode, network preset, default networking, INFO
  ## level, TEXT format). Unknown keys are rejected.
  var
    mode: Option[WakuMode]
    protocolsConfig: Option[ProtocolsConfig]
    networkingConfig: Option[NetworkingConfig]
    ethRpcEndpoints: Option[seq[string]]
    p2pReliability: Option[bool]
    logLevel: Option[LogLevel]
    logFormat: Option[LogFormat]
  for fieldName in readObjectFields(r):
    case fieldName
    of "mode":
      mode = some(r.readValue(WakuMode))
    of "protocolsConfig":
      protocolsConfig = some(r.readValue(ProtocolsConfig))
    of "networkingConfig":
      networkingConfig = some(r.readValue(NetworkingConfig))
    of "ethRpcEndpoints":
      ethRpcEndpoints = some(r.readValue(seq[string]))
    of "p2pReliability":
      p2pReliability = some(r.readValue(bool))
    of "logLevel":
      logLevel = some(r.readValue(LogLevel))
    of "logFormat":
      logFormat = some(r.readValue(LogFormat))
    else:
      # Strict parsing: unexpected keys are an error, not ignored.
      r.raiseUnexpectedField(fieldName, "NodeConfig")
  val = NodeConfig.init(
    mode = mode.get(WakuMode.Core),
    protocolsConfig = protocolsConfig.get(TheWakuNetworkPreset),
    networkingConfig = networkingConfig.get(DefaultNetworkingConfig),
    ethRpcEndpoints = ethRpcEndpoints.get(@[]),
    p2pReliability = p2pReliability.get(false),
    logLevel = logLevel.get(LogLevel.INFO),
    logFormat = logFormat.get(LogFormat.TEXT),
  )
# ---------- Decode helper ----------
# Json.decode returns T via `result`, which conflicts with {.requiresInit.}
# on Nim 2.x. This helper avoids the issue by using readValue into a var.
proc decodeNodeConfigFromJson*(
    jsonStr: string
): NodeConfig {.raises: [SerializationError].} =
  ## Decodes a NodeConfig from a JSON string.
  ## Exists because Json.decode assigns through `result`, which conflicts
  ## with {.requiresInit.} on Nim 2.x; readValue into a pre-initialized var
  ## sidesteps that.
  var val = NodeConfig.init() # default-initialized, then overwritten by readValue
  try:
    var stream = unsafeMemoryInput(jsonStr)
    var reader = JsonReader[DefaultFlavor].init(stream)
    reader.readValue(val)
  except IOError as err:
    # Normalize stream I/O failures to the declared exception type so
    # callers only need to handle SerializationError.
    raise (ref SerializationError)(msg: err.msg)
  return val

View File

@ -48,8 +48,8 @@ proc check(db: DbConn): Result[void, string] =
return err("exception in check: " & getCurrentExceptionMsg())
if message.len > 0:
let truncatedErr = message[0 .. 80]
## libpq sometimes gives extremely long error messages
let truncatedErr = message[0 ..< min(80, message.len)]
error "postgres check issue. see truncated db error.", error = truncatedErr
return err(truncatedErr)
return ok()

View File

@ -0,0 +1,280 @@
{.push raises: [].}
import std/[options, sequtils]
import
chronos,
chronicles,
results,
stew/byteutils,
libp2p/[peerid, multiaddress, switch],
libp2p/extended_peer_record,
libp2p/crypto/curve25519,
libp2p/protocols/[kademlia, kad_disco],
libp2p/protocols/kademlia_discovery/types as kad_types,
libp2p/protocols/mix/mix_protocol
import waku/waku_core, waku/node/peer_manager
logScope:
topics = "waku extended kademlia discovery"
const
  ## How often the background loop samples random records from the DHT.
  DefaultExtendedKademliaDiscoveryInterval* = chronos.seconds(5)
  ## How long the loop waits between checks while the node is not yet started.
  ExtendedKademliaDiscoveryStartupDelay* = chronos.seconds(5)

type
  ## Callback returning the current size of the mix node pool.
  MixNodePoolSizeProvider* = proc(): int {.gcsafe, raises: [].}
  ## Callback reporting whether the node has started listening.
  NodeStartedProvider* = proc(): bool {.gcsafe, raises: [].}

  ExtendedKademliaDiscoveryParams* = object
    bootstrapNodes*: seq[(PeerId, seq[MultiAddress])] # empty => act as seed node
    mixPubKey*: Option[Curve25519Key] # key to advertise when advertiseMix is set
    advertiseMix*: bool = false

  WakuKademlia* = ref object
    protocol*: KademliaDiscovery
    peerManager: PeerManager
    discoveryLoop: Future[void] # background loop handle; nil until started
    running*: bool # guards start/stop re-entrancy
    getMixNodePoolSize: MixNodePoolSizeProvider
    isNodeStarted: NodeStartedProvider
proc new*(
    T: type WakuKademlia,
    switch: Switch,
    params: ExtendedKademliaDiscoveryParams,
    peerManager: PeerManager,
    getMixNodePoolSize: MixNodePoolSizeProvider = nil,
    isNodeStarted: NodeStartedProvider = nil,
): Result[T, string] =
  ## Creates a KademliaDiscovery instance, mounts it on `switch`, and
  ## optionally registers the mix service for advertising.
  ## Returns err when mounting fails. The instance is created stopped;
  ## call start() to launch the protocol and discovery loop.
  if params.bootstrapNodes.len == 0:
    info "creating kademlia discovery as seed node (no bootstrap nodes)"

  let kademlia = KademliaDiscovery.new(
    switch,
    bootstrapNodes = params.bootstrapNodes,
    config = KadDHTConfig.new(
      validator = kad_types.ExtEntryValidator(), selector = kad_types.ExtEntrySelector()
    ),
    codec = ExtendedKademliaDiscoveryCodec,
  )

  try:
    switch.mount(kademlia)
  except CatchableError:
    return err("failed to mount kademlia discovery: " & getCurrentExceptionMsg())

  # Register services BEFORE starting kademlia so they are included in the
  # initial self-signed peer record published to the DHT
  if params.advertiseMix:
    if params.mixPubKey.isSome():
      let alreadyAdvertising = kademlia.startAdvertising(
        ServiceInfo(id: MixProtocolID, data: @(params.mixPubKey.get()))
      )
      if alreadyAdvertising:
        warn "mix service was already being advertised"
      debug "extended kademlia advertising mix service",
        keyHex = byteutils.toHex(params.mixPubKey.get()),
        bootstrapNodes = params.bootstrapNodes.len
    else:
      # Misconfiguration: advertising requested without a key; proceed without it.
      warn "mix advertising enabled but no key provided"

  info "kademlia discovery created",
    bootstrapNodes = params.bootstrapNodes.len, advertiseMix = params.advertiseMix

  return ok(
    WakuKademlia(
      protocol: kademlia,
      peerManager: peerManager,
      running: false,
      getMixNodePoolSize: getMixNodePoolSize,
      isNodeStarted: isNodeStarted,
    )
  )
proc extractMixPubKey(service: ServiceInfo): Option[Curve25519Key] =
  ## Extracts the mix public key carried in a service record.
  ## Returns none when the service is not the mix protocol or its payload
  ## is not exactly Curve25519KeySize bytes.
  if service.id != MixProtocolID:
    trace "service is not mix protocol",
      serviceId = service.id, mixProtocolId = MixProtocolID
    return none(Curve25519Key)
  if service.data.len != Curve25519KeySize:
    warn "invalid mix pub key length from kademlia record",
      expected = Curve25519KeySize,
      actual = service.data.len,
      dataHex = byteutils.toHex(service.data)
    return none(Curve25519Key)
  debug "found mix protocol service",
    dataLen = service.data.len, expectedLen = Curve25519KeySize
  let key = intoCurve25519Key(service.data)
  debug "successfully extracted mix pub key", keyHex = byteutils.toHex(key)
  return some(key)
proc remotePeerInfoFrom(record: ExtendedPeerRecord): Option[RemotePeerInfo] =
  ## Converts a kademlia ExtendedPeerRecord into a RemotePeerInfo.
  ## Returns none when the record has no addresses, or when it advertises
  ## services but none of them yields a valid mix public key.
  debug "processing kademlia record",
    peerId = record.peerId,
    numAddresses = record.addresses.len,
    numServices = record.services.len,
    serviceIds = record.services.mapIt(it.id)

  if record.addresses.len == 0:
    trace "kademlia record missing addresses", peerId = record.peerId
    return none(RemotePeerInfo)

  let addrs = record.addresses.mapIt(it.address)
  # NOTE(review): mapIt preserves length, so this branch cannot fire after
  # the emptiness check above — candidate for removal.
  if addrs.len == 0:
    trace "kademlia record produced no dialable addresses", peerId = record.peerId
    return none(RemotePeerInfo)

  let protocols = record.services.mapIt(it.id)

  # Scan services for the first one carrying a valid mix public key.
  var mixPubKey = none(Curve25519Key)
  for service in record.services:
    debug "checking service",
      peerId = record.peerId, serviceId = service.id, dataLen = service.data.len
    mixPubKey = extractMixPubKey(service)
    if mixPubKey.isSome():
      debug "extracted mix public key from service", peerId = record.peerId
      break

  # NOTE(review): records that advertise services but carry no valid mix key
  # are dropped entirely (not just flagged) — confirm peers offering only
  # non-mix services are intentionally excluded from discovery.
  if record.services.len > 0 and mixPubKey.isNone():
    debug "record has services but no valid mix key",
      peerId = record.peerId, services = record.services.mapIt(it.id)
    return none(RemotePeerInfo)

  return some(
    RemotePeerInfo.init(
      record.peerId,
      addrs = addrs,
      protocols = protocols,
      origin = PeerOrigin.Kademlia,
      mixPubKey = mixPubKey,
    )
  )
proc lookupMixPeers*(
    wk: WakuKademlia
): Future[Result[int, string]] {.async: (raises: []).} =
  ## Lookup mix peers via kademlia and add them to the peer store.
  ## Returns the number of mix peers found and added.
  ## Fails when the protocol is not mounted or the DHT lookup errors.
  if wk.protocol.isNil():
    return err("cannot lookup mix peers: kademlia not mounted")

  # Empty data matches any record advertising the mix protocol id.
  let mixService = ServiceInfo(id: MixProtocolID, data: @[])
  var records: seq[ExtendedPeerRecord]
  try:
    records = await wk.protocol.lookup(mixService)
  except CatchableError:
    return err("mix peer lookup failed: " & getCurrentExceptionMsg())

  debug "mix peer lookup returned records", numRecords = records.len

  var added = 0
  for record in records:
    let peerOpt = remotePeerInfoFrom(record)
    if peerOpt.isNone():
      continue
    let peerInfo = peerOpt.get()
    # Only peers with a usable mix public key count toward the result.
    if peerInfo.mixPubKey.isNone():
      continue
    wk.peerManager.addPeer(peerInfo, PeerOrigin.Kademlia)
    info "mix peer added via kademlia lookup",
      peerId = $peerInfo.peerId, mixPubKey = byteutils.toHex(peerInfo.mixPubKey.get())
    added.inc()

  info "mix peer lookup complete", found = added
  return ok(added)
proc runDiscoveryLoop(
    wk: WakuKademlia, interval: Duration, minMixPeers: int
) {.async: (raises: []).} =
  ## Background loop: every `interval`, samples random records from the DHT
  ## and adds them to the peer store; additionally performs a targeted mix
  ## peer lookup whenever the mix pool drops below `minMixPeers` (if > 0).
  ## Runs until cancelled; individual iteration failures are logged and
  ## retried after the interval.
  info "extended kademlia discovery loop started", interval = interval
  try:
    while true:
      # Wait for node to be started
      if not wk.isNodeStarted.isNil() and not wk.isNodeStarted():
        await sleepAsync(ExtendedKademliaDiscoveryStartupDelay)
        continue

      var records: seq[ExtendedPeerRecord]
      try:
        records = await wk.protocol.randomRecords()
      except CatchableError as e:
        # Transient DHT failure: log, back off one interval, retry.
        warn "extended kademlia discovery failed", error = e.msg
        await sleepAsync(interval)
        continue

      debug "received random records from kademlia", numRecords = records.len

      var added = 0
      for record in records:
        let peerOpt = remotePeerInfoFrom(record)
        if peerOpt.isNone():
          continue
        let peerInfo = peerOpt.get()
        wk.peerManager.addPeer(peerInfo, PeerOrigin.Kademlia)
        debug "peer added via extended kademlia discovery",
          peerId = $peerInfo.peerId,
          addresses = peerInfo.addrs.mapIt($it),
          protocols = peerInfo.protocols,
          hasMixPubKey = peerInfo.mixPubKey.isSome()
        added.inc()

      if added > 0:
        info "added peers from extended kademlia discovery", count = added

      # Targeted mix peer lookup when pool is low
      if minMixPeers > 0 and not wk.getMixNodePoolSize.isNil() and
          wk.getMixNodePoolSize() < minMixPeers:
        debug "mix node pool below threshold, performing targeted lookup",
          currentPoolSize = wk.getMixNodePoolSize(), threshold = minMixPeers
        # A failed targeted lookup is non-fatal; treat as zero peers found.
        let found = (await wk.lookupMixPeers()).valueOr:
          warn "targeted mix peer lookup failed", error = error
          0
        if found > 0:
          info "found mix peers via targeted kademlia lookup", count = found

      await sleepAsync(interval)
  except CancelledError as e:
    # Normal shutdown path: the loop is cancelled from stop().
    debug "extended kademlia discovery loop cancelled", error = e.msg
  except CatchableError as e:
    error "extended kademlia discovery loop failed", error = e.msg
proc start*(
    wk: WakuKademlia,
    interval: Duration = DefaultExtendedKademliaDiscoveryInterval,
    minMixPeers: int = 0,
): Future[Result[void, string]] {.async: (raises: []).} =
  ## Starts the kademlia protocol and launches the background discovery
  ## loop. `interval` controls the loop's polling period; `minMixPeers > 0`
  ## enables targeted mix peer lookups when the mix pool runs low.
  ## Fails when already running or when the protocol fails to start.
  if wk.running:
    return err("already running")
  try:
    await wk.protocol.start()
  except CatchableError as e:
    return err("failed to start kademlia discovery: " & e.msg)
  wk.discoveryLoop = wk.runDiscoveryLoop(interval, minMixPeers)
  # Fix: mark the instance as running. Without this, stop() — which is
  # guarded by `if not wk.running: return` — was an unconditional no-op,
  # leaking the discovery loop and leaving the protocol running on shutdown.
  wk.running = true
  info "kademlia discovery started"
  return ok()
proc stop*(wk: WakuKademlia) {.async: (raises: []).} =
  ## Cancels the discovery loop and stops the kademlia protocol.
  ## No-op when the instance is not marked as running.
  if not wk.running:
    return
  info "Stopping kademlia discovery"
  # Clear the flag first so a concurrent stop() returns immediately.
  wk.running = false
  if not wk.discoveryLoop.isNil():
    await wk.discoveryLoop.cancelAndWait()
    wk.discoveryLoop = nil
  if not wk.protocol.isNil():
    await wk.protocol.stop()
  info "Successfully stopped kademlia discovery"

View File

@ -10,10 +10,12 @@ import
./metrics_server_conf_builder,
./rate_limit_conf_builder,
./rln_relay_conf_builder,
./mix_conf_builder
./mix_conf_builder,
./kademlia_discovery_conf_builder
export
waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder,
store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder,
discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder,
rate_limit_conf_builder, rln_relay_conf_builder, mix_conf_builder
rate_limit_conf_builder, rln_relay_conf_builder, mix_conf_builder,
kademlia_discovery_conf_builder

View File

@ -0,0 +1,40 @@
import chronicles, std/options, results
import libp2p/[peerid, multiaddress, peerinfo]
import waku/factory/waku_conf
logScope:
topics = "waku conf builder kademlia discovery"
#######################################
## Kademlia Discovery Config Builder ##
#######################################
type KademliaDiscoveryConfBuilder* = object
enabled*: bool
bootstrapNodes*: seq[string]
proc init*(T: type KademliaDiscoveryConfBuilder): KademliaDiscoveryConfBuilder =
  ## Returns a builder with discovery disabled and no bootstrap nodes.
  KademliaDiscoveryConfBuilder()

proc withEnabled*(b: var KademliaDiscoveryConfBuilder, enabled: bool) =
  ## Sets the explicit enable flag for kademlia discovery.
  b.enabled = enabled

proc withBootstrapNodes*(
    b: var KademliaDiscoveryConfBuilder, bootstrapNodes: seq[string]
) =
  ## Sets the bootstrap node multiaddrs (each must include /p2p/<peerID>).
  b.bootstrapNodes = bootstrapNodes
proc build*(
    b: KademliaDiscoveryConfBuilder
): Result[Option[KademliaDiscoveryConf], string] =
  ## Builds the optional kademlia discovery configuration.
  ## Discovery is considered enabled when the flag was set explicitly OR
  ## any bootstrap node was supplied; otherwise none is returned.
  ## Fails when a bootstrap node multiaddr cannot be parsed.
  if not (b.enabled or b.bootstrapNodes.len > 0):
    return ok(none(KademliaDiscoveryConf))

  var nodes = newSeqOfCap[(PeerId, seq[MultiAddress])](b.bootstrapNodes.len)
  for raw in b.bootstrapNodes:
    let (peerId, ma) = parseFullAddress(raw).valueOr:
      return err("Failed to parse kademlia bootstrap node: " & error)
    nodes.add((peerId, @[ma]))

  return ok(some(KademliaDiscoveryConf(bootstrapNodes: nodes)))

View File

@ -25,7 +25,8 @@ import
./metrics_server_conf_builder,
./rate_limit_conf_builder,
./rln_relay_conf_builder,
./mix_conf_builder
./mix_conf_builder,
./kademlia_discovery_conf_builder
logScope:
topics = "waku conf builder"
@ -80,6 +81,7 @@ type WakuConfBuilder* = object
mixConf*: MixConfBuilder
webSocketConf*: WebSocketConfBuilder
rateLimitConf*: RateLimitConfBuilder
kademliaDiscoveryConf*: KademliaDiscoveryConfBuilder
# End conf builders
relay: Option[bool]
lightPush: Option[bool]
@ -140,6 +142,7 @@ proc init*(T: type WakuConfBuilder): WakuConfBuilder =
storeServiceConf: StoreServiceConfBuilder.init(),
webSocketConf: WebSocketConfBuilder.init(),
rateLimitConf: RateLimitConfBuilder.init(),
kademliaDiscoveryConf: KademliaDiscoveryConfBuilder.init(),
)
proc withNetworkConf*(b: var WakuConfBuilder, networkConf: NetworkConf) =
@ -506,6 +509,9 @@ proc build*(
let rateLimit = builder.rateLimitConf.build().valueOr:
return err("Rate limits Conf building failed: " & $error)
let kademliaDiscoveryConf = builder.kademliaDiscoveryConf.build().valueOr:
return err("Kademlia Discovery Conf building failed: " & $error)
# End - Build sub-configs
let logLevel =
@ -628,6 +634,7 @@ proc build*(
restServerConf: restServerConf,
dnsDiscoveryConf: dnsDiscoveryConf,
mixConf: mixConf,
kademliaDiscoveryConf: kademliaDiscoveryConf,
# end confs
nodeKey: nodeKey,
clusterId: clusterId,

View File

@ -6,7 +6,8 @@ import
libp2p/protocols/pubsub/gossipsub,
libp2p/protocols/connectivity/relay/relay,
libp2p/nameresolving/dnsresolver,
libp2p/crypto/crypto
libp2p/crypto/crypto,
libp2p/crypto/curve25519
import
./internal_config,
@ -32,6 +33,7 @@ import
../waku_store_legacy/common as legacy_common,
../waku_filter_v2,
../waku_peer_exchange,
../discovery/waku_kademlia,
../node/peer_manager,
../node/peer_manager/peer_store/waku_peer_storage,
../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations,
@ -165,13 +167,36 @@ proc setupProtocols(
#mount mix
if conf.mixConf.isSome():
(
await node.mountMix(
conf.clusterId, conf.mixConf.get().mixKey, conf.mixConf.get().mixnodes
)
).isOkOr:
let mixConf = conf.mixConf.get()
(await node.mountMix(conf.clusterId, mixConf.mixKey, mixConf.mixnodes)).isOkOr:
return err("failed to mount waku mix protocol: " & $error)
# Setup extended kademlia discovery
if conf.kademliaDiscoveryConf.isSome():
let mixPubKey =
if conf.mixConf.isSome():
some(conf.mixConf.get().mixPubKey)
else:
none(Curve25519Key)
node.wakuKademlia = WakuKademlia.new(
node.switch,
ExtendedKademliaDiscoveryParams(
bootstrapNodes: conf.kademliaDiscoveryConf.get().bootstrapNodes,
mixPubKey: mixPubKey,
advertiseMix: conf.mixConf.isSome(),
),
node.peerManager,
getMixNodePoolSize = proc(): int {.gcsafe, raises: [].} =
if node.wakuMix.isNil():
0
else:
node.getMixNodePoolSize(),
isNodeStarted = proc(): bool {.gcsafe, raises: [].} =
node.started,
).valueOr:
return err("failed to setup kademlia discovery: " & error)
if conf.storeServiceConf.isSome():
let storeServiceConf = conf.storeServiceConf.get()
if storeServiceConf.supportV2:
@ -477,6 +502,11 @@ proc startNode*(
if conf.relay:
node.peerManager.start()
if not node.wakuKademlia.isNil():
let minMixPeers = if conf.mixConf.isSome(): 4 else: 0
(await node.wakuKademlia.start(minMixPeers = minMixPeers)).isOkOr:
return err("failed to start kademlia discovery: " & error)
return ok()
proc setupNode*(

View File

@ -203,6 +203,9 @@ proc new*(
else:
nil
# Set the extMultiAddrsOnly flag so the node knows not to replace explicit addresses
node.extMultiAddrsOnly = wakuConf.endpointConf.extMultiAddrsOnly
node.setupAppCallbacks(wakuConf, appCallbacks, healthMonitor).isOkOr:
error "Failed setting up app callbacks", error = error
return err("Failed setting up app callbacks: " & $error)

View File

@ -4,6 +4,7 @@ import
libp2p/crypto/crypto,
libp2p/multiaddress,
libp2p/crypto/curve25519,
libp2p/peerid,
secp256k1,
results
@ -51,6 +52,10 @@ type MixConf* = ref object
mixPubKey*: Curve25519Key
mixnodes*: seq[MixNodePubInfo]
type KademliaDiscoveryConf* = object
bootstrapNodes*: seq[(PeerId, seq[MultiAddress])]
## Bootstrap nodes for extended kademlia discovery.
type StoreServiceConf* {.requiresInit.} = object
dbMigration*: bool
dbURl*: string
@ -109,6 +114,7 @@ type WakuConf* {.requiresInit.} = ref object
metricsServerConf*: Option[MetricsServerConf]
webSocketConf*: Option[WebSocketConf]
mixConf*: Option[MixConf]
kademliaDiscoveryConf*: Option[KademliaDiscoveryConf]
portsShift*: uint16
dnsAddrsNameServers*: seq[IpAddress]

View File

@ -91,6 +91,7 @@ proc setupSendProcessorChain(
for i in 1 ..< processors.len:
currentProcessor.chain(processors[i])
currentProcessor = processors[i]
trace "Send processor chain", index = i, processor = type(processors[i]).name
return ok(processors[0])

View File

@ -43,9 +43,6 @@ type
# Keeps track of peer shards
ShardBook* = ref object of PeerBook[seq[uint16]]
# Keeps track of Mix protocol public keys of peers
MixPubKeyBook* = ref object of PeerBook[Curve25519Key]
proc getPeer*(peerStore: PeerStore, peerId: PeerId): RemotePeerInfo =
let addresses =
if peerStore[LastSeenBook][peerId].isSome():
@ -85,7 +82,7 @@ proc delete*(peerStore: PeerStore, peerId: PeerId) =
proc peers*(peerStore: PeerStore): seq[RemotePeerInfo] =
let allKeys = concat(
toSeq(peerStore[LastSeenBook].book.keys()),
toSeq(peerStore[LastSeenOutboundBook].book.keys()),
toSeq(peerStore[AddressBook].book.keys()),
toSeq(peerStore[ProtoBook].book.keys()),
toSeq(peerStore[KeyBook].book.keys()),

View File

@ -66,6 +66,7 @@ import
events/health_events,
events/peer_events,
],
waku/discovery/waku_kademlia,
./net_config,
./peer_manager,
./health_monitor/health_status,
@ -141,6 +142,7 @@ type
wakuRendezvous*: WakuRendezVous
wakuRendezvousClient*: rendezvous_client.WakuRendezVousClient
announcedAddresses*: seq[MultiAddress]
extMultiAddrsOnly*: bool # When true, skip automatic IP address replacement
started*: bool # Indicates that node has started listening
topicSubscriptionQueue*: AsyncEventQueue[SubscriptionEvent]
rateLimitSettings*: ProtocolRateLimitSettings
@ -149,6 +151,8 @@ type
edgeHealthEvent*: AsyncEvent
edgeHealthLoop: Future[void]
peerEventListener*: EventWakuPeerListener
kademliaDiscoveryLoop*: Future[void]
wakuKademlia*: WakuKademlia
proc deduceRelayShard(
node: WakuNode,
@ -303,7 +307,7 @@ proc mountAutoSharding*(
return ok()
proc getMixNodePoolSize*(node: WakuNode): int =
return node.wakuMix.getNodePoolSize()
return node.wakuMix.poolSize()
proc mountMix*(
node: WakuNode,
@ -455,6 +459,11 @@ proc isBindIpWithZeroPort(inputMultiAdd: MultiAddress): bool =
return false
proc updateAnnouncedAddrWithPrimaryIpAddr*(node: WakuNode): Result[void, string] =
# Skip automatic IP replacement if extMultiAddrsOnly is set
# This respects the user's explicitly configured announced addresses
if node.extMultiAddrsOnly:
return ok()
let peerInfo = node.switch.peerInfo
var announcedStr = ""
var listenStr = ""
@ -705,6 +714,9 @@ proc stop*(node: WakuNode) {.async.} =
not node.wakuPeerExchangeClient.pxLoopHandle.isNil():
await node.wakuPeerExchangeClient.pxLoopHandle.cancelAndWait()
if not node.wakuKademlia.isNil():
await node.wakuKademlia.stop()
if not node.wakuRendezvous.isNil():
await node.wakuRendezvous.stopWait()

View File

@ -1347,8 +1347,10 @@ proc removePartition(
(await self.performWriteQuery(detachPartitionQuery)).isOkOr:
info "detected error when trying to detach partition", error
if ($error).contains("FINALIZE") or
($error).contains("already pending detach in part"):
if ($error).contains("FINALIZE") or ($error).contains("already pending"):
## We assume "already pending detach in partitioned table ..." as possible error
debug "enforce detach with FINALIZE because of detected error", error
## We assume the database is suggesting to use FINALIZE when detaching a partition
let detachPartitionFinalizeQuery =
"ALTER TABLE messages DETACH PARTITION " & partitionName & " FINALIZE;"

View File

@ -38,6 +38,7 @@ type
Static
PeerExchange
Dns
Kademlia
PeerDirection* = enum
UnknownDirection

View File

@ -1,22 +1,23 @@
{.push raises: [].}
import chronicles, std/[options, tables, sequtils], chronos, results, metrics, strutils
import chronicles, std/options, chronos, results, metrics
import
libp2p/crypto/curve25519,
libp2p/crypto/crypto,
libp2p/protocols/mix,
libp2p/protocols/mix/mix_node,
libp2p/protocols/mix/mix_protocol,
libp2p/protocols/mix/mix_metrics,
libp2p/[multiaddress, multicodec, peerid],
libp2p/protocols/mix/delay_strategy,
libp2p/[multiaddress, peerid],
eth/common/keys
import
../node/peer_manager,
../waku_core,
../waku_enr,
../node/peer_manager/waku_peer_store,
../common/nimchronos
waku/node/peer_manager,
waku/waku_core,
waku/waku_enr,
waku/node/peer_manager/waku_peer_store
logScope:
topics = "waku mix"
@ -27,7 +28,6 @@ type
WakuMix* = ref object of MixProtocol
peerManager*: PeerManager
clusterId: uint16
nodePoolLoopHandle: Future[void]
pubKey*: Curve25519Key
WakuMixResult*[T] = Result[T, string]
@ -36,106 +36,10 @@ type
multiAddr*: string
pubKey*: Curve25519Key
proc filterMixNodes(cluster: Option[uint16], peer: RemotePeerInfo): bool =
  ## Predicate for selecting mix-node candidates from the peer store.
  ## A peer qualifies only when it advertises a mix public key and, if a
  ## cluster id is supplied, its ENR is not bound to a different cluster.
  ## Origin-based (discv5) filtering is deliberately omitted so that more
  ## mix nodes can be discovered.
  if peer.mixPubKey.isNone():
    trace "remote peer has no mix Pub Key", peer = $peer
    return false

  # Only reject on cluster when both a target cluster and an ENR are present.
  let clusterMismatch =
    cluster.isSome() and peer.enr.isSome() and
    peer.enr.get().isClusterMismatched(cluster.get())
  if clusterMismatch:
    trace "peer has mismatching cluster", peer = $peer
    return false

  true
proc appendPeerIdToMultiaddr*(multiaddr: MultiAddress, peerId: PeerId): MultiAddress =
  ## Returns `multiaddr` extended with a trailing `/p2p/<peerId>` component.
  ## Best effort: if the address already carries a `p2p` component, or any
  ## conversion step fails, the input multiaddress is returned unchanged.
  # `contains` returns a Result; fall back to `false` instead of bare `get()`,
  # which would raise a Defect on a malformed multiaddress and bypass the
  # graceful fallbacks below.
  if multiaddr.contains(multiCodec("p2p")).get(false):
    return multiaddr

  var maddrStr = multiaddr.toString().valueOr:
    error "Failed to convert multiaddress to string.", err = error
    return multiaddr

  maddrStr.add("/p2p/" & $peerId)

  let cleanAddr = MultiAddress.init(maddrStr).valueOr:
    error "Failed to convert string to multiaddress.", err = error
    return multiaddr

  return cleanAddr
func getIPv4Multiaddr*(maddrs: seq[MultiAddress]): Option[MultiAddress] =
  ## Returns the first multiaddress in `maddrs` containing an `ip4` component,
  ## or `none(MultiAddress)` when no such address exists (including for an
  ## empty input sequence).
  for multiaddr in maddrs:
    trace "checking multiaddr", addr = $multiaddr
    # `contains` returns a Result; default to `false` on error rather than
    # calling `get()`, which raises a Defect on a malformed multiaddress and
    # would abort the whole scan instead of skipping the bad entry.
    if multiaddr.contains(multiCodec("ip4")).get(false):
      trace "found ipv4 multiaddr", addr = $multiaddr
      return some(multiaddr)
  trace "no ipv4 multiaddr found"
  return none(MultiAddress)
proc populateMixNodePool*(mix: WakuMix) =
  ## Rebuilds the mix node pool from the peer store: filters peers that
  ## advertise a mix public key and match the local cluster, derives each
  ## peer's MixPubInfo, then swaps the whole pool atomically via setNodePool
  ## and updates the `mix_pool_size` metric.
  # populate only peers that i) are reachable ii) share cluster iii) support mix
  let remotePeers = mix.peerManager.switch.peerStore.peers().filterIt(
    filterMixNodes(some(mix.clusterId), it)
  )

  var mixNodes = initTable[PeerId, MixPubInfo]()
  # Cap the pool at 100 candidates per refresh.
  for i in 0 ..< min(remotePeers.len, 100):
    # Only peers with a dialable IPv4 address are usable as mix hops.
    let ipv4addr = getIPv4Multiaddr(remotePeers[i].addrs).valueOr:
      trace "peer has no ipv4 address", peer = $remotePeers[i]
      continue
    # NOTE(review): maddrWithPeerId is computed but never used below — the
    # pool entry stores the bare ipv4addr. Possibly the /p2p-suffixed address
    # was intended for MixPubInfo.init; confirm before removing.
    let maddrWithPeerId = appendPeerIdToMultiaddr(ipv4addr, remotePeers[i].peerId)
    trace "remote peer info", info = remotePeers[i]
    # Defensive re-check; filterMixNodes already excluded keyless peers.
    if remotePeers[i].mixPubKey.isNone():
      trace "peer has no mix Pub Key", remotePeerId = $remotePeers[i]
      continue
    let peerMixPubKey = remotePeers[i].mixPubKey.get()

    # The peer's libp2p identity key is needed alongside its mix key; it can
    # only be recovered from the PeerId for Secp256k1-based identities.
    var peerPubKey: crypto.PublicKey
    if not remotePeers[i].peerId.extractPublicKey(peerPubKey):
      warn "Failed to extract public key from peerId, skipping node",
        remotePeerId = remotePeers[i].peerId
      continue

    if peerPubKey.scheme != PKScheme.Secp256k1:
      warn "Peer public key is not Secp256k1, skipping node",
        remotePeerId = remotePeers[i].peerId, scheme = peerPubKey.scheme
      continue

    let mixNodePubInfo = MixPubInfo.init(
      remotePeers[i].peerId,
      ipv4addr,
      intoCurve25519Key(peerMixPubKey),
      peerPubKey.skkey,
    )
    trace "adding mix node to pool",
      remotePeerId = remotePeers[i].peerId, multiAddr = $ipv4addr
    mixNodes[remotePeers[i].peerId] = mixNodePubInfo

  # set the mix node pool
  mix.setNodePool(mixNodes)
  mix_pool_size.set(len(mixNodes))
  trace "mix node pool updated", poolSize = mix.getNodePoolSize()
# Once mix protocol starts to use info from PeerStore, then this can be removed.
proc startMixNodePoolMgr*(mix: WakuMix) {.async.} =
  ## Background loop keeping the mix node pool populated: an aggressive
  ## bootstrap phase followed by a slow periodic refresh.
  info "starting mix node pool manager"
  # try more aggressively to populate the pool at startup
  # TODO: make initial pool size configurable
  var remaining = 50
  while remaining > 0 and mix.getNodePoolSize() < 100:
    dec remaining
    mix.populateMixNodePool()
    await sleepAsync(1.seconds)

  # TODO: make interval configurable
  heartbeat "Updating mix node pool", 5.seconds:
    mix.populateMixNodePool()
proc processBootNodes(
bootnodes: seq[MixNodePubInfo], peermgr: PeerManager
): Table[PeerId, MixPubInfo] =
var mixNodes = initTable[PeerId, MixPubInfo]()
bootnodes: seq[MixNodePubInfo], peermgr: PeerManager, mix: WakuMix
) =
var count = 0
for node in bootnodes:
let pInfo = parsePeerInfo(node.multiAddr).valueOr:
error "Failed to get peer id from multiaddress: ",
@ -156,14 +60,15 @@ proc processBootNodes(
error "Failed to parse multiaddress", multiAddr = node.multiAddr, error = error
continue
mixNodes[peerId] = MixPubInfo.init(peerId, multiAddr, node.pubKey, peerPubKey.skkey)
let mixPubInfo = MixPubInfo.init(peerId, multiAddr, node.pubKey, peerPubKey.skkey)
mix.nodePool.add(mixPubInfo)
count.inc()
peermgr.addPeer(
RemotePeerInfo.init(peerId, @[multiAddr], mixPubKey = some(node.pubKey))
)
mix_pool_size.set(len(mixNodes))
info "using mix bootstrap nodes ", bootNodes = mixNodes
return mixNodes
mix_pool_size.set(count)
info "using mix bootstrap nodes ", count = count
proc new*(
T: type WakuMix,
@ -183,22 +88,28 @@ proc new*(
)
if bootnodes.len < minMixPoolSize:
warn "publishing with mix won't work until atleast 3 mix nodes in node pool"
let initTable = processBootNodes(bootnodes, peermgr)
if len(initTable) < minMixPoolSize:
warn "publishing with mix won't work until atleast 3 mix nodes in node pool"
var m = WakuMix(peerManager: peermgr, clusterId: clusterId, pubKey: mixPubKey)
procCall MixProtocol(m).init(localMixNodeInfo, initTable, peermgr.switch)
procCall MixProtocol(m).init(
localMixNodeInfo,
peermgr.switch,
delayStrategy =
ExponentialDelayStrategy.new(meanDelayMs = 50, rng = crypto.newRng()),
)
processBootNodes(bootnodes, peermgr, m)
if m.nodePool.len < minMixPoolSize:
warn "publishing with mix won't work until atleast 3 mix nodes in node pool"
return ok(m)
proc poolSize*(mix: WakuMix): int =
  ## Number of entries currently held in the mix node pool.
  return mix.nodePool.len
method start*(mix: WakuMix) =
  ## Starts the mix protocol: launches the background node-pool manager loop.
  info "starting waku mix protocol"
  # Keep the loop's Future so stop() can cancel it later.
  mix.nodePoolLoopHandle = mix.startMixNodePoolMgr()
method stop*(mix: WakuMix) {.async.} =
  ## Cancels the background node-pool manager loop, if it was ever started.
  if mix.nodePoolLoopHandle.isNil():
    return
  await mix.nodePoolLoopHandle.cancelAndWait()
  mix.nodePoolLoopHandle = nil
  # NOTE(review): trailing discard is a no-op — looks like merge residue from
  # the replacement stop() body; confirm and remove.
  discard
# Mix Protocol