Merge remote-tracking branch 'origin/master' into feat/service-incentivization-poc

This commit is contained in:
Sergei Tikhomirov 2025-05-23 13:46:15 +02:00
commit 34856918f4
254 changed files with 7554 additions and 3400 deletions

View File

@ -76,14 +76,14 @@ jobs:
tar -cvzf ${{steps.vars.outputs.nwakutools}} ./build/wakucanary ./build/networkmonitor
- name: upload artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wakunode2
path: ${{steps.vars.outputs.nwaku}}
retention-days: 2
- name: upload artifacts
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: wakutools
path: ${{steps.vars.outputs.nwakutools}}

3
.gitignore vendored
View File

@ -76,3 +76,6 @@ coverage_html_report/
.qmake.stash
main-qt
waku_handler.moc.cpp
# Nix build result
result

12
.gitmodules vendored
View File

@ -168,7 +168,7 @@
path = vendor/db_connector
url = https://github.com/nim-lang/db_connector.git
ignore = untracked
branch = master
branch = devel
[submodule "vendor/nph"]
ignore = untracked
branch = master
@ -179,16 +179,6 @@
url = https://github.com/status-im/nim-minilru.git
ignore = untracked
branch = master
[submodule "vendor/nim-quic"]
path = vendor/nim-quic
url = https://github.com/status-im/nim-quic.git
ignore = untracked
branch = master
[submodule "vendor/nim-ngtcp2"]
path = vendor/nim-ngtcp2
url = https://github.com/vacp2p/nim-ngtcp2.git
ignore = untracked
branch = master
[submodule "vendor/waku-rlnv2-contract"]
path = vendor/waku-rlnv2-contract
url = https://github.com/waku-org/waku-rlnv2-contract.git

View File

@ -1,3 +1,22 @@
## v0.35.1 (2025-03-30)
### Bug fixes
* Update RLN references ([3287](https://github.com/waku-org/nwaku/pull/3287)) ([ea961fa](https://github.com/waku-org/nwaku/pull/3287/commits/ea961faf4ed4f8287a2043a6b5d84b660745072b))
**Info:** before upgrading to this version, make sure you delete the previous rln_tree folder, i.e.,
the one that is passed through this CLI: `--rln-relay-tree-path`.
This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/):
| Protocol | Spec status | Protocol id |
| ---: | :---: | :--- |
| [`11/WAKU2-RELAY`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/11/relay.md) | `stable` | `/vac/waku/relay/2.0.0` |
| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` <br />`/vac/waku/filter-subscribe/2.0.0-beta1` <br />`/vac/waku/filter-push/2.0.0-beta1` |
| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` |
| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` |
| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` |
| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` |
## v0.35.0 (2025-03-03)
### Notes

View File

@ -1,5 +1,5 @@
# BUILD NIM APP ----------------------------------------------------------------
FROM rust:1.77.1-alpine3.18 AS nim-build
FROM rust:1.81.0-alpine3.19 AS nim-build
ARG NIMFLAGS
ARG MAKE_TARGET=wakunode2

View File

@ -4,8 +4,8 @@
# - MIT license
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
BUILD_SYSTEM_DIR := vendor/nimbus-build-system
EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor
export BUILD_SYSTEM_DIR := vendor/nimbus-build-system
export EXCLUDED_NIM_PACKAGES := vendor/nim-dnsdisc/vendor
LINK_PCRE := 0
FORMAT_MSG := "\\x1B[95mFormatting:\\x1B[39m"
# we don't want an error here, so we can handle things later, in the ".DEFAULT" target
@ -40,8 +40,8 @@ ifeq ($(detected_OS),Windows)
NIM_PARAMS += --passL:"-L$(MINGW_PATH)/lib"
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/miniupnp/miniupnpc"
NIM_PARAMS += --passL:"-Lvendor/nim-nat-traversal/vendor/libnatpmp-upstream"
LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq
LIBS = -static -lws2_32 -lbcrypt -liphlpapi -luserenv -lntdll -lminiupnpc -lnatpmp -lpq
NIM_PARAMS += $(foreach lib,$(LIBS),--passL:"$(lib)")
endif
@ -83,7 +83,7 @@ HEAPTRACKER_INJECT ?= 0
ifeq ($(HEAPTRACKER), 1)
# Needed to make nimbus-build-system use the Nim's 'heaptrack_support' branch
DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support
TARGET := prod-with-heaptrack
TARGET := heaptrack-build
ifeq ($(HEAPTRACKER_INJECT), 1)
# the Nim compiler will load 'libheaptrack_inject.so'
@ -152,6 +152,12 @@ endif
clean: | clean-libbacktrace
### Create nimble links (used when building with Nix)
nimbus-build-system-nimble-dir:
NIMBLE_DIR="$(CURDIR)/$(NIMBLE_DIR)" \
PWD_CMD="$(PWD)" \
$(CURDIR)/scripts/generate_nimble_links.sh
##################
## RLN ##
@ -159,7 +165,7 @@ clean: | clean-libbacktrace
.PHONY: librln
LIBRLN_BUILDDIR := $(CURDIR)/vendor/zerokit
LIBRLN_VERSION := v0.5.1
LIBRLN_VERSION := v0.7.0
ifeq ($(detected_OS),Windows)
LIBRLN_FILE := rln.lib
@ -334,6 +340,17 @@ docker-image:
--target $(TARGET) \
--tag $(DOCKER_IMAGE_NAME) .
docker-quick-image: MAKE_TARGET ?= wakunode2
docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION)
docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG)
docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm
docker-quick-image: | build deps librln wakunode2
docker build \
--build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
--tag $(DOCKER_IMAGE_NAME) \
--file docker/binaries/Dockerfile.bn.amd64 \
.
docker-push:
docker push $(DOCKER_IMAGE_NAME)
@ -361,6 +378,14 @@ docker-liteprotocoltester:
--file apps/liteprotocoltester/Dockerfile.liteprotocoltester.compile \
.
docker-quick-liteprotocoltester: DOCKER_LPT_TAG ?= latest
docker-quick-liteprotocoltester: DOCKER_LPT_NAME ?= wakuorg/liteprotocoltester:$(DOCKER_LPT_TAG)
docker-quick-liteprotocoltester: | liteprotocoltester
docker build \
--tag $(DOCKER_LPT_NAME) \
--file apps/liteprotocoltester/Dockerfile.liteprotocoltester \
.
docker-liteprotocoltester-push:
docker push $(DOCKER_LPT_NAME)

View File

@ -21,6 +21,13 @@ The standard developer tools, including a C compiler, GNU Make, Bash, and Git. M
> In some distributions (Fedora linux for example), you may need to install `which` utility separately. Nimbus build system is relying on it.
You'll also need an installation of Rust and its toolchain (specifically `rustc` and `cargo`).
The easiest way to install these, is using `rustup`:
```bash
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
```
### Wakunode
```bash
@ -126,6 +133,9 @@ Binary will be created as `<path to your test file.nim>.bin` under the `build` d
make test/tests/common/test_enr_builder.nim
```
### Testing against `js-waku`
Refer to [js-waku repo](https://github.com/waku-org/js-waku/tree/master/packages/tests) for instructions.
## Formatting
Nim files are expected to be formatted using the [`nph`](https://github.com/arnetheduck/nph) version present in `vendor/nph`.

View File

@ -381,7 +381,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
if conf.relay:
let shards =
conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
await node.mountRelay(shards)
(await node.mountRelay(shards)).isOkOr:
echo "failed to mount relay: " & error
return
await node.mountLibp2pPing()
@ -535,7 +537,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
node.subscribe(
(kind: PubsubSub, topic: DefaultPubsubTopic), some(WakuRelayHandler(handler))
)
).isOkOr:
error "failed to subscribe to pubsub topic",
topic = DefaultPubsubTopic, error = error
if conf.rlnRelay:
info "WakuRLNRelay is enabled"
@ -553,14 +557,18 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
echo "rln-relay preparation is in progress..."
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
dynamic: conf.rlnRelayDynamic,
credIndex: conf.rlnRelayCredIndex,
chainId: conf.rlnRelayChainId,
ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
creds: some(
RlnRelayCreds(
path: conf.rlnRelayCredPath, password: conf.rlnRelayCredPassword
)
),
userMessageLimit: conf.rlnRelayUserMessageLimit,
epochSizeSec: conf.rlnEpochSizeSec,
treePath: conf.rlnRelayTreePath,
)
waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))

View File

@ -18,7 +18,8 @@ type
prod
test
EthRpcUrl = distinct string
EthRpcUrl* = distinct string
Chat2Conf* = object ## General node config
logLevel* {.
desc: "Sets the log level.", defaultValue: LogLevel.INFO, name: "log-level"
@ -213,6 +214,13 @@ type
name: "rln-relay"
.}: bool
rlnRelayChainId* {.
desc:
"Chain ID of the provided contract (optional, will fetch from RPC provider if not used)",
defaultValue: 0,
name: "rln-relay-chain-id"
.}: uint
rlnRelayCredPath* {.
desc: "The path for peristing rln-relay credential",
defaultValue: "",
@ -241,11 +249,12 @@ type
name: "rln-relay-id-commitment-key"
.}: string
rlnRelayEthClientAddress* {.
desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
defaultValue: "http://localhost:8540/",
ethClientUrls* {.
desc:
"HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
defaultValue: newSeq[EthRpcUrl](0),
name: "rln-relay-eth-client-address"
.}: EthRpcUrl
.}: seq[EthRpcUrl]
rlnRelayEthContractAddress* {.
desc: "Address of membership contract on an Ethereum testnet",
@ -273,6 +282,12 @@ type
name: "rln-relay-epoch-sec"
.}: uint64
rlnRelayTreePath* {.
desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
defaultValue: "",
name: "rln-relay-tree-path"
.}: string
# NOTE: Keys are different in nim-libp2p
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
try:

View File

@ -215,7 +215,10 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
# Always mount relay for bridge
# `triggerSelf` is false on a `bridge` to avoid duplicates
await cmb.nodev2.mountRelay()
(await cmb.nodev2.mountRelay()).isOkOr:
error "failed to mount relay", error = error
return
cmb.nodev2.wakuRelay.triggerSelf = false
# Bridging
@ -229,7 +232,9 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
except:
error "exception in relayHandler: " & getCurrentExceptionMsg()
cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error
return
proc stop*(cmb: Chat2MatterBridge) {.async: (raises: [Exception]).} =
info "Stopping Chat2MatterBridge"

View File

@ -12,16 +12,16 @@ MIN_MESSAGE_SIZE=15Kb
MAX_MESSAGE_SIZE=145Kb
## for wakusim
#PUBSUB=/waku/2/rs/66/0
#SHARD=0
#CONTENT_TOPIC=/tester/2/light-pubsub-test/wakusim
#CLUSTER_ID=66
## for status.prod
PUBSUB=/waku/2/rs/16/32
#SHARDS=32
CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet
CLUSTER_ID=16
## for TWN
#PUBSUB=/waku/2/rs/1/4
#SHARD=4
#CONTENT_TOPIC=/tester/2/light-pubsub-test/twn
#CLUSTER_ID=1

View File

@ -55,6 +55,8 @@ RUN chmod +x /usr/bin/liteprotocoltester
FROM base_lpt AS standalone_lpt
COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node.sh /usr/bin/
COPY --from=nim-build /app/apps/liteprotocoltester/run_tester_node_on_fleet.sh /usr/bin/
RUN chmod +x /usr/bin/run_tester_node.sh
ENTRYPOINT ["/usr/bin/run_tester_node.sh", "/usr/bin/liteprotocoltester"]

View File

@ -127,7 +127,7 @@ Run a SENDER role liteprotocoltester and a RECEIVER role one on different termin
| ---: | :--- | :--- |
| NUM_MESSAGES | Number of message to publish, 0 means infinite | 120 |
| MESSAGE_INTERVAL_MILLIS | Frequency of messages in milliseconds | 1000 |
| PUBSUB | Used pubsub_topic for testing | /waku/2/rs/66/0 |
| SHARD | Used shard for testing | 0 |
| CONTENT_TOPIC | content_topic for testing | /tester/1/light-pubsub-example/proto |
| CLUSTER_ID | cluster_id of the network | 16 |
| START_PUBLISHING_AFTER_SECS | Delay in seconds before starting to publish to let service node connected | 5 |
@ -272,7 +272,7 @@ export NUM_MESSAGES=200
export MESSAGE_INTERVAL_MILLIS=1000
export MIN_MESSAGE_SIZE=15Kb
export MAX_MESSAGE_SIZE=145Kb
export PUBSUB=/waku/2/rs/16/32
export SHARD=32
export CONTENT_TOPIC=/tester/2/light-pubsub-test/fleet
export CLUSTER_ID=16
@ -307,7 +307,7 @@ export NUM_MESSAGES=300
export MESSAGE_INTERVAL_MILLIS=7000
export MIN_MESSAGE_SIZE=15Kb
export MAX_MESSAGE_SIZE=145Kb
export PUBSUB=/waku/2/rs/1/4
export SHARD=4
export CONTENT_TOPIC=/tester/2/light-pubsub-test/twn
export CLUSTER_ID=1

View File

@ -42,7 +42,7 @@ proc `$`*(cap: Capabilities): string =
proc allPeers(pm: PeerManager): string =
var allStr: string = ""
for idx, peer in pm.wakuPeerStore.peers():
for idx, peer in pm.switch.peerStore.peers():
allStr.add(
" " & $idx & ". | " & constructMultiaddrStr(peer) & " | agent: " &
peer.getAgent() & " | protos: " & $peer.protocols & " | caps: " &
@ -51,10 +51,10 @@ proc allPeers(pm: PeerManager): string =
return allStr
proc logSelfPeers*(pm: PeerManager) =
let selfLighpushPeers = pm.wakuPeerStore.getPeersByProtocol(WakuLightPushCodec)
let selfRelayPeers = pm.wakuPeerStore.getPeersByProtocol(WakuRelayCodec)
let selfFilterPeers = pm.wakuPeerStore.getPeersByProtocol(WakuFilterSubscribeCodec)
let selfPxPeers = pm.wakuPeerStore.getPeersByProtocol(WakuPeerExchangeCodec)
let selfLighpushPeers = pm.switch.peerStore.getPeersByProtocol(WakuLightPushCodec)
let selfRelayPeers = pm.switch.peerStore.getPeersByProtocol(WakuRelayCodec)
let selfFilterPeers = pm.switch.peerStore.getPeersByProtocol(WakuFilterSubscribeCodec)
let selfPxPeers = pm.switch.peerStore.getPeersByProtocol(WakuPeerExchangeCodec)
let printable = catch:
"""*------------------------------------------------------------------------------------------*

View File

@ -16,7 +16,7 @@ x-rln-environment: &rln_env
x-test-running-conditions: &test_running_conditions
NUM_MESSAGES: ${NUM_MESSAGES:-120}
MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}"
PUBSUB: ${PUBSUB:-/waku/2/rs/66/0}
SHARD: ${SHARD:-0}
CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim}
CLUSTER_ID: ${CLUSTER_ID:-66}
MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb}

View File

@ -16,7 +16,7 @@ x-rln-environment: &rln_env
x-test-running-conditions: &test_running_conditions
NUM_MESSAGES: ${NUM_MESSAGES:-120}
MESSAGE_INTERVAL_MILLIS: "${MESSAGE_INTERVAL_MILLIS:-1000}"
PUBSUB: ${PUBSUB:-/waku/2/rs/66/0}
SHARD: ${SHARD:-0}
CONTENT_TOPIC: ${CONTENT_TOPIC:-/tester/2/light-pubsub-test/wakusim}
CLUSTER_ID: ${CLUSTER_ID:-66}
MIN_MESSAGE_SIZE: ${MIN_MESSAGE_SIZE:-1Kb}

View File

@ -130,7 +130,9 @@ proc setupAndSubscribe*(
var stats: PerPeerStatistics
actualFilterPeer = servicePeer
let pushHandler = proc(pubsubTopic: PubsubTopic, message: WakuMessage) {.async.} =
let pushHandler = proc(
pubsubTopic: PubsubTopic, message: WakuMessage
): Future[void] {.async, closure.} =
let payloadStr = string.fromBytes(message.payload)
let testerMessage = js.Json.decode(payloadStr, ProtocolTesterMessage)
let msgHash = computeMessageHash(pubsubTopic, message).to0xHex
@ -163,7 +165,7 @@ proc setupAndSubscribe*(
if conf.numMessages > 0 and
waitFor stats.checkIfAllMessagesReceived(maxWaitForLastMessage):
waitFor unsubscribe(wakuNode, conf.pubsubTopics[0], conf.contentTopics[0])
waitFor unsubscribe(wakuNode, conf.getPubsubTopic(), conf.contentTopics[0])
info "All messages received. Exiting."
## for gracefull shutdown through signal hooks
@ -176,5 +178,5 @@ proc setupAndSubscribe*(
# Start maintaining subscription
asyncSpawn maintainSubscription(
wakuNode, conf.pubsubTopics[0], conf.contentTopics[0], conf.fixedServicePeer
wakuNode, conf.getPubsubTopic(), conf.contentTopics[0], conf.fixedServicePeer
)

View File

@ -4,7 +4,7 @@ NUM_MESSAGES=300
MESSAGE_INTERVAL_MILLIS=1000
MIN_MESSAGE_SIZE=15Kb
MAX_MESSAGE_SIZE=145Kb
PUBSUB=/waku/2/rs/16/32
SHARD=32
CONTENT_TOPIC=/tester/2/light-pubsub-test-at-infra/status-prod
CLUSTER_ID=16
LIGHTPUSH_BOOTSTRAP=enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0

View File

@ -145,13 +145,20 @@ proc publishMessages(
lightpushContentTopic,
renderMsgSize,
)
let publishStartTime = Moment.now()
let wlpRes = await wakuNode.legacyLightpushPublish(
some(lightpushPubsubTopic), message, actualServicePeer
)
let publishDuration = Moment.now() - publishStartTime
let msgHash = computeMessageHash(lightpushPubsubTopic, message).to0xHex
if wlpRes.isOk():
lpt_publish_duration_seconds.observe(publishDuration.milliseconds.float / 1000)
sentMessages[messagesSent] = (hash: msgHash, relayed: true)
notice "published message using lightpush",
index = messagesSent + 1,
@ -251,7 +258,7 @@ proc setupAndPublish*(
asyncSpawn publishMessages(
wakuNode,
servicePeer,
conf.pubsubTopics[0],
conf.getPubsubTopic(),
conf.contentTopics[0],
conf.numMessages,
(min: parsedMinMsgSize, max: parsedMaxMsgSize),

View File

@ -99,7 +99,7 @@ when isMainModule:
wakuConf.dnsAddrs = true
wakuConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
wakuConf.pubsubTopics = conf.pubsubTopics
wakuConf.shards = @[conf.shard]
wakuConf.contentTopics = conf.contentTopics
wakuConf.clusterId = conf.clusterId
## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc...
@ -118,6 +118,7 @@ when isMainModule:
wakuConf.store = false
wakuConf.rest = false
wakuConf.relayServiceRatio = "40:60"
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
# It will always be called from main thread anyway.
@ -126,7 +127,7 @@ when isMainModule:
nodeHealthMonitor = WakuNodeHealthMonitor()
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
let restServer = rest_server_builder.startRestServerEsentials(
let restServer = rest_server_builder.startRestServerEssentials(
nodeHealthMonitor, wakuConf
).valueOr:
error "Starting esential REST server failed.", error = $error
@ -202,10 +203,8 @@ when isMainModule:
var codec = WakuLightPushCodec
# mounting relevant client, for PX filter client must be mounted ahead
if conf.testFunc == TesterFunctionality.SENDER:
wakuApp.node.mountLegacyLightPushClient()
codec = WakuLightPushCodec
else:
waitFor wakuApp.node.mountFilterClient()
codec = WakuFilterSubscribeCodec
var lookForServiceNode = false

View File

@ -47,3 +47,10 @@ declarePublicGauge lpt_px_peers,
declarePublicGauge lpt_dialed_peers, "Number of peers successfully dialed", ["agent"]
declarePublicGauge lpt_dial_failures, "Number of dial failures by cause", ["agent"]
declarePublicHistogram lpt_publish_duration_seconds,
"duration to lightpush messages",
buckets = [
0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0,
15.0, 20.0, 30.0, Inf,
]

View File

@ -5,10 +5,10 @@ IP=$(ip a | grep "inet " | grep -Fv 127.0.0.1 | sed 's/.*inet \([^/]*\).*/\1/')
echo "Service node IP: ${IP}"
if [ -n "${PUBSUB}" ]; then
PUBSUB=--pubsub-topic="${PUBSUB}"
if [ -n "${SHARD}" ]; then
SHARD=--shard="${SHARD}"
else
PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
SHARD=--shard="0"
fi
if [ -n "${CLUSTER_ID}" ]; then
@ -59,5 +59,5 @@ exec /usr/bin/wakunode\
--metrics-server-port=8003\
--metrics-server-address=0.0.0.0\
--nat=extip:${IP}\
${PUBSUB}\
${SHARD}\
${CLUSTER_ID}

View File

@ -93,10 +93,10 @@ else
FULL_NODE=--bootstrap-node="${SERIVCE_NODE_ADDR}"
fi
if [ -n "${PUBSUB}" ]; then
PUBSUB=--pubsub-topic="${PUBSUB}"
if [ -n "${SHARD}" ]; then
SHARD=--shard="${SHARD}"
else
PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
SHARD=--shard="0"
fi
if [ -n "${CONTENT_TOPIC}" ]; then
@ -128,19 +128,25 @@ if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
fi
if [ -n "${LOG_LEVEL}" ]; then
LOG_LEVEL=--log-level=${LOG_LEVEL}
else
LOG_LEVEL=--log-level=INFO
fi
echo "Running binary: ${BINARY_PATH}"
echo "Tester node: ${FUNCTION}"
echo "Using service node: ${SERIVCE_NODE_ADDR}"
echo "My external IP: ${MY_EXT_IP}"
exec "${BINARY_PATH}"\
--log-level=INFO\
--nat=extip:${MY_EXT_IP}\
--test-peers\
${LOG_LEVEL}\
${FULL_NODE}\
${MESSAGE_INTERVAL_MILLIS}\
${NUM_MESSAGES}\
${PUBSUB}\
${SHARD}\
${CONTENT_TOPIC}\
${CLUSTER_ID}\
${FUNCTION}\

View File

@ -48,10 +48,10 @@ fi
MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org)
if [ -n "${PUBSUB}" ]; then
PUBSUB=--pubsub-topic="${PUBSUB}"
if [ -n "${SHARD}" ]; then
SHARD=--shard="${SHARD}"
else
PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
SHARD=--shard="0"
fi
if [ -n "${CONTENT_TOPIC}" ]; then
@ -83,19 +83,25 @@ if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
fi
if [ -n "${LOG_LEVEL}" ]; then
LOG_LEVEL=--log-level=${LOG_LEVEL}
else
LOG_LEVEL=--log-level=INFO
fi
echo "Running binary: ${BINARY_PATH}"
echo "Node function is: ${FUNCTION}"
echo "Using service/bootstrap node as: ${NODE_ARG}"
echo "My external IP: ${MY_EXT_IP}"
exec "${BINARY_PATH}"\
--log-level=INFO\
--nat=extip:${MY_EXT_IP}\
--test-peers\
${LOG_LEVEL}\
${NODE_ARG}\
${MESSAGE_INTERVAL_MILLIS}\
${NUM_MESSAGES}\
${PUBSUB}\
${SHARD}\
${CONTENT_TOPIC}\
${CLUSTER_ID}\
${FUNCTION}\

View File

@ -48,10 +48,10 @@ fi
MY_EXT_IP=$(wget -qO- --no-check-certificate https://api4.ipify.org)
if [ -n "${PUBSUB}" ]; then
PUBSUB=--pubsub-topic="${PUBSUB}"
if [ -n "${SHARD}" ]; then
SHARD=--shard=${SHARD}
else
PUBSUB=--pubsub-topic="/waku/2/rs/66/0"
SHARD=--shard=0
fi
if [ -n "${CONTENT_TOPIC}" ]; then
@ -79,8 +79,14 @@ if [ -n "${NUM_MESSAGES}" ]; then
NUM_MESSAGES=--num-messages="${NUM_MESSAGES}"
fi
if [ -n "${DELAY_MESSAGES}" ]; then
DELAY_MESSAGES=--delay-messages="${DELAY_MESSAGES}"
if [ -n "${MESSAGE_INTERVAL_MILLIS}" ]; then
MESSAGE_INTERVAL_MILLIS=--message-interval="${MESSAGE_INTERVAL_MILLIS}"
fi
if [ -n "${LOG_LEVEL}" ]; then
LOG_LEVEL=--log-level=${LOG_LEVEL}
else
LOG_LEVEL=--log-level=INFO
fi
echo "Running binary: ${BINARY_PATH}"
@ -89,12 +95,12 @@ echo "Using service/bootstrap node as: ${NODE_ARG}"
echo "My external IP: ${MY_EXT_IP}"
exec "${BINARY_PATH}"\
--log-level=INFO\
--nat=extip:${MY_EXT_IP}\
${LOG_LEVEL}\
${NODE_ARG}\
${DELAY_MESSAGES}\
${MESSAGE_INTERVAL_MILLIS}\
${NUM_MESSAGES}\
${PUBSUB}\
${SHARD}\
${CONTENT_TOPIC}\
${CLUSTER_ID}\
${FUNCTION}\

View File

@ -61,7 +61,7 @@ proc selectRandomCapablePeer*(
elif codec.contains("filter"):
cap = Capabilities.Filter
var supportivePeers = pm.wakuPeerStore.getPeersByCapability(cap)
var supportivePeers = pm.switch.peerStore.getPeersByCapability(cap)
trace "Found supportive peers count", count = supportivePeers.len()
trace "Found supportive peers", supportivePeers = $supportivePeers
@ -102,7 +102,7 @@ proc tryCallAllPxPeers*(
elif codec.contains("filter"):
capability = Capabilities.Filter
var supportivePeers = pm.wakuPeerStore.getPeersByCapability(capability)
var supportivePeers = pm.switch.peerStore.getPeersByCapability(capability)
lpt_px_peers.set(supportivePeers.len)
debug "Found supportive peers count", count = supportivePeers.len()
@ -189,14 +189,14 @@ proc pxLookupServiceNode*(
if conf.testPeers:
let peersOpt =
await tryCallAllPxPeers(node.peerManager, codec, conf.pubsubTopics[0])
await tryCallAllPxPeers(node.peerManager, codec, conf.getPubsubTopic())
if peersOpt.isSome():
info "Found service peers for codec",
codec = codec, peer_count = peersOpt.get().len()
return ok(peersOpt.get().len > 0)
else:
let peerOpt =
await selectRandomCapablePeer(node.peerManager, codec, conf.pubsubTopics[0])
await selectRandomCapablePeer(node.peerManager, codec, conf.getPubsubTopic())
if peerOpt.isSome():
info "Found service peer for codec", codec = codec, peer = peerOpt.get()
return ok(true)
@ -215,7 +215,7 @@ proc selectRandomServicePeer*(
if actualPeer.isSome():
alreadyUsedServicePeers.add(actualPeer.get())
let supportivePeers = pm.wakuPeerStore.getPeersByProtocol(codec).filterIt(
let supportivePeers = pm.switch.peerStore.getPeersByProtocol(codec).filterIt(
it notin alreadyUsedServicePeers
)
if supportivePeers.len == 0:

View File

@ -18,6 +18,7 @@ import
common/logging,
factory/external_config,
waku_core,
waku_core/topics/pubsub_topic,
]
export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet
@ -95,18 +96,9 @@ type LiteProtocolTesterConf* = object
name: "message-interval"
.}: uint32
pubsubTopics* {.
desc: "Default pubsub topic to subscribe to. Argument may be repeated.",
defaultValue: @[LitePubsubTopic],
name: "pubsub-topic"
.}: seq[PubsubTopic]
shard* {.desc: "Shards index to subscribe to. ", defaultValue: 0, name: "shard".}:
uint16
## TODO: extend lite protocol tester configuration based on testing needs
# shards* {.
# desc: "Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
# defaultValue: @[],
# name: "shard"
# .}: seq[uint16]
contentTopics* {.
desc: "Default content topic to subscribe to. Argument may be repeated.",
defaultValue: @[LiteContentTopic],
@ -195,4 +187,7 @@ proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] =
except CatchableError:
err(getCurrentExceptionMsg())
proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic =
return $RelayShard(clusterId: conf.clusterId, shardId: conf.shard)
{.pop.}

View File

@ -462,7 +462,7 @@ proc initAndStartApp(
nodeBuilder.withNodeKey(key)
nodeBuilder.withRecord(record)
nodeBUilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))
nodeBuilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))
nodeBuilder.withPeerManagerConfig(
maxConnections = MaxConnectedPeers,
@ -554,7 +554,9 @@ proc subscribeAndHandleMessages(
else:
msgPerContentTopic[msg.contentTopic] = 1
node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler)))
node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
error "failed to subscribe to pubsub topic", pubsubTopic, error
quit(1)
when isMainModule:
# known issue: confutils.nim(775, 17) Error: can raise an unlisted exception: ref IOError
@ -619,7 +621,10 @@ when isMainModule:
let (node, discv5) = nodeRes.get()
waitFor node.mountRelay()
(waitFor node.mountRelay()).isOkOr:
error "failed to mount waku relay protocol: ", err = error
quit 1
waitFor node.mountLibp2pPing()
var onFatalErrorAction = proc(msg: string) {.gcsafe, closure.} =
@ -630,14 +635,13 @@ when isMainModule:
if conf.rlnRelay and conf.rlnRelayEthContractAddress != "":
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: some(uint(0)),
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: "",
rlnRelayCredPassword: "",
rlnRelayTreePath: conf.rlnRelayTreePath,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
dynamic: conf.rlnRelayDynamic,
credIndex: some(uint(0)),
ethContractAddress: conf.rlnRelayEthContractAddress,
ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
treePath: conf.rlnRelayTreePath,
epochSizeSec: conf.rlnEpochSizeSec,
creds: none(RlnRelayCreds),
onFatalErrorAction: onFatalErrorAction,
)

View File

@ -8,7 +8,7 @@ import
stew/shims/net,
regex
type EthRpcUrl = distinct string
type EthRpcUrl* = distinct string
type NetworkMonitorConf* = object
logLevel* {.
@ -82,11 +82,12 @@ type NetworkMonitorConf* = object
name: "rln-relay-tree-path"
.}: string
rlnRelayEthClientAddress* {.
desc: "HTTP address of an Ethereum testnet client e.g., http://localhost:8540/",
defaultValue: "http://localhost:8540/",
ethClientUrls* {.
desc:
"HTTP address of an Ethereum testnet client e.g., http://localhost:8540/. Argument may be repeated.",
defaultValue: newSeq[EthRpcUrl](0),
name: "rln-relay-eth-client-address"
.}: EthRpcUrl
.}: seq[EthRpcUrl]
rlnRelayEthContractAddress* {.
desc: "Address of membership contract on an Ethereum testnet",

View File

@ -9,7 +9,7 @@ x-logging: &logging
x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-"
x-rln-environment: &rln_env
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3}
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8}
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"

View File

@ -24,7 +24,7 @@ fi
docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \
--rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \
--rln-relay-eth-private-key=${ETH_TESTNET_KEY} \
--rln-relay-eth-contract-address=0xCB33Aa5B38d79E3D9Fa8B10afF38AA201399a7e3 \
--rln-relay-eth-contract-address=0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8 \
--rln-relay-cred-path=/keystore/keystore.json \
--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \
--rln-relay-user-message-limit=20 \

View File

@ -1,5 +1,5 @@
import
std/[strutils, sequtils, tables],
std/[strutils, sequtils, tables, strformat],
confutils,
chronos,
stew/shims/net,
@ -21,6 +21,14 @@ const ProtocolsTable = {
"relay": "/vac/waku/relay/",
"lightpush": "/vac/waku/lightpush/",
"filter": "/vac/waku/filter-subscribe/2",
"filter-push": "/vac/waku/filter-push/",
"ipfs-id": "/ipfs/id/",
"autonat": "/libp2p/autonat/",
"circuit-relay": "/libp2p/circuit/relay/",
"metadata": "/vac/waku/metadata/",
"rendezvous": "/rendezvous/",
"ipfs-ping": "/ipfs/ping/",
"peer-exchange": "/vac/waku/peer-exchange/",
}.toTable
const WebSocketPortOffset = 1000
@ -105,21 +113,30 @@ proc parseCmdArg*(T: type chronos.Duration, p: string): T =
proc completeCmdArg*(T: type chronos.Duration, val: string): seq[string] =
return @[]
# checks if rawProtocols (skipping version) are supported in nodeProtocols
proc areProtocolsSupported(
rawProtocols: seq[string], nodeProtocols: seq[string]
toValidateProtocols: seq[string], nodeProtocols: seq[string]
): bool =
## Checks if all toValidateProtocols are contained in nodeProtocols.
## nodeProtocols contains the full list of protocols currently informed by the node under analysis.
## toValidateProtocols contains the protocols, without version number, that we want to check if they are supported by the node.
var numOfSupportedProt: int = 0
for nodeProtocol in nodeProtocols:
for rawProtocol in rawProtocols:
let protocolTag = ProtocolsTable[rawProtocol]
for rawProtocol in toValidateProtocols:
let protocolTag = ProtocolsTable[rawProtocol]
debug "Checking if protocol is supported", expected_protocol_tag = protocolTag
var protocolSupported = false
for nodeProtocol in nodeProtocols:
if nodeProtocol.startsWith(protocolTag):
info "Supported protocol ok", expected = protocolTag, supported = nodeProtocol
info "The node supports the protocol", supported_protocol = nodeProtocol
numOfSupportedProt += 1
protocolSupported = true
break
if numOfSupportedProt == rawProtocols.len:
if not protocolSupported:
error "The node does not support the protocol", expected_protocol = protocolTag
if numOfSupportedProt == toValidateProtocols.len:
return true
return false
@ -167,7 +184,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
let peerRes = parsePeerInfo(conf.address)
if peerRes.isErr():
error "Couldn't parse 'conf.address'", error = peerRes.error
return 1
quit(QuitFailure)
let peer = peerRes.value
@ -195,13 +212,19 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
let netConfig = NetConfig.init(
bindIp = bindIp,
bindPort = nodeTcpPort,
wsBindPort = wsBindPort,
wsBindPort = some(wsBindPort),
wsEnabled = isWs,
wssEnabled = isWss,
)
var enrBuilder = EnrBuilder.init(nodeKey)
enrBuilder.withWakuRelaySharding(
RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
).isOkOr:
error "could not initialize ENR with shards", error
quit(QuitFailure)
let recordRes = enrBuilder.build()
let record =
if recordRes.isErr():
@ -217,7 +240,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
createDir(CertsDirectory)
if generateSelfSignedCertificate(certPath, keyPath) != 0:
error "Error generating key and certificate"
return 1
quit(QuitFailure)
builder.withRecord(record)
builder.withNetworkConfiguration(netConfig.tryGet())
@ -232,7 +255,11 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
await mountLibp2pPing(node)
except CatchableError:
error "failed to mount libp2p ping protocol: " & getCurrentExceptionMsg()
return 1
quit(QuitFailure)
node.mountMetadata(conf.clusterId).isOkOr:
error "failed to mount metadata protocol", error
quit(QuitFailure)
await node.start()
@ -243,23 +270,24 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
let timedOut = not await node.connectToNodes(@[peer]).withTimeout(conf.timeout)
if timedOut:
error "Timedout after", timeout = conf.timeout
return 1
quit(QuitFailure)
let lp2pPeerStore = node.switch.peerStore
let conStatus = node.peerManager.wakuPeerStore[ConnectionBook][peer.peerId]
let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId]
if conf.ping:
discard await pingFut
if conStatus in [Connected, CanConnect]:
let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId]
if not areProtocolsSupported(conf.protocols, nodeProtocols):
error "Not all protocols are supported",
expected = conf.protocols, supported = nodeProtocols
return 1
quit(QuitFailure)
elif conStatus == CannotConnect:
error "Could not connect", peerId = peer.peerId
return 1
quit(QuitFailure)
return 0
when isMainModule:

View File

@ -38,17 +38,19 @@ when isMainModule:
const versionString = "version / git commit hash: " & waku.git_version
var conf = WakuNodeConf.load(version = versionString).valueOr:
var wakuNodeConf = WakuNodeConf.load(version = versionString).valueOr:
error "failure while loading the configuration", error = error
quit(QuitFailure)
## Also called within Waku.new. The call to startRestServerEsentials needs the following line
logging.setupLog(conf.logLevel, conf.logFormat)
## Also called within Waku.new. The call to startRestServerEssentials needs the following line
logging.setupLog(wakuNodeConf.logLevel, wakuNodeConf.logFormat)
case conf.cmd
case wakuNodeConf.cmd
of generateRlnKeystore:
let conf = wakuNodeConf.toKeystoreGeneratorConf()
doRlnKeystoreGenerator(conf)
of inspectRlnDb:
let conf = wakuNodeConf.toInspectRlnDbConf()
doInspectRlnDb(conf)
of noCommand:
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
@ -58,15 +60,20 @@ when isMainModule:
nodeHealthMonitor = WakuNodeHealthMonitor()
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
var confCopy = conf
let restServer = rest_server_builder.startRestServerEsentials(
nodeHealthMonitor, confCopy
).valueOr:
error "Starting esential REST server failed.", error = $error
let conf = wakuNodeConf.toWakuConf().valueOr:
error "Waku configuration failed", error = error
quit(QuitFailure)
var waku = Waku.new(confCopy).valueOr:
var restServer: WakuRestServerRef = nil
if conf.restServerConf.isSome():
restServer = rest_server_builder.startRestServerEssentials(
nodeHealthMonitor, conf.restServerConf.get(), conf.portsShift
).valueOr:
error "Starting essential REST server failed.", error = $error
quit(QuitFailure)
var waku = Waku.new(conf).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
@ -78,15 +85,27 @@ when isMainModule:
error "Starting waku failed", error = error
quit(QuitFailure)
rest_server_builder.startRestServerProtocolSupport(
restServer, waku.node, waku.wakuDiscv5, confCopy
).isOkOr:
error "Starting protocols support REST server failed.", error = $error
quit(QuitFailure)
if conf.restServerConf.isSome():
rest_server_builder.startRestServerProtocolSupport(
restServer,
waku.node,
waku.wakuDiscv5,
conf.restServerConf.get(),
conf.relay,
conf.lightPush,
conf.clusterId,
conf.shards,
conf.contentTopics,
).isOkOr:
error "Starting protocols support REST server failed.", error = $error
quit(QuitFailure)
waku.metricsServer = waku_metrics.startMetricsServerAndLogging(confCopy).valueOr:
error "Starting monitoring and external interfaces failed", error = error
quit(QuitFailure)
if conf.metricsServerConf.isSome():
waku.metricsServer = waku_metrics.startMetricsServerAndLogging(
conf.metricsServerConf.get(), conf.portsShift
).valueOr:
error "Starting monitoring and external interfaces failed", error = error
quit(QuitFailure)
nodeHealthMonitor.setOverallHealth(HealthStatus.READY)

View File

@ -78,7 +78,7 @@ pipeline {
"--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " +
"--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
"--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
"--target=${params.HEAPTRACK ? "prod-with-heaptrack" : "prod"} ."
"--target=${params.HEAPTRACK ? "heaptrack-build" : "prod"} ."
)
} }
}

View File

@ -0,0 +1,18 @@
## App description
This is a very simple example that shows how to invoke libwaku functions from a C program.
## Build
1. Open terminal
2. cd to nwaku root folder
3. make cwaku_example -j8
This will create libwaku.so and cwaku_example binary within the build folder.
## Run
1. Open terminal
2. cd to nwaku root folder
3. export LD_LIBRARY_PATH=build
4. `./build/cwaku_example --host=0.0.0.0 --port=60001`
Use `./build/cwaku_example --help` to see some other options.

View File

@ -14,7 +14,6 @@
#include "base64.h"
#include "../../library/libwaku.h"
// Shared synchronization variables
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
@ -29,7 +28,6 @@ void waitForCallback() {
pthread_mutex_unlock(&mutex);
}
#define WAKU_CALL(call) \
do { \
int ret = call; \
@ -107,6 +105,13 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
return 0;
}
void signal_cond() {
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
void event_handler(int callerRet, const char* msg, size_t len, void* userData) {
@ -118,10 +123,7 @@ void event_handler(int callerRet, const char* msg, size_t len, void* userData) {
printf("Receiving event: %s\n", msg);
}
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
signal_cond();
}
void on_event_received(int callerRet, const char* msg, size_t len, void* userData) {
@ -142,6 +144,7 @@ void handle_content_topic(int callerRet, const char* msg, size_t len, void* user
contentTopic = malloc(len * sizeof(char) + 1);
strcpy(contentTopic, msg);
signal_cond();
}
char* publishResponse = NULL;
@ -158,33 +161,30 @@ void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userDat
#define MAX_MSG_SIZE 65535
void publish_message(char* pubsubTopic, const char* msg) {
void publish_message(const char* msg) {
char jsonWakuMsg[MAX_MSG_SIZE];
char *msgPayload = b64_encode(msg, strlen(msg));
WAKU_CALL( waku_content_topic(RET_OK,
WAKU_CALL( waku_content_topic(ctx,
"appName",
1,
"contentTopicName",
"encoding",
handle_content_topic,
userData) );
snprintf(jsonWakuMsg,
MAX_MSG_SIZE,
"{\"payload\":\"%s\",\"content_topic\":\"%s\"}",
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload, contentTopic);
free(msgPayload);
WAKU_CALL( waku_relay_publish(&ctx,
pubsubTopic,
WAKU_CALL( waku_relay_publish(ctx,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/,
event_handler,
userData) );
printf("waku relay response [%s]\n", publishResponse);
}
void show_help_and_exit() {
@ -194,20 +194,12 @@ void show_help_and_exit() {
void print_default_pubsub_topic(int callerRet, const char* msg, size_t len, void* userData) {
printf("Default pubsub topic: %s\n", msg);
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
signal_cond();
}
void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) {
printf("Git Version: %s\n", msg);
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
signal_cond();
}
// Beginning of UI program logic
@ -236,9 +228,6 @@ void handle_user_input() {
return;
}
int c;
while ( (c = getchar()) != '\n' && c != EOF ) { }
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
@ -247,7 +236,7 @@ void handle_user_input() {
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
WAKU_CALL( waku_relay_subscribe(&ctx,
WAKU_CALL( waku_relay_subscribe(ctx,
pubsubTopic,
event_handler,
userData) );
@ -262,21 +251,17 @@ void handle_user_input() {
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
WAKU_CALL(waku_connect(&ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
show_main_menu();
break;
case PUBLISH_MESSAGE_MENU:
{
printf("Indicate the Pubsubtopic:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
printf("Type the message tp publish:\n");
printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
publish_message(pubsubTopic, msg);
publish_message(msg);
show_main_menu();
}
@ -311,24 +296,24 @@ int main(int argc, char** argv) {
char jsonConfig[5000];
snprintf(jsonConfig, 5000, "{ \
\"clusterId\": 16, \
\"shards\": [ 1, 32, 64, 128, 256 ], \
\"listenAddress\": \"%s\", \
\"tcpPort\": %d, \
\"nodekey\": \"%s\", \
\"relay\": %s, \
\"store\": %s, \
\"storeMessageDbUrl\": \"%s\", \
\"storeMessageRetentionPolicy\": \"%s\", \
\"storeMaxNumDbConnections\": %d , \
\"logLevel\": \"DEBUG\", \
\"logLevel\": \"FATAL\", \
\"discv5Discovery\": true, \
\"discv5BootstrapNodes\": \
[\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \
\"discv5UdpPort\": 9999, \
\"dnsDiscoveryUrl\": \"enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im\", \
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
cfgNode.port,
cfgNode.key,
cfgNode.relay ? "true":"false",
cfgNode.store ? "true":"false",
cfgNode.storeDbUrl,
@ -351,14 +336,6 @@ int main(int argc, char** argv) {
WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) );
printf("Establishing connection with: %s\n", cfgNode.peers);
WAKU_CALL( waku_connect(ctx,
cfgNode.peers,
10000 /* timeoutMs */,
event_handler,
userData) );
WAKU_CALL( waku_relay_subscribe(ctx,
"/waku/2/rs/0/0",
event_handler,

18
examples/cpp/README.md Normal file
View File

@ -0,0 +1,18 @@
## App description
This is a very simple example that shows how to invoke libwaku functions from a C++ program.
## Build
1. Open terminal
2. cd to nwaku root folder
3. make cppwaku_example -j8
This will create libwaku.so and cppwaku_example binary within the build folder.
## Run
1. Open terminal
2. cd to nwaku root folder
3. export LD_LIBRARY_PATH=build
4. `./build/cppwaku_example --host=0.0.0.0 --port=60001`
Use `./build/cppwaku_example --help` to see some other options.

View File

@ -16,12 +16,34 @@
#include "base64.h"
#include "../../library/libwaku.h"
// Shared synchronization variables
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
int callback_executed = 0;
void waitForCallback() {
pthread_mutex_lock(&mutex);
while (!callback_executed) {
pthread_cond_wait(&cond, &mutex);
}
callback_executed = 0;
pthread_mutex_unlock(&mutex);
}
void signal_cond() {
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
#define WAKU_CALL(call) \
do { \
int ret = call; \
if (ret != 0) { \
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
} \
waitForCallback(); \
} while (0)
struct ConfigNode {
@ -78,6 +100,24 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) {
return 0;
}
void event_handler(const char* msg, size_t len) {
printf("Receiving event: %s\n", msg);
}
void handle_error(const char* msg, size_t len) {
printf("handle_error: %s\n", msg);
exit(1);
}
template <class F>
auto cify(F&& f) {
static F fn = std::forward<F>(f);
return [](int callerRet, const char* msg, size_t len, void* userData) {
signal_cond();
return fn(msg, len);
};
}
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
// Beginning of UI program logic
@ -98,7 +138,7 @@ void show_main_menu() {
printf("\t3.) Publish a message\n");
}
void handle_user_input() {
void handle_user_input(void* ctx) {
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
@ -106,9 +146,6 @@ void handle_user_input() {
return;
}
int c;
while ( (c = getchar()) != '\n' && c != EOF ) { }
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
@ -116,10 +153,14 @@ void handle_user_input() {
printf("Indicate the Pubsubtopic to subscribe:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
// if (!waku_relay_subscribe(pubsubTopic, &mResp)) {
// printf("Error subscribing to PubsubTopic: %s\n", mResp->data);
// }
// printf("Waku Relay subscription response: %s\n", mResp->data);
WAKU_CALL( waku_relay_subscribe(ctx,
pubsubTopic,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
printf("The subscription went well\n");
show_main_menu();
}
@ -130,41 +171,51 @@ void handle_user_input() {
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
// if (!waku_connect(peerAddr, 10000 /* timeoutMs */, &mResp)) {
// printf("Couldn't connect to the remote peer: %s\n", mResp->data);
// }
WAKU_CALL( waku_connect(ctx,
peerAddr,
10000 /* timeoutMs */,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr));
show_main_menu();
break;
case PUBLISH_MESSAGE_MENU:
{
printf("Indicate the Pubsubtopic:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
printf("Type the message tp publish:\n");
printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
char jsonWakuMsg[1024];
char jsonWakuMsg[2048];
std::vector<char> msgPayload;
b64_encode(msg, strlen(msg), msgPayload);
// waku_content_topic("appName",
// 1,
// "contentTopicName",
// "encoding",
// &mResp);
std::string contentTopic;
waku_content_topic(ctx,
"appName",
1,
"contentTopicName",
"encoding",
cify([&contentTopic](const char* msg, size_t len) {
contentTopic = msg;
}),
nullptr);
// snprintf(jsonWakuMsg,
// 1024,
// "{\"payload\":\"%s\",\"content_topic\":\"%s\"}",
// msgPayload, mResp->data);
snprintf(jsonWakuMsg,
2048,
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload.data(), contentTopic.c_str());
// free(msgPayload);
WAKU_CALL( waku_relay_publish(ctx,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
// waku_relay_publish(pubsubTopic, jsonWakuMsg, 10000 /*timeout ms*/, &mResp);
// printf("waku relay response [%s]\n", mResp->data);
show_main_menu();
}
break;
@ -181,23 +232,6 @@ void show_help_and_exit() {
exit(1);
}
void event_handler(const char* msg, size_t len) {
printf("Receiving message %s\n", msg);
}
void handle_error(const char* msg, size_t len) {
printf("Error: %s\n", msg);
exit(1);
}
template <class F>
auto cify(F&& f) {
static F fn = std::forward<F>(f);
return [](const char* msg, size_t len) {
return fn(msg, len);
};
}
int main(int argc, char** argv) {
struct ConfigNode cfgNode;
// default values
@ -212,60 +246,86 @@ int main(int argc, char** argv) {
show_help_and_exit();
}
char jsonConfig[1024];
snprintf(jsonConfig, 1024, "{ \
char jsonConfig[2048];
snprintf(jsonConfig, 2048, "{ \
\"host\": \"%s\", \
\"port\": %d, \
\"key\": \"%s\", \
\"relay\": %s, \
\"logLevel\": \"DEBUG\" \
\"relay\": true, \
\"clusterId\": 16, \
\"shards\": [ 1, 32, 64, 128, 256 ], \
\"logLevel\": \"FATAL\", \
\"discv5Discovery\": true, \
\"discv5BootstrapNodes\": \
[\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \
\"discv5UdpPort\": 9999, \
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
cfgNode.port,
cfgNode.key,
cfgNode.relay ? "true":"false");
cfgNode.port);
WAKU_CALL(waku_new(jsonConfig, cify([](const char* msg, size_t len) {
std::cout << "Error: " << msg << std::endl;
exit(1);
})));
void* ctx =
waku_new(jsonConfig,
cify([](const char* msg, size_t len) {
std::cout << "waku_new feedback: " << msg << std::endl;
}
),
nullptr
);
waitForCallback();
// example on how to retrieve a value from the `libwaku` callback.
std::string defaultPubsubTopic;
WAKU_CALL(waku_default_pubsub_topic(cify([&defaultPubsubTopic](const char* msg, size_t len) {
defaultPubsubTopic = msg;
})));
WAKU_CALL(
waku_default_pubsub_topic(
ctx,
cify([&defaultPubsubTopic](const char* msg, size_t len) {
defaultPubsubTopic = msg;
}
),
nullptr));
std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl;
WAKU_CALL(waku_version(cify([&](const char* msg, size_t len) {
std::cout << "Git Version: " << msg << std::endl;
})));
WAKU_CALL(waku_version(ctx,
cify([&](const char* msg, size_t len) {
std::cout << "Git Version: " << msg << std::endl;
}),
nullptr));
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
std::string pubsubTopic;
WAKU_CALL(waku_pubsub_topic("example", cify([&](const char* msg, size_t len) {
pubsubTopic = msg;
})));
WAKU_CALL(waku_pubsub_topic(ctx,
"example",
cify([&](const char* msg, size_t len) {
pubsubTopic = msg;
}),
nullptr));
std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl;
waku_set_event_callback(event_handler);
waku_start();
waku_set_event_callback(ctx,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr);
WAKU_CALL( waku_connect(cfgNode.peers,
10000 /* timeoutMs */,
handle_error) );
WAKU_CALL( waku_start(ctx,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr));
WAKU_CALL( waku_relay_subscribe(defaultPubsubTopic.c_str(),
handle_error) );
std::cout << "Establishing connection with: " << cfgNode.peers << std::endl;
WAKU_CALL(waku_connect(cfgNode.peers, 10000 /* timeoutMs */, handle_error));
WAKU_CALL( waku_relay_subscribe(ctx,
defaultPubsubTopic.c_str(),
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
show_main_menu();
while(1) {
handle_user_input();
handle_user_input(ctx);
}
}

View File

@ -86,7 +86,10 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
)
await node.start()
await node.mountRelay()
(await node.mountRelay()).isOkOr:
error "failed to mount relay", error = error
quit(1)
node.peerManager.start()
(await wakuDiscv5.start()).isOkOr:
@ -95,7 +98,7 @@ proc setupAndPublish(rng: ref HmacDrbgContext) {.async.} =
# wait for a minimum of peers to be connected, otherwise messages wont be gossiped
while true:
let numConnectedPeers = node.peerManager.wakuPeerStore[ConnectionBook].book
let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book
.values()
.countIt(it == Connected)
if numConnectedPeers >= 6:

View File

@ -84,7 +84,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
)
await node.start()
await node.mountRelay()
(await node.mountRelay()).isOkOr:
error "failed to mount relay", error = error
quit(1)
node.peerManager.start()
(await wakuDiscv5.start()).isOkOr:
@ -93,7 +95,7 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
# wait for a minimum of peers to be connected, otherwise messages wont be gossiped
while true:
let numConnectedPeers = node.peerManager.wakuPeerStore[ConnectionBook].book
let numConnectedPeers = node.peerManager.switch.peerStore[ConnectionBook].book
.values()
.countIt(it == Connected)
if numConnectedPeers >= 6:
@ -118,7 +120,9 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
contentTopic = msg.contentTopic,
timestamp = msg.timestamp
node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler)))
node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
error "failed to subscribe to pubsub topic", pubsubTopic, error
quit(1)
when isMainModule:
let rng = crypto.newRng()

View File

@ -36,7 +36,6 @@ proc setup*(): Waku =
conf.clusterId = twnClusterConf.clusterId
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
conf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
conf.discv5Discovery = twnClusterConf.discv5Discovery
conf.discv5BootstrapNodes =
conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes

View File

@ -187,5 +187,7 @@ proc new*(
except CatchableError:
error "could not handle SCP message: ", err = getCurrentExceptionMsg()
waku.node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler))
waku.node.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(handler)).isOkOr:
error "could not subscribe to pubsub topic: ", err = $error
return err("could not subscribe to pubsub topic: " & $error)
return ok(SCP)

49
flake.lock generated Normal file
View File

@ -0,0 +1,49 @@
{
"nodes": {
"nixpkgs": {
"locked": {
"lastModified": 1740603184,
"narHash": "sha256-t+VaahjQAWyA+Ctn2idyo1yxRIYpaDxMgHkgCNiMJa4=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "f44bd8ca21e026135061a0a57dcf3d0775b67a49",
"type": "github"
}
},
"root": {
"inputs": {
"nixpkgs": "nixpkgs",
"zerokit": "zerokit"
}
},
"zerokit": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"locked": {
"lastModified": 1743756626,
"narHash": "sha256-SvhfEl0bJcRsCd79jYvZbxQecGV2aT+TXjJ57WVv7Aw=",
"owner": "vacp2p",
"repo": "zerokit",
"rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582",
"type": "github"
},
"original": {
"owner": "vacp2p",
"repo": "zerokit",
"rev": "c60e0c33fc6350a4b1c20e6b6727c44317129582",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

64
flake.nix Normal file
View File

@ -0,0 +1,64 @@
{
description = "NWaku build flake";
nixConfig = {
extra-substituters = [ "https://nix-cache.status.im/" ];
extra-trusted-public-keys = [ "nix-cache.status.im-1:x/93lOfLU+duPplwMSBR+OlY4+mo+dCN7n0mr4oPwgY=" ];
};
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs?rev=f44bd8ca21e026135061a0a57dcf3d0775b67a49";
zerokit = {
url = "github:vacp2p/zerokit?rev=c60e0c33fc6350a4b1c20e6b6727c44317129582";
inputs.nixpkgs.follows = "nixpkgs";
};
};
outputs = { self, nixpkgs, zerokit }:
let
stableSystems = [
"x86_64-linux" "aarch64-linux"
"x86_64-darwin" "aarch64-darwin"
"x86_64-windows" "i686-linux"
"i686-windows"
];
forAllSystems = f: nixpkgs.lib.genAttrs stableSystems (system: f system);
pkgsFor = forAllSystems (
system: import nixpkgs {
inherit system;
config = {
android_sdk.accept_license = true;
allowUnfree = true;
};
overlays = [
(final: prev: {
androidEnvCustom = prev.callPackage ./nix/pkgs/android-sdk { };
androidPkgs = final.androidEnvCustom.pkgs;
androidShell = final.androidEnvCustom.shell;
})
];
}
);
in rec {
packages = forAllSystems (system: let
pkgs = pkgsFor.${system};
in rec {
libwaku-android-arm64 = pkgs.callPackage ./nix/default.nix {
inherit stableSystems;
src = self;
targets = ["libwaku-android-arm64"];
androidArch = "aarch64-linux-android";
abidir = "arm64-v8a";
zerokitPkg = zerokit.packages.${system}.zerokit-android-arm64;
};
default = libwaku-android-arm64;
});
devShells = forAllSystems (system: {
default = pkgsFor.${system}.callPackage ./nix/shell.nix {};
});
};
}

View File

@ -9,9 +9,6 @@ type JsonConnectionChangeEvent* = ref object of JsonEvent
proc new*(
T: type JsonConnectionChangeEvent, peerId: string, peerEvent: PeerEventKind
): T =
# Returns a JsonConnectionChangeEvent event as indicated in
# https://rfc.vac.dev/spec/36/#jsonmessageevent-type
return JsonConnectionChangeEvent(
eventType: "connection_change", peerId: peerId, peerEvent: peerEvent
)

View File

@ -71,7 +71,7 @@ type JsonMessageEvent* = ref object of JsonEvent
proc new*(T: type JsonMessageEvent, pubSubTopic: string, msg: WakuMessage): T =
# Returns a WakuMessage event as indicated in
# https://rfc.vac.dev/spec/36/#jsonmessageevent-type
# https://github.com/vacp2p/rfc/blob/master/content/docs/rfcs/36/README.md#jsonmessageevent-type
var payload = newSeq[byte](len(msg.payload))
if len(msg.payload) != 0:

View File

@ -10,9 +10,6 @@ type JsonTopicHealthChangeEvent* = ref object of JsonEvent
proc new*(
T: type JsonTopicHealthChangeEvent, pubsubTopic: string, topicHealth: TopicHealth
): T =
# Returns a TopicHealthChange event as indicated in
# https://rfc.vac.dev/spec/36/#jsonmessageevent-type
return JsonTopicHealthChangeEvent(
eventType: "relay_topic_health_change",
pubsubTopic: pubsubTopic,

View File

@ -117,11 +117,21 @@ int waku_relay_get_num_connected_peers(void* ctx,
WakuCallBack callback,
void* userData);
int waku_relay_get_connected_peers(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_num_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_store_query(void* ctx,
const char* jsonQuery,
const char* peerAddr,
@ -158,6 +168,10 @@ int waku_get_peerids_from_peerstore(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_connected_peers_info(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_peerids_by_protocol(void* ctx,
const char* protocol,
WakuCallBack callback,

View File

@ -42,7 +42,8 @@ import
template checkLibwakuParams*(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
) =
ctx[].userData = userData
if not isNil(ctx):
ctx[].userData = userData
if isNil(callback):
return RET_MISSING_CALLBACK
@ -52,10 +53,6 @@ template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untype
error eventName & " - eventCallback is nil"
return
if isNil(ctx[].eventUserData):
error eventName & " - eventUserData is nil"
return
foreignThreadGc:
try:
let event = body
@ -228,19 +225,11 @@ proc waku_content_topic(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let appStr = appName.alloc()
let ctnStr = contentTopicName.alloc()
let encodingStr = encoding.alloc()
let contentTopic = fmt"/{$appStr}/{appVersion}/{$ctnStr}/{$encodingStr}"
let contentTopic = fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}"
callback(
RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData
)
deallocShared(appStr)
deallocShared(ctnStr)
deallocShared(encodingStr)
return RET_OK
proc waku_pubsub_topic(
@ -251,15 +240,11 @@ proc waku_pubsub_topic(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let topicNameStr = topicName.alloc()
let outPubsubTopic = fmt"/waku/2/{$topicNameStr}"
let outPubsubTopic = fmt"/waku/2/{$topicName}"
callback(
RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData
)
deallocShared(topicNameStr)
return RET_OK
proc waku_default_pubsub_topic(
@ -292,12 +277,9 @@ proc waku_relay_publish(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let jwm = jsonWakuMessage.alloc()
defer:
deallocShared(jwm)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jwm)
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
@ -310,14 +292,10 @@ proc waku_relay_publish(
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
let pst = pubSubTopic.alloc()
defer:
deallocShared(pst)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.PUBLISH, pst, nil, wakuMessage),
RelayRequest.createShared(RelayMsgType.PUBLISH, pubSubTopic, nil, wakuMessage),
callback,
userData,
)
@ -357,15 +335,12 @@ proc waku_relay_subscribe(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let pst = pubSubTopic.alloc()
defer:
deallocShared(pst)
var cb = onReceivedMessage(ctx)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pst, WakuRelayHandler(cb)),
RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)),
callback,
userData,
)
@ -380,9 +355,6 @@ proc waku_relay_add_protected_shard(
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let pubk = publicKey.alloc()
defer:
deallocShared(pubk)
handleRequest(
ctx,
@ -391,7 +363,7 @@ proc waku_relay_add_protected_shard(
RelayMsgType.ADD_PROTECTED_SHARD,
clusterId = clusterId,
shardId = shardId,
publicKey = pubk,
publicKey = publicKey,
),
callback,
userData,
@ -406,15 +378,11 @@ proc waku_relay_unsubscribe(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let pst = pubSubTopic.alloc()
defer:
deallocShared(pst)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(
RelayMsgType.UNSUBSCRIBE, pst, WakuRelayHandler(onReceivedMessage(ctx))
RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx))
),
callback,
userData,
@ -429,14 +397,27 @@ proc waku_relay_get_num_connected_peers(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let pst = pubSubTopic.alloc()
defer:
deallocShared(pst)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_connected_peers(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pst),
RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pubSubTopic),
callback,
userData,
)
@ -450,14 +431,27 @@ proc waku_relay_get_num_peers_in_mesh(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let pst = pubSubTopic.alloc()
defer:
deallocShared(pst)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_peers_in_mesh(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pst),
RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pubSubTopic),
callback,
userData,
)
@ -527,15 +521,9 @@ proc waku_lightpush_publish(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let jwm = jsonWakuMessage.alloc()
let pst = pubSubTopic.alloc()
defer:
deallocShared(jwm)
deallocShared(pst)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jwm)
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
@ -551,7 +539,7 @@ proc waku_lightpush_publish(
handleRequest(
ctx,
RequestType.LIGHTPUSH,
LightpushRequest.createShared(LightpushMsgType.PUBLISH, pst, wakuMessage),
LightpushRequest.createShared(LightpushMsgType.PUBLISH, pubSubTopic, wakuMessage),
callback,
userData,
)
@ -650,6 +638,20 @@ proc waku_get_peerids_from_peerstore(
userData,
)
proc waku_get_connected_peers_info(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS_INFO),
callback,
userData,
)
proc waku_get_connected_peers(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =

View File

@ -143,7 +143,7 @@ proc process*(
of PEER_EXCHANGE:
let numValidPeers = (await performPeerExchangeRequestTo(self[].numPeers, waku)).valueOr:
error "PEER_EXCHANGE failed", error = error
return err("error calling performPeerExchangeRequestTo: " & $error)
return err($error)
return ok($numValidPeers)
error "discovery request not handled"

View File

@ -72,7 +72,11 @@ proc createWaku(
appCallbacks.relayHandler = nil
appCallbacks.topicHealthChangeHandler = nil
let wakuRes = Waku.new(conf, appCallbacks).valueOr:
# TODO: Convert `confJson` directly to `WakuConf`
let wakuConf = conf.toWakuConf().valueOr:
return err("Configuration error: " & $error)
let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr:
error "waku initialization failed", error = error
return err("Failed setting up Waku: " & $error)
@ -88,16 +92,16 @@ proc process*(
of CREATE_NODE:
waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr:
error "CREATE_NODE failed", error = error
return err("error processing createWaku request: " & $error)
return err($error)
of START_NODE:
(await waku.startWaku()).isOkOr:
error "START_NODE failed", error = error
return err("problem starting waku: " & $error)
return err($error)
of STOP_NODE:
try:
await waku[].stop()
except Exception:
error "STOP_NODE failed", error = getCurrentExceptionMsg()
return err("exception stopping node: " & getCurrentExceptionMsg())
return err(getCurrentExceptionMsg())
return ok("")

View File

@ -1,5 +1,5 @@
import std/[sequtils, strutils]
import chronicles, chronos, results, options
import chronicles, chronos, results, options, json
import
../../../../waku/factory/waku,
../../../../waku/node/waku_node,
@ -9,6 +9,7 @@ import
type PeerManagementMsgType* {.pure.} = enum
CONNECT_TO
GET_ALL_PEER_IDS
GET_CONNECTED_PEERS_INFO
GET_PEER_IDS_BY_PROTOCOL
DISCONNECT_PEER_BY_ID
DIAL_PEER
@ -22,6 +23,10 @@ type PeerManagementRequest* = object
protocol: cstring
peerId: cstring
type PeerInfo = object
protocols: seq[string]
addresses: seq[string]
proc createShared*(
T: type PeerManagementRequest,
op: PeerManagementMsgType,
@ -81,11 +86,29 @@ proc process*(
of GET_ALL_PEER_IDS:
## returns a comma-separated string of peerIDs
let peerIDs =
waku.node.peerManager.wakuPeerStore.peers().mapIt($it.peerId).join(",")
waku.node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",")
return ok(peerIDs)
of GET_CONNECTED_PEERS_INFO:
## returns a JSON string mapping peerIDs to objects with protocols and addresses
var peersMap = initTable[string, PeerInfo]()
let peers = waku.node.peerManager.switch.peerStore.peers().filterIt(
it.connectedness == Connected
)
# Build a map of peer IDs to peer info objects
for peer in peers:
let peerIdStr = $peer.peerId
peersMap[peerIdStr] =
PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it))
# Convert the map to JSON string
let jsonObj = %*peersMap
let jsonStr = $jsonObj
return ok(jsonStr)
of GET_PEER_IDS_BY_PROTOCOL:
## returns a comma-separated string of peerIDs that mount the given protocol
let connectedPeers = waku.node.peerManager.wakuPeerStore
let connectedPeers = waku.node.peerManager.switch.peerStore
.peers($self[].protocol)
.filterIt(it.connectedness == Connected)
.mapIt($it.peerId)

View File

@ -104,6 +104,6 @@ proc process*(
)
).valueOr:
error "PUBLISH failed", error = error
return err("LightpushRequest error publishing: " & $error)
return err($error)
return ok(msgHashHex)

View File

@ -1,4 +1,4 @@
import std/net
import std/[net, sequtils, strutils]
import chronicles, chronos, stew/byteutils, results
import
../../../../../waku/waku_core/message/message,
@ -7,15 +7,19 @@ import
../../../../../waku/waku_core/message,
../../../../../waku/waku_core/time, # Timestamp
../../../../../waku/waku_core/topics/pubsub_topic,
../../../../../waku/waku_core/topics,
../../../../../waku/waku_relay/protocol,
../../../../../waku/node/peer_manager,
../../../../alloc
type RelayMsgType* = enum
SUBSCRIBE
UNSUBSCRIBE
PUBLISH
NUM_CONNECTED_PEERS
LIST_CONNECTED_PEERS
## to return the list of all connected peers to an specific pubsub topic
NUM_MESH_PEERS
LIST_MESH_PEERS
## to return the list of only the peers that conform the mesh for a particular pubsub topic
ADD_PROTECTED_SHARD ## Protects a shard with a public key
@ -105,33 +109,48 @@ proc process*(
case self.operation
of SUBSCRIBE:
# TO DO: properly perform 'subscribe'
waku.node.registerRelayDefaultHandler($self.pubsubTopic)
discard waku.node.wakuRelay.subscribe($self.pubsubTopic, self.relayEventCallback)
waku.node.subscribe(
(kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic),
handler = some(self.relayEventCallback),
).isOkOr:
error "SUBSCRIBE failed", error
return err($error)
of UNSUBSCRIBE:
# TODO: properly perform 'unsubscribe'
waku.node.wakuRelay.unsubscribeAll($self.pubsubTopic)
waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr:
error "UNSUBSCRIBE failed", error
return err($error)
of PUBLISH:
let msg = self.message.toWakuMessage()
let pubsubTopic = $self.pubsubTopic
(await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
let errorMsg = "Message not sent." & $error
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
error "PUBLISH failed", error
return err($error)
let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex
return ok(msgHash)
of LIST_CONNECTED_PEERS:
of NUM_CONNECTED_PEERS:
let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr:
error "LIST_CONNECTED_PEERS failed", error = error
error "NUM_CONNECTED_PEERS failed", error
return err($error)
return ok($numConnPeers)
of LIST_MESH_PEERS:
of LIST_CONNECTED_PEERS:
let connPeers = waku.node.wakuRelay.getConnectedPeers($self.pubsubTopic).valueOr:
error "LIST_CONNECTED_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(connPeers.mapIt($it).join(","))
of NUM_MESH_PEERS:
let numPeersInMesh = waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr:
error "LIST_MESH_PEERS failed", error = error
error "NUM_MESH_PEERS failed", error = error
return err($error)
return ok($numPeersInMesh)
of LIST_MESH_PEERS:
let meshPeers = waku.node.wakuRelay.getPeersInMesh($self.pubsubTopic).valueOr:
error "LIST_MESH_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(meshPeers.mapIt($it).join(","))
of ADD_PROTECTED_SHARD:
try:
let relayShard =
@ -142,5 +161,5 @@ proc process*(
@[protectedShard], uint16(self.clusterId)
)
except ValueError:
return err("ADD_PROTECTED_SHARD exception: " & getCurrentExceptionMsg())
return err(getCurrentExceptionMsg())
return ok("")

View File

@ -24,50 +24,49 @@ func fromJsonNode(
T: type StoreRequest, jsonContent: JsonNode
): Result[StoreQueryRequest, string] =
var contentTopics: seq[string]
if jsonContent.contains("content_topics"):
if jsonContent.contains("contentTopics"):
contentTopics = collect(newSeq):
for cTopic in jsonContent["content_topics"].getElems():
for cTopic in jsonContent["contentTopics"].getElems():
cTopic.getStr()
var msgHashes: seq[WakuMessageHash]
if jsonContent.contains("message_hashes"):
for hashJsonObj in jsonContent["message_hashes"].getElems():
if jsonContent.contains("messageHashes"):
for hashJsonObj in jsonContent["messageHashes"].getElems():
let hash = hashJsonObj.getStr().hexToHash().valueOr:
return err("Failed converting message hash hex string to bytes: " & error)
msgHashes.add(hash)
let pubsubTopic =
if jsonContent.contains("pubsub_topic"):
some(jsonContent["pubsub_topic"].getStr())
if jsonContent.contains("pubsubTopic"):
some(jsonContent["pubsubTopic"].getStr())
else:
none(string)
let paginationCursor =
if jsonContent.contains("pagination_cursor"):
let hash = jsonContent["pagination_cursor"].getStr().hexToHash().valueOr:
return
err("Failed converting pagination_cursor hex string to bytes: " & error)
if jsonContent.contains("paginationCursor"):
let hash = jsonContent["paginationCursor"].getStr().hexToHash().valueOr:
return err("Failed converting paginationCursor hex string to bytes: " & error)
some(hash)
else:
none(WakuMessageHash)
let paginationForwardBool = jsonContent["pagination_forward"].getBool()
let paginationForwardBool = jsonContent["paginationForward"].getBool()
let paginationForward =
if paginationForwardBool: PagingDirection.FORWARD else: PagingDirection.BACKWARD
let paginationLimit =
if jsonContent.contains("pagination_limit"):
some(uint64(jsonContent["pagination_limit"].getInt()))
if jsonContent.contains("paginationLimit"):
some(uint64(jsonContent["paginationLimit"].getInt()))
else:
none(uint64)
let startTime = ?jsonContent.getProtoInt64("time_start")
let endTime = ?jsonContent.getProtoInt64("time_end")
let startTime = ?jsonContent.getProtoInt64("timeStart")
let endTime = ?jsonContent.getProtoInt64("timeEnd")
return ok(
StoreQueryRequest(
requestId: jsonContent["request_id"].getStr(),
includeData: jsonContent["include_data"].getBool(),
requestId: jsonContent["requestId"].getStr(),
includeData: jsonContent["includeData"].getBool(),
pubsubTopic: pubsubTopic,
contentTopics: contentTopics,
startTime: startTime,

File diff suppressed because it is too large Load Diff

35
nix/README.md Normal file
View File

@ -0,0 +1,35 @@
# Usage
## Shell
A development shell can be started using:
```sh
nix develop
```
## Building
To build nwaku you can use:
```sh
nix build '.?submodules=1#default'
```
The `?submodules=1` part should eventually not be necessary.
For more details see:
https://github.com/NixOS/nix/issues/4423
It can be also done without even cloning the repo:
```sh
nix build 'git+https://github.com/waku-org/nwaku?submodules=1#'
```
## Running
```sh
nix run 'git+https://github.com/waku-org/nwaku?submodules=1#'
```
## Testing
```sh
nix flake check ".?submodules=1#"
```

12
nix/atlas.nix Normal file
View File

@ -0,0 +1,12 @@
# Fetches the pinned source of `atlas` (the Nim workspace tool) that matches
# the vendored Nim compiler: the commit is read out of the compiler's own
# koch.nim so the two cannot drift apart.
{ pkgs ? import <nixpkgs> { } }:

let
  tools = pkgs.callPackage ./tools.nix {};
  # koch.nim pins the atlas revision via its `AtlasStableCommit` constant.
  sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
in pkgs.fetchFromGitHub {
  owner = "nim-lang";
  repo = "atlas";
  rev = tools.findKeyValue "^ +AtlasStableCommit = \"([a-f0-9]+)\"$" sourceFile;
  # WARNING: Requires manual updates when Nim compiler version changes.
  hash = "sha256-G1TZdgbRPSgxXZ3VsBP2+XFCLHXVb3an65MuQx67o/k=";
}

12
nix/checksums.nix Normal file
View File

@ -0,0 +1,12 @@
# Pinned checkout of nim-lang/checksums matching the vendored Nim compiler.
# The revision is extracted from the compiler's koch.nim so it always matches
# the Nim version in use.
{ pkgs ? import <nixpkgs> { } }:

let
  helpers = pkgs.callPackage ./tools.nix {};
  # koch.nim pins the checksums revision via `ChecksumsStableCommit`.
  kochFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
  pinnedRev = helpers.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" kochFile;
in
pkgs.fetchFromGitHub {
  owner = "nim-lang";
  repo = "checksums";
  rev = pinnedRev;
  # WARNING: Requires manual updates when Nim compiler version changes.
  hash = "sha256-Bm5iJoT2kAvcTexiLMFBa9oU5gf7d4rWjo3OiN7obWQ=";
}

12
nix/csources.nix Normal file
View File

@ -0,0 +1,12 @@
# Fetches the pinned `csources_v2` bootstrap sources used to build the
# vendored Nim compiler. The revision is read from the compiler's own
# build_config.txt so it always matches the Nim version in use.
{ pkgs ? import <nixpkgs> { } }:

let
  tools = pkgs.callPackage ./tools.nix {};
  # build_config.txt pins the csources revision via `nim_csourcesHash`.
  sourceFile = ../vendor/nimbus-build-system/vendor/Nim/config/build_config.txt;
in pkgs.fetchFromGitHub {
  owner = "nim-lang";
  repo = "csources_v2";
  rev = tools.findKeyValue "^nim_csourcesHash=([a-f0-9]+)$" sourceFile;
  # WARNING: Requires manual updates when Nim compiler version changes.
  hash = "sha256-UCLtoxOcGYjBdvHx7A47x6FjLMi6VZqpSs65MN7fpBs=";
}

116
nix/default.nix Normal file
View File

@ -0,0 +1,116 @@
# Derivation that builds libwaku for mobile (Android) targets and packages the
# result as an .aar archive. The build runs the repository's Makefile targets
# inside the sandbox, with fake git/cargo/rustup/cross shims because the Nix
# sandbox forbids network access and the zerokit artefact is supplied
# pre-built via `zerokitPkg`.
{
  config ? {},
  pkgs ? import <nixpkgs> { },
  # Source tree; must be fetched with submodules (see the assert below).
  src ? ../.,
  # Makefile targets to build.
  targets ? ["libwaku-android-arm64"],
  verbosity ? 2,
  useSystemNim ? true,
  quickAndDirty ? true,
  stableSystems ? [
    "x86_64-linux" "aarch64-linux"
  ],
  # Rust target triple used by zerokit (e.g. aarch64-linux-android) — TODO confirm exact values with callers.
  androidArch,
  # Android ABI directory name under build/android (e.g. arm64-v8a) — TODO confirm with callers.
  abidir,
  # Pre-built zerokit package providing librln.so.
  zerokitPkg,
}:

assert pkgs.lib.assertMsg ((src.submodules or true) == true)
  "Unable to build without submodules. Append '?submodules=1#' to the URI.";

let
  inherit (pkgs) stdenv lib writeScriptBin callPackage;

  # Short git revision embedded in the version string; "dirty" for local trees.
  revision = lib.substring 0 8 (src.rev or "dirty");

in stdenv.mkDerivation rec {
  pname = "nwaku";
  version = "1.0.0-${revision}";

  inherit src;

  buildInputs = with pkgs; [
    openssl
    gmp
    zip
  ];

  # Dependencies that should only exist in the build environment.
  nativeBuildInputs = let
    # Fix for Nim compiler calling 'git rev-parse' and 'lsb_release'.
    fakeGit = writeScriptBin "git" "echo ${version}";
    # Fix for the zerokit package that is built with cargo/rustup/cross.
    fakeCargo = writeScriptBin "cargo" "echo ${version}";
    # Fix for the zerokit package that is built with cargo/rustup/cross.
    fakeRustup = writeScriptBin "rustup" "echo ${version}";
    # Fix for the zerokit package that is built with cargo/rustup/cross.
    fakeCross = writeScriptBin "cross" "echo ${version}";
  in
    with pkgs; [
      cmake
      which
      lsb-release
      zerokitPkg
      nim-unwrapped-2_0
      fakeGit
      fakeCargo
      fakeRustup
      fakeCross
    ];

  # Environment variables required for Android builds
  ANDROID_SDK_ROOT="${pkgs.androidPkgs.sdk}";
  ANDROID_NDK_HOME="${pkgs.androidPkgs.ndk}";

  # disableMarchNative keeps the binary portable; the revision override feeds
  # the version the fake `git` shim cannot provide.
  NIMFLAGS = "-d:disableMarchNative -d:git_revision_override=${revision}";
  XDG_CACHE_HOME = "/tmp";

  # Minimal manifest written into the .aar so Android tooling accepts it.
  androidManifest = "<manifest xmlns:android=\"http://schemas.android.com/apk/res/android\" package=\"com.example.mylibrary\" />";

  makeFlags = targets ++ [
    "V=${toString verbosity}"
    "QUICK_AND_DIRTY_COMPILER=${if quickAndDirty then "1" else "0"}"
    "QUICK_AND_DIRTY_NIMBLE=${if quickAndDirty then "1" else "0"}"
    "USE_SYSTEM_NIM=${if useSystemNim then "1" else "0"}"
  ];

  # Prepare the nimbus-build-system layout without touching the network.
  configurePhase = ''
    patchShebangs . vendor/nimbus-build-system > /dev/null

    make nimbus-build-system-paths
    make nimbus-build-system-nimble-dir
  '';

  # Populate the Nim compiler's `dist/` tree with the pre-fetched pinned
  # dependencies (nimble/checksums/atlas/sat/csources) that the compiler's
  # bootstrap would otherwise download, and drop the pre-built zerokit
  # librln.so where the Makefile expects cargo to have produced it.
  preBuild = ''
    ln -s waku.nimble waku.nims

    pushd vendor/nimbus-build-system/vendor/Nim
    mkdir dist
    cp -r ${callPackage ./nimble.nix {}} dist/nimble
    chmod 777 -R dist/nimble
    mkdir -p dist/nimble/dist
    cp -r ${callPackage ./checksums.nix {}} dist/checksums # need both
    cp -r ${callPackage ./checksums.nix {}} dist/nimble/dist/checksums
    cp -r ${callPackage ./atlas.nix {}} dist/atlas
    chmod 777 -R dist/atlas
    mkdir dist/atlas/dist
    cp -r ${callPackage ./sat.nix {}} dist/nimble/dist/sat
    cp -r ${callPackage ./sat.nix {}} dist/atlas/dist/sat
    cp -r ${callPackage ./csources.nix {}} csources_v2
    chmod 777 -R dist/nimble csources_v2
    popd

    mkdir -p vendor/zerokit/target/${androidArch}/release
    cp ${zerokitPkg}/librln.so vendor/zerokit/target/${androidArch}/release/
  '';

  # Package the built shared libraries plus the manifest into libwaku.aar.
  installPhase = ''
    mkdir -p $out/jni
    cp -r ./build/android/${abidir}/* $out/jni/
    echo '${androidManifest}' > $out/jni/AndroidManifest.xml
    cd $out && zip -r libwaku.aar *
  '';

  meta = with pkgs.lib; {
    description = "NWaku derivation to build libwaku for mobile targets using Android NDK and Rust.";
    homepage = "https://github.com/status-im/nwaku";
    license = licenses.mit;
    platforms = stableSystems;
  };
}

12
nix/nimble.nix Normal file
View File

@ -0,0 +1,12 @@
# Pinned checkout of nim-lang/nimble matching the vendored Nim compiler,
# extracted from the `NimbleStableCommit` constant in the compiler's koch.nim.
{ pkgs ? import <nixpkgs> { } }:

let
  helpers = pkgs.callPackage ./tools.nix {};
  kochFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
in
pkgs.fetchFromGitHub {
  owner = "nim-lang";
  repo = "nimble";
  # NOTE: unlike the sibling pins, this pattern tolerates trailing text after
  # the quoted commit (the koch.nim line may carry a comment).
  rev = helpers.findKeyValue "^ +NimbleStableCommit = \"([a-f0-9]+)\".+" kochFile;
  # WARNING: Requires manual updates when Nim compiler version changes.
  hash = "sha256-MVHf19UbOWk8Zba2scj06PxdYYOJA6OXrVyDQ9Ku6Us=";
}

View File

@ -0,0 +1,26 @@
#
# This Nix expression centralizes the configuration
# for the Android development environment: it selects the exact SDK/NDK
# component versions composed via nixpkgs' androidenv.
#
{ androidenv, lib, stdenv }:

# Fail early on aarch64-darwin, which this SDK composition does not support;
# the message points at the documented workaround.
assert lib.assertMsg (stdenv.system != "aarch64-darwin")
  "aarch64-darwin not supported for Android SDK. Use: NIXPKGS_SYSTEM_OVERRIDE=x86_64-darwin";

# The "android-sdk-license" license is accepted
# by setting android_sdk.accept_license = true.
androidenv.composeAndroidPackages {
  cmdLineToolsVersion = "9.0";
  toolsVersion = "26.1.1";
  platformToolsVersion = "33.0.3";
  buildToolsVersions = [ "34.0.0" ];
  platformVersions = [ "34" ];
  cmakeVersions = [ "3.22.1" ];
  ndkVersion = "25.2.9519653";
  includeNDK = true;
  includeExtras = [
    "extras;android;m2repository"
    "extras;google;m2repository"
  ];
}

View File

@ -0,0 +1,14 @@
#
# Entry point for the Android tooling in this directory: wires together the
# SDK composition (compose.nix), the symlinked package set (pkgs.nix) and the
# development shell (shell.nix), and exposes all three.
#
{ callPackage }:

let
  compose = callPackage ./compose.nix { };
  pkgs = callPackage ./pkgs.nix { inherit compose; };
  shell = callPackage ./shell.nix { androidPkgs = pkgs; };
in {
  inherit compose pkgs shell;
}

View File

@ -0,0 +1,17 @@
{ stdenv, compose }:

#
# This derivation simply symlinks some stuff to get
# shorter paths as libexec/android-sdk is quite the mouthful.
# With this you can just do `androidPkgs.sdk` and `androidPkgs.ndk`.
#
stdenv.mkDerivation {
  name = "${compose.androidsdk.name}-mod";
  # Skip the standard build phases; all we do is create symlinks.
  phases = [ "symlinkPhase" ];
  # Three outputs: the full SDK wrapper, the SDK root, and the bundled NDK.
  outputs = [ "out" "sdk" "ndk" ];
  symlinkPhase = ''
    ln -s ${compose.androidsdk} $out
    ln -s ${compose.androidsdk}/libexec/android-sdk $sdk
    ln -s ${compose.androidsdk}/libexec/android-sdk/ndk-bundle $ndk
  '';
}

View File

@ -0,0 +1,19 @@
# Development shell for Android builds: exposes the SDK/NDK through the
# conventional environment variables and puts the SDK tool directories on
# PATH so sdkmanager/adb etc. are directly usable.
{ mkShell, openjdk, androidPkgs }:

mkShell {
  name = "android-sdk-shell";
  buildInputs = [ openjdk ];
  shellHook = ''
    export ANDROID_HOME="${androidPkgs.sdk}"
    export ANDROID_NDK_ROOT="${androidPkgs.ndk}"
    export ANDROID_SDK_ROOT="$ANDROID_HOME"
    export ANDROID_NDK_HOME="${androidPkgs.ndk}"
    export PATH="$ANDROID_NDK_ROOT:$PATH"
    export PATH="$ANDROID_SDK_ROOT/tools:$PATH"
    export PATH="$ANDROID_SDK_ROOT/tools/bin:$PATH"
    export PATH="$(echo $ANDROID_SDK_ROOT/cmdline-tools/*/bin):$PATH"
    export PATH="$ANDROID_SDK_ROOT/platform-tools:$PATH"
  '';
}

12
nix/sat.nix Normal file
View File

@ -0,0 +1,12 @@
# Fetches the pinned source of nim-lang/sat (SAT solver used by nimble/atlas)
# matching the vendored Nim compiler; the commit is read from the compiler's
# koch.nim so it cannot drift from the Nim version in use.
{ pkgs ? import <nixpkgs> { } }:

let
  tools = pkgs.callPackage ./tools.nix {};
  # koch.nim pins the sat revision via its `SatStableCommit` constant.
  sourceFile = ../vendor/nimbus-build-system/vendor/Nim/koch.nim;
in pkgs.fetchFromGitHub {
  owner = "nim-lang";
  repo = "sat";
  rev = tools.findKeyValue "^ +SatStableCommit = \"([a-f0-9]+)\"$" sourceFile;
  # WARNING: Requires manual updates when Nim compiler version changes.
  hash = "sha256-JFrrSV+mehG0gP7NiQ8hYthL0cjh44HNbXfuxQNhq7c=";
}

26
nix/shell.nix Normal file
View File

@ -0,0 +1,26 @@
# Default development shell: Nim toolchain plus the Rust tooling used by the
# zerokit dependency, layered on top of the Android shell environment.
{
  pkgs ? import <nixpkgs> { },
}:

let
  # Extra frameworks needed when linking on macOS.
  optionalDarwinDeps = pkgs.lib.optionals pkgs.stdenv.isDarwin [
    pkgs.libiconv
    pkgs.darwin.apple_sdk.frameworks.Security
  ];
in
pkgs.mkShell {
  # Inherit the Android SDK/NDK environment (see nix/android/shell.nix).
  inputsFrom = [
    pkgs.androidShell
  ] ++ optionalDarwinDeps;

  buildInputs = with pkgs; [
    git
    cargo
    rustup
    cmake
    nim-unwrapped-2_0
  ];

  # NOTE(review): PCRE on LD_LIBRARY_PATH — presumably required at runtime by
  # Nim-built binaries; confirm before removing.
  LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
    pkgs.pcre
  ];
}

15
nix/tools.nix Normal file
View File

@ -0,0 +1,15 @@
# Small helpers shared by the pin-fetching expressions in this directory.
{ pkgs ? import <nixpkgs> { } }:

let
  inherit (pkgs.lib) fileContents last splitString flatten remove;
  inherit (builtins) map match;
in {
  # findKeyValue :: regex -> path -> string
  # Reads `sourceFile`, matches every line against `regex` (which must contain
  # a capture group), and returns the capture from the LAST matching line.
  # Evaluation fails (via `last` on an empty list) when no line matches.
  findKeyValue = regex: sourceFile:
    let
      linesFrom = file: splitString "\n" (fileContents file);
      matching = regex: lines: map (line: match regex line) lines;
      # `match` yields null for non-matching lines; drop those, then flatten
      # the per-line capture lists and keep the final capture.
      extractMatch = matches: last (flatten (remove null matches));
    in
      extractMatch (matching regex (linesFrom sourceFile));
}

View File

@ -0,0 +1,29 @@
#!/usr/bin/env bash

# This script is used when building the Nix derivation, which doesn't allow
# Git commands. It implements logic similar to the $(NIMBLE_DIR) target in the
# nimbus-build-system Makefile: for every vendored submodule it runs
# create_nimble_link.sh so the Nim toolchain can resolve the packages.
#
# Expected environment variables (supplied by the derivation):
#   BUILD_SYSTEM_DIR, NIMBLE_DIR, PWD_CMD, EXCLUDED_NIM_PACKAGES

create_nimble_link_script_path="$(pwd)/${BUILD_SYSTEM_DIR}/scripts/create_nimble_link.sh"

# Create a nimble link for every submodule listed in the given .gitmodules file.
process_gitmodules() {
    local gitmodules_file="$1"
    local gitmodules_dir
    gitmodules_dir=$(dirname "$gitmodules_file")

    # Extract all submodule paths from the .gitmodules file.
    # Entries look like "path = vendor/foo"; anchor on the `path =` key so
    # `url` lines that merely contain the word "path" are not picked up.
    grep -E '^[[:space:]]*path[[:space:]]*=' "$gitmodules_file" | awk '{print $3}' |
    while IFS= read -r submodule_path; do
        # Change pwd to the submodule dir and execute script
        pushd "$gitmodules_dir/$submodule_path" > /dev/null
        NIMBLE_DIR="$NIMBLE_DIR" PWD_CMD="$PWD_CMD" EXCLUDED_NIM_PACKAGES="$EXCLUDED_NIM_PACKAGES" \
            "$create_nimble_link_script_path" "$submodule_path"
        popd > /dev/null
    done
}

# Create the base directory if it doesn't exist
mkdir -p "${NIMBLE_DIR}/pkgs"

# Find all .gitmodules files and process them. NUL-delimited read keeps paths
# containing whitespace intact (a plain `for f in $(find …)` would split them).
find . -name '.gitmodules' -print0 | while IFS= read -r -d '' gitmodules_file; do
    echo "Processing .gitmodules file: $gitmodules_file"
    process_gitmodules "$gitmodules_file"
done

View File

@ -1,22 +0,0 @@
{ pkgs ? import (builtins.fetchTarball {
url = "https://github.com/NixOS/nixpkgs/archive/dbf1d73cd1a17276196afeee169b4cf7834b7a96.tar.gz";
sha256 = "sha256:1k5nvn2yzw370cqsfh62lncsgydq2qkbjrx34cprzf0k6b93v7ch";
}) {} }:
pkgs.mkShell {
name = "nim-waku-build-shell";
# Versions dependent on nixpkgs commit. Update manually.
buildInputs = with pkgs; [
git # 2.37.3
which # 2.21
rustc # 1.63.0
] ++ lib.optionals stdenv.isDarwin [
libiconv
darwin.apple_sdk.frameworks.Security
];
LD_LIBRARY_PATH = pkgs.lib.makeLibraryPath [
pkgs.pcre
];
}

View File

@ -85,7 +85,8 @@ import
./test_waku_noise_sessions,
./test_waku_netconfig,
./test_waku_switch,
./test_waku_rendezvous
./test_waku_rendezvous,
./waku_discv5/test_waku_discv5
# Waku Keystore test suite
import ./test_waku_keystore_keyfile, ./test_waku_keystore
@ -98,6 +99,7 @@ import
./wakunode_rest/test_rest_relay_serdes,
./wakunode_rest/test_rest_serdes,
./wakunode_rest/test_rest_filter,
./wakunode_rest/test_rest_lightpush,
./wakunode_rest/test_rest_lightpush_legacy,
./wakunode_rest/test_rest_admin,
./wakunode_rest/test_rest_cors,
@ -106,4 +108,4 @@ import
import ./waku_rln_relay/test_all
# Node Factory
import ./factory/test_config
import ./factory/test_external_config

View File

@ -1,6 +1,6 @@
{.used.}
import std/strutils, stew/[results, byteutils], testutils/unittests
import std/strutils, results, stew/byteutils, testutils/unittests
import waku/common/base64
suite "Waku Common - stew base64 wrapper":

View File

@ -2,7 +2,7 @@
import
std/[os, options],
stew/results,
results,
stew/shims/net as stewNet,
testutils/unittests,
confutils,

View File

@ -1,6 +1,6 @@
{.used.}
import std/options, stew/results, stew/shims/net, testutils/unittests
import std/options, results, stew/shims/net, testutils/unittests
import waku/common/enr, ../testlib/wakucore
suite "nim-eth ENR - builder and typed record":

View File

@ -1,6 +1,6 @@
{.used.}
import testutils/unittests, stew/results
import testutils/unittests, results
import waku/common/utils/parse_size_units
suite "Size serialization test":

View File

@ -10,7 +10,7 @@
import testutils/unittests
import chronos, libp2p/stream/connection
import std/[sequtils, options, tables]
import std/[options, tables]
import ../../waku/common/rate_limit/request_limiter
import ../../waku/common/rate_limit/timed_map

View File

@ -10,7 +10,7 @@
import testutils/unittests
import chronos, libp2p/stream/connection
import std/[sequtils, options]
import std/options
import ../../waku/common/rate_limit/request_limiter
import ../../waku/common/rate_limit/timed_map

View File

@ -1,6 +1,6 @@
{.used.}
import std/[strutils, os], stew/results, testutils/unittests
import std/[strutils, os], results, testutils/unittests
import waku/common/databases/db_sqlite {.all.}, ../waku_archive/archive_utils
template sourceDir(): string =

View File

@ -1,157 +0,0 @@
{.used.}
import
std/options,
testutils/unittests,
chronos,
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
nimcrypto/utils,
secp256k1,
confutils
import
../../waku/factory/external_config,
../../waku/factory/internal_config,
../../waku/factory/networks_config,
../../waku/common/logging
suite "Waku config - apply preset":
test "Default preset is TWN":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn")
## When
let res = applyPresetConfiguration(preConfig)
assert res.isOk(), $res.error
## Then
let conf = res.get()
assert conf.maxMessageSize == expectedConf.maxMessageSize
assert conf.clusterId == expectedConf.clusterId
assert conf.rlnRelay == expectedConf.rlnRelay
assert conf.rlnRelayEthContractAddress == expectedConf.rlnRelayEthContractAddress
assert conf.rlnRelayDynamic == expectedConf.rlnRelayDynamic
assert conf.rlnRelayChainId == expectedConf.rlnRelayChainId
assert conf.rlnRelayBandwidthThreshold == expectedConf.rlnRelayBandwidthThreshold
assert conf.rlnEpochSizeSec == expectedConf.rlnEpochSizeSec
assert conf.rlnRelayUserMessageLimit == expectedConf.rlnRelayUserMessageLimit
assert conf.numShardsInNetwork == expectedConf.numShardsInNetwork
assert conf.discv5BootstrapNodes == expectedConf.discv5BootstrapNodes
test "Subscribes to all valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let res = applyPresetConfiguration(preConfig)
assert res.isOk(), $res.error
## Then
let conf = res.get()
assert conf.shards.len == expectedConf.numShardsInNetwork.int
test "Subscribes to some valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 4, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let resConf = applyPresetConfiguration(preConfig)
let res = validateShards(resConf.get())
assert res.isOk(), $res.error
## Then
let conf = resConf.get()
assert conf.shards.len() == shards.len()
for index, shard in shards:
assert shard in conf.shards
test "Subscribes to invalid shards in twn":
## Setup
## Given
let shards: seq[uint16] = @[0, 4, 7, 10]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
let postConfig = applyPresetConfiguration(preConfig)
## When
let res = validateShards(postConfig.get())
## Then
assert res.isErr(), "Invalid shard was accepted"
suite "Waku config - node key":
test "Passed node key is used":
## Setup
let nodeKeyStr =
"0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
let nodekey = block:
let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
crypto.PrivateKey(scheme: Secp256k1, skkey: key)
## Given
let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr])
## When
let res = getNodeKey(config)
assert res.isOk(), $res.error
## Then
let resKey = res.get()
assert utils.toHex(resKey.getRawBytes().get()) ==
utils.toHex(nodekey.getRawBytes().get())
suite "Waku config - Shards":
test "Shards are valid":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 4]
let numShardsInNetwork = 5.uint32
let config = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = validateShards(config)
## Then
assert res.isOk(), $res.error
test "Shards are not in range":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 5]
let numShardsInNetwork = 5.uint32
let config = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = validateShards(config)
## Then
assert res.isErr(), "Invalid shard was accepted"
test "Shard is passed without num shards":
## Setup
## Given
let config = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
## When
let res = validateShards(config)
## Then
assert res.isOk(), $res.error

View File

@ -0,0 +1,208 @@
{.used.}
import
std/options,
testutils/unittests,
chronos,
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
nimcrypto/utils,
secp256k1,
confutils
import
../../waku/factory/external_config,
../../waku/factory/networks_config,
../../waku/factory/waku_conf,
../../waku/common/logging,
../../waku/common/utils/parse_size_units
suite "Waku config - apply preset":
test "Default preset is TWN":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(
cmd: noCommand,
preset: "twn",
relay: true,
ethClientUrls: @["http://someaddress".EthRpcUrl],
rlnRelayTreePath: "/tmp/sometreepath",
)
## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error
## Then
let conf = res.get()
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(expectedConf.maxMessageSize))
check conf.clusterId == expectedConf.clusterId
check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay
if conf.rlnRelayConf.isSome():
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
test "Subscribes to all valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error
## Then
let conf = res.get()
check conf.shards.len == expectedConf.numShardsInNetwork.int
test "Subscribes to some valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 4, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let resConf = preConfig.toWakuConf()
assert resConf.isOk(), $resConf.error
## Then
let conf = resConf.get()
assert conf.shards.len() == shards.len()
for index, shard in shards:
assert shard in conf.shards
test "Subscribes to invalid shards in twn":
## Setup
## Given
let shards: seq[uint16] = @[0, 4, 7, 10]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let res = preConfig.toWakuConf()
## Then
assert res.isErr(), "Invalid shard was accepted"
test "Apply TWN preset when cluster id = 1":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(
cmd: noCommand,
clusterId: 1.uint16,
relay: true,
ethClientUrls: @["http://someaddress".EthRpcUrl],
rlnRelayTreePath: "/tmp/sometreepath",
)
## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error
## Then
let conf = res.get()
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(expectedConf.maxMessageSize))
check conf.clusterId == expectedConf.clusterId
check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay
if conf.rlnRelayConf.isSome():
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
suite "Waku config - node key":
test "Passed node key is used":
## Setup
let nodeKeyStr =
"0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
let nodekey = block:
let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
crypto.PrivateKey(scheme: Secp256k1, skkey: key)
## Given
let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr])
## When
let res = config.toWakuConf()
assert res.isOk(), $res.error
## Then
let resKey = res.get().nodeKey
assert utils.toHex(resKey.getRawBytes().get()) ==
utils.toHex(nodekey.getRawBytes().get())
suite "Waku config - Shards":
test "Shards are valid":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 4]
let numShardsInNetwork = 5.uint32
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = wakuNodeConf.toWakuConf()
assert res.isOk(), $res.error
## Then
let wakuConf = res.get()
let vRes = wakuConf.validate()
assert vRes.isOk(), $vRes.error
test "Shards are not in range":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 5]
let numShardsInNetwork = 5.uint32
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = wakuNodeConf.toWakuConf()
## Then
assert res.isErr(), "Invalid shard was accepted"
test "Shard is passed without num shards":
## Setup
## Given
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
## When
let res = wakuNodeConf.toWakuConf()
## Then
let wakuConf = res.get()
let vRes = wakuConf.validate()
assert vRes.isOk(), $vRes.error

View File

@ -2,11 +2,15 @@
import testutils/unittests, chronos, libp2p/protocols/connectivity/relay/relay
import ../testlib/wakunode, waku/factory/node_factory, waku/waku_node
import
../testlib/wakunode,
waku/factory/node_factory,
waku/waku_node,
waku/factory/conf_builder/conf_builder
suite "Node Factory":
test "Set up a node based on default configurations":
let conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@ -20,8 +24,10 @@ suite "Node Factory":
not node.wakuRendezvous.isNil()
test "Set up a node with Store enabled":
var conf = defaultTestWakuNodeConf()
conf.store = true
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.storeServiceConf.withEnabled(true)
confBuilder.storeServiceConf.withDbUrl("sqlite://store.sqlite3")
let conf = confBuilder.build().value
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@ -32,8 +38,9 @@ suite "Node Factory":
not node.wakuArchive.isNil()
test "Set up a node with Filter enabled":
var conf = defaultTestWakuNodeConf()
conf.filter = true
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.filterServiceConf.withEnabled(true)
let conf = confBuilder.build().value
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@ -43,7 +50,7 @@ test "Set up a node with Filter enabled":
not node.wakuFilter.isNil()
test "Start a node based on default configurations":
let conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error

View File

@ -0,0 +1,272 @@
{.used.}
import
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
nimcrypto/utils,
std/[options, sequtils],
results,
testutils/unittests
import
waku/factory/waku_conf,
waku/factory/waku_conf_builder,
waku/factory/networks_config,
waku/common/utils/parse_size_units
suite "Waku Conf - build with cluster conf":
test "Cluster Conf is passed and relay is enabled":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.discv5Conf.withUdpPort(9000)
builder.withRelayServiceRatio("50:50")
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
if clusterConf.rlnRelay:
assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled"
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress.string ==
clusterConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
test "Cluster Conf is passed, but relay is disabled":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.withRelayServiceRatio("50:50")
builder.discv5Conf.withUdpPort(9000)
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withRelay(false)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
assert conf.rlnRelayConf.isNone
test "Cluster Conf is passed, but rln relay is disabled":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let # Mount all shards in network
expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.rlnRelayConf.withEnabled(false)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
assert conf.rlnRelayConf.isNone
test "Cluster Conf is passed and valid shards are specified":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let shards = @[2.uint16, 3.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withShards(shards)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == shards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
test "Cluster Conf is passed and invalid shards are specified":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let shards = @[2.uint16, 10.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withShards(shards)
## When
let resConf = builder.build()
## Then
assert resConf.isErr(), "Invalid shard was accepted"
test "Cluster Conf is passed and RLN contract is overridden":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
let contractAddress = "0x0123456789ABCDEF"
## Given
builder.rlnRelayConf.withEthContractAddress(contractAddress)
builder.withClusterConf(clusterConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test")
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.isSome == clusterConf.discv5Discovery
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
if clusterConf.rlnRelay:
assert conf.rlnRelayConf.isSome
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress.string == contractAddress
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
suite "Waku Conf - node key":
test "Node key is generated":
## Setup
var builder = WakuConfBuilder.init()
builder.withClusterId(1)
## Given
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
let pubkey = getPublicKey(conf.nodeKey)
assert pubkey.isOk()
test "Passed node key is used":
## Setup
let nodeKeyStr =
"0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
let nodeKey = block:
let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
crypto.PrivateKey(scheme: Secp256k1, skkey: key)
var builder = WakuConfBuilder.init()
builder.withClusterId(1)
## Given
builder.withNodeKey(nodeKey)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
assert utils.toHex(conf.nodeKey.getRawBytes().get()) ==
utils.toHex(nodeKey.getRawBytes().get()),
"Passed node key isn't in config:" & $nodeKey & $conf.nodeKey
suite "Waku Conf - extMultiaddrs":
test "Valid multiaddresses are passed and accepted":
## Setup
var builder = WakuConfBuilder.init()
builder.withClusterId(1)
## Given
let multiaddrs =
@["/ip4/127.0.0.1/udp/9090/quic", "/ip6/::1/tcp/3217", "/dns4/foo.com/tcp/80"]
builder.withExtMultiAddrs(multiaddrs)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check multiaddrs.len == conf.networkConf.extMultiAddrs.len
let resMultiaddrs = conf.networkConf.extMultiAddrs.map(
proc(m: MultiAddress): string =
$m
)
for m in multiaddrs:
check m in resMultiaddrs

View File

@ -1,17 +1,9 @@
{.used.}
import
std/options,
testutils/unittests,
chronos,
web3,
stew/byteutils,
stint,
strutils,
tests/testlib/testasync
import std/options, testutils/unittests, chronos, web3, stint, tests/testlib/testasync
import
waku/[node/peer_manager, waku_core],
waku/node/peer_manager,
waku/incentivization/[rpc, eligibility_manager],
../waku_rln_relay/[utils_onchain, utils]

View File

@ -13,6 +13,9 @@ import
waku/[node/peer_manager, waku_core],
waku/incentivization/[rpc, reputation_manager],
waku/waku_lightpush/[rpc, common]
import std/options, testutils/unittests, chronos, web3
import waku/incentivization/reputation_manager, waku/waku_lightpush_legacy/rpc
suite "Waku Incentivization PoC Reputation":
var manager {.threadvar.}: ReputationManager

View File

@ -1,4 +1,4 @@
import std/options, stew/results, libp2p/peerstore
import std/options, results
import
waku/node/peer_manager/[waku_peer_store, peer_store/waku_peer_storage],
@ -7,6 +7,3 @@ import
proc newTestWakuPeerStorage*(path: Option[string] = string.none()): WakuPeerStorage =
let db = newSqliteDatabase(path)
WakuPeerStorage.new(db).value()
proc peerExists*(peerStore: PeerStore, peerId: PeerId): bool =
return peerStore[AddressBook].contains(peerId)

View File

@ -76,8 +76,10 @@ suite "Peer Manager":
# And both mount metadata and relay
discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic
discard server.mountMetadata(0) # clusterId irrelevant, overridden by topic
await client.mountRelay()
await server.mountRelay()
(await client.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# And both nodes are started
await allFutures(server.start(), client.start())
@ -89,7 +91,8 @@ suite "Peer Manager":
await sleepAsync(FUTURE_TIMEOUT)
# When making an operation that triggers onPeerMetadata
client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic"))
client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr:
assert false, "Failed to subscribe to relay"
await sleepAsync(FUTURE_TIMEOUT)
check:
@ -109,8 +112,10 @@ suite "Peer Manager":
# And both mount metadata and relay
discard client.mountMetadata(0) # clusterId irrelevant, overridden by topic
discard server.mountMetadata(0) # clusterId irrelevant, overridden by topic
await client.mountRelay()
await server.mountRelay()
(await client.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# And both nodes are started
await allFutures(server.start(), client.start())
@ -122,7 +127,8 @@ suite "Peer Manager":
await sleepAsync(FUTURE_TIMEOUT)
# When making an operation that triggers onPeerMetadata
client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic"))
client.subscribe((kind: SubscriptionKind.PubsubSub, topic: "newTopic")).isOkOr:
assert false, "Failed to subscribe to relay"
await sleepAsync(FUTURE_TIMEOUT)
check:

View File

@ -6,7 +6,6 @@ import
testutils/unittests,
chronos,
chronicles,
os,
libp2p/[peerstore, crypto/crypto]
import
@ -136,7 +135,8 @@ suite "Waku Filter - End to End":
asyncTest "Client Node can't receive Push from Server Node, via Relay":
# Given the server node has Relay enabled
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "error mounting relay: " & $error
# And valid filter subscription
let subscribeResponse = await client.filterSubscribe(
@ -160,7 +160,8 @@ suite "Waku Filter - End to End":
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
await server.start()
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "error mounting relay: " & $error
let serverRemotePeerInfo = server.peerInfo.toRemotePeerInfo()
@ -223,7 +224,8 @@ suite "Waku Filter - End to End":
pushedMsg == msg
asyncTest "Filter Client Node can't receive messages after subscribing and restarting, via Relay":
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "error mounting relay: " & $error
# Given a valid filter subscription
let subscribeResponse = await client.filterSubscribe(

View File

@ -1,31 +1,24 @@
{.used.}
import
std/[options, tables, sequtils, tempfiles, strutils],
std/[options, tempfiles],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
chronicles,
std/strformat,
os,
libp2p/[peerstore, crypto/crypto]
libp2p/crypto/crypto
import
waku/[
waku_core,
node/peer_manager,
node/waku_node,
waku_filter_v2,
waku_filter_v2/client,
waku_filter_v2/subscriptions,
waku_lightpush_legacy,
waku_lightpush_legacy/common,
waku_lightpush_legacy/client,
waku_lightpush_legacy/protocol_metrics,
waku_lightpush_legacy/rpc,
waku_rln_relay,
],
../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils],
../testlib/[wakucore, wakunode, testasync, futures],
../resources/payloads
suite "Waku Legacy Lightpush - End To End":
@ -59,7 +52,9 @@ suite "Waku Legacy Lightpush - End To End":
await allFutures(server.start(), client.start())
await server.start()
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountLegacyLightpush() # without rln-relay
client.mountLegacyLightpushClient()
@ -139,17 +134,18 @@ suite "RLN Proofs as a Lightpush Service":
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode"),
)
await allFutures(server.start(), client.start())
await server.start()
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig)
await server.mountLegacyLightPush()
client.mountLegacyLightPushClient()
@ -194,8 +190,10 @@ suite "Waku Legacy Lightpush message delivery":
await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())
await destNode.mountRelay(@[DefaultRelayShard])
await bridgeNode.mountRelay(@[DefaultRelayShard])
(await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
assert false, "Failed to mount relay"
(await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
assert false, "Failed to mount relay"
await bridgeNode.mountLegacyLightPush()
lightNode.mountLegacyLightPushClient()
@ -206,24 +204,25 @@ suite "Waku Legacy Lightpush message delivery":
await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()])
## Given
const CustomPubsubTopic = "/waku/2/rs/0/1"
let message = fakeWakuMessage()
var completionFutRelay = newFuture[bool]()
proc relayHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
check:
topic == DefaultPubsubTopic
topic == CustomPubsubTopic
msg == message
completionFutRelay.complete(true)
destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
assert false, "Failed to subscribe to topic:" & $error
# Wait for subscription to take effect
await sleepAsync(100.millis)
## When
let res = await lightNode.legacyLightpushPublish(some(DefaultPubsubTopic), message)
let res = await lightNode.legacyLightpushPublish(some(CustomPubsubTopic), message)
assert res.isOk(), $res.error
## Then

View File

@ -14,14 +14,11 @@ import
node/peer_manager,
waku_core,
waku_store_legacy,
waku_store_legacy/client,
waku_archive_legacy,
waku_archive_legacy/driver/sqlite_driver,
common/databases/db_sqlite,
],
../waku_store_legacy/store_utils,
../waku_archive_legacy/archive_utils,
../testlib/[common, wakucore, wakunode, testasync, futures, testutils]
../testlib/[wakucore, wakunode, testasync, testutils]
suite "Waku Store - End to End - Sorted Archive":
var pubsubTopic {.threadvar.}: PubsubTopic

View File

@ -1,27 +1,16 @@
{.used.}
import
std/[options, tables, sequtils, tempfiles, strutils],
std/[options, tempfiles],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
chronicles,
std/strformat,
os,
libp2p/[peerstore, crypto/crypto]
libp2p/crypto/crypto
import
waku/[
waku_core,
node/peer_manager,
node/waku_node,
waku_filter_v2,
waku_filter_v2/client,
waku_filter_v2/subscriptions,
waku_lightpush,
waku_rln_relay,
],
../testlib/[assertions, common, wakucore, wakunode, testasync, futures, testutils],
waku/[waku_core, node/peer_manager, node/waku_node, waku_lightpush, waku_rln_relay],
../testlib/[wakucore, wakunode, testasync, futures],
../resources/payloads
const PublishedToOnePeer = 1
@ -57,7 +46,8 @@ suite "Waku Lightpush - End To End":
await allFutures(server.start(), client.start())
await server.start()
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountLightpush() # without rln-relay
client.mountLightpushClient()
@ -138,17 +128,18 @@ suite "RLN Proofs as a Lightpush Service":
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode"),
)
await allFutures(server.start(), client.start())
await server.start()
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountRlnRelay(wakuRlnConfig)
await server.mountLightPush()
client.mountLightPushClient()
@ -193,8 +184,10 @@ suite "Waku Lightpush message delivery":
await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())
await destNode.mountRelay(@[DefaultRelayShard])
await bridgeNode.mountRelay(@[DefaultRelayShard])
(await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
assert false, "Failed to mount relay"
(await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
assert false, "Failed to mount relay"
await bridgeNode.mountLightPush()
lightNode.mountLightPushClient()
@ -205,6 +198,7 @@ suite "Waku Lightpush message delivery":
await destNode.connectToNodes(@[bridgeNode.peerInfo.toRemotePeerInfo()])
## Given
const CustomPubsubTopic = "/waku/2/rs/0/1"
let message = fakeWakuMessage()
var completionFutRelay = newFuture[bool]()
@ -212,17 +206,18 @@ suite "Waku Lightpush message delivery":
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
check:
topic == DefaultPubsubTopic
topic == CustomPubsubTopic
msg == message
completionFutRelay.complete(true)
destNode.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler))
destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
assert false, "Failed to subscribe to relay"
# Wait for subscription to take effect
await sleepAsync(100.millis)
## When
let res = await lightNode.lightpushPublish(some(DefaultPubsubTopic), message)
let res = await lightNode.lightpushPublish(some(CustomPubsubTopic), message)
assert res.isOk(), $res.error
assert res.get() == 1, "Expected to relay the message to 1 node"

View File

@ -13,14 +13,8 @@ import
eth/p2p/discoveryv5/enr
import
waku/[
waku_node,
discovery/waku_discv5,
waku_peer_exchange,
node/peer_manager,
waku_relay/protocol,
waku_core,
],
waku/
[waku_node, discovery/waku_discv5, waku_peer_exchange, node/peer_manager, waku_core],
../waku_peer_exchange/utils,
../testlib/[wakucore, wakunode, testasync]
@ -83,7 +77,7 @@ suite "Waku Peer Exchange":
# Then no peers are fetched
check:
node.peerManager.wakuPeerStore.peers.len == 0
node.peerManager.switch.peerStore.peers.len == 0
res.error.status_code == SERVICE_UNAVAILABLE
res.error.status_desc == some("PeerExchange is not mounted")
@ -98,12 +92,12 @@ suite "Waku Peer Exchange":
res.error.status_desc == some("peer_not_found_failure")
# Then no peers are fetched
check node.peerManager.wakuPeerStore.peers.len == 0
check node.peerManager.switch.peerStore.peers.len == 0
asyncTest "Node succesfully exchanges px peers with faked discv5":
# Given both nodes mount peer exchange
await allFutures([node.mountPeerExchange(), node2.mountPeerExchange()])
check node.peerManager.wakuPeerStore.peers.len == 0
check node.peerManager.switch.peerStore.peers.len == 0
# Mock that we discovered a node (to avoid running discv5)
var enr = enr.Record()
@ -124,8 +118,8 @@ suite "Waku Peer Exchange":
# Check that the peer ended up in the peerstore
let rpInfo = enr.toRemotePeerInfo.get()
check:
node.peerManager.wakuPeerStore.peers.anyIt(it.peerId == rpInfo.peerId)
node.peerManager.wakuPeerStore.peers.anyIt(it.addrs == rpInfo.addrs)
node.peerManager.switch.peerStore.peers.anyIt(it.peerId == rpInfo.peerId)
node.peerManager.switch.peerStore.peers.anyIt(it.addrs == rpInfo.addrs)
suite "setPeerExchangePeer":
var node2 {.threadvar.}: WakuNode
@ -142,7 +136,7 @@ suite "Waku Peer Exchange":
asyncTest "peer set successfully":
# Given a node with peer exchange mounted
await node.mountPeerExchange()
let initialPeers = node.peerManager.wakuPeerStore.peers.len
let initialPeers = node.peerManager.switch.peerStore.peers.len
# And a valid peer info
let remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo()
@ -152,12 +146,12 @@ suite "Waku Peer Exchange":
# Then the peer is added to the peer store
check:
node.peerManager.wakuPeerStore.peers.len == (initialPeers + 1)
node.peerManager.switch.peerStore.peers.len == (initialPeers + 1)
asyncTest "peer exchange not mounted":
# Given a node without peer exchange mounted
check node.wakuPeerExchange == nil
let initialPeers = node.peerManager.wakuPeerStore.peers.len
let initialPeers = node.peerManager.switch.peerStore.peers.len
# And a valid peer info
let invalidMultiAddress = MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()
@ -167,12 +161,12 @@ suite "Waku Peer Exchange":
# Then no peer is added to the peer store
check:
node.peerManager.wakuPeerStore.peers.len == initialPeers
node.peerManager.switch.peerStore.peers.len == initialPeers
asyncTest "peer info parse error":
# Given a node with peer exchange mounted
await node.mountPeerExchange()
let initialPeers = node.peerManager.wakuPeerStore.peers.len
let initialPeers = node.peerManager.switch.peerStore.peers.len
# And given a peer info with an invalid peer id
var remotePeerInfo2 = node2.peerInfo.toRemotePeerInfo()
@ -183,7 +177,7 @@ suite "Waku Peer Exchange":
# Then no peer is added to the peer store
check:
node.peerManager.wakuPeerStore.peers.len == initialPeers
node.peerManager.switch.peerStore.peers.len == initialPeers
suite "Waku Peer Exchange with discv5":
asyncTest "Node successfully exchanges px peers with real discv5":
@ -286,13 +280,13 @@ suite "Waku Peer Exchange with discv5":
let
requestPeers = 1
currentPeers = node3.peerManager.wakuPeerStore.peers.len
currentPeers = node3.peerManager.switch.peerStore.peers.len
let res = await node3.fetchPeerExchangePeers(1)
check res.tryGet() == 1
# Then node3 has received 1 peer from node1
check:
node3.peerManager.wakuPeerStore.peers.len == currentPeers + requestPeers
node3.peerManager.switch.peerStore.peers.len == currentPeers + requestPeers
await allFutures(
[node1.stop(), node2.stop(), node3.stop(), disc1.stop(), disc2.stop()]

View File

@ -18,18 +18,15 @@ import
waku_core,
node/peer_manager,
node/waku_node,
waku_enr/sharding,
discovery/waku_discv5,
waku_filter_v2/common,
waku_relay/protocol,
],
../testlib/
[wakucore, wakunode, testasync, testutils, assertions, comparisons, futures],
../testlib/[wakucore, wakunode, testasync, testutils, comparisons],
../waku_enr/utils,
../waku_archive/archive_utils,
../waku_discv5/utils,
./peer_manager/peer_store/utils,
./utils
./peer_manager/peer_store/utils
const DEFAULT_PROTOCOLS: seq[string] =
@["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"]
@ -45,9 +42,9 @@ suite "Peer Manager":
var
server {.threadvar.}: WakuNode
serverPeerStore {.threadvar.}: WakuPeerStore
serverPeerStore {.threadvar.}: PeerStore
client {.threadvar.}: WakuNode
clientPeerStore {.threadvar.}: WakuPeerStore
clientPeerStore {.threadvar.}: PeerStore
var
serverRemotePeerInfo {.threadvar.}: RemotePeerInfo
@ -64,9 +61,9 @@ suite "Peer Manager":
clientKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, Port(3000))
serverPeerStore = server.peerManager.wakuPeerStore
serverPeerStore = server.peerManager.switch.peerStore
client = newTestWakuNode(clientKey, listenIp, Port(3001))
clientPeerStore = client.peerManager.wakuPeerStore
clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@ -140,7 +137,7 @@ suite "Peer Manager":
clientPeerStore.peers().len == 1
# Given the server is marked as CannotConnect
client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] =
client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] =
CannotConnect
# When pruning the client's store
@ -177,7 +174,7 @@ suite "Peer Manager":
clientPeerStore.peers().len == 1
# Given the server is marked as having 1 failed connection
client.peerManager.wakuPeerStore[NumberFailedConnBook].book[serverPeerId] = 1
client.peerManager.switch.peerStore[NumberFailedConnBook].book[serverPeerId] = 1
# When pruning the client's store
client.peerManager.prunePeerStore()
@ -196,7 +193,7 @@ suite "Peer Manager":
clientPeerStore.peers().len == 1
# Given the server is marked as not connected
client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] =
client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] =
CannotConnect
# When pruning the client's store
@ -220,7 +217,7 @@ suite "Peer Manager":
# Given the server is marked as not connected
# (There's only one shard in the ENR so avg shards will be the same as the shard count; hence it will be purged.)
client.peerManager.wakuPeerStore[ConnectionBook].book[serverPeerId] =
client.peerManager.switch.peerStore[ConnectionBook].book[serverPeerId] =
CannotConnect
# When pruning the client's store
@ -311,7 +308,8 @@ suite "Peer Manager":
asyncTest "Peer Protocol Support Verification (Before Connection)":
# Given the server has mounted some Waku protocols
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.mountFilter()
# When connecting to the server
@ -338,7 +336,8 @@ suite "Peer Manager":
server2RemotePeerInfo = server2.switch.peerInfo.toRemotePeerInfo()
server2PeerId = server2RemotePeerInfo.peerId
await server2.mountRelay()
(await server2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# When connecting to both servers
await client.connectToNodes(@[serverRemotePeerInfo, server2RemotePeerInfo])
@ -536,8 +535,10 @@ suite "Peer Manager":
suite "Peer Connectivity States":
asyncTest "State Tracking & Transition":
# Given two correctly initialised nodes, but not connected
await server.mountRelay()
await client.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await client.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# Then their connectedness should be NotConnected
check:
@ -590,8 +591,10 @@ suite "Peer Manager":
suite "Automatic Reconnection":
asyncTest "Automatic Reconnection Implementation":
# Given two correctly initialised nodes, that are available for reconnection
await server.mountRelay()
await client.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await client.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await client.connectToNodes(@[serverRemotePeerInfo])
waitActive:
@ -714,8 +717,8 @@ suite "Persistence Check":
client = newTestWakuNode(
clientKey, listenIp, listenPort, peerStorage = clientPeerStorage
)
serverPeerStore = server.peerManager.wakuPeerStore
clientPeerStore = client.peerManager.wakuPeerStore
serverPeerStore = server.peerManager.switch.peerStore
clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@ -731,7 +734,7 @@ suite "Persistence Check":
newClient = newTestWakuNode(
clientKey, listenIp, listenPort, peerStorage = newClientPeerStorage
)
newClientPeerStore = newClient.peerManager.wakuPeerStore
newClientPeerStore = newClient.peerManager.switch.peerStore
await newClient.start()
@ -756,8 +759,8 @@ suite "Persistence Check":
client = newTestWakuNode(
clientKey, listenIp, listenPort, peerStorage = clientPeerStorage
)
serverPeerStore = server.peerManager.wakuPeerStore
clientPeerStore = client.peerManager.wakuPeerStore
serverPeerStore = server.peerManager.switch.peerStore
clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@ -776,8 +779,8 @@ suite "Persistence Check":
clientKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, listenPort)
client = newTestWakuNode(clientKey, listenIp, listenPort)
serverPeerStore = server.peerManager.wakuPeerStore
clientPeerStore = client.peerManager.wakuPeerStore
serverPeerStore = server.peerManager.switch.peerStore
clientPeerStore = client.peerManager.switch.peerStore
await allFutures(server.start(), client.start())
@ -792,13 +795,13 @@ suite "Mount Order":
var
client {.threadvar.}: WakuNode
clientRemotePeerInfo {.threadvar.}: RemotePeerInfo
clientPeerStore {.threadvar.}: WakuPeerStore
clientPeerStore {.threadvar.}: PeerStore
asyncSetup:
let clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, listenIp, listenPort)
clientPeerStore = client.peerManager.wakuPeerStore
clientPeerStore = client.peerManager.switch.peerStore
await client.start()
@ -813,7 +816,8 @@ suite "Mount Order":
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, listenPort)
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.start()
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
@ -837,7 +841,8 @@ suite "Mount Order":
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, listenIp, listenPort)
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
@ -862,7 +867,8 @@ suite "Mount Order":
server = newTestWakuNode(serverKey, listenIp, listenPort)
await server.start()
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
@ -889,7 +895,8 @@ suite "Mount Order":
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# When connecting to the server
await client.connectToNodes(@[serverRemotePeerInfo])
@ -913,7 +920,8 @@ suite "Mount Order":
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
await server.start()
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
# When connecting to the server
await client.connectToNodes(@[serverRemotePeerInfo])
@ -935,7 +943,8 @@ suite "Mount Order":
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
serverPeerId = serverRemotePeerInfo.peerId
await server.mountRelay()
(await server.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await server.start()
# When connecting to the server

View File

@ -83,16 +83,15 @@ proc getWakuRlnConfigOnChain*(
ethClientAddress: Option[string] = none(string),
): WakuRlnConfig =
return WakuRlnConfig(
rlnRelayDynamic: true,
rlnRelayCredIndex: some(credIndex),
rlnRelayEthContractAddress: rlnRelayEthContractAddress,
rlnRelayEthClientAddress: ethClientAddress.get(EthClient),
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $credIndex),
rlnEpochSizeSec: 1,
dynamic: true,
credIndex: some(credIndex),
ethContractAddress: rlnRelayEthContractAddress,
ethClientAddress: ethClientAddress.get(EthClient),
treePath: genTempPath("rln_tree", "wakunode_" & $credIndex),
epochSizeSec: 1,
onFatalErrorAction: fatalErrorHandler.get(fatalErrorVoidHandler),
# If these are used, initialisation fails with "failed to mount WakuRlnRelay: could not initialize the group manager: the commitment does not have a membership"
rlnRelayCredPath: keystorePath,
rlnRelayCredPassword: password,
creds: some(RlnRelayCreds(path: keystorePath, password: password)),
)
proc setupRelayWithOnChainRln*(
@ -227,13 +226,13 @@ suite "Waku RlnRelay - End to End - Static":
let contractAddress = await uploadRLNContract(EthClient)
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: true,
rlnRelayCredIndex: some(0.uint),
rlnRelayUserMessageLimit: 111,
rlnRelayTreepath: genTempPath("rln_tree", "wakunode_0"),
rlnRelayEthClientAddress: EthClient,
rlnRelayEthContractAddress: $contractAddress,
rlnRelayChainId: 1337,
dynamic: true,
credIndex: some(0.uint),
userMessageLimit: 111,
treepath: genTempPath("rln_tree", "wakunode_0"),
ethClientAddress: EthClient,
ethContractAddress: $contractAddress,
chainId: 1337,
onFatalErrorAction: proc(errStr: string) =
raiseAssert errStr
,
@ -263,7 +262,9 @@ suite "Waku RlnRelay - End to End - Static":
completionFut.complete((topic, msg))
let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic)
server.subscribe(subscriptionEvent, some(relayHandler))
server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr:
assert false, "Failed to subscribe to pubsub topic"
await sleepAsync(FUTURE_TIMEOUT)
# Generate Messages
@ -357,7 +358,9 @@ suite "Waku RlnRelay - End to End - Static":
completionFut.complete((topic, msg))
let subscriptionEvent = (kind: PubsubSub, topic: pubsubTopic)
server.subscribe(subscriptionEvent, some(relayHandler))
server.subscribe(subscriptionEvent, some(relayHandler)).isOkOr:
assert false, "Failed to subscribe to pubsub topic"
await sleepAsync(FUTURE_TIMEOUT)
# Generate Messages

View File

@ -1,7 +1,7 @@
{.used.}
import
std/[options, sequtils, algorithm, sets],
std/[options, sequtils, sets],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
@ -15,14 +15,11 @@ import
waku_core,
waku_core/message/digest,
waku_store,
waku_store/client,
waku_archive,
waku_archive/driver/sqlite_driver,
common/databases/db_sqlite,
],
../waku_store/store_utils,
../waku_archive/archive_utils,
../testlib/[common, wakucore, wakunode, testasync, futures, testutils]
../testlib/[wakucore, wakunode, testasync, testutils]
suite "Waku Store - End to End - Sorted Archive":
var pubsubTopic {.threadvar.}: PubsubTopic

View File

@ -1,6 +1,6 @@
{.used.}
import std/[sets, random], stew/[results, byteutils], testutils/unittests
import std/[sets, random], results, stew/byteutils, testutils/unittests
import waku/waku_core, waku/waku_api/message_cache, ./testlib/wakucore
randomize()

View File

@ -1,7 +1,7 @@
{.used.}
import
std/[options, sequtils, times, sugar, net],
std/[sequtils, times, sugar, net],
stew/shims/net as stewNet,
testutils/unittests,
chronos,
@ -27,7 +27,6 @@ import
waku_relay/protocol,
waku_filter_v2/common,
waku_store/common,
waku_lightpush/common,
waku_peer_exchange,
waku_metadata,
incentivization/reputation_manager,
@ -51,10 +50,10 @@ procSuite "Peer Manager":
check:
connOk == true
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].peerInfo.peerId
)
nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connectedness.Connected
asyncTest "dialPeer() works":
@ -81,13 +80,13 @@ procSuite "Peer Manager":
# Check that node2 is being managed in node1
check:
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].peerInfo.peerId
)
# Check connectedness
check:
nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connectedness.Connected
await allFutures(nodes.mapIt(it.stop()))
@ -142,12 +141,12 @@ procSuite "Peer Manager":
# Check peers were successfully added to peer manager
check:
node.peerManager.wakuPeerStore.peers().len == 2
node.peerManager.wakuPeerStore.peers(WakuFilterSubscribeCodec).allIt(
node.peerManager.switch.peerStore.peers().len == 2
node.peerManager.switch.peerStore.peers(WakuFilterSubscribeCodec).allIt(
it.peerId == filterPeer.peerId and it.addrs.contains(filterLoc) and
it.protocols.contains(WakuFilterSubscribeCodec)
)
node.peerManager.wakuPeerStore.peers(WakuStoreCodec).allIt(
node.peerManager.switch.peerStore.peers(WakuStoreCodec).allIt(
it.peerId == storePeer.peerId and it.addrs.contains(storeLoc) and
it.protocols.contains(WakuStoreCodec)
)
@ -167,7 +166,7 @@ procSuite "Peer Manager":
nodes[0].peerManager.addPeer(nodes[1].peerInfo.toRemotePeerInfo())
check:
# No information about node2's connectedness
nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
NotConnected
# Failed connection
@ -184,7 +183,7 @@ procSuite "Peer Manager":
check:
# Cannot connect to node2
nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) ==
nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) ==
CannotConnect
# Successful connection
@ -195,14 +194,14 @@ procSuite "Peer Manager":
check:
# Currently connected to node2
nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
Connected
# Stop node. Gracefully disconnect from all peers.
await nodes[0].stop()
check:
# Not currently connected to node2, but had recent, successful connection.
nodes[0].peerManager.wakuPeerStore.connectedness(nodes[1].peerInfo.peerId) ==
nodes[0].peerManager.switch.peerStore.connectedness(nodes[1].peerInfo.peerId) ==
CanConnect
await nodes[1].stop()
@ -233,12 +232,13 @@ procSuite "Peer Manager":
let conn1Ok = await nodes[0].peerManager.connectPeer(nonExistentPeer)
check:
# Cannot connect to node2
nodes[0].peerManager.wakuPeerStore.connectedness(nonExistentPeer.peerId) ==
nodes[0].peerManager.switch.peerStore.connectedness(nonExistentPeer.peerId) ==
CannotConnect
nodes[0].peerManager.wakuPeerStore[ConnectionBook][nonExistentPeer.peerId] ==
nodes[0].peerManager.switch.peerStore[ConnectionBook][nonExistentPeer.peerId] ==
CannotConnect
nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nonExistentPeer.peerId] ==
1
nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][
nonExistentPeer.peerId
] == 1
# Connection attempt failed
conn1Ok == false
@ -254,14 +254,17 @@ procSuite "Peer Manager":
nodes[0].peerManager.canBeConnected(nodes[1].peerInfo.peerId) == true
# After a successful connection, the number of failed connections is reset
nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] =
4
nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][
nodes[1].peerInfo.peerId
] = 4
let conn2Ok =
await nodes[0].peerManager.connectPeer(nodes[1].peerInfo.toRemotePeerInfo())
check:
conn2Ok == true
nodes[0].peerManager.wakuPeerStore[NumberFailedConnBook][nodes[1].peerInfo.peerId] ==
0
nodes[0].peerManager.switch.peerStore[NumberFailedConnBook][
nodes[1].peerInfo.peerId
] == 0
await allFutures(nodes.mapIt(it.stop()))
@ -280,8 +283,10 @@ procSuite "Peer Manager":
await node1.start()
await node2.start()
await node1.mountRelay()
await node2.mountRelay()
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let peerInfo2 = node2.switch.peerInfo
var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
@ -291,7 +296,7 @@ procSuite "Peer Manager":
assert is12Connected == true, "Node 1 and 2 not connected"
check:
node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] ==
node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] ==
remotePeerInfo2.addrs
# wait for the peer store update
@ -299,9 +304,9 @@ procSuite "Peer Manager":
check:
# Currently connected to node2
node1.peerManager.wakuPeerStore.peers().len == 1
node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
node1.peerManager.switch.peerStore.peers().len == 1
node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
@ -317,11 +322,12 @@ procSuite "Peer Manager":
check:
# Node2 has been loaded after "restart", but we have not yet reconnected
node3.peerManager.wakuPeerStore.peers().len == 1
node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected
node3.peerManager.switch.peerStore.peers().len == 1
node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected
await node3.mountRelay()
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node3.peerManager.connectToRelayPeers()
@ -329,9 +335,9 @@ procSuite "Peer Manager":
check:
# Reconnected to node2 after "restart"
node3.peerManager.wakuPeerStore.peers().len == 1
node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
node3.peerManager.switch.peerStore.peers().len == 1
node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
await allFutures([node1.stop(), node2.stop(), node3.stop()])
@ -350,8 +356,10 @@ procSuite "Peer Manager":
await node1.start()
await node2.start()
await node1.mountRelay()
await node2.mountRelay()
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let peerInfo2 = node2.switch.peerInfo
var remotePeerInfo2 = peerInfo2.toRemotePeerInfo()
@ -361,7 +369,7 @@ procSuite "Peer Manager":
assert is12Connected == true, "Node 1 and 2 not connected"
check:
node1.peerManager.wakuPeerStore[AddressBook][remotePeerInfo2.peerId] ==
node1.peerManager.switch.peerStore[AddressBook][remotePeerInfo2.peerId] ==
remotePeerInfo2.addrs
# wait for the peer store update
@ -369,9 +377,9 @@ procSuite "Peer Manager":
check:
# Currently connected to node2
node1.peerManager.wakuPeerStore.peers().len == 1
node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
node1.peerManager.switch.peerStore.peers().len == 1
node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
@ -387,11 +395,12 @@ procSuite "Peer Manager":
check:
# Node2 has been loaded after "restart", but we have not yet reconnected
node3.peerManager.wakuPeerStore.peers().len == 1
node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected
node3.peerManager.switch.peerStore.peers().len == 1
node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected
await node3.mountRelay()
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node3.peerManager.manageRelayPeers()
@ -399,9 +408,9 @@ procSuite "Peer Manager":
check:
# Reconnected to node2 after "restart"
node3.peerManager.wakuPeerStore.peers().len == 1
node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
node3.peerManager.switch.peerStore.peers().len == 1
node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
await allFutures([node1.stop(), node2.stop(), node3.stop()])
@ -480,21 +489,23 @@ procSuite "Peer Manager":
await node1.start()
await node2.start()
await node1.mountRelay()
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
node1.wakuRelay.codec = betaCodec
await node2.mountRelay()
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
node2.wakuRelay.codec = betaCodec
require:
(await node1.peerManager.connectPeer(peerInfo2.toRemotePeerInfo())) == true
check:
# Currently connected to node2
node1.peerManager.wakuPeerStore.peers().len == 1
node1.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node1.peerManager.wakuPeerStore.peers().anyIt(
node1.peerManager.switch.peerStore.peers().len == 1
node1.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node1.peerManager.switch.peerStore.peers().anyIt(
it.protocols.contains(node2.wakuRelay.codec)
)
node1.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
node1.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
# Simulate restart by initialising a new node using the same storage
let node3 = newTestWakuNode(
@ -504,27 +515,30 @@ procSuite "Peer Manager":
peerStorage = storage,
)
await node3.mountRelay()
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
node3.wakuRelay.codec = stableCodec
check:
# Node 2 and 3 have differing codecs
node2.wakuRelay.codec == betaCodec
node3.wakuRelay.codec == stableCodec
# Node2 has been loaded after "restart", but we have not yet reconnected
node3.peerManager.wakuPeerStore.peers().len == 1
node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec))
node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == NotConnected
node3.peerManager.switch.peerStore.peers().len == 1
node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec))
node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == NotConnected
await node3.start() # This should trigger a reconnect
check:
# Reconnected to node2 after "restart"
node3.peerManager.wakuPeerStore.peers().len == 1
node3.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(betaCodec))
node3.peerManager.wakuPeerStore.peers().anyIt(it.protocols.contains(stableCodec))
node3.peerManager.wakuPeerStore.connectedness(peerInfo2.peerId) == Connected
node3.peerManager.switch.peerStore.peers().len == 1
node3.peerManager.switch.peerStore.peers().anyIt(it.peerId == peerInfo2.peerId)
node3.peerManager.switch.peerStore.peers().anyIt(it.protocols.contains(betaCodec))
node3.peerManager.switch.peerStore.peers().anyIt(
it.protocols.contains(stableCodec)
)
node3.peerManager.switch.peerStore.connectedness(peerInfo2.peerId) == Connected
await allFutures([node1.stop(), node2.stop(), node3.stop()])
@ -561,38 +575,38 @@ procSuite "Peer Manager":
check:
# Peerstore track all three peers
nodes[0].peerManager.wakuPeerStore.peers().len == 3
nodes[0].peerManager.switch.peerStore.peers().len == 3
# All peer ids are correct
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].switch.peerInfo.peerId
)
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[2].switch.peerInfo.peerId
)
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[3].switch.peerInfo.peerId
)
# All peers support the relay protocol
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
# All peers are connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[1].switch.peerInfo.peerId
] == Connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[2].switch.peerInfo.peerId
] == Connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[3].switch.peerInfo.peerId
] == Connected
@ -631,38 +645,38 @@ procSuite "Peer Manager":
check:
# Peerstore track all three peers
nodes[0].peerManager.wakuPeerStore.peers().len == 3
nodes[0].peerManager.switch.peerStore.peers().len == 3
# All peer ids are correct
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].switch.peerInfo.peerId
)
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[2].switch.peerInfo.peerId
)
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[3].switch.peerInfo.peerId
)
# All peers support the relay protocol
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
# All peers are connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[1].switch.peerInfo.peerId
] == Connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[2].switch.peerInfo.peerId
] == Connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[3].switch.peerInfo.peerId
] == Connected
@ -691,66 +705,72 @@ procSuite "Peer Manager":
check:
# Peerstore track all three peers
nodes[0].peerManager.wakuPeerStore.peers().len == 3
nodes[0].peerManager.switch.peerStore.peers().len == 3
# Inbound/Outbound number of peers match
nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 3
nodes[0].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 0
nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
nodes[1].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
nodes[2].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Inbound).len == 0
nodes[3].peerManager.wakuPeerStore.getPeersByDirection(Outbound).len == 1
nodes[0].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 3
nodes[0].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 0
nodes[1].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0
nodes[1].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1
nodes[2].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0
nodes[2].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1
nodes[3].peerManager.switch.peerStore.getPeersByDirection(Inbound).len == 0
nodes[3].peerManager.switch.peerStore.getPeersByDirection(Outbound).len == 1
# All peer ids are correct
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[1].switch.peerInfo.peerId
)
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[2].switch.peerInfo.peerId
)
nodes[0].peerManager.wakuPeerStore.peers().anyIt(
nodes[0].peerManager.switch.peerStore.peers().anyIt(
it.peerId == nodes[3].switch.peerInfo.peerId
)
# All peers support the relay protocol
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[1].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[2].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
nodes[0].peerManager.wakuPeerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
nodes[0].peerManager.switch.peerStore[ProtoBook][nodes[3].switch.peerInfo.peerId].contains(
WakuRelayCodec
)
# All peers are connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[1].switch.peerInfo.peerId
] == Connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[2].switch.peerInfo.peerId
] == Connected
nodes[0].peerManager.wakuPeerStore[ConnectionBook][
nodes[0].peerManager.switch.peerStore[ConnectionBook][
nodes[3].switch.peerInfo.peerId
] == Connected
# All peers are Inbound in peer 0
nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[1].switch.peerInfo.peerId] ==
Inbound
nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[2].switch.peerInfo.peerId] ==
Inbound
nodes[0].peerManager.wakuPeerStore[DirectionBook][nodes[3].switch.peerInfo.peerId] ==
Inbound
nodes[0].peerManager.switch.peerStore[DirectionBook][
nodes[1].switch.peerInfo.peerId
] == Inbound
nodes[0].peerManager.switch.peerStore[DirectionBook][
nodes[2].switch.peerInfo.peerId
] == Inbound
nodes[0].peerManager.switch.peerStore[DirectionBook][
nodes[3].switch.peerInfo.peerId
] == Inbound
# All peers have an Outbound connection with peer 0
nodes[1].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
Outbound
nodes[2].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
Outbound
nodes[3].peerManager.wakuPeerStore[DirectionBook][nodes[0].switch.peerInfo.peerId] ==
Outbound
nodes[1].peerManager.switch.peerStore[DirectionBook][
nodes[0].switch.peerInfo.peerId
] == Outbound
nodes[2].peerManager.switch.peerStore[DirectionBook][
nodes[0].switch.peerInfo.peerId
] == Outbound
nodes[3].peerManager.switch.peerStore[DirectionBook][
nodes[0].switch.peerInfo.peerId
] == Outbound
await allFutures(nodes.mapIt(it.stop()))
@ -779,12 +799,13 @@ procSuite "Peer Manager":
# all peers are stored in the peerstore
check:
node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[0].peerId)
node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[1].peerId)
node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[2].peerId)
node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[0].peerId)
node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[1].peerId)
node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[2].peerId)
# but the relay peer is not
node.peerManager.wakuPeerStore.peers().anyIt(it.peerId == peers[3].peerId) == false
node.peerManager.switch.peerStore.peers().anyIt(it.peerId == peers[3].peerId) ==
false
# all service peers are added to its service slot
check:
@ -901,8 +922,8 @@ procSuite "Peer Manager":
peers.len == 3
# Add a peer[0] to the peerstore
pm.wakuPeerStore[AddressBook][peers[0].peerId] = peers[0].addrs
pm.wakuPeerStore[ProtoBook][peers[0].peerId] =
pm.switch.peerStore[AddressBook][peers[0].peerId] = peers[0].addrs
pm.switch.peerStore[ProtoBook][peers[0].peerId] =
@[WakuRelayCodec, WakuStoreCodec, WakuFilterSubscribeCodec, WakuLightPushCodec]
# When no service peers, we get one from the peerstore
@ -998,44 +1019,44 @@ procSuite "Peer Manager":
# Check that we have 30 peers in the peerstore
check:
pm.wakuPeerStore.peers.len == 30
pm.switch.peerStore.peers.len == 30
# fake that some peers failed to connected
pm.wakuPeerStore[NumberFailedConnBook][peers[0].peerId] = 2
pm.wakuPeerStore[NumberFailedConnBook][peers[1].peerId] = 2
pm.wakuPeerStore[NumberFailedConnBook][peers[2].peerId] = 2
pm.wakuPeerStore[NumberFailedConnBook][peers[3].peerId] = 2
pm.wakuPeerStore[NumberFailedConnBook][peers[4].peerId] = 2
pm.switch.peerStore[NumberFailedConnBook][peers[0].peerId] = 2
pm.switch.peerStore[NumberFailedConnBook][peers[1].peerId] = 2
pm.switch.peerStore[NumberFailedConnBook][peers[2].peerId] = 2
pm.switch.peerStore[NumberFailedConnBook][peers[3].peerId] = 2
pm.switch.peerStore[NumberFailedConnBook][peers[4].peerId] = 2
# fake that some peers are connected
pm.wakuPeerStore[ConnectionBook][peers[5].peerId] = Connected
pm.wakuPeerStore[ConnectionBook][peers[8].peerId] = Connected
pm.wakuPeerStore[ConnectionBook][peers[15].peerId] = Connected
pm.wakuPeerStore[ConnectionBook][peers[18].peerId] = Connected
pm.wakuPeerStore[ConnectionBook][peers[24].peerId] = Connected
pm.wakuPeerStore[ConnectionBook][peers[29].peerId] = Connected
pm.switch.peerStore[ConnectionBook][peers[5].peerId] = Connected
pm.switch.peerStore[ConnectionBook][peers[8].peerId] = Connected
pm.switch.peerStore[ConnectionBook][peers[15].peerId] = Connected
pm.switch.peerStore[ConnectionBook][peers[18].peerId] = Connected
pm.switch.peerStore[ConnectionBook][peers[24].peerId] = Connected
pm.switch.peerStore[ConnectionBook][peers[29].peerId] = Connected
# Prune the peerstore (current=30, target=25)
pm.prunePeerStore()
check:
# ensure peerstore was pruned
pm.wakuPeerStore.peers.len == 25
pm.switch.peerStore.peers.len == 25
# ensure connected peers were not pruned
pm.wakuPeerStore.peers.anyIt(it.peerId == peers[5].peerId)
pm.wakuPeerStore.peers.anyIt(it.peerId == peers[8].peerId)
pm.wakuPeerStore.peers.anyIt(it.peerId == peers[15].peerId)
pm.wakuPeerStore.peers.anyIt(it.peerId == peers[18].peerId)
pm.wakuPeerStore.peers.anyIt(it.peerId == peers[24].peerId)
pm.wakuPeerStore.peers.anyIt(it.peerId == peers[29].peerId)
pm.switch.peerStore.peers.anyIt(it.peerId == peers[5].peerId)
pm.switch.peerStore.peers.anyIt(it.peerId == peers[8].peerId)
pm.switch.peerStore.peers.anyIt(it.peerId == peers[15].peerId)
pm.switch.peerStore.peers.anyIt(it.peerId == peers[18].peerId)
pm.switch.peerStore.peers.anyIt(it.peerId == peers[24].peerId)
pm.switch.peerStore.peers.anyIt(it.peerId == peers[29].peerId)
# ensure peers that failed were the first to be pruned
not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[0].peerId)
not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[1].peerId)
not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[2].peerId)
not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[3].peerId)
not pm.wakuPeerStore.peers.anyIt(it.peerId == peers[4].peerId)
not pm.switch.peerStore.peers.anyIt(it.peerId == peers[0].peerId)
not pm.switch.peerStore.peers.anyIt(it.peerId == peers[1].peerId)
not pm.switch.peerStore.peers.anyIt(it.peerId == peers[2].peerId)
not pm.switch.peerStore.peers.anyIt(it.peerId == peers[3].peerId)
not pm.switch.peerStore.peers.anyIt(it.peerId == peers[4].peerId)
asyncTest "canBeConnected() returns correct value":
let pm = PeerManager.new(
@ -1061,8 +1082,8 @@ procSuite "Peer Manager":
pm.canBeConnected(p1) == true
# peer with ONE error that just failed
pm.wakuPeerStore[NumberFailedConnBook][p1] = 1
pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
pm.switch.peerStore[NumberFailedConnBook][p1] = 1
pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
# we cant connect right now
check:
pm.canBeConnected(p1) == false
@ -1073,8 +1094,8 @@ procSuite "Peer Manager":
pm.canBeConnected(p1) == true
# peer with TWO errors, we can connect until 2 seconds have passed
pm.wakuPeerStore[NumberFailedConnBook][p1] = 2
pm.wakuPeerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
pm.switch.peerStore[NumberFailedConnBook][p1] = 2
pm.switch.peerStore[LastFailedConnBook][p1] = Moment.init(getTime().toUnix, Second)
# cant be connected after 1 second
await sleepAsync(chronos.milliseconds(1000))
@ -1171,6 +1192,23 @@ procSuite "Peer Manager":
check:
nodes[0].peerManager.ipTable["127.0.0.1"].len == 1
nodes[0].peerManager.switch.connManager.getConnections().len == 1
nodes[0].peerManager.wakuPeerStore.peers().len == 1
nodes[0].peerManager.switch.peerStore.peers().len == 1
await allFutures(nodes.mapIt(it.stop()))
asyncTest "Retrieve peer that mounted peer exchange":
let
node1 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55048))
node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(55023))
await allFutures(node1.start(), node2.start())
await allFutures(node1.mountRelay(), node2.mountRelay())
await allFutures(node1.mountPeerExchange(), node2.mountPeerExchange())
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
var r = node1.peerManager.selectPeer(WakuRelayCodec)
assert r.isSome(), "could not retrieve peer mounting WakuRelayCodec"
r = node1.peerManager.selectPeer(WakuPeerExchangeCodec)
assert r.isSome(), "could not retrieve peer mounting WakuPeerExchangeCodec"

View File

@ -9,12 +9,8 @@ import
libp2p/multiaddress,
testutils/unittests
import
waku/[
node/peer_manager/peer_manager,
node/peer_manager/waku_peer_store,
waku_node,
waku_core/peers,
],
waku/
[node/peer_manager/peer_manager, node/peer_manager/waku_peer_store, waku_core/peers],
./testlib/wakucore
suite "Extended nim-libp2p Peer Store":
@ -25,7 +21,7 @@ suite "Extended nim-libp2p Peer Store":
setup:
# Setup a nim-libp2p peerstore with some peers
let peerStore = WakuPeerStore.new(nil, capacity = 50)
let peerStore = PeerStore.new(nil, capacity = 50)
var p1, p2, p3, p4, p5, p6: PeerId
# create five peers basePeerId + [1-5]
@ -320,7 +316,7 @@ suite "Extended nim-libp2p Peer Store":
test "del() successfully deletes waku custom books":
# Given
let peerStore = WakuPeerStore.new(nil, capacity = 5)
let peerStore = PeerStore.new(nil, capacity = 5)
var p1: PeerId
require p1.init("QmeuZJbXrszW2jdT7GdduSjQskPU3S7vvGWKtKgDfkDvW1")

View File

@ -4,10 +4,8 @@ import
std/[sequtils, options],
stew/shims/net,
testutils/unittests,
chronicles,
chronos,
libp2p/peerid,
libp2p/crypto/crypto,
libp2p/protocols/pubsub/gossipsub
import waku/waku_core, waku/waku_node, ./testlib/wakucore, ./testlib/wakunode
@ -25,8 +23,10 @@ procSuite "Relay (GossipSub) Peer Exchange":
newTestWakuNode(node2Key, listenAddress, port, sendSignedPeerRecord = true)
# When both client and server mount relay without a handler
await node1.mountRelay(@[DefaultRelayShard])
await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))
(await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
assert false, "Failed to mount relay"
(await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))).isOkOr:
assert false, "Failed to mount relay"
# Then the relays are mounted without a handler
check:
@ -75,9 +75,12 @@ procSuite "Relay (GossipSub) Peer Exchange":
peerExchangeHandle: RoutingRecordsHandler = peerExchangeHandler
# Givem the nodes mount relay with a peer exchange handler
await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))
await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))
await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))
(await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
assert false, "Failed to mount relay"
(await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
assert false, "Failed to mount relay"
(await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))).isOkOr:
assert false, "Failed to mount relay"
# Ensure that node1 prunes all peers after the first connection
node1.wakuRelay.parameters.dHigh = 1

View File

@ -3,7 +3,8 @@
import
std/[sequtils, tables],
stew/shims/net,
stew/[base32, results],
results,
stew/base32,
testutils/unittests,
chronicles,
chronos,
@ -36,9 +37,12 @@ suite "Waku DNS Discovery":
node3 = newTestWakuNode(nodeKey3, bindIp, Port(63503))
enr3 = node3.enr
await node1.mountRelay()
await node2.mountRelay()
await node3.mountRelay()
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await allFutures([node1.start(), node2.start(), node3.start()])
# Build and sign tree
@ -74,7 +78,8 @@ suite "Waku DNS Discovery":
nodeKey4 = generateSecp256k1Key()
node4 = newTestWakuNode(nodeKey4, bindIp, Port(63504))
await node4.mountRelay()
(await node4.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await node4.start()
var wakuDnsDisc = WakuDnsDiscovery.init(location, resolver).get()
@ -94,20 +99,20 @@ suite "Waku DNS Discovery":
check:
# We have successfully connected to all discovered nodes
node4.peerManager.wakuPeerStore.peers().anyIt(
node4.peerManager.switch.peerStore.peers().anyIt(
it.peerId == node1.switch.peerInfo.peerId
)
node4.peerManager.wakuPeerStore.connectedness(node1.switch.peerInfo.peerId) ==
node4.peerManager.switch.peerStore.connectedness(node1.switch.peerInfo.peerId) ==
Connected
node4.peerManager.wakuPeerStore.peers().anyIt(
node4.peerManager.switch.peerStore.peers().anyIt(
it.peerId == node2.switch.peerInfo.peerId
)
node4.peerManager.wakuPeerStore.connectedness(node2.switch.peerInfo.peerId) ==
node4.peerManager.switch.peerStore.connectedness(node2.switch.peerInfo.peerId) ==
Connected
node4.peerManager.wakuPeerStore.peers().anyIt(
node4.peerManager.switch.peerStore.peers().anyIt(
it.peerId == node3.switch.peerInfo.peerId
)
node4.peerManager.wakuPeerStore.connectedness(node3.switch.peerInfo.peerId) ==
node4.peerManager.switch.peerStore.connectedness(node3.switch.peerInfo.peerId) ==
Connected
await allFutures([node1.stop(), node2.stop(), node3.stop(), node4.stop()])

Some files were not shown because too many files have changed in this diff Show More