Mirror of https://github.com/logos-messaging/logos-messaging-nim.git (synced 2026-01-02 14:03:06 +00:00)

Commit aefd70159d: Merge branch 'master' into add_shard_metrics

.github/workflows/ci.yml (vendored, 7 changed lines)
@@ -80,6 +80,8 @@ jobs:
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools

build-windows:
needs: changes
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' }}
uses: ./.github/workflows/windows-build.yml
with:
branch: ${{ github.ref }}

@@ -119,12 +121,13 @@ jobs:
sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18
postgres_enabled=1
fi

export MAKEFLAGS="-j1"
export NIMFLAGS="--colors:off -d:chronicles_colors:none"
export USE_LIBBACKTRACE=0

make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled testwakunode2

build-docker-image:
needs: changes
Makefile (21 changed lines)
@@ -53,7 +53,19 @@ endif
# default target, because it's the first one that doesn't start with '.'
all: | wakunode2 example2 chat2 chat2bridge libwaku

test: | testcommon testwaku
TEST_FILE := $(word 2,$(MAKECMDGOALS))
TEST_NAME := $(wordlist 3,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS))

test:
ifeq ($(strip $(TEST_FILE)),)
$(MAKE) testcommon
$(MAKE) testwaku
else
$(MAKE) compile-test $(TEST_FILE) $(TEST_NAME)
endif
# this prevents make from erroring on unknown targets like "Index"
%:
@true

waku.nims:
ln -s waku.nimble $@

@@ -244,9 +256,10 @@ build/%: | build deps librln
echo -e $(BUILD_MSG) "build/$*" && \
$(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $*

test/%: | build deps librln
echo -e $(BUILD_MSG) "test/$*" && \
$(ENV_SCRIPT) nim testone $(NIM_PARAMS) waku.nims $*
compile-test: | build deps librln
echo -e $(BUILD_MSG) "$(TEST_FILE)" && \
$(ENV_SCRIPT) nim buildTest $(NIM_PARAMS) waku.nims $(TEST_FILE) && \
$(ENV_SCRIPT) nim execTest $(NIM_PARAMS) waku.nims $(TEST_FILE) "$(TEST_NAME)"

################
## Waku tools ##
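With this Makefile change, `make test` without extra goals still runs `testcommon` and `testwaku`, while any extra goals are parsed out of `MAKECMDGOALS` as a test file and an optional test name and forwarded to the new `compile-test` target. A minimal usage sketch, using only the example paths and names that appear in this commit's README change (not new targets or files):

```bash
# Run the full suite (TEST_FILE is empty, so testcommon and testwaku both run)
make test

# Run one test file; the second goal becomes TEST_FILE
make test tests/wakunode2/test_all.nim

# Run a single named test; the remaining goals become TEST_NAME,
# which compile-test hands to "nim execTest" as the test-name filter
make test tests/wakunode2/test_all.nim "node setup is successful with default configuration"
```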
README.md (10 changed lines)
@@ -110,11 +110,19 @@ source env.sh
```
If everything went well, you should see your prompt suffixed with `[Nimbus env]$`. Now you can run `nim` commands as usual.

### Waku Protocol Test Suite
### Test Suite

```bash
# Run all the Waku tests
make test

# Run a specific test file
make test <test_file_path>
# e.g. : make test tests/wakunode2/test_all.nim

# Run a specific test name from a specific test file
make test <test_file_path> <test_name>
# e.g. : make test tests/wakunode2/test_all.nim "node setup is successful with default configuration"
```

### Building single test files
@@ -590,9 +590,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =

await chat.readWriteLoop()

if conf.keepAlive:
node.startKeepalive()

runForever()

proc main(rng: ref HmacDrbgContext) {.async.} =
@@ -23,6 +23,7 @@ import
waku_store,
factory/builder,
common/utils/matterbridge_client,
common/rate_limit/setting,
],
# Chat 2 imports
../chat2/chat2,
@@ -9,7 +9,7 @@ x-logging: &logging
x-eth-client-address: &eth_client_address ${ETH_CLIENT_ADDRESS:-} # Add your ETH_CLIENT_ADDRESS after the "-"

x-rln-environment: &rln_env
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4}
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6}
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
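The compose file reads these values from the environment, with the new Linea Sepolia contract as the fallback default. A minimal launch sketch, assuming a docker-compose setup that consumes the anchors above; the WebSocket URL and paths are placeholders you must replace:

```bash
# Override the defaults before starting the stack; unset variables fall back
# to the "-" defaults declared in the compose file.
export ETH_CLIENT_ADDRESS="wss://linea-sepolia.infura.io/ws/v3/<your-key>"      # placeholder
export RLN_RELAY_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6"
export RLN_RELAY_CRED_PATH="./keystore/rlnKeystore.json"                        # optional
export RLN_RELAY_CRED_PASSWORD="<your keystore password>"                       # optional

docker compose up -d
```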
@@ -122,7 +122,7 @@ when isMainModule:
error "Issue converting toWakuConf", error = $error
quit(QuitFailure)

var waku = Waku.new(wakuConf).valueOr:
var waku = (waitFor Waku.new(wakuConf)).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)

@@ -570,17 +570,18 @@ when isMainModule:
info "cli flags", conf = conf

if conf.clusterId == 1:
let twnClusterConf = ClusterConf.TheWakuNetworkConf()
let twnNetworkConf = NetworkConf.TheWakuNetworkConf()

conf.bootstrapNodes = twnClusterConf.discv5BootstrapNodes
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
conf.numShardsInNetwork = twnClusterConf.numShardsInNetwork
conf.bootstrapNodes = twnNetworkConf.discv5BootstrapNodes
conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
conf.numShardsInNetwork = twnNetworkConf.shardingConf.numShardsInCluster

if conf.shards.len == 0:
conf.shards = toSeq(uint16(0) .. uint16(twnClusterConf.numShardsInNetwork - 1))
conf.shards =
toSeq(uint16(0) .. uint16(twnNetworkConf.shardingConf.numShardsInCluster - 1))

if conf.logLevel != LogLevel.NONE:
setLogLevel(conf.logLevel)
@@ -1,12 +1,20 @@
# RPC URL for accessing testnet via HTTP.
# e.g. https://sepolia.infura.io/v3/123aa110320f4aec179150fba1e1b1b1
# e.g. https://linea-sepolia.infura.io/v3/123aa110320f4aec179150fba1e1b1b1
RLN_RELAY_ETH_CLIENT_ADDRESS=

# Private key of testnet where you have sepolia ETH that would be staked into RLN contract.
# Account of testnet where you have Linea Sepolia ETH that would be staked into RLN contract.
ETH_TESTNET_ACCOUNT=

# Private key of testnet where you have Linea Sepolia ETH that would be staked into RLN contract.
# Note: make sure you don't use the '0x' prefix.
# e.g. 0116196e9a8abed42dd1a22eb63fa2a5a17b0c27d716b87ded2c54f1bf192a0b
ETH_TESTNET_KEY=

# Address of the RLN contract on Linea Sepolia.
RLN_CONTRACT_ADDRESS=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6
# Address of the RLN Membership Token contract on Linea Sepolia used to pay for membership.
TOKEN_CONTRACT_ADDRESS=0x185A0015aC462a0aECb81beCc0497b649a64B9ea

# Password you would like to use to protect your RLN membership.
RLN_RELAY_CRED_PASSWORD=

@@ -15,7 +23,8 @@ NWAKU_IMAGE=
NODEKEY=
DOMAIN=
EXTRA_ARGS=
RLN_RELAY_CONTRACT_ADDRESS=
STORAGE_SIZE=

# -------------------- SONDA CONFIG ------------------
METRICS_PORT=8004
@@ -30,13 +30,13 @@ It works by running a `nwaku` node, publishing a message from it every fixed int
2. If you want to query nodes in `cluster-id` 1, then you have to follow the steps of registering an RLN membership. Otherwise, you can skip this step.

For it, you need:
* Ethereum Sepolia WebSocket endpoint. Get one free from [Infura](https://www.infura.io/).
* Ethereum Sepolia account with some balance <0.01 Eth. Get some [here](https://www.infura.io/faucet/sepolia).
* Ethereum Linea Sepolia WebSocket endpoint. Get one free from [Infura](https://linea-sepolia.infura.io/).
* Ethereum Linea Sepolia account with a minimum of 0.01 ETH. Get some [here](https://docs.metamask.io/developer-tools/faucet/).
* A password to protect your RLN membership.

Fill the `RLN_RELAY_ETH_CLIENT_ADDRESS`, `ETH_TESTNET_KEY` and `RLN_RELAY_CRED_PASSWORD` env variables and run

```
./register_rln.sh
```
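Putting the registration step together, a minimal sketch; the endpoint, key and password values below are placeholders, and the variable names and `register_rln.sh` script come from the files in this commit (you can equivalently set the variables in the `.env` file shown above instead of exporting them):

```bash
# Placeholders: substitute your own Linea Sepolia endpoint, private key and password.
export RLN_RELAY_ETH_CLIENT_ADDRESS="wss://linea-sepolia.infura.io/ws/v3/<your-key>"
export ETH_TESTNET_KEY="<private key without the 0x prefix>"
export RLN_RELAY_CRED_PASSWORD="<password protecting the generated keystore>"

./register_rln.sh
```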
@@ -56,7 +56,7 @@ when isMainModule:
error "Waku configuration failed", error = error
quit(QuitFailure)

var waku = Waku.new(conf).valueOr:
var waku = (waitFor Waku.new(conf)).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
@@ -7,6 +7,7 @@ pipeline {
options {
timestamps()
timeout(time: 20, unit: 'MINUTES')
disableRestartFromStage()
buildDiscarder(logRotator(
numToKeepStr: '10',
daysToKeepStr: '30',

@@ -36,6 +36,7 @@ pipeline {

options {
timestamps()
disableRestartFromStage()
/* Prevent Jenkins jobs from running forever */
timeout(time: 30, unit: 'MINUTES')
/* Limit builds retained. */

@@ -6,6 +6,7 @@ pipeline {

options {
timestamps()
disableRestartFromStage()
timeout(time: 20, unit: 'MINUTES')
buildDiscarder(logRotator(
numToKeepStr: '10',
@@ -33,8 +33,8 @@ make wakunode2
Follow [Step 10](../droplet-quickstart.md#10-run-nwaku) of the [droplet quickstart](../droplet-quickstart.md) guide, while replacing the run command with -

```bash
export SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Sepolia Node>
export RLN_RELAY_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4" # Replace this with any compatible implementation
export LINEA_SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Linea Sepolia Node>
export RLN_RELAY_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6" # Replace this with any compatible implementation
$WAKUNODE_DIR/wakunode2 \
--store:true \
--persist-messages \
@@ -44,7 +44,7 @@ $WAKUNODE_DIR/wakunode2 \
--rln-relay:true \
--rln-relay-dynamic:true \
--rln-relay-eth-contract-address:"$RLN_RELAY_CONTRACT_ADDRESS" \
--rln-relay-eth-client-address:"$SEPOLIA_HTTP_NODE_ADDRESS"
--rln-relay-eth-client-address:"$LINEA_SEPOLIA_HTTP_NODE_ADDRESS"
```

OR

@@ -53,9 +53,9 @@ If you are running the nwaku node within docker, follow [Step 2](../docker-quick

```bash
export WAKU_FLEET=<enrtree address of the fleet>
export SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Sepolia Node>
export RLN_RELAY_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4" # Replace this with any compatible implementation
docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.20.0 \
export LINEA_SEPOLIA_HTTP_NODE_ADDRESS=<HTTP RPC URL to a Linea Sepolia Node>
export RLN_RELAY_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6" # Replace this with any compatible implementation
docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.36.0 \
--dns-discovery:true \
--dns-discovery-url:"$WAKU_FLEET" \
--discv5-discovery \
@@ -63,7 +63,7 @@ docker run -i -t -p 60000:60000 -p 9000:9000/udp wakuorg/nwaku:v0.20.0 \
--rln-relay:true \
--rln-relay-dynamic:true \
--rln-relay-eth-contract-address:"$RLN_RELAY_CONTRACT_ADDRESS" \
--rln-relay-eth-client-address:"$SEPOLIA_HTTP_NODE_ADDRESS"
--rln-relay-eth-client-address:"$LINEA_SEPOLIA_HTTP_NODE_ADDRESS"
```

> Note: You can choose to keep connections to other nodes alive by adding the `--keep-alive` flag.

@@ -74,7 +74,7 @@ runtime arguments -
1. `--rln-relay`: Allows waku-rln-relay to be mounted into the setup of the nwaku node
2. `--rln-relay-dynamic`: Enables waku-rln-relay to connect to an ethereum node to fetch the membership group
3. `--rln-relay-eth-contract-address`: The contract address of an RLN membership group
4. `--rln-relay-eth-client-address`: The HTTP url to a Sepolia ethereum node
4. `--rln-relay-eth-client-address`: The HTTP URL to a Linea Sepolia ethereum node

You should now have nwaku running, with RLN enabled!
@@ -1,7 +1,7 @@
# Spam-protected chat2 application with on-chain group management

This document is a tutorial on how to run the chat2 application in the spam-protected mode using the Waku-RLN-Relay protocol and with dynamic/on-chain group management.
In the on-chain/dynamic group management, the state of the group members i.e., their identity commitment keys is moderated via a membership smart contract deployed on the Sepolia network which is one of the Ethereum test-nets.
In the on-chain/dynamic group management, the state of the group members, i.e., their identity commitment keys, is moderated via a membership smart contract deployed on the Linea Sepolia network, which is one of the test-nets.
Members can be dynamically added to the group and the group size can grow up to 2^20 members.
This differs from the prior test scenarios in which the RLN group was static and the set of members' keys was hardcoded and fixed.
@@ -45,7 +45,7 @@ Run the following command to set up your chat2 client.
--content-topic:/toy-chat/3/mingde/proto \
--rln-relay:true \
--rln-relay-dynamic:true \
--rln-relay-eth-contract-address:0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 \
--rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \
--rln-relay-cred-path:xxx/xx/rlnKeystore.json \
--rln-relay-cred-password:xxxx \
--rln-relay-eth-client-address:xxxx \

@@ -58,11 +58,11 @@ In this command
- the `rln-relay` flag is set to `true` to enable the Waku-RLN-Relay protocol for spam protection.
- the `--rln-relay-dynamic` flag is set to `true` to enable the on-chain mode of Waku-RLN-Relay protocol with dynamic group management.
- the `--rln-relay-eth-contract-address` option gets the address of the membership contract.
The current address of the contract is `0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4`.
You may check the state of the contract on the [Sepolia testnet](https://sepolia.etherscan.io/address/0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4).
The current address of the contract is `0xB9cd878C90E49F797B4431fBF4fb333108CB90e6`.
You may check the state of the contract on the [Linea Sepolia testnet](https://sepolia.lineascan.build/address/0xB9cd878C90E49F797B4431fBF4fb333108CB90e6).
- the `--rln-relay-cred-path` option denotes the path to the keystore file described above
- the `--rln-relay-cred-password` option denotes the password to the keystore
- the `rln-relay-eth-client-address` is the WebSocket address of the hosted node on the Sepolia testnet.
- the `rln-relay-eth-client-address` is the WebSocket address of the hosted node on the Linea Sepolia testnet.
You need to replace the `xxxx` with the actual node's address.

For `rln-relay-eth-client-address`, if you do not know how to obtain it, you may use the following tutorial on the [prerequisites of running on-chain spam-protected chat2](./pre-requisites-of-running-on-chain-spam-protected-chat2.md).
@@ -166,7 +166,7 @@ You can check this fact by looking at `Bob`'s console, where `message3` is missi

**Alice**
```bash
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=1
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=1
```

```

@@ -209,7 +209,7 @@ your rln identity commitment key is: bd093cbf14fb933d53f596c33f98b3df83b7e9f7a19

**Bob**
```bash
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-index:1 --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=2
./build/chat2 --fleet:test --content-topic:/toy-chat/3/mingde/proto --rln-relay:true --rln-relay-dynamic:true --rln-relay-eth-contract-address:0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 --rln-relay-cred-path:rlnKeystore.json --rln-relay-cred-index:1 --rln-relay-cred-password:password --rln-relay-eth-client-address:https://sepolia.infura.io/v3/12345678901234567890123456789012 --ports-shift=2
```

```
@@ -21,9 +21,9 @@ It is meant to be used to generate and persist a set of valid RLN credentials to
2. Define the arguments you wish to use

```bash
export RPC_URL="https://sepolia.infura.io/v3/..."
export RPC_URL="https://linea-sepolia.infura.io/v3/..."
export PRIVATE_KEY="0x..."
export RLN_CONTRACT_ADDRESS="0xF471d71E9b1455bBF4b85d475afb9BB0954A29c4"
export RLN_CONTRACT_ADDRESS="0xB9cd878C90E49F797B4431fBF4fb333108CB90e6"
export RLN_CREDENTIAL_PATH="rlnKeystore.json"
export RLN_CREDENTIAL_PASSWORD="xxx"
```
@@ -77,7 +77,7 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
let node = builder.build().tryGet()

node.mountMetadata(clusterId).expect("failed to mount waku metadata protocol")
waitFor node.mountFilterClient()
await node.mountFilterClient()

await node.start()
@@ -24,29 +24,29 @@ proc setup*(): Waku =

var conf = confRes.get()

let twnClusterConf = ClusterConf.TheWakuNetworkConf()
let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
if len(conf.shards) != 0:
conf.pubsubTopics = conf.shards.mapIt(twnClusterConf.pubsubTopics[it.uint16])
conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16])
else:
conf.pubsubTopics = twnClusterConf.pubsubTopics
conf.pubsubTopics = twnNetworkConf.pubsubTopics

# Override configuration
conf.maxMessageSize = twnClusterConf.maxMessageSize
conf.clusterId = twnClusterConf.clusterId
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
conf.discv5Discovery = twnClusterConf.discv5Discovery
conf.maxMessageSize = twnNetworkConf.maxMessageSize
conf.clusterId = twnNetworkConf.clusterId
conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
conf.discv5Discovery = twnNetworkConf.discv5Discovery
conf.discv5BootstrapNodes =
conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
conf.discv5BootstrapNodes & twnNetworkConf.discv5BootstrapNodes
conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit

# Only set rlnRelay to true if relay is configured
if conf.relay:
conf.rlnRelay = twnClusterConf.rlnRelay
conf.rlnRelay = twnNetworkConf.rlnRelay

debug "Starting node"
var waku = Waku.new(conf).valueOr:
var waku = (waitFor Waku.new(conf)).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
@@ -16,17 +16,17 @@ import
waku/waku_core/subscription/push_handler,
waku/waku_relay,
./events/json_message_event,
./waku_thread/waku_thread,
./waku_thread/inter_thread_communication/requests/node_lifecycle_request,
./waku_thread/inter_thread_communication/requests/peer_manager_request,
./waku_thread/inter_thread_communication/requests/protocols/relay_request,
./waku_thread/inter_thread_communication/requests/protocols/store_request,
./waku_thread/inter_thread_communication/requests/protocols/lightpush_request,
./waku_thread/inter_thread_communication/requests/protocols/filter_request,
./waku_thread/inter_thread_communication/requests/debug_node_request,
./waku_thread/inter_thread_communication/requests/discovery_request,
./waku_thread/inter_thread_communication/requests/ping_request,
./waku_thread/inter_thread_communication/waku_thread_request,
./waku_context,
./waku_thread_requests/requests/node_lifecycle_request,
./waku_thread_requests/requests/peer_manager_request,
./waku_thread_requests/requests/protocols/relay_request,
./waku_thread_requests/requests/protocols/store_request,
./waku_thread_requests/requests/protocols/lightpush_request,
./waku_thread_requests/requests/protocols/filter_request,
./waku_thread_requests/requests/debug_node_request,
./waku_thread_requests/requests/discovery_request,
./waku_thread_requests/requests/ping_request,
./waku_thread_requests/waku_thread_request,
./alloc,
./ffi_types,
../waku/factory/app_callbacks
@@ -54,7 +54,7 @@ proc handleRequest(
callback: WakuCallBack,
userData: pointer,
): cint =
waku_thread.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
waku_context.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR

@@ -111,7 +111,7 @@ proc waku_new(
return nil

## Create the Waku thread that will keep waiting for req from the main thread.
var ctx = waku_thread.createWakuContext().valueOr:
var ctx = waku_context.createWakuContext().valueOr:
let msg = "Error in createWakuContext: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil

@@ -145,7 +145,7 @@ proc waku_destroy(
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)

waku_thread.destroyWakuContext(ctx).isOkOr:
waku_context.destroyWakuContext(ctx).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
@@ -9,9 +9,9 @@ import
waku/node/peer_manager,
waku/waku_relay/[protocol, topic_health],
waku/waku_core/[topics/pubsub_topic, message],
./inter_thread_communication/[waku_thread_request, requests/debug_node_request],
../ffi_types,
../events/[
./waku_thread_requests/[waku_thread_request, requests/debug_node_request],
./ffi_types,
./events/[
json_message_event, json_topic_health_change_event, json_connection_change_event,
json_waku_not_responding_event,
]

@@ -118,8 +118,12 @@ proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} =
## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs.

let watchdogRun = proc(ctx: ptr WakuContext) {.async.} =
const WatchdogStartDelay = 10.seconds
const WatchdogTimeinterval = 1.seconds
const WakuNotRespondingTimeout = 3.seconds

# Give time for the node to be created and up before sending watchdog requests
await sleepAsync(WatchdogStartDelay)
while true:
await sleepAsync(WatchdogTimeinterval)

@@ -166,13 +170,13 @@ proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} =
error "waku thread could not receive a request"
continue

## Handle the request
asyncSpawn WakuThreadRequest.process(request, addr waku)

let fireRes = ctx.reqReceivedSignal.fireSync()
if fireRes.isErr():
error "could not fireSync back to requester thread", error = fireRes.error

## Handle the request
asyncSpawn WakuThreadRequest.process(request, addr waku)

waitFor wakuRun(ctx)

proc createWakuContext*(): Result[ptr WakuContext, string] =
@@ -8,9 +8,9 @@ import
libp2p/peerid,
metrics
import
../../../../waku/factory/waku,
../../../../waku/node/waku_node,
../../../../waku/node/health_monitor
../../../waku/factory/waku,
../../../waku/node/waku_node,
../../../waku/node/health_monitor

type DebugNodeMsgType* = enum
RETRIEVE_LISTENING_ADDRESSES
@@ -1,12 +1,12 @@
import std/json
import chronos, chronicles, results, strutils, libp2p/multiaddress
import
../../../../waku/factory/waku,
../../../../waku/discovery/waku_dnsdisc,
../../../../waku/discovery/waku_discv5,
../../../../waku/waku_core/peers,
../../../../waku/node/waku_node,
../../../alloc
../../../waku/factory/waku,
../../../waku/discovery/waku_dnsdisc,
../../../waku/discovery/waku_discv5,
../../../waku/waku_core/peers,
../../../waku/node/waku_node,
../../alloc

type DiscoveryMsgType* = enum
GET_BOOTSTRAP_NODES
@@ -2,14 +2,14 @@ import std/[options, json, strutils, net]
import chronos, chronicles, results, confutils, confutils/std/net

import
../../../../waku/node/peer_manager/peer_manager,
../../../../waku/factory/external_config,
../../../../waku/factory/waku,
../../../../waku/factory/node_factory,
../../../../waku/factory/networks_config,
../../../../waku/factory/app_callbacks,
../../../../waku/waku_api/rest/builder,
../../../alloc
../../../waku/node/peer_manager/peer_manager,
../../../waku/factory/external_config,
../../../waku/factory/waku,
../../../waku/factory/node_factory,
../../../waku/factory/networks_config,
../../../waku/factory/app_callbacks,
../../../waku/waku_api/rest/builder,
../../alloc

type NodeLifecycleMsgType* = enum
CREATE_NODE
@@ -79,7 +79,7 @@ proc createWaku(

wakuConf.restServerConf = none(RestServerConf) ## don't want REST in libwaku

let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr:
let wakuRes = (await Waku.new(wakuConf, appCallbacks)).valueOr:
error "waku initialization failed", error = error
return err("Failed setting up Waku: " & $error)
@@ -1,10 +1,10 @@
import std/[sequtils, strutils]
import chronicles, chronos, results, options, json
import
../../../../waku/factory/waku,
../../../../waku/node/waku_node,
../../../alloc,
../../../../waku/node/peer_manager
../../../waku/factory/waku,
../../../waku/node/waku_node,
../../alloc,
../../../waku/node/peer_manager

type PeerManagementMsgType* {.pure.} = enum
CONNECT_TO
@@ -56,22 +56,6 @@ proc destroyShared(self: ptr PeerManagementRequest) =

deallocShared(self)

proc connectTo(
node: WakuNode, peerMultiAddr: string, dialTimeout: Duration
): Result[void, string] =
let peers = (peerMultiAddr).split(",").mapIt(strip(it))

# TODO: the dialTimeout is not being used at all!
let connectFut = node.connectToNodes(peers, source = "static")
while not connectFut.finished():
poll()

if not connectFut.completed():
let msg = "Timeout expired."
return err(msg)

return ok()

proc process*(
self: ptr PeerManagementRequest, waku: Waku
): Future[Result[string, string]] {.async.} =
@@ -80,10 +64,9 @@ proc process*(

case self.operation
of CONNECT_TO:
let ret = waku.node.connectTo($self[].peerMultiAddr, self[].dialTimeout)
if ret.isErr():
error "CONNECT_TO failed", error = ret.error
return err(ret.error)
let peers = ($self[].peerMultiAddr).split(",").mapIt(strip(it))
await waku.node.connectToNodes(peers, source = "static")
return ok("")
of GET_ALL_PEER_IDS:
## returns a comma-separated string of peerIDs
let peerIDs =

@@ -122,14 +105,7 @@ proc process*(
await waku.node.peerManager.disconnectNode(peerId)
return ok("")
of DISCONNECT_ALL_PEERS:
let connectedPeers = waku.node.peerManager.switch.peerStore.peers().filterIt(
it.connectedness == Connected
)

var futs: seq[Future[void]]
for peer in connectedPeers:
futs.add(waku.node.peerManager.disconnectNode(peer))
await allFutures(futs)
await waku.node.peerManager.disconnectAllPeers()
return ok("")
of DIAL_PEER:
let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr:
@@ -1,7 +1,7 @@
import std/[json, strutils]
import chronos, results
import libp2p/[protocols/ping, switch, multiaddress, multicodec]
import ../../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../../alloc
import ../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../alloc

type PingRequest* = object
peerAddr: cstring
@@ -1,16 +1,16 @@
import options, std/[strutils, sequtils]
import chronicles, chronos, results
import
../../../../../waku/waku_filter_v2/client,
../../../../../waku/waku_core/message/message,
../../../../../waku/factory/waku,
../../../../../waku/waku_filter_v2/common,
../../../../../waku/waku_core/subscription/push_handler,
../../../../../waku/node/peer_manager/peer_manager,
../../../../../waku/node/waku_node,
../../../../../waku/waku_core/topics/pubsub_topic,
../../../../../waku/waku_core/topics/content_topic,
../../../../alloc
../../../../waku/waku_filter_v2/client,
../../../../waku/waku_core/message/message,
../../../../waku/factory/waku,
../../../../waku/waku_filter_v2/common,
../../../../waku/waku_core/subscription/push_handler,
../../../../waku/node/peer_manager/peer_manager,
../../../../waku/node/waku_node,
../../../../waku/waku_core/topics/pubsub_topic,
../../../../waku/waku_core/topics/content_topic,
../../../alloc

type FilterMsgType* = enum
SUBSCRIBE
@@ -1,16 +1,16 @@
import options
import chronicles, chronos, results
import
../../../../../waku/waku_core/message/message,
../../../../../waku/waku_core/codecs,
../../../../../waku/factory/waku,
../../../../../waku/waku_core/message,
../../../../../waku/waku_core/time, # Timestamp
../../../../../waku/waku_core/topics/pubsub_topic,
../../../../../waku/waku_lightpush_legacy/client,
../../../../../waku/waku_lightpush_legacy/common,
../../../../../waku/node/peer_manager/peer_manager,
../../../../alloc
../../../../waku/waku_core/message/message,
../../../../waku/waku_core/codecs,
../../../../waku/factory/waku,
../../../../waku/waku_core/message,
../../../../waku/waku_core/time, # Timestamp
../../../../waku/waku_core/topics/pubsub_topic,
../../../../waku/waku_lightpush_legacy/client,
../../../../waku/waku_lightpush_legacy/common,
../../../../waku/node/peer_manager/peer_manager,
../../../alloc

type LightpushMsgType* = enum
PUBLISH
@@ -1,16 +1,16 @@
import std/[net, sequtils, strutils]
import chronicles, chronos, stew/byteutils, results
import
../../../../../waku/waku_core/message/message,
../../../../../waku/factory/[external_config, validator_signed, waku],
../../../../../waku/waku_node,
../../../../../waku/waku_core/message,
../../../../../waku/waku_core/time, # Timestamp
../../../../../waku/waku_core/topics/pubsub_topic,
../../../../../waku/waku_core/topics,
../../../../../waku/waku_relay/protocol,
../../../../../waku/node/peer_manager,
../../../../alloc
../../../../waku/waku_core/message/message,
../../../../waku/factory/[external_config, validator_signed, waku],
../../../../waku/waku_node,
../../../../waku/waku_core/message,
../../../../waku/waku_core/time, # Timestamp
../../../../waku/waku_core/topics/pubsub_topic,
../../../../waku/waku_core/topics,
../../../../waku/waku_relay/protocol,
../../../../waku/node/peer_manager,
../../../alloc

type RelayMsgType* = enum
SUBSCRIBE
@@ -1,15 +1,15 @@
import std/[json, sugar, strutils, options]
import chronos, chronicles, results, stew/byteutils
import
../../../../../waku/factory/waku,
../../../../alloc,
../../../../utils,
../../../../../waku/waku_core/peers,
../../../../../waku/waku_core/time,
../../../../../waku/waku_core/message/digest,
../../../../../waku/waku_store/common,
../../../../../waku/waku_store/client,
../../../../../waku/common/paging
../../../../waku/factory/waku,
../../../alloc,
../../../utils,
../../../../waku/waku_core/peers,
../../../../waku/waku_core/time,
../../../../waku/waku_core/message/digest,
../../../../waku/waku_store/common,
../../../../waku/waku_store/client,
../../../../waku/common/paging

type StoreReqType* = enum
REMOTE_QUERY ## to perform a query to another Store node
@@ -5,8 +5,8 @@
import std/json, results
import chronos, chronos/threadsync
import
../../../waku/factory/waku,
../../ffi_types,
../../waku/factory/waku,
../ffi_types,
./requests/node_lifecycle_request,
./requests/peer_manager_request,
./requests/protocols/relay_request,
File diff suppressed because it is too large
@@ -38,7 +38,8 @@ when os == "Linux" and
#./waku_archive_legacy/test_driver_postgres_query,
#./waku_archive_legacy/test_driver_postgres,
./factory/test_node_factory,
./wakunode_rest/test_rest_store
./wakunode_rest/test_rest_store,
./wakunode_rest/test_all

# Waku store test suite
import
@@ -91,21 +92,7 @@ import
# Waku Keystore test suite
import ./test_waku_keystore_keyfile, ./test_waku_keystore

## Wakunode Rest API test suite
import
./wakunode_rest/test_rest_debug,
./wakunode_rest/test_rest_debug_serdes,
./wakunode_rest/test_rest_relay,
./wakunode_rest/test_rest_relay_serdes,
./wakunode_rest/test_rest_serdes,
./wakunode_rest/test_rest_filter,
./wakunode_rest/test_rest_lightpush,
./wakunode_rest/test_rest_lightpush_legacy,
./wakunode_rest/test_rest_admin,
./wakunode_rest/test_rest_cors,
./wakunode_rest/test_rest_health

import ./waku_rln_relay/test_all

# Node Factory
import ./factory/[test_external_config, test_node_factory, test_waku_conf]
import ./factory/test_all
@@ -1,3 +1,5 @@
{.used.}

import
./test_base64_codec,
./test_confutils_envvar,
tests/factory/test_all.nim (new file, 3 lines)
@@ -0,0 +1,3 @@
{.used.}

import ./test_external_config, ./test_node_factory, ./test_waku_conf
@@ -17,10 +17,46 @@ import
../../waku/common/logging,
../../waku/common/utils/parse_size_units

suite "Waku config - apply preset":
test "Default preset is TWN":
suite "Waku external config - default values":
test "Default sharding value":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
let defaultShardingMode = AutoSharding
let defaultNumShardsInCluster = 1.uint16
let defaultSubscribeShards = @[0.uint16]

## Given
let preConfig = defaultWakuNodeConf().get()

## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error

## Then
let conf = res.get()
check conf.shardingConf.kind == defaultShardingMode
check conf.shardingConf.numShardsInCluster == defaultNumShardsInCluster
check conf.subscribeShards == defaultSubscribeShards

test "Default shards value in static sharding":
## Setup
let defaultSubscribeShards: seq[uint16] = @[]

## Given
var preConfig = defaultWakuNodeConf().get()
preConfig.numShardsInNetwork = 0.uint16

## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error

## Then
let conf = res.get()
check conf.subscribeShards == defaultSubscribeShards

suite "Waku external config - apply preset":
test "Preset is TWN":
## Setup
let expectedConf = NetworkConf.TheWakuNetworkConf()

## Given
let preConfig = WakuNodeConf(
@@ -48,7 +84,9 @@ suite "Waku config - apply preset":
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
check conf.shardingConf.kind == expectedConf.shardingConf.kind
check conf.shardingConf.numShardsInCluster ==
expectedConf.shardingConf.numShardsInCluster
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()

@@ -56,7 +94,7 @@ suite "Waku config - apply preset":

test "Subscribes to all valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
let expectedConf = NetworkConf.TheWakuNetworkConf()

## Given
let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]

@@ -68,11 +106,11 @@ suite "Waku config - apply preset":

## Then
let conf = res.get()
check conf.shards.len == expectedConf.numShardsInNetwork.int
check conf.subscribeShards.len == expectedConf.shardingConf.numShardsInCluster.int

test "Subscribes to some valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
let expectedConf = NetworkConf.TheWakuNetworkConf()

## Given
let shards: seq[uint16] = @[0, 4, 7]

@@ -84,9 +122,9 @@ suite "Waku config - apply preset":

## Then
let conf = resConf.get()
assert conf.shards.len() == shards.len()
assert conf.subscribeShards.len() == shards.len()
for index, shard in shards:
assert shard in conf.shards
assert shard in conf.subscribeShards

test "Subscribes to invalid shards in twn":
## Setup

@@ -103,7 +141,7 @@ suite "Waku config - apply preset":

test "Apply TWN preset when cluster id = 1":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
let expectedConf = NetworkConf.TheWakuNetworkConf()

## Given
let preConfig = WakuNodeConf(
@@ -131,13 +169,15 @@ suite "Waku config - apply preset":
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
check conf.shardingConf.kind == expectedConf.shardingConf.kind
check conf.shardingConf.numShardsInCluster ==
expectedConf.shardingConf.numShardsInCluster
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes

suite "Waku config - node key":
suite "Waku external config - node key":
test "Passed node key is used":
## Setup
let nodeKeyStr =

@@ -158,13 +198,13 @@ suite "Waku config - node key":
assert utils.toHex(resKey.getRawBytes().get()) ==
utils.toHex(nodekey.getRawBytes().get())

suite "Waku config - Shards":
suite "Waku external config - Shards":
test "Shards are valid":
## Setup

## Given
let shards: seq[uint16] = @[0, 2, 4]
let numShardsInNetwork = 5.uint32
let numShardsInNetwork = 5.uint16
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)

@@ -183,7 +223,7 @@ suite "Waku config - Shards":

## Given
let shards: seq[uint16] = @[0, 2, 5]
let numShardsInNetwork = 5.uint32
let numShardsInNetwork = 5.uint16
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)

@@ -198,7 +238,7 @@ suite "Waku config - Shards":
## Setup

## Given
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=0"])

## When
let res = wakuNodeConf.toWakuConf()

@@ -207,3 +247,15 @@ suite "Waku config - Shards":
let wakuConf = res.get()
let vRes = wakuConf.validate()
assert vRes.isOk(), $vRes.error

test "Imvalid shard is passed without num shards":
## Setup

## Given
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])

## When
let res = wakuNodeConf.toWakuConf()

## Then
assert res.isErr(), "Invalid shard was accepted"
@@ -9,10 +9,10 @@ import
waku/factory/conf_builder/conf_builder

suite "Node Factory":
test "Set up a node based on default configurations":
asynctest "Set up a node based on default configurations":
let conf = defaultTestWakuConf()

let node = setupNode(conf, relay = Relay.new()).valueOr:
let node = (await setupNode(conf, relay = Relay.new())).valueOr:
raiseAssert error

check:

@@ -23,13 +23,13 @@ suite "Node Factory":
not node.wakuStoreClient.isNil()
not node.wakuRendezvous.isNil()

test "Set up a node with Store enabled":
asynctest "Set up a node with Store enabled":
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.storeServiceConf.withEnabled(true)
confBuilder.storeServiceConf.withDbUrl("sqlite://store.sqlite3")
let conf = confBuilder.build().value

let node = setupNode(conf, relay = Relay.new()).valueOr:
let node = (await setupNode(conf, relay = Relay.new())).valueOr:
raiseAssert error

check:

@@ -37,28 +37,28 @@ suite "Node Factory":
not node.wakuStore.isNil()
not node.wakuArchive.isNil()

test "Set up a node with Filter enabled":
asynctest "Set up a node with Filter enabled":
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.filterServiceConf.withEnabled(true)
let conf = confBuilder.build().value

let node = setupNode(conf, relay = Relay.new()).valueOr:
let node = (await setupNode(conf, relay = Relay.new())).valueOr:
raiseAssert error

check:
not node.isNil()
not node.wakuFilter.isNil()

test "Start a node based on default configurations":
asynctest "Start a node based on default configurations":
let conf = defaultTestWakuConf()

let node = setupNode(conf, relay = Relay.new()).valueOr:
let node = (await setupNode(conf, relay = Relay.new())).valueOr:
raiseAssert error

assert not node.isNil(), "Node can't be nil"

let startRes = catch:
(waitFor startNode(node, conf))
(await startNode(node, conf))

assert not startRes.isErr(), "Exception starting node"
assert startRes.get().isOk(), "Error starting node " & startRes.get().error

@@ -67,4 +67,4 @@ test "Start a node based on default configurations":
node.started == true

## Cleanup
waitFor node.stop()
await node.stop()
@@ -16,7 +16,7 @@ import
suite "Waku Conf - build with cluster conf":
test "Cluster Conf is passed and relay is enabled":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
let networkConf = NetworkConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.discv5Conf.withUdpPort(9000)
builder.withRelayServiceRatio("50:50")

@@ -25,7 +25,7 @@ suite "Waku Conf - build with cluster conf":

## Given
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
builder.withClusterConf(clusterConf)
builder.withNetworkConf(networkConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")

@@ -37,27 +37,29 @@ suite "Waku Conf - build with cluster conf":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.clusterId == networkConf.clusterId
check conf.shardingConf.kind == networkConf.shardingConf.kind
check conf.shardingConf.numShardsInCluster ==
networkConf.shardingConf.numShardsInCluster
check conf.subscribeShards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes

if clusterConf.rlnRelay:
if networkConf.rlnRelay:
assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled"

let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress.string ==
clusterConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
networkConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
check rlnRelayConf.chainId == networkConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
test "Cluster Conf is passed, but relay is disabled":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
builder.withRelayServiceRatio("50:50")
|
||||
builder.discv5Conf.withUdpPort(9000)
|
||||
@ -66,7 +68,7 @@ suite "Waku Conf - build with cluster conf":
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withRelay(false)
|
||||
|
||||
## When
|
||||
@ -77,18 +79,20 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == expectedShards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == expectedShards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
|
||||
assert conf.rlnRelayConf.isNone
|
||||
|
||||
test "Cluster Conf is passed, but rln relay is disabled":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
|
||||
let # Mount all shards in network
|
||||
@ -96,7 +100,7 @@ suite "Waku Conf - build with cluster conf":
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.rlnRelayConf.withEnabled(false)
|
||||
|
||||
## When
|
||||
@ -107,24 +111,26 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == expectedShards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == expectedShards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
assert conf.rlnRelayConf.isNone
|
||||
|
||||
test "Cluster Conf is passed and valid shards are specified":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
let shards = @[2.uint16, 3.uint16]
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withShards(shards)
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withSubscribeShards(shards)
|
||||
|
||||
## When
|
||||
let resConf = builder.build()
|
||||
@ -134,23 +140,25 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == shards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == shards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
|
||||
test "Cluster Conf is passed and invalid shards are specified":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
let shards = @[2.uint16, 10.uint16]
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withShards(shards)
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withSubscribeShards(shards)
|
||||
|
||||
## When
|
||||
let resConf = builder.build()
|
||||
@ -160,7 +168,7 @@ suite "Waku Conf - build with cluster conf":
|
||||
|
||||
test "Cluster Conf is passed and RLN contract is **not** overridden":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
|
||||
@ -170,7 +178,7 @@ suite "Waku Conf - build with cluster conf":
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthContractAddress(contractAddress)
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withRelay(true)
|
||||
builder.rlnRelayConf.withTreePath("/tmp/test")
|
||||
|
||||
@ -182,24 +190,26 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == expectedShards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == expectedShards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.isSome == clusterConf.discv5Discovery
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.isSome == networkConf.discv5Discovery
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
|
||||
if clusterConf.rlnRelay:
|
||||
if networkConf.rlnRelay:
|
||||
assert conf.rlnRelayConf.isSome
|
||||
|
||||
let rlnRelayConf = conf.rlnRelayConf.get()
|
||||
check rlnRelayConf.ethContractAddress.string ==
|
||||
clusterConf.rlnRelayEthContractAddress
|
||||
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
|
||||
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
|
||||
networkConf.rlnRelayEthContractAddress
|
||||
check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
|
||||
check rlnRelayConf.chainId == networkConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
|
||||
|
||||
suite "Waku Conf - node key":
|
||||
test "Node key is generated":
|
||||
@@ -264,10 +274,25 @@ suite "Waku Conf - extMultiaddrs":
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check multiaddrs.len == conf.networkConf.extMultiAddrs.len
let resMultiaddrs = conf.networkConf.extMultiAddrs.map(
check multiaddrs.len == conf.endpointConf.extMultiAddrs.len
let resMultiaddrs = conf.endpointConf.extMultiAddrs.map(
proc(m: MultiAddress): string =
$m
)
for m in multiaddrs:
check m in resMultiaddrs

suite "Waku Conf Builder - rate limits":
test "Valid rate limit passed via string":
## Setup
var builder = RateLimitConfBuilder.init()

## Given
let rateLimitsStr = @["lightpush:2/2ms", "10/2m", "store: 3/3s"]
builder.withRateLimits(rateLimitsStr)

## When
let res = builder.build()

## Then
assert res.isOk(), $res.error

@@ -1 +1,3 @@
{.used.}

import ./test_rpc_codec, ./test_poc_eligibility, ./test_poc_reputation

@@ -1,3 +1,5 @@
{.used.}

import
./test_wakunode_filter,
./test_wakunode_legacy_lightpush,

@@ -76,7 +76,7 @@ suite "Waku Lightpush - End To End":
# Then the message is not relayed but not due to RLN
assert publishResponse.isErr(), "We expect an error response"

assert (publishResponse.error.code == NO_PEERS_TO_RELAY),
assert (publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY),
"incorrect error response"

suite "Waku LightPush Validation Tests":
@@ -93,7 +93,7 @@ suite "Waku Lightpush - End To End":

check:
publishResponse.isErr()
publishResponse.error.code == INVALID_MESSAGE_ERROR
publishResponse.error.code == LightPushErrorCode.INVALID_MESSAGE
publishResponse.error.desc ==
some(fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes")

@@ -168,7 +168,7 @@ suite "RLN Proofs as a Lightpush Service":

# Then the message is not relayed but not due to RLN
assert publishResponse.isErr(), "We expect an error response"
check publishResponse.error.code == NO_PEERS_TO_RELAY
check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY

suite "Waku Lightpush message delivery":
asyncTest "lightpush message flow succeed":

@ -420,7 +420,7 @@ procSuite "Peer Manager":
|
||||
parseIpAddress("0.0.0.0"),
|
||||
port,
|
||||
clusterId = 3,
|
||||
shards = @[uint16(0)],
|
||||
subscribeShards = @[uint16(0)],
|
||||
)
|
||||
|
||||
# same network
|
||||
@ -429,14 +429,14 @@ procSuite "Peer Manager":
|
||||
parseIpAddress("0.0.0.0"),
|
||||
port,
|
||||
clusterId = 4,
|
||||
shards = @[uint16(0)],
|
||||
subscribeShards = @[uint16(0)],
|
||||
)
|
||||
node3 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
parseIpAddress("0.0.0.0"),
|
||||
port,
|
||||
clusterId = 4,
|
||||
shards = @[uint16(0)],
|
||||
subscribeShards = @[uint16(0)],
|
||||
)
|
||||
|
||||
node1.mountMetadata(3).expect("Mounted Waku Metadata")
|
||||
@ -567,6 +567,9 @@ procSuite "Peer Manager":
|
||||
# Connect to relay peers
|
||||
await nodes[0].peerManager.connectToRelayPeers()
|
||||
|
||||
# wait for the connections to settle
|
||||
await sleepAsync(chronos.milliseconds(500))
|
||||
|
||||
check:
|
||||
# Peerstore track all three peers
|
||||
nodes[0].peerManager.switch.peerStore.peers().len == 3
|
||||
@ -637,6 +640,9 @@ procSuite "Peer Manager":
|
||||
# Connect to relay peers
|
||||
await nodes[0].peerManager.manageRelayPeers()
|
||||
|
||||
# wait for the connections to settle
|
||||
await sleepAsync(chronos.milliseconds(500))
|
||||
|
||||
check:
|
||||
# Peerstore track all three peers
|
||||
nodes[0].peerManager.switch.peerStore.peers().len == 3
|
||||
|
||||
@@ -44,7 +44,10 @@ suite "Waku Keepalive":

await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

node1.startKeepalive(2.seconds)
let healthMonitor = NodeHealthMonitor()
healthMonitor.setNodeToHealthMonitor(node1)
healthMonitor.startKeepalive(2.seconds).isOkOr:
assert false, "Failed to start keepalive"

check:
(await completionFut.withTimeout(5.seconds)) == true

@ -18,8 +18,8 @@ suite "Waku NetConfig":
|
||||
let wakuFlags = defaultTestWakuFlags()
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = none(IpAddress),
|
||||
extPort = none(Port),
|
||||
extMultiAddrs = @[],
|
||||
@ -46,7 +46,8 @@ suite "Waku NetConfig":
|
||||
let conf = defaultTestWakuConf()
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
@ -57,7 +58,9 @@ suite "Waku NetConfig":
|
||||
netConfig.announcedAddresses.len == 1 # Only bind address should be present
|
||||
netConfig.announcedAddresses[0] ==
|
||||
formatListenAddress(
|
||||
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.networkConf.p2pTcpPort)
|
||||
ip4TcpEndPoint(
|
||||
conf.endpointConf.p2pListenAddress, conf.endpointConf.p2pTcpPort
|
||||
)
|
||||
)
|
||||
|
||||
asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided":
|
||||
@ -67,8 +70,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -88,8 +91,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -110,8 +113,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)]
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
)
|
||||
|
||||
@ -131,8 +134,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
@ -152,8 +155,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
var netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
@ -165,8 +168,9 @@ suite "Waku NetConfig":
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (
|
||||
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.webSocketConf.get().port) &
|
||||
wsFlag(wssEnabled)
|
||||
ip4TcpEndPoint(
|
||||
conf.endpointConf.p2pListenAddress, conf.webSocketConf.get().port
|
||||
) & wsFlag(wssEnabled)
|
||||
)
|
||||
|
||||
## Now try the same for the case of wssEnabled = true
|
||||
@ -174,8 +178,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = true
|
||||
|
||||
netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
@ -187,8 +191,9 @@ suite "Waku NetConfig":
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (
|
||||
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.websocketConf.get().port) &
|
||||
wsFlag(wssEnabled)
|
||||
ip4TcpEndPoint(
|
||||
conf.endpointConf.p2pListenAddress, conf.websocketConf.get().port
|
||||
) & wsFlag(wssEnabled)
|
||||
)
|
||||
|
||||
asyncTest "Announced WebSocket address contains external IP if provided":
|
||||
@ -199,8 +204,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
wsEnabled = true,
|
||||
@ -224,8 +229,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort),
|
||||
wsEnabled = true,
|
||||
@ -252,8 +257,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
@ -277,7 +282,8 @@ suite "Waku NetConfig":
|
||||
let conf = defaultTestWakuConf()
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
@ -285,8 +291,8 @@ suite "Waku NetConfig":
|
||||
let netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.enrIp.get() == conf.networkConf.p2pListenAddress
|
||||
netConfig.enrPort.get() == conf.networkConf.p2pTcpPort
|
||||
netConfig.enrIp.get() == conf.endpointConf.p2pListenAddress
|
||||
netConfig.enrPort.get() == conf.endpointConf.p2pTcpPort
|
||||
|
||||
asyncTest "ENR is set with extIp/Port if provided":
|
||||
let
|
||||
@ -295,8 +301,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -316,8 +322,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -339,8 +345,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
|
||||
|
||||
var netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
wsEnabled = wsEnabled,
|
||||
)
|
||||
@ -358,8 +364,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
|
||||
|
||||
netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
@ -380,8 +386,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)]
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
extMultiAddrsOnly = true,
|
||||
)
|
||||
|
||||
@ -37,7 +37,7 @@ proc defaultTestWakuConfBuilder*(): WakuConfBuilder =
|
||||
builder.withRelayServiceRatio("60:40")
|
||||
builder.withMaxMessageSize("1024 KiB")
|
||||
builder.withClusterId(DefaultClusterId)
|
||||
builder.withShards(@[DefaultShardId])
|
||||
builder.withSubscribeShards(@[DefaultShardId])
|
||||
builder.withRelay(true)
|
||||
builder.withRendezvous(true)
|
||||
builder.storeServiceConf.withDbMigration(false)
|
||||
@ -72,7 +72,7 @@ proc newTestWakuNode*(
|
||||
agentString = none(string),
|
||||
peerStoreCapacity = none(int),
|
||||
clusterId = DefaultClusterId,
|
||||
shards = @[DefaultShardId],
|
||||
subscribeShards = @[DefaultShardId],
|
||||
): WakuNode =
|
||||
var resolvedExtIp = extIp
|
||||
|
||||
@ -86,7 +86,7 @@ proc newTestWakuNode*(
|
||||
var conf = defaultTestWakuConf()
|
||||
|
||||
conf.clusterId = clusterId
|
||||
conf.shards = shards
|
||||
conf.subscribeShards = subscribeShards
|
||||
|
||||
if dns4DomainName.isSome() and extIp.isNone():
|
||||
# If there's an error resolving the IP, an exception is thrown and test fails
|
||||
@ -114,7 +114,7 @@ proc newTestWakuNode*(
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
enrBuilder.withWakuRelaySharding(
|
||||
RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
|
||||
RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards)
|
||||
).isOkOr:
|
||||
raise newException(Defect, "Invalid record: " & $error)
|
||||
|
||||
|
||||
@ -429,7 +429,7 @@ suite "Waku Discovery v5":
|
||||
let conf = confBuilder.build().valueOr:
|
||||
raiseAssert error
|
||||
|
||||
let waku0 = Waku.new(conf).valueOr:
|
||||
let waku0 = (await Waku.new(conf)).valueOr:
|
||||
raiseAssert error
|
||||
(waitFor startWaku(addr waku0)).isOkOr:
|
||||
raiseAssert error
|
||||
@ -444,7 +444,7 @@ suite "Waku Discovery v5":
|
||||
let conf1 = confBuilder.build().valueOr:
|
||||
raiseAssert error
|
||||
|
||||
let waku1 = Waku.new(conf1).valueOr:
|
||||
let waku1 = (await Waku.new(conf1)).valueOr:
|
||||
raiseAssert error
|
||||
(waitFor startWaku(addr waku1)).isOkOr:
|
||||
raiseAssert error
|
||||
@ -461,7 +461,7 @@ suite "Waku Discovery v5":
|
||||
let conf2 = confBuilder.build().valueOr:
|
||||
raiseAssert error
|
||||
|
||||
let waku2 = Waku.new(conf2).valueOr:
|
||||
let waku2 = (await Waku.new(conf2)).valueOr:
|
||||
raiseAssert error
|
||||
(waitFor startWaku(addr waku2)).isOkOr:
|
||||
raiseAssert error
|
||||
@ -492,7 +492,7 @@ suite "Waku Discovery v5":
|
||||
let conf = confBuilder.build().valueOr:
|
||||
raiseAssert error
|
||||
|
||||
let waku = Waku.new(conf).valueOr:
|
||||
let waku = (await Waku.new(conf)).valueOr:
|
||||
raiseAssert error
|
||||
|
||||
discard setupDiscoveryV5(
|
||||
@ -503,7 +503,7 @@ suite "Waku Discovery v5":
|
||||
waku.dynamicBootstrapNodes,
|
||||
waku.rng,
|
||||
waku.conf.nodeKey,
|
||||
waku.conf.networkConf.p2pListenAddress,
|
||||
waku.conf.endpointConf.p2pListenAddress,
|
||||
waku.conf.portsShift,
|
||||
)
|
||||
|
||||
@ -523,7 +523,7 @@ suite "Waku Discovery v5":
|
||||
let conf = confBuilder.build().valueOr:
|
||||
raiseAssert error
|
||||
|
||||
let waku = Waku.new(conf).valueOr:
|
||||
let waku = (await Waku.new(conf)).valueOr:
|
||||
raiseAssert error
|
||||
|
||||
discard setupDiscoveryV5(
|
||||
@ -534,7 +534,7 @@ suite "Waku Discovery v5":
|
||||
waku.dynamicBootstrapNodes,
|
||||
waku.rng,
|
||||
waku.conf.nodeKey,
|
||||
waku.conf.networkConf.p2pListenAddress,
|
||||
waku.conf.endpointConf.p2pListenAddress,
|
||||
waku.conf.portsShift,
|
||||
)
|
||||
|
||||
|
||||
@ -18,8 +18,10 @@ proc newTestWakuLightpushNode*(
|
||||
): Future[WakuLightPush] {.async.} =
|
||||
let
|
||||
peerManager = PeerManager.new(switch)
|
||||
wakuSharding = Sharding(clusterId: 1, shardCountGenZero: 8)
|
||||
proto = WakuLightPush.new(peerManager, rng, handler, wakuSharding, rateLimitSetting)
|
||||
wakuAutoSharding = Sharding(clusterId: 1, shardCountGenZero: 8)
|
||||
proto = WakuLightPush.new(
|
||||
peerManager, rng, handler, some(wakuAutoSharding), rateLimitSetting
|
||||
)
|
||||
|
||||
await proto.start()
|
||||
switch.mount(proto)
|
||||
|
||||
@ -1 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_client, ./test_ratelimit
|
||||
|
||||
@ -42,8 +42,9 @@ suite "Waku Lightpush Client":
|
||||
): Future[WakuLightPushResult] {.async.} =
|
||||
let msgLen = message.encode().buffer.len
|
||||
if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024:
|
||||
return
|
||||
lighpushErrorResult(PAYLOAD_TOO_LARGE, "length greater than maxMessageSize")
|
||||
return lighpushErrorResult(
|
||||
LightPushErrorCode.PAYLOAD_TOO_LARGE, "length greater than maxMessageSize"
|
||||
)
|
||||
handlerFuture.complete((pubsubTopic, message))
|
||||
# return that we published the message to 1 peer.
|
||||
return ok(1)
|
||||
@ -263,7 +264,7 @@ suite "Waku Lightpush Client":
|
||||
# Then the message is not received by the server
|
||||
check:
|
||||
publishResponse5.isErr()
|
||||
publishResponse5.error.code == PAYLOAD_TOO_LARGE
|
||||
publishResponse5.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE
|
||||
(await handlerFuture.waitForResult()).isErr()
|
||||
|
||||
asyncTest "Invalid Encoding Payload":
|
||||
@ -276,7 +277,7 @@ suite "Waku Lightpush Client":
|
||||
# And the error is returned
|
||||
check:
|
||||
publishResponse.requestId == "N/A"
|
||||
publishResponse.statusCode == LightpushStatusCode.BAD_REQUEST.uint32
|
||||
publishResponse.statusCode == LightPushErrorCode.BAD_REQUEST
|
||||
publishResponse.statusDesc.isSome()
|
||||
scanf(publishResponse.statusDesc.get(), decodeRpcFailure)
|
||||
|
||||
@ -289,7 +290,7 @@ suite "Waku Lightpush Client":
|
||||
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
|
||||
): Future[WakuLightPushResult] {.async.} =
|
||||
handlerFuture2.complete()
|
||||
return lighpushErrorResult(PAYLOAD_TOO_LARGE, handlerError)
|
||||
return lighpushErrorResult(LightPushErrorCode.PAYLOAD_TOO_LARGE, handlerError)
|
||||
|
||||
let
|
||||
serverSwitch2 = newTestSwitch()
|
||||
@ -305,7 +306,7 @@ suite "Waku Lightpush Client":
|
||||
|
||||
# Then the response is negative
|
||||
check:
|
||||
publishResponse.error.code == PAYLOAD_TOO_LARGE
|
||||
publishResponse.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE
|
||||
publishResponse.error.desc == some(handlerError)
|
||||
(await handlerFuture2.waitForResult()).isOk()
|
||||
|
||||
@ -369,4 +370,4 @@ suite "Waku Lightpush Client":
|
||||
|
||||
# Then the response is negative
|
||||
check not publishResponse.isOk()
|
||||
check publishResponse.error.code == LightpushStatusCode.NO_PEERS_TO_RELAY
|
||||
check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY
|
||||
|
||||
@ -119,7 +119,7 @@ suite "Rate limited push service":
|
||||
|
||||
check:
|
||||
requestRes.isErr()
|
||||
requestRes.error.code == TOO_MANY_REQUESTS
|
||||
requestRes.error.code == LightPushErrorCode.TOO_MANY_REQUESTS
|
||||
requestRes.error.desc == some(TooManyRequestsMessage)
|
||||
|
||||
for testCnt in 0 .. 2:
|
||||
|
||||
@ -1 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_client, ./test_ratelimit
|
||||
|
||||
@ -1 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_protocol, ./test_rpc_codec
|
||||
|
||||
@ -657,7 +657,7 @@ suite "WakuNode - Relay":
|
||||
await node.start()
|
||||
(await node.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
require node.mountSharding(1, 1).isOk
|
||||
require node.mountAutoSharding(1, 1).isOk
|
||||
|
||||
## Given
|
||||
let
|
||||
@ -670,11 +670,14 @@ suite "WakuNode - Relay":
|
||||
): Future[void] {.gcsafe, raises: [Defect].} =
|
||||
discard pubsubTopic
|
||||
discard message
|
||||
assert shard == node.wakuSharding.getShard(contentTopicA).expect("Valid Topic"),
|
||||
assert shard ==
|
||||
node.wakuAutoSharding.get().getShard(contentTopicA).expect("Valid Topic"),
|
||||
"topic must use the same shard"
|
||||
assert shard == node.wakuSharding.getShard(contentTopicB).expect("Valid Topic"),
|
||||
assert shard ==
|
||||
node.wakuAutoSharding.get().getShard(contentTopicB).expect("Valid Topic"),
|
||||
"topic must use the same shard"
|
||||
assert shard == node.wakuSharding.getShard(contentTopicC).expect("Valid Topic"),
|
||||
assert shard ==
|
||||
node.wakuAutoSharding.get().getShard(contentTopicC).expect("Valid Topic"),
|
||||
"topic must use the same shard"
|
||||
|
||||
## When
|
||||
|
||||
@ -135,7 +135,7 @@ suite "Onchain group manager":
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
let merkleRootBefore = manager.fetchMerkleRoot()
|
||||
let merkleRootBefore = waitFor manager.fetchMerkleRoot()
|
||||
|
||||
try:
|
||||
waitFor manager.register(credentials, UserMessageLimit(20))
|
||||
@ -144,7 +144,7 @@ suite "Onchain group manager":
|
||||
|
||||
discard waitFor withTimeout(trackRootChanges(manager), 15.seconds)
|
||||
|
||||
let merkleRootAfter = manager.fetchMerkleRoot()
|
||||
let merkleRootAfter = waitFor manager.fetchMerkleRoot()
|
||||
|
||||
let metadataSetRes = manager.setMetadata()
|
||||
assert metadataSetRes.isOk(), metadataSetRes.error
|
||||
@ -170,7 +170,7 @@ suite "Onchain group manager":
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
let merkleRootBefore = manager.fetchMerkleRoot()
|
||||
let merkleRootBefore = waitFor manager.fetchMerkleRoot()
|
||||
|
||||
try:
|
||||
for i in 0 ..< credentials.len():
|
||||
@ -180,7 +180,7 @@ suite "Onchain group manager":
|
||||
except Exception, CatchableError:
|
||||
assert false, "exception raised: " & getCurrentExceptionMsg()
|
||||
|
||||
let merkleRootAfter = manager.fetchMerkleRoot()
|
||||
let merkleRootAfter = waitFor manager.fetchMerkleRoot()
|
||||
|
||||
check:
|
||||
merkleRootBefore != merkleRootAfter
|
||||
@ -205,20 +205,16 @@ suite "Onchain group manager":
|
||||
(waitFor manager.init()).isOkOr:
|
||||
raiseAssert $error
|
||||
|
||||
let idCommitment = generateCredentials(manager.rlnInstance).idCommitment
|
||||
let merkleRootBefore = manager.fetchMerkleRoot()
|
||||
let idCredentials = generateCredentials(manager.rlnInstance)
|
||||
let merkleRootBefore = waitFor manager.fetchMerkleRoot()
|
||||
|
||||
try:
|
||||
waitFor manager.register(
|
||||
RateCommitment(
|
||||
idCommitment: idCommitment, userMessageLimit: UserMessageLimit(20)
|
||||
)
|
||||
)
|
||||
waitFor manager.register(idCredentials, UserMessageLimit(20))
|
||||
except Exception, CatchableError:
|
||||
assert false,
|
||||
"exception raised when calling register: " & getCurrentExceptionMsg()
|
||||
|
||||
let merkleRootAfter = manager.fetchMerkleRoot()
|
||||
let merkleRootAfter = waitFor manager.fetchMerkleRoot()
|
||||
|
||||
check:
|
||||
merkleRootAfter != merkleRootBefore
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{.used.}
|
||||
|
||||
import std/options, chronos
|
||||
import std/options, chronos, chronicles
|
||||
|
||||
import
|
||||
waku/[node/peer_manager, waku_store, waku_store/client], ../testlib/[common, wakucore]
|
||||
|
||||
@ -17,7 +17,7 @@ suite "Wakunode2 - Waku":
|
||||
## Given
|
||||
let conf = defaultTestWakuConf()
|
||||
|
||||
let waku = Waku.new(conf).valueOr:
|
||||
let waku = (waitFor Waku.new(conf)).valueOr:
|
||||
raiseAssert error
|
||||
|
||||
## When
|
||||
@ -33,7 +33,7 @@ suite "Wakunode2 - Waku initialization":
|
||||
var conf = defaultTestWakuConf()
|
||||
conf.peerPersistence = true
|
||||
|
||||
let waku = Waku.new(conf).valueOr:
|
||||
let waku = (waitFor Waku.new(conf)).valueOr:
|
||||
raiseAssert error
|
||||
|
||||
check:
|
||||
@ -44,7 +44,7 @@ suite "Wakunode2 - Waku initialization":
|
||||
var conf = defaultTestWakuConf()
|
||||
|
||||
## When
|
||||
var waku = Waku.new(conf).valueOr:
|
||||
var waku = (waitFor Waku.new(conf)).valueOr:
|
||||
raiseAssert error
|
||||
|
||||
(waitFor startWaku(addr waku)).isOkOr:
|
||||
@ -65,10 +65,10 @@ suite "Wakunode2 - Waku initialization":
|
||||
test "app properly handles dynamic port configuration":
|
||||
## Given
|
||||
var conf = defaultTestWakuConf()
|
||||
conf.networkConf.p2pTcpPort = Port(0)
|
||||
conf.endpointConf.p2pTcpPort = Port(0)
|
||||
|
||||
## When
|
||||
var waku = Waku.new(conf).valueOr:
|
||||
var waku = (waitFor Waku.new(conf)).valueOr:
|
||||
raiseAssert error
|
||||
|
||||
(waitFor startWaku(addr waku)).isOkOr:
|
||||
|
||||
@ -1,14 +1,15 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
./test_rest_debug_serdes,
|
||||
./test_rest_admin,
|
||||
./test_rest_cors,
|
||||
./test_rest_debug,
|
||||
./test_rest_debug_serdes,
|
||||
./test_rest_filter,
|
||||
./test_rest_lightpush_legacy,
|
||||
./test_rest_health,
|
||||
./test_rest_lightpush,
|
||||
./test_rest_lightpush_legacy,
|
||||
./test_rest_relay_serdes,
|
||||
./test_rest_relay,
|
||||
./test_rest_serdes,
|
||||
./test_rest_store,
|
||||
./test_rest_admin,
|
||||
./test_rest_cors
|
||||
./test_rest_store
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[sequtils, strformat, net],
|
||||
std/[sequtils, net],
|
||||
testutils/unittests,
|
||||
presto,
|
||||
presto/client as presto_client,
|
||||
@ -42,6 +42,14 @@ suite "Waku v2 Rest API - Admin":
|
||||
node2 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60602))
|
||||
node3 = newTestWakuNode(generateSecp256k1Key(), getPrimaryIPAddr(), Port(60604))
|
||||
|
||||
let clusterId = 1.uint16
|
||||
node1.mountMetadata(clusterId).isOkOr:
|
||||
assert false, "Failed to mount metadata: " & $error
|
||||
node2.mountMetadata(clusterId).isOkOr:
|
||||
assert false, "Failed to mount metadata: " & $error
|
||||
node3.mountMetadata(clusterId).isOkOr:
|
||||
assert false, "Failed to mount metadata: " & $error
|
||||
|
||||
await allFutures(node1.start(), node2.start(), node3.start())
|
||||
await allFutures(
|
||||
node1.mountRelay(),
|
||||
@ -56,7 +64,7 @@ suite "Waku v2 Rest API - Admin":
|
||||
): Future[void] {.async, gcsafe.} =
|
||||
await sleepAsync(0.milliseconds)
|
||||
|
||||
let shard = RelayShard(clusterId: 1, shardId: 0)
|
||||
let shard = RelayShard(clusterId: clusterId, shardId: 0)
|
||||
node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
|
||||
assert false, "Failed to subscribe to topic: " & $error
|
||||
node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
|
||||
|
||||
@ -296,7 +296,7 @@ suite "Waku v2 Rest API - Relay":
|
||||
await node.start()
|
||||
(await node.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
require node.mountSharding(1, 8).isOk
|
||||
require node.mountAutoSharding(1, 8).isOk
|
||||
|
||||
var restPort = Port(0)
|
||||
let restAddress = parseIpAddress("0.0.0.0")
|
||||
@ -346,6 +346,7 @@ suite "Waku v2 Rest API - Relay":
|
||||
await node.start()
|
||||
(await node.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
require node.mountAutoSharding(1, 8).isOk
|
||||
|
||||
var restPort = Port(0)
|
||||
let restAddress = parseIpAddress("0.0.0.0")
|
||||
@ -404,6 +405,7 @@ suite "Waku v2 Rest API - Relay":
|
||||
await node.start()
|
||||
(await node.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
require node.mountAutoSharding(1, 8).isOk
|
||||
|
||||
var restPort = Port(0)
|
||||
let restAddress = parseIpAddress("0.0.0.0")
|
||||
@ -469,6 +471,8 @@ suite "Waku v2 Rest API - Relay":
|
||||
await node.start()
|
||||
(await node.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
require node.mountAutoSharding(1, 8).isOk
|
||||
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
dynamic: false,
|
||||
credIndex: some(1.uint),
|
||||
@ -528,6 +532,8 @@ suite "Waku v2 Rest API - Relay":
|
||||
await node.start()
|
||||
(await node.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
require node.mountAutoSharding(1, 8).isOk
|
||||
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
dynamic: false,
|
||||
credIndex: some(1.uint),
|
||||
@ -641,6 +647,8 @@ suite "Waku v2 Rest API - Relay":
|
||||
await node.start()
|
||||
(await node.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
require node.mountAutoSharding(1, 8).isOk
|
||||
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
dynamic: false,
|
||||
credIndex: some(1.uint),
|
||||
|
||||
11
waku.nimble
11
waku.nimble
@@ -70,7 +70,7 @@ proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
var lib_name = toDll("libwaku")
let lib_name = (when defined(windows): toDll(name) else: name & ".so")
when defined(windows):
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
@@ -161,10 +161,13 @@ task buildone, "Build custom target":
let filepath = paramStr(paramCount())
discard buildModule filepath

task testone, "Test custom target":
task buildTest, "Test custom target":
let filepath = paramStr(paramCount())
if buildModule(filepath):
exec "build/" & filepath & ".bin"
discard buildModule(filepath)

task execTest, "Run test":
let filepath = paramStr(paramCount() - 1)
exec "build/" & filepath & ".bin" & " test \"" & paramStr(paramCount()) & "\""

### C Bindings
let chroniclesParams =

@ -43,8 +43,8 @@ type
|
||||
switchSendSignedPeerRecord: Option[bool]
|
||||
circuitRelay: Relay
|
||||
|
||||
#Rate limit configs for non-relay req-resp protocols
|
||||
rateLimitSettings: Option[seq[string]]
|
||||
# Rate limit configs for non-relay req-resp protocols
|
||||
rateLimitSettings: Option[ProtocolRateLimitSettings]
|
||||
|
||||
WakuNodeBuilderResult* = Result[void, string]
|
||||
|
||||
@ -127,7 +127,7 @@ proc withPeerManagerConfig*(
|
||||
proc withColocationLimit*(builder: var WakuNodeBuilder, colocationLimit: int) =
|
||||
builder.colocationLimit = colocationLimit
|
||||
|
||||
proc withRateLimit*(builder: var WakuNodeBuilder, limits: seq[string]) =
|
||||
proc withRateLimit*(builder: var WakuNodeBuilder, limits: ProtocolRateLimitSettings) =
|
||||
builder.rateLimitSettings = some(limits)
|
||||
|
||||
proc withCircuitRelay*(builder: var WakuNodeBuilder, circuitRelay: Relay) =
|
||||
@ -219,11 +219,9 @@ proc build*(builder: WakuNodeBuilder): Result[WakuNode, string] =
|
||||
switch = switch,
|
||||
peerManager = peerManager,
|
||||
rng = rng,
|
||||
rateLimitSettings = builder.rateLimitSettings.get(DefaultProtocolRateLimit),
|
||||
)
|
||||
except Exception:
|
||||
return err("failed to build WakuNode instance: " & getCurrentExceptionMsg())
|
||||
|
||||
if builder.rateLimitSettings.isSome():
|
||||
?node.setRateLimits(builder.rateLimitSettings.get())
|
||||
|
||||
ok(node)
|
||||
|
||||
@ -8,10 +8,11 @@ import
|
||||
./discv5_conf_builder,
|
||||
./web_socket_conf_builder,
|
||||
./metrics_server_conf_builder,
|
||||
./rate_limit_conf_builder,
|
||||
./rln_relay_conf_builder
|
||||
|
||||
export
|
||||
waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder,
|
||||
store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder,
|
||||
discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder,
|
||||
rln_relay_conf_builder
|
||||
rate_limit_conf_builder, rln_relay_conf_builder
|
||||
|
||||
@@ -1,4 +1,4 @@
import chronicles, std/[net, options, sequtils], results
import chronicles, std/[net, options, strutils], results
import ../waku_conf

logScope:
@@ -8,16 +8,12 @@ logScope:
## DNS Discovery Config Builder ##
##################################
type DnsDiscoveryConfBuilder* = object
enabled*: Option[bool]
enrTreeUrl*: Option[string]
nameServers*: seq[IpAddress]

proc init*(T: type DnsDiscoveryConfBuilder): DnsDiscoveryConfBuilder =
DnsDiscoveryConfBuilder()

proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) =
b.enabled = some(enabled)

proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) =
b.enrTreeUrl = some(enrTreeUrl)

@@ -25,13 +21,13 @@ proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress
b.nameServers = nameServers

proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] =
if not b.enabled.get(false):
if b.enrTreeUrl.isNone():
return ok(none(DnsDiscoveryConf))

if isEmptyOrWhiteSpace(b.enrTreeUrl.get()):
return err("dnsDiscovery.enrTreeUrl cannot be an empty string")
if b.nameServers.len == 0:
return err("dnsDiscovery.nameServers is not specified")
if b.enrTreeUrl.isNone():
return err("dnsDiscovery.enrTreeUrl is not specified")

return ok(
some(DnsDiscoveryConf(nameServers: b.nameServers, enrTreeUrl: b.enrTreeUrl.get()))

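With this change, DNS Discovery appears to be toggled implicitly: providing an ENR tree URL enables it, and `build()` returns `none` when no URL is set. A minimal usage sketch of the builder shown above, not part of the commit; the URL and name server are placeholders:

```nim
import std/net

var b = DnsDiscoveryConfBuilder.init()
b.withEnrTreeUrl("enrtree://<key>@<fqdn>")        # placeholder URL; setting it enables DNS Discovery
b.withNameServers(@[parseIpAddress("1.1.1.1")])   # placeholder resolver

# build() yields ok(none(DnsDiscoveryConf)) when no ENR tree URL was provided
let dnsConf = b.build().valueOr:
  raiseAssert error
```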
29
waku/factory/conf_builder/rate_limit_conf_builder.nim
Normal file
29
waku/factory/conf_builder/rate_limit_conf_builder.nim
Normal file
@@ -0,0 +1,29 @@
import chronicles, std/[net, options], results
import waku/common/rate_limit/setting

logScope:
topics = "waku conf builder rate limit"

type RateLimitConfBuilder* = object
strValue: Option[seq[string]]
objValue: Option[ProtocolRateLimitSettings]

proc init*(T: type RateLimitConfBuilder): RateLimitConfBuilder =
RateLimitConfBuilder()

proc withRateLimits*(b: var RateLimitConfBuilder, rateLimits: seq[string]) =
b.strValue = some(rateLimits)

proc build*(b: RateLimitConfBuilder): Result[ProtocolRateLimitSettings, string] =
if b.strValue.isSome() and b.objValue.isSome():
return err("Rate limits conf must only be set once on the builder")

if b.objValue.isSome():
return ok(b.objValue.get())

if b.strValue.isSome():
let rateLimits = ProtocolRateLimitSettings.parse(b.strValue.get()).valueOr:
return err("Invalid rate limits settings:" & $error)
return ok(rateLimits)

return ok(DefaultProtocolRateLimit)

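A minimal usage sketch for the new builder, not part of the commit and assuming the rate-limit strings follow the `protocol:volume/period` form exercised by the test suite earlier in this diff:

```nim
var rb = RateLimitConfBuilder.init()
rb.withRateLimits(@["lightpush:2/2ms", "store:3/3s"])  # strings as in the tests above

# Falls back to DefaultProtocolRateLimit when nothing was set on the builder
let limits = rb.build().valueOr:
  raiseAssert error
```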
@ -64,7 +64,7 @@ proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string
|
||||
dbMigration: b.dbMigration.get(true),
|
||||
dbURl: b.dbUrl.get(),
|
||||
dbVacuum: b.dbVacuum.get(false),
|
||||
supportV2: b.supportV2.get(true),
|
||||
supportV2: b.supportV2.get(false),
|
||||
maxNumDbConnections: b.maxNumDbConnections.get(50),
|
||||
retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds),
|
||||
resume: b.resume.get(false),
|
||||
|
||||
@ -23,6 +23,7 @@ import
|
||||
./discv5_conf_builder,
|
||||
./web_socket_conf_builder,
|
||||
./metrics_server_conf_builder,
|
||||
./rate_limit_conf_builder,
|
||||
./rln_relay_conf_builder
|
||||
|
||||
logScope:
|
||||
@ -59,8 +60,9 @@ type WakuConfBuilder* = object
|
||||
nodeKey: Option[crypto.PrivateKey]
|
||||
|
||||
clusterId: Option[uint16]
|
||||
numShardsInNetwork: Option[uint32]
|
||||
shards: Option[seq[uint16]]
|
||||
shardingConf: Option[ShardingConfKind]
|
||||
numShardsInCluster: Option[uint16]
|
||||
subscribeShards: Option[seq[uint16]]
|
||||
protectedShards: Option[seq[ProtectedShard]]
|
||||
contentTopics: Option[seq[string]]
|
||||
|
||||
@ -73,6 +75,7 @@ type WakuConfBuilder* = object
|
||||
rlnRelayConf*: RlnRelayConfBuilder
|
||||
storeServiceConf*: StoreServiceConfBuilder
|
||||
webSocketConf*: WebSocketConfBuilder
|
||||
rateLimitConf*: RateLimitConfBuilder
|
||||
# End conf builders
|
||||
relay: Option[bool]
|
||||
lightPush: Option[bool]
|
||||
@ -83,7 +86,7 @@ type WakuConfBuilder* = object
|
||||
# TODO: move within a relayConf
|
||||
rendezvous: Option[bool]
|
||||
|
||||
clusterConf: Option[ClusterConf]
|
||||
networkConf: Option[NetworkConf]
|
||||
|
||||
staticNodes: seq[string]
|
||||
|
||||
@ -115,13 +118,10 @@ type WakuConfBuilder* = object
|
||||
|
||||
agentString: Option[string]
|
||||
|
||||
rateLimits: Option[seq[string]]
|
||||
|
||||
maxRelayPeers: Option[int]
|
||||
relayShardedPeerManagement: Option[bool]
|
||||
relayServiceRatio: Option[string]
|
||||
circuitRelayClient: Option[bool]
|
||||
keepAlive: Option[bool]
|
||||
p2pReliability: Option[bool]
|
||||
|
||||
proc init*(T: type WakuConfBuilder): WakuConfBuilder =
|
||||
@ -134,10 +134,11 @@ proc init*(T: type WakuConfBuilder): WakuConfBuilder =
|
||||
rlnRelayConf: RlnRelayConfBuilder.init(),
|
||||
storeServiceConf: StoreServiceConfBuilder.init(),
|
||||
webSocketConf: WebSocketConfBuilder.init(),
|
||||
rateLimitConf: RateLimitConfBuilder.init(),
|
||||
)
|
||||
|
||||
proc withClusterConf*(b: var WakuConfBuilder, clusterConf: ClusterConf) =
|
||||
b.clusterConf = some(clusterConf)
|
||||
proc withNetworkConf*(b: var WakuConfBuilder, networkConf: NetworkConf) =
|
||||
b.networkConf = some(networkConf)
|
||||
|
||||
proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
|
||||
b.nodeKey = some(nodeKey)
|
||||
@ -145,11 +146,14 @@ proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
|
||||
proc withClusterId*(b: var WakuConfBuilder, clusterId: uint16) =
|
||||
b.clusterId = some(clusterId)
|
||||
|
||||
proc withNumShardsInNetwork*(b: var WakuConfBuilder, numShardsInNetwork: uint32) =
|
||||
b.numShardsInNetwork = some(numShardsInNetwork)
|
||||
proc withShardingConf*(b: var WakuConfBuilder, shardingConf: ShardingConfKind) =
|
||||
b.shardingConf = some(shardingConf)
|
||||
|
||||
proc withShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
|
||||
b.shards = some(shards)
|
||||
proc withNumShardsInCluster*(b: var WakuConfBuilder, numShardsInCluster: uint16) =
|
||||
b.numShardsInCluster = some(numShardsInCluster)
|
||||
|
||||
proc withSubscribeShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
|
||||
b.subscribeShards = some(shards)
|
||||
|
||||
proc withProtectedShards*(
|
||||
b: var WakuConfBuilder, protectedShards: seq[ProtectedShard]
|
||||
@ -238,9 +242,6 @@ proc withAgentString*(b: var WakuConfBuilder, agentString: string) =
|
||||
proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) =
|
||||
b.colocationLimit = some(colocationLimit)
|
||||
|
||||
proc withRateLimits*(b: var WakuConfBuilder, rateLimits: seq[string]) =
|
||||
b.rateLimits = some(rateLimits)
|
||||
|
||||
proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) =
|
||||
b.maxRelayPeers = some(maxRelayPeers)
|
||||
|
||||
@ -255,9 +256,6 @@ proc withRelayShardedPeerManagement*(
|
||||
) =
|
||||
b.relayShardedPeerManagement = some(relayShardedPeerManagement)
|
||||
|
||||
proc withKeepAlive*(b: var WakuConfBuilder, keepAlive: bool) =
|
||||
b.keepAlive = some(keepAlive)
|
||||
|
||||
proc withP2pReliability*(b: var WakuConfBuilder, p2pReliability: bool) =
|
||||
b.p2pReliability = some(p2pReliability)
|
||||
|
||||
@ -273,6 +271,8 @@ proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSize: string) =
|
||||
proc withStaticNodes*(builder: var WakuConfBuilder, staticNodes: seq[string]) =
|
||||
builder.staticNodes = concat(builder.staticNodes, staticNodes)
|
||||
|
||||
## Building
|
||||
|
||||
proc nodeKey(
|
||||
builder: WakuConfBuilder, rng: ref HmacDrbgContext
|
||||
): Result[crypto.PrivateKey, string] =
|
||||
@@ -285,77 +285,105 @@ proc nodeKey(
return err("Failed to generate key: " & $error)
return ok(nodeKey)

proc applyClusterConf(builder: var WakuConfBuilder) =
# Apply cluster conf, overrides most values passed individually
# If you want to tweak values, don't use clusterConf
if builder.clusterConf.isNone():
proc buildShardingConf(
bShardingConfKind: Option[ShardingConfKind],
bNumShardsInCluster: Option[uint16],
bSubscribeShards: Option[seq[uint16]],
): (ShardingConf, seq[uint16]) =
echo "bSubscribeShards: ", bSubscribeShards
case bShardingConfKind.get(AutoSharding)
of StaticSharding:
(ShardingConf(kind: StaticSharding), bSubscribeShards.get(@[]))
of AutoSharding:
let numShardsInCluster = bNumShardsInCluster.get(1)
let shardingConf =
ShardingConf(kind: AutoSharding, numShardsInCluster: numShardsInCluster)
let upperShard = uint16(numShardsInCluster - 1)
(shardingConf, bSubscribeShards.get(toSeq(0.uint16 .. upperShard)))

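An illustrative sketch of the defaults this helper applies, not part of the commit: static sharding subscribes to nothing unless shards are given, while auto-sharding with N shards subscribes to 0..N-1.

```nim
# Hypothetical check of buildShardingConf defaults; assumes std/sequtils is imported
let (staticConf, staticShards) =
  buildShardingConf(some(StaticSharding), none(uint16), none(seq[uint16]))
assert staticShards.len == 0

let (autoConf, autoShards) =
  buildShardingConf(some(AutoSharding), some(8'u16), none(seq[uint16]))
assert autoConf.numShardsInCluster == 8'u16
assert autoShards == toSeq(0'u16 .. 7'u16)
```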
proc applyNetworkConf(builder: var WakuConfBuilder) =
|
||||
# Apply network conf, overrides most values passed individually
|
||||
# If you want to tweak values, don't use networkConf
|
||||
# TODO: networkconf should be one field of the conf builder so that this function becomes unnecessary
|
||||
if builder.networkConf.isNone():
|
||||
return
|
||||
let clusterConf = builder.clusterConf.get()
|
||||
let networkConf = builder.networkConf.get()
|
||||
|
||||
if builder.clusterId.isSome():
|
||||
warn "Cluster id was provided alongside a cluster conf",
|
||||
used = clusterConf.clusterId, discarded = builder.clusterId.get()
|
||||
builder.clusterId = some(clusterConf.clusterId)
|
||||
warn "Cluster id was provided alongside a network conf",
|
||||
used = networkConf.clusterId, discarded = builder.clusterId.get()
|
||||
builder.clusterId = some(networkConf.clusterId)
|
||||
|
||||
# Apply relay parameters
|
||||
if builder.relay.get(false) and clusterConf.rlnRelay:
|
||||
if builder.relay.get(false) and networkConf.rlnRelay:
|
||||
if builder.rlnRelayConf.enabled.isSome():
|
||||
warn "RLN Relay was provided alongside a cluster conf",
|
||||
used = clusterConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
|
||||
warn "RLN Relay was provided alongside a network conf",
|
||||
used = networkConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
|
||||
builder.rlnRelayConf.withEnabled(true)
|
||||
|
||||
if builder.rlnRelayConf.ethContractAddress.get("") != "":
|
||||
warn "RLN Relay ETH Contract Address was provided alongside a cluster conf",
|
||||
used = clusterConf.rlnRelayEthContractAddress.string,
|
||||
warn "RLN Relay ETH Contract Address was provided alongside a network conf",
|
||||
used = networkConf.rlnRelayEthContractAddress.string,
|
||||
discarded = builder.rlnRelayConf.ethContractAddress.get().string
|
||||
builder.rlnRelayConf.withEthContractAddress(clusterConf.rlnRelayEthContractAddress)
|
||||
builder.rlnRelayConf.withEthContractAddress(networkConf.rlnRelayEthContractAddress)
|
||||
|
||||
if builder.rlnRelayConf.chainId.isSome():
|
||||
warn "RLN Relay Chain Id was provided alongside a cluster conf",
|
||||
used = clusterConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
|
||||
builder.rlnRelayConf.withChainId(clusterConf.rlnRelayChainId)
|
||||
warn "RLN Relay Chain Id was provided alongside a network conf",
|
||||
used = networkConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
|
||||
builder.rlnRelayConf.withChainId(networkConf.rlnRelayChainId)
|
||||
|
||||
if builder.rlnRelayConf.dynamic.isSome():
|
||||
warn "RLN Relay Dynamic was provided alongside a cluster conf",
|
||||
used = clusterConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
|
||||
builder.rlnRelayConf.withDynamic(clusterConf.rlnRelayDynamic)
|
||||
warn "RLN Relay Dynamic was provided alongside a network conf",
|
||||
used = networkConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
|
||||
builder.rlnRelayConf.withDynamic(networkConf.rlnRelayDynamic)
|
||||
|
||||
if builder.rlnRelayConf.epochSizeSec.isSome():
|
||||
warn "RLN Epoch Size in Seconds was provided alongside a cluster conf",
|
||||
used = clusterConf.rlnEpochSizeSec,
|
||||
warn "RLN Epoch Size in Seconds was provided alongside a network conf",
|
||||
used = networkConf.rlnEpochSizeSec,
|
||||
discarded = builder.rlnRelayConf.epochSizeSec
|
||||
builder.rlnRelayConf.withEpochSizeSec(clusterConf.rlnEpochSizeSec)
|
||||
builder.rlnRelayConf.withEpochSizeSec(networkConf.rlnEpochSizeSec)
|
||||
|
||||
if builder.rlnRelayConf.userMessageLimit.isSome():
|
||||
warn "RLN Relay Dynamic was provided alongside a cluster conf",
|
||||
used = clusterConf.rlnRelayUserMessageLimit,
|
||||
warn "RLN Relay Dynamic was provided alongside a network conf",
|
||||
used = networkConf.rlnRelayUserMessageLimit,
|
||||
discarded = builder.rlnRelayConf.userMessageLimit
|
||||
builder.rlnRelayConf.withUserMessageLimit(clusterConf.rlnRelayUserMessageLimit)
|
||||
builder.rlnRelayConf.withUserMessageLimit(networkConf.rlnRelayUserMessageLimit)
|
||||
# End Apply relay parameters
|
||||
|
||||
case builder.maxMessageSize.kind
|
||||
of mmskNone:
|
||||
discard
|
||||
of mmskStr, mmskInt:
|
||||
warn "Max Message Size was provided alongside a cluster conf",
|
||||
used = clusterConf.maxMessageSize, discarded = $builder.maxMessageSize
|
||||
builder.withMaxMessageSize(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
warn "Max Message Size was provided alongside a network conf",
|
||||
used = networkConf.maxMessageSize, discarded = $builder.maxMessageSize
|
||||
builder.withMaxMessageSize(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
|
||||
if builder.numShardsInNetwork.isSome():
|
||||
warn "Num Shards In Network was provided alongside a cluster conf",
|
||||
used = clusterConf.numShardsInNetwork, discarded = builder.numShardsInNetwork
|
||||
builder.numShardsInNetwork = some(clusterConf.numShardsInNetwork)
|
||||
if builder.shardingConf.isSome():
|
||||
warn "Sharding Conf was provided alongside a network conf",
|
||||
used = networkConf.shardingConf.kind, discarded = builder.shardingConf
|
||||
|
||||
if clusterConf.discv5Discovery:
|
||||
if builder.numShardsInCluster.isSome():
|
||||
warn "Num Shards In Cluster was provided alongside a network conf",
|
||||
used = networkConf.shardingConf.numShardsInCluster,
|
||||
discarded = builder.numShardsInCluster
|
||||
|
||||
case networkConf.shardingConf.kind
|
||||
of StaticSharding:
|
||||
builder.shardingConf = some(StaticSharding)
|
||||
of AutoSharding:
|
||||
builder.shardingConf = some(AutoSharding)
|
||||
builder.numShardsInCluster = some(networkConf.shardingConf.numShardsInCluster)
|
||||
|
||||
if networkConf.discv5Discovery:
|
||||
if builder.discv5Conf.enabled.isNone:
|
||||
builder.discv5Conf.withEnabled(clusterConf.discv5Discovery)
|
||||
builder.discv5Conf.withEnabled(networkConf.discv5Discovery)
|
||||
|
||||
if builder.discv5Conf.bootstrapNodes.len == 0 and
|
||||
clusterConf.discv5BootstrapNodes.len > 0:
|
||||
warn "Discv5 Boostrap nodes were provided alongside a cluster conf",
|
||||
used = clusterConf.discv5BootstrapNodes,
|
||||
networkConf.discv5BootstrapNodes.len > 0:
|
||||
warn "Discv5 Bootstrap nodes were provided alongside a network conf",
|
||||
used = networkConf.discv5BootstrapNodes,
|
||||
discarded = builder.discv5Conf.bootstrapNodes
|
||||
builder.discv5Conf.withBootstrapNodes(clusterConf.discv5BootstrapNodes)
|
||||
builder.discv5Conf.withBootstrapNodes(networkConf.discv5BootstrapNodes)
|
||||
|
||||
proc build*(
|
||||
builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng()
|
||||
@ -365,7 +393,7 @@ proc build*(
|
||||
## of libwaku. It aims to be agnostic so it does not apply a
|
||||
## default when it is opinionated.
|
||||
|
||||
applyClusterConf(builder)
|
||||
applyNetworkConf(builder)
|
||||
|
||||
let relay =
|
||||
if builder.relay.isSome():
|
||||
@@ -415,24 +443,14 @@
else:
builder.clusterId.get().uint16

let numShardsInNetwork =
if builder.numShardsInNetwork.isSome():
builder.numShardsInNetwork.get()
else:
warn "Number of shards in network not specified, defaulting to zero (improve is wip)"
0

let shards =
if builder.shards.isSome():
builder.shards.get()
else:
warn "shards not specified, defaulting to all shards in network"
# TODO: conversion should not be needed
let upperShard: uint16 = uint16(numShardsInNetwork - 1)
toSeq(0.uint16 .. upperShard)

let (shardingConf, subscribeShards) = buildShardingConf(
builder.shardingConf, builder.numShardsInCluster, builder.subscribeShards
)
let protectedShards = builder.protectedShards.get(@[])

info "Sharding configuration: ",
shardingConf = $shardingConf, subscribeShards = $subscribeShards

let maxMessageSizeBytes =
|
||||
case builder.maxMessageSize.kind
|
||||
of mmskInt:
|
||||
@ -469,6 +487,10 @@ proc build*(
|
||||
|
||||
let webSocketConf = builder.webSocketConf.build().valueOr:
|
||||
return err("WebSocket Conf building failed: " & $error)
|
||||
|
||||
let rateLimit = builder.rateLimitConf.build().valueOr:
|
||||
return err("Rate limits Conf building failed: " & $error)
|
||||
|
||||
# End - Build sub-configs
|
||||
|
||||
let logLevel =
|
||||
@ -563,7 +585,6 @@ proc build*(
|
||||
# TODO: use `DefaultColocationLimit`. the user of this value should
|
||||
# probably be defining a config object
|
||||
let colocationLimit = builder.colocationLimit.get(5)
|
||||
let rateLimits = builder.rateLimits.get(newSeq[string](0))
|
||||
|
||||
# TODO: is there a strategy for experimental features? delete vs promote
|
||||
let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false)
|
||||
@ -588,9 +609,9 @@ proc build*(
|
||||
# end confs
|
||||
nodeKey: nodeKey,
|
||||
clusterId: clusterId,
|
||||
numShardsInNetwork: numShardsInNetwork,
|
||||
shardingConf: shardingConf,
|
||||
contentTopics: contentTopics,
|
||||
shards: shards,
|
||||
subscribeShards: subscribeShards,
|
||||
protectedShards: protectedShards,
|
||||
relay: relay,
|
||||
lightPush: lightPush,
|
||||
@ -605,7 +626,7 @@ proc build*(
|
||||
logLevel: logLevel,
|
||||
logFormat: logFormat,
|
||||
# TODO: Separate builders
|
||||
networkConf: NetworkConfig(
|
||||
endpointConf: EndpointConf(
|
||||
natStrategy: natStrategy,
|
||||
p2pTcpPort: p2pTcpPort,
|
||||
dns4DomainName: dns4DomainName,
|
||||
@ -623,9 +644,8 @@ proc build*(
|
||||
colocationLimit: colocationLimit,
|
||||
maxRelayPeers: builder.maxRelayPeers,
|
||||
relayServiceRatio: builder.relayServiceRatio.get("60:40"),
|
||||
rateLimits: rateLimits,
|
||||
rateLimit: rateLimit,
|
||||
circuitRelayClient: builder.circuitRelayClient.get(false),
|
||||
keepAlive: builder.keepAlive.get(true),
|
||||
staticNodes: builder.staticNodes,
|
||||
relayShardedPeerManagement: relayShardedPeerManagement,
|
||||
p2pReliability: builder.p2pReliability.get(false),
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
import chronicles, std/[net, options], results
|
||||
import ../networks_config
|
||||
import waku/factory/waku_conf
|
||||
|
||||
logScope:
|
||||
topics = "waku conf builder websocket"
|
||||
|
||||
@ -315,33 +315,22 @@ hence would have reachability issues.""",
|
||||
.}: seq[string]
|
||||
|
||||
keepAlive* {.
|
||||
desc: "Enable keep-alive for idle connections: true|false",
|
||||
defaultValue: false,
|
||||
desc:
|
||||
"Deprecated since >=v0.37. This param is ignored and keep alive is always active",
|
||||
defaultValue: true,
|
||||
name: "keep-alive"
|
||||
.}: bool
|
||||
|
||||
# TODO: This is trying to do too much, this should only be used for autosharding, which itself should be configurable
|
||||
# If numShardsInNetwork is not set, we use the number of shards configured as numShardsInNetwork
|
||||
numShardsInNetwork* {.
|
||||
desc: "Number of shards in the network",
|
||||
defaultValue: 0,
|
||||
desc:
|
||||
"Enables autosharding and set number of shards in the cluster, set to `0` to use static sharding",
|
||||
defaultValue: 1,
|
||||
name: "num-shards-in-network"
|
||||
.}: uint32
|
||||
.}: uint16
|
||||
|
||||
shards* {.
|
||||
desc:
|
||||
"Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
|
||||
defaultValue:
|
||||
@[
|
||||
uint16(0),
|
||||
uint16(1),
|
||||
uint16(2),
|
||||
uint16(3),
|
||||
uint16(4),
|
||||
uint16(5),
|
||||
uint16(6),
|
||||
uint16(7),
|
||||
],
|
||||
"Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated. Subscribes to all shards by default in auto-sharding, no shard for static sharding",
|
||||
name: "shard"
|
||||
.}: seq[uint16]
|
||||
|
||||
@ -357,7 +346,7 @@ hence would have reachability issues.""",
|
||||
|
||||
legacyStore* {.
|
||||
desc: "Enable/disable support of Waku Store v2 as a service",
|
||||
defaultValue: true,
|
||||
defaultValue: false,
|
||||
name: "legacy-store"
|
||||
.}: bool
|
||||
|
||||
@ -556,7 +545,8 @@ with the drawback of consuming some more bandwidth.""",
|
||||
.}: bool
|
||||
|
||||
dnsDiscoveryUrl* {.
|
||||
desc: "URL for DNS node list in format 'enrtree://<key>@<fqdn>'",
|
||||
desc:
|
||||
"URL for DNS node list in format 'enrtree://<key>@<fqdn>', enables DNS Discovery",
|
||||
defaultValue: "",
|
||||
name: "dns-discovery-url"
|
||||
.}: string
|
||||
@ -863,9 +853,9 @@ proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
|
||||
proc toInspectRlnDbConf*(n: WakuNodeConf): InspectRlnDbConf =
|
||||
return InspectRlnDbConf(treePath: n.treePath)
|
||||
|
||||
proc toClusterConf(
|
||||
proc toNetworkConf(
|
||||
preset: string, clusterId: Option[uint16]
|
||||
): ConfResult[Option[ClusterConf]] =
|
||||
): ConfResult[Option[NetworkConf]] =
|
||||
var lcPreset = toLowerAscii(preset)
|
||||
if clusterId.isSome() and clusterId.get() == 1:
|
||||
warn(
|
||||
@ -875,9 +865,9 @@ proc toClusterConf(
|
||||
|
||||
case lcPreset
|
||||
of "":
|
||||
ok(none(ClusterConf))
|
||||
ok(none(NetworkConf))
|
||||
of "twn":
|
||||
ok(some(ClusterConf.TheWakuNetworkConf()))
|
||||
ok(some(NetworkConf.TheWakuNetworkConf()))
|
||||
else:
|
||||
err("Invalid --preset value passed: " & lcPreset)
|
||||
|
||||
@ -914,11 +904,11 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
|
||||
b.withProtectedShards(n.protectedShards)
|
||||
b.withClusterId(n.clusterId)
|
||||
|
||||
let clusterConf = toClusterConf(n.preset, some(n.clusterId)).valueOr:
|
||||
let networkConf = toNetworkConf(n.preset, some(n.clusterId)).valueOr:
|
||||
return err("Error determining cluster from preset: " & $error)
|
||||
|
||||
if clusterConf.isSome():
|
||||
b.withClusterConf(clusterConf.get())
|
||||
if networkConf.isSome():
|
||||
b.withNetworkConf(networkConf.get())
|
||||
|
||||
b.withAgentString(n.agentString)
|
||||
|
||||
@@ -951,12 +941,18 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withRelayPeerExchange(n.relayPeerExchange)
b.withRelayShardedPeerManagement(n.relayShardedPeerManagement)
b.withStaticNodes(n.staticNodes)
b.withKeepAlive(n.keepAlive)

if n.numShardsInNetwork != 0:
b.withNumShardsInNetwork(n.numShardsInNetwork)
b.withNumShardsInCluster(n.numShardsInNetwork)
b.withShardingConf(AutoSharding)
else:
b.withShardingConf(StaticSharding)

# It is not possible to pass an empty sequence on the CLI
# If this is empty, it means the user did not specify any shards
if n.shards.len != 0:
b.withSubscribeShards(n.shards)

b.withShards(n.shards)
b.withContentTopics(n.contentTopics)

b.storeServiceConf.withEnabled(n.store)
|
||||
@ -1003,8 +999,8 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
|
||||
b.metricsServerConf.withHttpPort(n.metricsServerPort)
|
||||
b.metricsServerConf.withLogging(n.metricsLogging)
|
||||
|
||||
b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery)
|
||||
b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
|
||||
if n.dnsDiscoveryUrl != "":
|
||||
b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
|
||||
b.dnsDiscoveryConf.withNameServers(n.dnsAddrsNameServers)
|
||||
|
||||
if n.discv5Discovery.isSome():
|
||||
@ -1027,6 +1023,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
|
||||
b.webSocketConf.withKeyPath(n.websocketSecureKeyPath)
|
||||
b.webSocketConf.withCertPath(n.websocketSecureCertPath)
|
||||
|
||||
b.withRateLimits(n.rateLimits)
|
||||
b.rateLimitConf.withRateLimits(n.rateLimits)
|
||||
|
||||
return b.build()
|
||||
|
||||
@ -6,13 +6,7 @@ import
  libp2p/nameresolving/dnsresolver,
  std/[options, sequtils, net],
  results
import
  ../common/utils/nat,
  ../node/net_config,
  ../waku_enr,
  ../waku_core,
  ./waku_conf,
  ./networks_config
import ../common/utils/nat, ../node/net_config, ../waku_enr, ../waku_core, ./waku_conf

proc enrConfiguration*(
    conf: WakuConf, netConfig: NetConfig
@ -29,7 +23,7 @@ proc enrConfiguration*(
  enrBuilder.withMultiaddrs(netConfig.enrMultiaddrs)

  enrBuilder.withWakuRelaySharding(
    RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
    RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards)
  ).isOkOr:
    return err("could not initialize ENR with shards")

@ -64,14 +58,14 @@ proc dnsResolve*(
# TODO: Reduce number of parameters, can be done once the same is done on Netconfig.init
proc networkConfiguration*(
    clusterId: uint16,
    conf: NetworkConfig,
    conf: EndpointConf,
    discv5Conf: Option[Discv5Conf],
    webSocketConf: Option[WebSocketConf],
    wakuFlags: CapabilitiesBitfield,
    dnsAddrsNameServers: seq[IpAddress],
    portsShift: uint16,
    clientId: string,
): NetConfigResult =
): Future[NetConfigResult] {.async.} =
  ## `udpPort` is only supplied to satisfy underlying APIs but is not
  ## actually a supported transport for libp2p traffic.
  let natRes = setupNat(
@ -105,7 +99,7 @@ proc networkConfiguration*(
  # Resolve and use DNS domain IP
  if conf.dns4DomainName.isSome() and extIp.isNone():
    try:
      let dnsRes = waitFor dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers)
      let dnsRes = await dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers)

      if dnsRes.isErr():
        return err($dnsRes.error) # Pass error down the stack
@ -143,11 +137,3 @@ proc networkConfiguration*(
  )

  return netConfigRes

# TODO: numShardsInNetwork should be mandatory with autosharding, and unneeded otherwise
proc getNumShardsInNetwork*(conf: WakuConf): uint32 =
  if conf.numShardsInNetwork != 0:
    return conf.numShardsInNetwork
  # If conf.numShardsInNetwork is not set, use 1024 - the maximum possible as per the static sharding spec
  # https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding
  return uint32(MaxShardIndex + 1)

@ -1,18 +1,23 @@
{.push raises: [].}

import stint, std/[nativesockets, options]
import chronicles, results, stint

type WebSocketSecureConf* {.requiresInit.} = object
  keyPath*: string
  certPath*: string
logScope:
  topics = "waku networks conf"

type WebSocketConf* = object
  port*: Port
  secureConf*: Option[WebSocketSecureConf]
type
  ShardingConfKind* = enum
    AutoSharding
    StaticSharding

  # TODO: Rename this type to match file name
  ShardingConf* = object
    case kind*: ShardingConfKind
    of AutoSharding:
      numShardsInCluster*: uint16
    of StaticSharding:
      discard

type ClusterConf* = object
type NetworkConf* = object
  maxMessageSize*: string # TODO: static convert to a uint64
  clusterId*: uint16
  rlnRelay*: bool
@ -21,17 +26,16 @@ type ClusterConf* = object
  rlnRelayDynamic*: bool
  rlnEpochSizeSec*: uint64
  rlnRelayUserMessageLimit*: uint64
  # TODO: should be uint16 like the `shards` parameter
  numShardsInNetwork*: uint32
  shardingConf*: ShardingConf
  discv5Discovery*: bool
  discv5BootstrapNodes*: seq[string]

# cluster-id=1 (aka The Waku Network)
# Cluster configuration corresponding to The Waku Network. Note that it
# overrides existing cli configuration
proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf =
  const RelayChainId = 59141'u256
  return ClusterConf(
  return NetworkConf(
    maxMessageSize: "150KiB",
    clusterId: 1,
    rlnRelay: true,
@ -40,7 +44,7 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
    rlnRelayChainId: RelayChainId,
    rlnEpochSizeSec: 600,
    rlnRelayUserMessageLimit: 100,
    numShardsInNetwork: 8,
    shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8),
    discv5Discovery: true,
    discv5BootstrapNodes:
      @[
@ -49,3 +53,21 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
        "enr:-QEkuEBfEzJm_kigJ2HoSS_RBFJYhKHocGdkhhBr6jSUAWjLdFPp6Pj1l4yiTQp7TGHyu1kC6FyaU573VN8klLsEm-XuAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOwsS69tgD7u1K50r5-qG5hweuTwa0W26aYPnvivpNlrYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
      ],
  )

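For context, a minimal usage sketch of the renamed preset helper (illustrative, not part of this changeset; `toNetworkConf` is the private proc shown earlier in this diff, and the assertions only restate values from `TheWakuNetworkConf`):

```nim
# Illustrative only: resolve the "twn" preset into a NetworkConf.
let presetConf = toNetworkConf("twn", some(uint16(1))).valueOr:
  quit("invalid preset: " & error)

if presetConf.isSome():
  let netConf = presetConf.get()
  assert netConf.clusterId == 1
  assert netConf.shardingConf.kind == AutoSharding
  assert netConf.shardingConf.numShardsInCluster == 8
```
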
proc validateShards*(
    shardingConf: ShardingConf, shards: seq[uint16]
): Result[void, string] =
  case shardingConf.kind
  of StaticSharding:
    return ok()
  of AutoSharding:
    let numShardsInCluster = shardingConf.numShardsInCluster
    for shard in shards:
      if shard >= numShardsInCluster:
        let msg =
          "validateShards invalid shard: " & $shard & " when numShardsInCluster: " &
          $numShardsInCluster
        error "validateShards failed", error = msg
        return err(msg)

    return ok()

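A small usage sketch for the `ShardingConf`/`validateShards` pair introduced above (illustrative, not part of the changeset):

```nim
# Illustrative only: subscribe shards are range-checked in auto-sharding,
# unconstrained in static sharding.
let autoConf = ShardingConf(kind: AutoSharding, numShardsInCluster: 8)
assert autoConf.validateShards(@[0'u16, 3, 7]).isOk()
assert autoConf.validateShards(@[8'u16]).isErr() # 8 is out of range for 8 shards

let staticConf = ShardingConf(kind: StaticSharding)
assert staticConf.validateShards(@[42'u16]).isOk()
```
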
@ -10,6 +10,7 @@ import

import
  ./internal_config,
  ./networks_config,
  ./waku_conf,
  ./builder,
  ./validator_signed,
@ -121,7 +122,7 @@ proc initNode(
    relayServiceRatio = conf.relayServiceRatio,
    shardAware = conf.relayShardedPeerManagement,
  )
  builder.withRateLimit(conf.rateLimits)
  builder.withRateLimit(conf.rateLimit)
  builder.withCircuitRelay(relay)

  let node =
@ -137,10 +138,12 @@ proc initNode(
proc getAutoshards*(
    node: WakuNode, contentTopics: seq[string]
): Result[seq[RelayShard], string] =
  if node.wakuAutoSharding.isNone():
    return err("Static sharding used, cannot get shards from content topics")
  var autoShards: seq[RelayShard]
  for contentTopic in contentTopics:
    let shard = node.wakuSharding.getShard(contentTopic).valueOr:
      return err("Could not parse content topic: " & error)
    let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
      return err("Could not parse content topic: " & error)
    autoShards.add(shard)
  return ok(autoshards)

@ -163,7 +166,7 @@ proc setupProtocols(
  if conf.storeServiceConf.isSome():
    let storeServiceConf = conf.storeServiceConf.get()
    if storeServiceConf.supportV2:
      let archiveDriverRes = waitFor legacy_driver.ArchiveDriver.new(
      let archiveDriverRes = await legacy_driver.ArchiveDriver.new(
        storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration,
        storeServiceConf.maxNumDbConnections, onFatalErrorAction,
      )
@ -197,7 +200,7 @@ proc setupProtocols(
      else:
        storeServiceConf.dbMigration

    let archiveDriverRes = waitFor driver.ArchiveDriver.new(
    let archiveDriverRes = await driver.ArchiveDriver.new(
      storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate,
      storeServiceConf.maxNumDbConnections, onFatalErrorAction,
    )
@ -258,16 +261,11 @@ proc setupProtocols(
  if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume:
    node.setupStoreResume()

  # If conf.numShardsInNetwork is not set, use the number of shards configured as numShardsInNetwork
  let numShardsInNetwork = getNumShardsInNetwork(conf)

  if conf.numShardsInNetwork == 0:
    warn "Number of shards in network not configured, setting it to",
      # TODO: If not configured, it mounts 1024 shards! Make it a mandatory configuration instead
      numShardsInNetwork = $numShardsInNetwork

  node.mountSharding(conf.clusterId, numShardsInNetwork).isOkOr:
    return err("failed to mount waku sharding: " & error)
  if conf.shardingConf.kind == AutoSharding:
    node.mountAutoSharding(conf.clusterId, conf.shardingConf.numShardsInCluster).isOkOr:
      return err("failed to mount waku auto sharding: " & error)
  else:
    warn("Auto sharding is disabled")

  # Mount relay on all nodes
  var peerExchangeHandler = none(RoutingRecordsHandler)

@ -290,14 +288,22 @@ proc setupProtocols(

    peerExchangeHandler = some(handlePeerExchange)

  let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
    return err("Could not get autoshards: " & error)
  # TODO: when using autosharding, the user should not be expected to pass any shards, but only content topics
  # Hence, this joint logic should be removed in favour of an either logic:
  # use passed shards (static) or deduce shards from content topics (auto)
  let autoShards =
    if node.wakuAutoSharding.isSome():
      node.getAutoshards(conf.contentTopics).valueOr:
        return err("Could not get autoshards: " & error)
    else:
      @[]

  debug "Shards created from content topics",
    contentTopics = conf.contentTopics, shards = autoShards

  let confShards =
    conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
  let confShards = conf.subscribeShards.mapIt(
    RelayShard(clusterId: conf.clusterId, shardId: uint16(it))
  )
  let shards = confShards & autoShards

  if conf.relay:
@ -313,7 +319,7 @@ proc setupProtocols(
    # Add validation keys to protected topics
    var subscribedProtectedShards: seq[ProtectedShard]
    for shardKey in conf.protectedShards:
      if shardKey.shard notin conf.shards:
      if shardKey.shard notin conf.subscribeShards:
        warn "protected shard not in subscribed shards, skipping adding validator",
          protectedShard = shardKey.shard, subscribedShards = shards
        continue
@ -348,7 +354,7 @@ proc setupProtocols(
    )

    try:
      waitFor node.mountRlnRelay(rlnConf)
      await node.mountRlnRelay(rlnConf)
    except CatchableError:
      return err("failed to mount waku RLN relay protocol: " & getCurrentExceptionMsg())

@ -462,10 +468,6 @@ proc startNode*(
  if conf.peerExchange and not conf.discv5Conf.isSome():
    node.startPeerExchangeLoop()

  # Start keepalive, if enabled
  if conf.keepAlive:
    node.startKeepalive()

  # Maintain relay connections
  if conf.relay:
    node.peerManager.start()
@ -474,11 +476,13 @@

proc setupNode*(
    wakuConf: WakuConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay
): Result[WakuNode, string] =
  let netConfig = networkConfiguration(
    wakuConf.clusterId, wakuConf.networkConf, wakuConf.discv5Conf,
    wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers,
    wakuConf.portsShift, clientId,
): Future[Result[WakuNode, string]] {.async.} =
  let netConfig = (
    await networkConfiguration(
      wakuConf.clusterId, wakuConf.endpointConf, wakuConf.discv5Conf,
      wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers,
      wakuConf.portsShift, clientId,
    )
  ).valueOr:
    error "failed to create internal config", error = error
    return err("failed to create internal config: " & error)
@ -509,7 +513,7 @@ proc setupNode*(
  debug "Mounting protocols"

  try:
    (waitFor node.setupProtocols(wakuConf)).isOkOr:
    (await node.setupProtocols(wakuConf)).isOkOr:
      error "Mounting protocols failed", error = error
      return err("Mounting protocols failed: " & error)
  except CatchableError:

@ -127,11 +127,16 @@ proc setupAppCallbacks(
  if node.wakuRelay.isNil():
    return err("Cannot configure relayHandler callback without Relay mounted")

  let autoShards = node.getAutoshards(conf.contentTopics).valueOr:
    return err("Could not get autoshards: " & error)
  let autoShards =
    if node.wakuAutoSharding.isSome():
      node.getAutoshards(conf.contentTopics).valueOr:
        return err("Could not get autoshards: " & error)
    else:
      @[]

  let confShards =
    conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
  let confShards = conf.subscribeShards.mapIt(
    RelayShard(clusterId: conf.clusterId, shardId: uint16(it))
  )
  let shards = confShards & autoShards

  let uniqueShards = deduplicate(shards)
@ -157,7 +162,7 @@ proc setupAppCallbacks(

proc new*(
    T: type Waku, wakuConf: WakuConf, appCallbacks: AppCallbacks = nil
): Result[Waku, string] =
): Future[Result[Waku, string]] {.async.} =
  let rng = crypto.newRng()

  logging.setupLog(wakuConf.logLevel, wakuConf.logFormat)
@ -181,7 +186,7 @@ proc new*(

  var relay = newCircuitRelay(wakuConf.circuitRelayClient)

  let node = setupNode(wakuConf, rng, relay).valueOr:
  let node = (await setupNode(wakuConf, rng, relay)).valueOr:
    error "Failed setting up node", error = $error
    return err("Failed setting up node: " & $error)

@ -243,28 +248,30 @@ proc getPorts(

  return ok((tcpPort: tcpPort, websocketPort: websocketPort))

proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
proc getRunningNetConfig(waku: ptr Waku): Future[Result[NetConfig, string]] {.async.} =
  var conf = waku[].conf
  let (tcpPort, websocketPort) = getPorts(waku[].node.switch.peerInfo.listenAddrs).valueOr:
    return err("Could not retrieve ports: " & error)

  if tcpPort.isSome():
    conf.networkConf.p2pTcpPort = tcpPort.get()
    conf.endpointConf.p2pTcpPort = tcpPort.get()

  if websocketPort.isSome() and conf.webSocketConf.isSome():
    conf.webSocketConf.get().port = websocketPort.get()

  # Rebuild NetConfig with bound port values
  let netConf = networkConfiguration(
    conf.clusterId, conf.networkConf, conf.discv5Conf, conf.webSocketConf,
    conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId,
  let netConf = (
    await networkConfiguration(
      conf.clusterId, conf.endpointConf, conf.discv5Conf, conf.webSocketConf,
      conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId,
    )
  ).valueOr:
    return err("Could not update NetConfig: " & error)

  return ok(netConf)

proc updateEnr(waku: ptr Waku): Result[void, string] =
  let netConf: NetConfig = getRunningNetConfig(waku).valueOr:
proc updateEnr(waku: ptr Waku): Future[Result[void, string]] {.async.} =
  let netConf: NetConfig = (await getRunningNetConfig(waku)).valueOr:
    return err("error calling updateNetConfig: " & $error)
  let record = enrConfiguration(waku[].conf, netConf).valueOr:
    return err("ENR setup failed: " & error)
@ -304,11 +311,11 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =

  return ok()

proc updateWaku(waku: ptr Waku): Result[void, string] =
proc updateWaku(waku: ptr Waku): Future[Result[void, string]] {.async.} =
  let conf = waku[].conf
  if conf.networkConf.p2pTcpPort == Port(0) or
  if conf.endpointConf.p2pTcpPort == Port(0) or
      (conf.websocketConf.isSome() and conf.websocketConf.get.port == Port(0)):
    updateEnr(waku).isOkOr:
    (await updateEnr(waku)).isOkOr:
      return err("error calling updateEnr: " & $error)

  ?updateAnnouncedAddrWithPrimaryIpAddr(waku[].node)
@ -376,7 +383,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
    return err("error while calling startNode: " & $error)

  ## Update waku data that is set dynamically on node start
  updateWaku(waku).isOkOr:
  (await updateWaku(waku)).isOkOr:
    return err("Error in updateApp: " & $error)

  ## Discv5
@ -389,7 +396,7 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
      waku.dynamicBootstrapNodes,
      waku.rng,
      conf.nodeKey,
      conf.networkConf.p2pListenAddress,
      conf.endpointConf.p2pListenAddress,
      conf.portsShift,
    )

@ -401,7 +408,8 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
    waku[].deliveryMonitor.startDeliveryMonitor()

  ## Health Monitor
  waku[].healthMonitor.startHealthMonitor()
  waku[].healthMonitor.startHealthMonitor().isOkOr:
    return err("failed to start health monitor: " & $error)

  if conf.restServerConf.isSome():
    rest_server_builder.startRestServerProtocolSupport(
@ -412,14 +420,18 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
      conf.relay,
      conf.lightPush,
      conf.clusterId,
      conf.shards,
      conf.subscribeShards,
      conf.contentTopics,
    ).isOkOr:
      return err ("Starting protocols support REST server failed: " & $error)

  if conf.metricsServerConf.isSome():
    waku[].metricsServer = waku_metrics.startMetricsServerAndLogging(
      conf.metricsServerConf.get(), conf.portsShift
    waku[].metricsServer = (
      await (
        waku_metrics.startMetricsServerAndLogging(
          conf.metricsServerConf.get(), conf.portsShift
        )
      )
    ).valueOr:
      return err("Starting monitoring and external interfaces failed: " & error)

@ -12,6 +12,7 @@ import
  ../discovery/waku_discv5,
  ../node/waku_metrics,
  ../common/logging,
  ../common/rate_limit/setting,
  ../waku_enr/capabilities,
  ./networks_config

@ -20,6 +21,14 @@ export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerCon
logScope:
  topics = "waku conf"

type WebSocketSecureConf* {.requiresInit.} = object
  keyPath*: string
  certPath*: string

type WebSocketConf* = object
  port*: Port
  secureConf*: Option[WebSocketSecureConf]

# TODO: should be defined in validator_signed.nim and imported here
type ProtectedShard* {.requiresInit.} = object
  shard*: uint16
@ -50,7 +59,7 @@ type FilterServiceConf* {.requiresInit.} = object
  subscriptionTimeout*: uint16
  maxCriteria*: uint32

type NetworkConfig* = object # TODO: make enum
type EndpointConf* = object # TODO: make enum
  natStrategy*: string
  p2pTcpPort*: Port
  dns4DomainName*: Option[string]
@ -68,11 +77,10 @@ type WakuConf* {.requiresInit.} = ref object
  nodeKey*: crypto.PrivateKey

  clusterId*: uint16
  shards*: seq[uint16]
  subscribeShards*: seq[uint16]
  protectedShards*: seq[ProtectedShard]

  # TODO: move to an autoShardingConf
  numShardsInNetwork*: uint32
  shardingConf*: ShardingConf
  contentTopics*: seq[string]

  relay*: bool
@ -83,7 +91,6 @@ type WakuConf* {.requiresInit.} = ref object
  relayPeerExchange*: bool
  rendezvous*: bool
  circuitRelayClient*: bool
  keepAlive*: bool

  discv5Conf*: Option[Discv5Conf]
  dnsDiscoveryConf*: Option[DnsDiscoveryConf]
@ -96,7 +103,7 @@ type WakuConf* {.requiresInit.} = ref object

  portsShift*: uint16
  dnsAddrsNameServers*: seq[IpAddress]
  networkConf*: NetworkConfig
  endpointConf*: EndpointConf
  wakuFlags*: CapabilitiesBitfield

  # TODO: could probably make it a `PeerRemoteInfo`
@ -121,8 +128,7 @@ type WakuConf* {.requiresInit.} = ref object

  colocationLimit*: int

  # TODO: use proper type
  rateLimits*: seq[string]
  rateLimit*: ProtocolRateLimitSettings

  # TODO: those could be in a relay conf object
  maxRelayPeers*: Option[int]
@ -143,8 +149,8 @@ proc logConf*(conf: WakuConf) =

  info "Configuration. Network", cluster = conf.clusterId

  for shard in conf.shards:
    info "Configuration. Shards", shard = shard
  for shard in conf.subscribeShards:
    info "Configuration. Active Relay Shards", shard = shard

  if conf.discv5Conf.isSome():
    for i in conf.discv5Conf.get().bootstrapNodes:
@ -166,26 +172,9 @@ proc validateNodeKey(wakuConf: WakuConf): Result[void, string] =
    return err("nodekey param is invalid")
  return ok()

proc validateShards(wakuConf: WakuConf): Result[void, string] =
  let numShardsInNetwork = wakuConf.numShardsInNetwork

  # TODO: fix up this behaviour
  if numShardsInNetwork == 0:
    return ok()

  for shard in wakuConf.shards:
    if shard >= numShardsInNetwork:
      let msg =
        "validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
        $numShardsInNetwork # fmt doesn't work
      error "validateShards failed", error = msg
      return err(msg)

  return ok()

proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =
  if wakuConf.networkConf.dns4DomainName.isSome() and
      isEmptyOrWhiteSpace(wakuConf.networkConf.dns4DomainName.get().string):
  if wakuConf.endpointConf.dns4DomainName.isSome() and
      isEmptyOrWhiteSpace(wakuConf.endpointConf.dns4DomainName.get().string):
    return err("dns4-domain-name is an empty string, set it to none(string) instead")

  if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio):
@ -237,6 +226,6 @@ proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =

proc validate*(wakuConf: WakuConf): Result[void, string] =
  ?wakuConf.validateNodeKey()
  ?wakuConf.validateShards()
  ?wakuConf.shardingConf.validateShards(wakuConf.subscribeShards)
  ?wakuConf.validateNoEmptyStrings()
  return ok()

@ -1,6 +1,10 @@
{.push raises: [].}

import std/[options, sets, strformat], chronos, chronicles, libp2p/protocols/rendezvous
import
  std/[options, sets, strformat, random, sequtils],
  chronos,
  chronicles,
  libp2p/protocols/rendezvous

import
  ../waku_node,
@ -13,6 +17,10 @@ import

## This module is aimed to check the state of the "self" Waku Node

# randomize initializes std/random's random number generator
# if not called, the outcome of randomization procedures will be the same in every run
randomize()

type
  HealthReport* = object
    nodeHealth*: HealthStatus
@ -22,6 +30,7 @@ type
    nodeHealth: HealthStatus
    node: WakuNode
    onlineMonitor*: OnlineMonitor
    keepAliveFut: Future[void]

template checkWakuNodeNotNil(node: WakuNode, p: ProtocolHealth): untyped =
  if node.isNil():
@ -224,6 +233,145 @@ proc getRendezvousHealth(hm: NodeHealthMonitor): ProtocolHealth =

  return p.ready()

proc selectRandomPeersForKeepalive(
    node: WakuNode, outPeers: seq[PeerId], numRandomPeers: int
): Future[seq[PeerId]] {.async.} =
  ## Select peers for random keepalive, prioritizing mesh peers

  if node.wakuRelay.isNil():
    return selectRandomPeers(outPeers, numRandomPeers)

  let meshPeers = node.wakuRelay.getPeersInMesh().valueOr:
    error "Failed getting peers in mesh for ping", error = error
    # Fallback to random selection from all outgoing peers
    return selectRandomPeers(outPeers, numRandomPeers)

  trace "Mesh peers for keepalive", meshPeers = meshPeers

  # Get non-mesh peers and shuffle them
  var nonMeshPeers = outPeers.filterIt(it notin meshPeers)
  shuffle(nonMeshPeers)

  # Combine mesh peers + random non-mesh peers up to numRandomPeers total
  let numNonMeshPeers = max(0, numRandomPeers - len(meshPeers))
  let selectedNonMeshPeers = nonMeshPeers[0 ..< min(len(nonMeshPeers), numNonMeshPeers)]

  let selectedPeers = meshPeers & selectedNonMeshPeers
  trace "Selected peers for keepalive", selected = selectedPeers
  return selectedPeers

proc keepAliveLoop(
    node: WakuNode,
    randomPeersKeepalive: chronos.Duration,
    allPeersKeepAlive: chronos.Duration,
    numRandomPeers = 10,
) {.async.} =
  # Calculate how many random peer cycles before pinging all peers
  let randomToAllRatio =
    int(allPeersKeepAlive.seconds() / randomPeersKeepalive.seconds())
  var countdownToPingAll = max(0, randomToAllRatio - 1)

  # Sleep detection configuration
  let sleepDetectionInterval = 3 * randomPeersKeepalive

  # Failure tracking
  var consecutiveIterationFailures = 0
  const maxAllowedConsecutiveFailures = 2

  var lastTimeExecuted = Moment.now()

  while true:
    trace "Running keepalive loop"
    await sleepAsync(randomPeersKeepalive)

    if not node.started:
      continue

    let currentTime = Moment.now()

    # Check for sleep detection
    if currentTime - lastTimeExecuted > sleepDetectionInterval:
      warn "Keep alive hasn't been executed recently. Killing all connections"
      await node.peerManager.disconnectAllPeers()
      lastTimeExecuted = currentTime
      consecutiveIterationFailures = 0
      continue

    # Check for consecutive failures
    if consecutiveIterationFailures > maxAllowedConsecutiveFailures:
      warn "Too many consecutive ping failures, node likely disconnected. Killing all connections",
        consecutiveIterationFailures, maxAllowedConsecutiveFailures
      await node.peerManager.disconnectAllPeers()
      consecutiveIterationFailures = 0
      lastTimeExecuted = currentTime
      continue

    # Determine which peers to ping
    let outPeers = node.peerManager.connectedPeers()[1]
    let peersToPing =
      if countdownToPingAll > 0:
        await selectRandomPeersForKeepalive(node, outPeers, numRandomPeers)
      else:
        outPeers

    let numPeersToPing = len(peersToPing)

    if countdownToPingAll > 0:
      trace "Pinging random peers",
        count = numPeersToPing, countdownToPingAll = countdownToPingAll
      countdownToPingAll.dec()
    else:
      trace "Pinging all peers", count = numPeersToPing
      countdownToPingAll = max(0, randomToAllRatio - 1)

    # Execute keepalive pings
    let successfulPings = await parallelPings(node, peersToPing)

    if successfulPings != numPeersToPing:
      waku_node_errors.inc(
        amount = numPeersToPing - successfulPings, labelValues = ["keep_alive_failure"]
      )

    trace "Keepalive results",
      attemptedPings = numPeersToPing, successfulPings = successfulPings

    # Update failure tracking
    if numPeersToPing > 0 and successfulPings == 0:
      consecutiveIterationFailures.inc()
      error "All pings failed", consecutiveFailures = consecutiveIterationFailures
    else:
      consecutiveIterationFailures = 0

    lastTimeExecuted = currentTime

# 2 minutes default - 20% of the default chronosstream timeout duration
proc startKeepalive*(
    hm: NodeHealthMonitor,
    randomPeersKeepalive = 10.seconds,
    allPeersKeepalive = 2.minutes,
): Result[void, string] =
  # Validate input parameters
  if randomPeersKeepalive.isZero() or allPeersKeepAlive.isZero():
    error "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0",
      randomPeersKeepalive = $randomPeersKeepalive,
      allPeersKeepAlive = $allPeersKeepAlive
    return err(
      "startKeepalive: allPeersKeepAlive and randomPeersKeepalive must be greater than 0"
    )

  if allPeersKeepAlive < randomPeersKeepalive:
    error "startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive",
      allPeersKeepAlive = $allPeersKeepAlive,
      randomPeersKeepalive = $randomPeersKeepalive
    return
      err("startKeepalive: allPeersKeepAlive can't be less than randomPeersKeepalive")

  info "starting keepalive",
    randomPeersKeepalive = randomPeersKeepalive, allPeersKeepalive = allPeersKeepalive

  hm.keepAliveFut = hm.node.keepAliveLoop(randomPeersKeepalive, allPeersKeepalive)
  return ok()

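A hedged sketch of driving the new keepalive API (illustrative, not part of the diff; `healthMonitor` is assumed to be a `NodeHealthMonitor` already attached to a started node):

```nim
# Illustrative only: intervals are validated, so the Result must be checked.
healthMonitor.startKeepalive(
  randomPeersKeepalive = 10.seconds, allPeersKeepalive = 2.minutes
).isOkOr:
  error "could not start keepalive", error = error
```
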
proc getNodeHealthReport*(hm: NodeHealthMonitor): Future[HealthReport] {.async.} =
  var report: HealthReport
  report.nodeHealth = hm.nodeHealth
@ -253,11 +401,15 @@ proc setNodeToHealthMonitor*(hm: NodeHealthMonitor, node: WakuNode) =
proc setOverallHealth*(hm: NodeHealthMonitor, health: HealthStatus) =
  hm.nodeHealth = health

proc startHealthMonitor*(hm: NodeHealthMonitor) =
proc startHealthMonitor*(hm: NodeHealthMonitor): Result[void, string] =
  hm.onlineMonitor.startOnlineMonitor()
  hm.startKeepalive().isOkOr:
    return err("startHealthMonitor: failed starting keep alive: " & error)
  return ok()

proc stopHealthMonitor*(hm: NodeHealthMonitor) {.async.} =
  await hm.onlineMonitor.stopOnlineMonitor()
  await hm.keepAliveFut.cancelAndWait()

proc new*(
    T: type NodeHealthMonitor,

@ -53,7 +53,7 @@ proc networkConnectivityLoop(self: OnlineMonitor): Future[void] {.async.} =
  ## and triggers any change that depends on the network connectivity state
  while true:
    await self.updateOnlineState()
    await sleepAsync(15.seconds)
    await sleepAsync(5.seconds)

proc startOnlineMonitor*(self: OnlineMonitor) =
  self.networkConnLoopHandle = self.networkConnectivityLoop()

@ -374,14 +374,6 @@ proc connectToNodes*(
  info "Finished dialing multiple peers",
    successfulConns = connectedPeers.len, attempted = nodes.len

  # The issue seems to be around peers not being fully connected when
  # trying to subscribe. So what we do is sleep to guarantee nodes are
  # fully connected.
  #
  # This issue was known to Dmitiry on nim-libp2p and may be resolvable
  # later.
  await sleepAsync(chronos.seconds(5))

proc disconnectNode*(pm: PeerManager, peerId: PeerId) {.async.} =
  await pm.switch.disconnect(peerId)

@ -503,6 +495,13 @@ proc connectedPeers*(

  return (inPeers, outPeers)

proc disconnectAllPeers*(pm: PeerManager) {.async.} =
  let (inPeerIds, outPeerIds) = pm.connectedPeers()
  let connectedPeers = concat(inPeerIds, outPeerIds)

  let futs = connectedPeers.mapIt(pm.disconnectNode(it))
  await allFutures(futs)

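An illustrative sketch (not part of the diff) of the new `disconnectAllPeers` helper, which the keepalive loop earlier in this changeset uses to reset networking after a suspected sleep or repeated ping failures:

```nim
# Illustrative only: drop every connection, then let the peer manager loops
# re-establish outgoing connections.
proc resetNetworking(node: WakuNode) {.async.} =
  await node.peerManager.disconnectAllPeers()
```
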
proc getStreamByPeerIdAndProtocol*(
    pm: PeerManager, peerId: PeerId, protocol: string
): Future[Result[Connection, string]] {.async.} =

@ -59,14 +59,14 @@ proc startMetricsLog*() =

proc startMetricsServer(
    serverIp: IpAddress, serverPort: Port
): Result[MetricsHttpServerRef, string] =
): Future[Result[MetricsHttpServerRef, string]] {.async.} =
  info "Starting metrics HTTP server", serverIp = $serverIp, serverPort = $serverPort

  let server = MetricsHttpServerRef.new($serverIp, serverPort).valueOr:
    return err("metrics HTTP server start failed: " & $error)

  try:
    waitFor server.start()
    await server.start()
  except CatchableError:
    return err("metrics HTTP server start failed: " & getCurrentExceptionMsg())

@ -75,10 +75,12 @@ proc startMetricsServer(

proc startMetricsServerAndLogging*(
    conf: MetricsServerConf, portsShift: uint16
): Result[MetricsHttpServerRef, string] =
): Future[Result[MetricsHttpServerRef, string]] {.async.} =
  var metricsServer: MetricsHttpServerRef
  metricsServer = startMetricsServer(
    conf.httpAddress, Port(conf.httpPort.uint16 + portsShift)
  metricsServer = (
    await (
      startMetricsServer(conf.httpAddress, Port(conf.httpPort.uint16 + portsShift))
    )
  ).valueOr:
    return err("Starting metrics server failed. Continuing in current state:" & $error)


@ -1,7 +1,7 @@
{.push raises: [].}

import
  std/[hashes, options, sugar, tables, strutils, sequtils, os, net],
  std/[hashes, options, sugar, tables, strutils, sequtils, os, net, random],
  chronos,
  chronicles,
  metrics,
@ -69,6 +69,10 @@ declarePublicGauge waku_px_peers,
logScope:
  topics = "waku node"

# randomize initializes std/random's random number generator
# if not called, the outcome of randomization procedures will be the same in every run
randomize()

# TODO: Move to application instance (e.g., `WakuNode2`)
# Git version in git describe format (defined compile time)
const git_version* {.strdefine.} = "n/a"
@ -108,7 +112,7 @@ type
    wakuLightpushClient*: WakuLightPushClient
    wakuPeerExchange*: WakuPeerExchange
    wakuMetadata*: WakuMetadata
    wakuSharding*: Sharding
    wakuAutoSharding*: Option[Sharding]
    enr*: enr.Record
    libp2pPing*: Ping
    rng*: ref rand.HmacDrbgContext
@ -124,6 +128,7 @@ proc new*(
    enr: enr.Record,
    switch: Switch,
    peerManager: PeerManager,
    rateLimitSettings: ProtocolRateLimitSettings = DefaultProtocolRateLimit,
    # TODO: make this argument required after tests are updated
    rng: ref HmacDrbgContext = crypto.newRng(),
): T {.raises: [Defect, LPError, IOError, TLSStreamProtocolError].} =
@ -140,7 +145,7 @@ proc new*(
    enr: enr,
    announcedAddresses: netConfig.announcedAddresses,
    topicSubscriptionQueue: queue,
    rateLimitSettings: DefaultProtocolRateLimit,
    rateLimitSettings: rateLimitSettings,
  )

  return node
@ -194,12 +199,13 @@ proc mountMetadata*(node: WakuNode, clusterId: uint32): Result[void, string] =

  return ok()

## Waku Sharding
proc mountSharding*(
## Waku AutoSharding
proc mountAutoSharding*(
    node: WakuNode, clusterId: uint16, shardCount: uint32
): Result[void, string] =
  info "mounting sharding", clusterId = clusterId, shardCount = shardCount
  node.wakuSharding = Sharding(clusterId: clusterId, shardCountGenZero: shardCount)
  info "mounting auto sharding", clusterId = clusterId, shardCount = shardCount
  node.wakuAutoSharding =
    some(Sharding(clusterId: clusterId, shardCountGenZero: shardCount))
  return ok()

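A sketch of the pattern the rest of this changeset follows now that auto-sharding lives in an `Option[Sharding]` (illustrative only, not part of the diff):

```nim
# Illustrative only: deduce a shard from a content topic when auto-sharding is mounted.
proc shardFor(node: WakuNode, contentTopic: string): Result[RelayShard, string] =
  if node.wakuAutoSharding.isNone():
    return err("static sharding: a pubsub topic must be given explicitly")
  node.wakuAutoSharding.get().getShard(contentTopic)
```
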
## Waku Sync
@ -318,11 +324,15 @@ proc subscribe*(
  let (pubsubTopic, contentTopicOp) =
    case subscription.kind
    of ContentSub:
      let shard = node.wakuSharding.getShard((subscription.topic)).valueOr:
        error "Autosharding error", error = error
        return err("Autosharding error: " & error)

      ($shard, some(subscription.topic))
      if node.wakuAutoSharding.isSome():
        let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
          error "Autosharding error", error = error
          return err("Autosharding error: " & error)
        ($shard, some(subscription.topic))
      else:
        return err(
          "Static sharding is used, relay subscriptions must specify a pubsub topic"
        )
    of PubsubSub:
      (subscription.topic, none(ContentTopic))
    else:
@ -349,11 +359,15 @@ proc unsubscribe*(
  let (pubsubTopic, contentTopicOp) =
    case subscription.kind
    of ContentUnsub:
      let shard = node.wakuSharding.getShard((subscription.topic)).valueOr:
        error "Autosharding error", error = error
        return err("Autosharding error: " & error)

      ($shard, some(subscription.topic))
      if node.wakuAutoSharding.isSome():
        let shard = node.wakuAutoSharding.get().getShard((subscription.topic)).valueOr:
          error "Autosharding error", error = error
          return err("Autosharding error: " & error)
        ($shard, some(subscription.topic))
      else:
        return err(
          "Static sharding is used, relay subscriptions must specify a pubsub topic"
        )
    of PubsubUnsub:
      (subscription.topic, none(ContentTopic))
    else:
@ -384,9 +398,10 @@ proc publish*(
    return err(msg)

  let pubsubTopic = pubsubTopicOp.valueOr:
    node.wakuSharding.getShard(message.contentTopic).valueOr:
    if node.wakuAutoSharding.isNone():
      return err("Pubsub topic must be specified when static sharding is enabled.")
    node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr:
      let msg = "Autosharding error: " & error
      error "publish error", err = msg
      return err(msg)

  #TODO instead of discard return error when 0 peers received the message
@ -560,8 +575,14 @@ proc filterSubscribe*(
      waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])

    return subRes
  elif node.wakuAutoSharding.isNone():
    error "Failed filter subscription, pubsub topic must be specified with static sharding"
    waku_node_errors.inc(labelValues = ["subscribe_filter_failure"])
  else:
    let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics)
    # No pubsub topic, autosharding is used to deduce it
    # but content topics must be well-formed for this
    let topicMapRes =
      node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics)

    let topicMap =
      if topicMapRes.isErr():
@ -571,11 +592,11 @@ proc filterSubscribe*(
        topicMapRes.get()

    var futures = collect(newSeq):
      for pubsub, topics in topicMap.pairs:
      for shard, topics in topicMap.pairs:
        info "registering filter subscription to content",
          pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
          shard = shard, contentTopics = topics, peer = remotePeer.peerId
        let content = topics.mapIt($it)
        node.wakuFilterClient.subscribe(remotePeer, $pubsub, content)
        node.wakuFilterClient.subscribe(remotePeer, $shard, content)

    var subRes: FilterSubscribeResult = FilterSubscribeResult.ok()
    try:
@ -639,8 +660,12 @@ proc filterUnsubscribe*(
      waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])

    return unsubRes
  elif node.wakuAutoSharding.isNone():
    error "Failed filter un-subscription, pubsub topic must be specified with static sharding"
    waku_node_errors.inc(labelValues = ["unsubscribe_filter_failure"])
  else: # pubsubTopic.isNone
    let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, contentTopics)
    let topicMapRes =
      node.wakuAutoSharding.get().getShardsFromContentTopics(contentTopics)

    let topicMap =
      if topicMapRes.isErr():
@ -650,11 +675,11 @@ proc filterUnsubscribe*(
        topicMapRes.get()

    var futures = collect(newSeq):
      for pubsub, topics in topicMap.pairs:
      for shard, topics in topicMap.pairs:
        info "deregistering filter subscription to content",
          pubsubTopic = pubsub, contentTopics = topics, peer = remotePeer.peerId
          shard = shard, contentTopics = topics, peer = remotePeer.peerId
        let content = topics.mapIt($it)
        node.wakuFilterClient.unsubscribe(remotePeer, $pubsub, content)
        node.wakuFilterClient.unsubscribe(remotePeer, $shard, content)

    var unsubRes: FilterSubscribeResult = FilterSubscribeResult.ok()
    try:
@ -1060,7 +1085,10 @@ proc legacyLightpushPublish*(
  if pubsubTopic.isSome():
    return await internalPublish(node, pubsubTopic.get(), message, peer)

  let topicMapRes = node.wakuSharding.parseSharding(pubsubTopic, message.contentTopic)
  if node.wakuAutoSharding.isNone():
    return err("Pubsub topic must be specified when static sharding is enabled")
  let topicMapRes =
    node.wakuAutoSharding.get().getShardsFromContentTopics(message.contentTopic)

  let topicMap =
    if topicMapRes.isErr():
@ -1116,7 +1144,7 @@ proc mountLightPush*(
      lightpush_protocol.getRelayPushHandler(node.wakuRelay, rlnPeer)

  node.wakuLightPush = WakuLightPush.new(
    node.peerManager, node.rng, pushHandler, node.wakuSharding, some(rateLimit)
    node.peerManager, node.rng, pushHandler, node.wakuAutoSharding, some(rateLimit)
  )

  if node.started:
@ -1163,7 +1191,9 @@ proc lightpushPublish*(
): Future[lightpush_protocol.WakuLightPushResult] {.async.} =
  if node.wakuLightpushClient.isNil() and node.wakuLightPush.isNil():
    error "failed to publish message as lightpush not available"
    return lighpushErrorResult(SERVICE_NOT_AVAILABLE, "Waku lightpush not available")
    return lighpushErrorResult(
      LightPushErrorCode.SERVICE_NOT_AVAILABLE, "Waku lightpush not available"
    )

  let toPeer: RemotePeerInfo = peerOpt.valueOr:
    if not node.wakuLightPush.isNil():
@ -1172,20 +1202,27 @@ proc lightpushPublish*(
      node.peerManager.selectPeer(WakuLightPushCodec).valueOr:
        let msg = "no suitable remote peers"
        error "failed to publish message", msg = msg
        return lighpushErrorResult(NO_PEERS_TO_RELAY, msg)
        return lighpushErrorResult(LightPushErrorCode.NO_PEERS_TO_RELAY, msg)
    else:
      return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers")
      return lighpushErrorResult(
        LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
      )

  let pubsubForPublish = pubSubTopic.valueOr:
    if node.wakuAutoSharding.isNone():
      let msg = "Pubsub topic must be specified when static sharding is enabled"
      error "lightpush publish error", error = msg
      return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)

    let parsedTopic = NsContentTopic.parse(message.contentTopic).valueOr:
      let msg = "Invalid content-topic:" & $error
      error "lightpush request handling error", error = msg
      return lighpushErrorResult(INVALID_MESSAGE_ERROR, msg)
      return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, msg)

    node.wakuSharding.getShard(parsedTopic).valueOr:
    node.wakuAutoSharding.get().getShard(parsedTopic).valueOr:
      let msg = "Autosharding error: " & error
      error "lightpush publish error", error = msg
      return lighpushErrorResult(INTERNAL_SERVER_ERROR, msg)
      return lighpushErrorResult(LightPushErrorCode.INTERNAL_SERVER_ERROR, msg)

  return await lightpushPublishHandler(node, pubsubForPublish, message, toPeer)

@ -1203,7 +1240,7 @@ proc mountRlnRelay*(
      CatchableError, "WakuRelay protocol is not mounted, cannot mount WakuRlnRelay"
    )

  let rlnRelayRes = waitFor WakuRlnRelay.new(rlnConf, registrationHandler)
  let rlnRelayRes = await WakuRlnRelay.new(rlnConf, registrationHandler)
  if rlnRelayRes.isErr():
    raise
      newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error)

||||
except LPError:
|
||||
error "failed to mount libp2pPing", error = getCurrentExceptionMsg()
|
||||
|
||||
# TODO: Move this logic to PeerManager
|
||||
proc keepaliveLoop(node: WakuNode, keepalive: chronos.Duration) {.async.} =
|
||||
while true:
|
||||
await sleepAsync(keepalive)
|
||||
if not node.started:
|
||||
proc pingPeer(node: WakuNode, peerId: PeerId): Future[Result[void, string]] {.async.} =
|
||||
## Ping a single peer and return the result
|
||||
|
||||
try:
|
||||
# Establish a stream
|
||||
let stream = (await node.peerManager.dialPeer(peerId, PingCodec)).valueOr:
|
||||
error "pingPeer: failed dialing peer", peerId = peerId
|
||||
return err("pingPeer failed dialing peer peerId: " & $peerId)
|
||||
defer:
|
||||
# Always close the stream
|
||||
try:
|
||||
await stream.close()
|
||||
except CatchableError as e:
|
||||
debug "Error closing ping connection", peerId = peerId, error = e.msg
|
||||
|
||||
# Perform ping
|
||||
let pingDuration = await node.libp2pPing.ping(stream)
|
||||
|
||||
trace "Ping successful", peerId = peerId, duration = pingDuration
|
||||
return ok()
|
||||
except CatchableError as e:
|
||||
error "pingPeer: exception raised pinging peer", peerId = peerId, error = e.msg
|
||||
return err("pingPeer: exception raised pinging peer: " & e.msg)
|
||||
|
||||
proc selectRandomPeers*(peers: seq[PeerId], numRandomPeers: int): seq[PeerId] =
|
||||
var randomPeers = peers
|
||||
shuffle(randomPeers)
|
||||
return randomPeers[0 ..< min(len(randomPeers), numRandomPeers)]
|
||||
|
||||
# Returns the number of succesful pings performed
|
||||
proc parallelPings*(node: WakuNode, peerIds: seq[PeerId]): Future[int] {.async.} =
|
||||
if len(peerIds) == 0:
|
||||
return 0
|
||||
|
||||
var pingFuts: seq[Future[Result[void, string]]]
|
||||
|
||||
# Create ping futures for each peer
|
||||
for i, peerId in peerIds:
|
||||
let fut = pingPeer(node, peerId)
|
||||
pingFuts.add(fut)
|
||||
|
||||
# Wait for all pings to complete
|
||||
discard await allFutures(pingFuts).withTimeout(5.seconds)
|
||||
|
||||
var successCount = 0
|
||||
for fut in pingFuts:
|
||||
if not fut.completed() or fut.failed():
|
||||
continue
|
||||
|
||||
# Keep connected peers alive while running
|
||||
# Each node is responsible of keeping its outgoing connections alive
|
||||
trace "Running keepalive"
|
||||
let res = fut.read()
|
||||
if res.isOk():
|
||||
successCount.inc()
|
||||
|
||||
# First get a list of connected peer infos
|
||||
let outPeers = node.peerManager.connectedPeers()[1]
|
||||
|
||||
for peerId in outPeers:
|
||||
try:
|
||||
let conn = (await node.peerManager.dialPeer(peerId, PingCodec)).valueOr:
|
||||
warn "Failed dialing peer for keep alive", peerId = peerId
|
||||
continue
|
||||
let pingDelay = await node.libp2pPing.ping(conn)
|
||||
await conn.close()
|
||||
except CatchableError as exc:
|
||||
waku_node_errors.inc(labelValues = ["keep_alive_failure"])
|
||||
|
||||
# 2 minutes default - 20% of the default chronosstream timeout duration
|
||||
proc startKeepalive*(node: WakuNode, keepalive = 2.minutes) =
|
||||
info "starting keepalive", keepalive = keepalive
|
||||
|
||||
asyncSpawn node.keepaliveLoop(keepalive)
|
||||
return successCount
|
||||
|
||||
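A sketch combining the new ping helpers the way the health monitor's keepalive loop does (illustrative, not part of the diff; the peer count is arbitrary):

```nim
# Illustrative only: ping a random subset of outgoing peers and count successes.
proc pingSomePeers(node: WakuNode) {.async.} =
  let outPeers = node.peerManager.connectedPeers()[1] # outgoing connections
  let targets = selectRandomPeers(outPeers, 10)
  let successful = await parallelPings(node, targets)
  if targets.len > 0 and successful == 0:
    warn "all keepalive pings failed"
```
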
proc mountRendezvous*(node: WakuNode) {.async: (raises: []).} =
  info "mounting rendezvous discovery protocol"
@ -1506,10 +1568,3 @@ proc isReady*(node: WakuNode): Future[bool] {.async: (raises: [Exception]).} =
      return true
    return await node.wakuRlnRelay.isReady()
  ## TODO: add other protocol `isReady` checks

proc setRateLimits*(node: WakuNode, limits: seq[string]): Result[void, string] =
  let rateLimitConfig = ProtocolRateLimitSettings.parse(limits)
  if rateLimitConfig.isErr():
    return err("invalid rate limit settings:" & rateLimitConfig.error)
  node.rateLimitSettings = rateLimitConfig.get()
  return ok()

@ -241,13 +241,20 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
    let shard = shardId.valueOr:
      return RestApiResponse.badRequest(fmt("Invalid shardId: {error}"))

    if node.wakuMetadata.isNil():
      return RestApiResponse.serviceUnavailable(
        "Error: Metadata Protocol is not mounted to the node"
      )

    if node.wakuRelay.isNil():
      return RestApiResponse.serviceUnavailable(
        "Error: Relay Protocol is not mounted to the node"
      )

    let topic =
      toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard))
    # TODO: clusterId and shards should be uint16 across all codebase and probably be defined as a type
    let topic = toPubsubTopic(
      RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard)
    )
    let pubsubPeers =
      node.wakuRelay.getConnectedPubSubPeers(topic).get(initHashSet[PubSubPeer](0))
    let relayPeer = PeersOfShard(
@ -284,13 +291,19 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
    let shard = shardId.valueOr:
      return RestApiResponse.badRequest(fmt("Invalid shardId: {error}"))

    if node.wakuMetadata.isNil():
      return RestApiResponse.serviceUnavailable(
        "Error: Metadata Protocol is not mounted to the node"
      )

    if node.wakuRelay.isNil():
      return RestApiResponse.serviceUnavailable(
        "Error: Relay Protocol is not mounted to the node"
      )

    let topic =
      toPubsubTopic(RelayShard(clusterId: node.wakuSharding.clusterId, shardId: shard))
    let topic = toPubsubTopic(
      RelayShard(clusterId: node.wakuMetadata.clusterId.uint16, shardId: shard)
    )
    let peers =
      node.wakuRelay.getPubSubPeersInMesh(topic).get(initHashSet[PubSubPeer](0))
    let relayPeer = PeersOfShard(

@ -151,17 +151,19 @@ proc startRestServerProtocolSupport*(
        error "Could not subscribe", pubsubTopic, error
        continue

    for contentTopic in contentTopics:
      cache.contentSubscribe(contentTopic)
    if node.wakuAutoSharding.isSome():
      # Only deduce pubsub topics to subscribe to from content topics if autosharding is enabled
      for contentTopic in contentTopics:
        cache.contentSubscribe(contentTopic)

      let shard = node.wakuSharding.getShard(contentTopic).valueOr:
        error "Autosharding error in REST", error = error
        continue
      let pubsubTopic = $shard
        let shard = node.wakuAutoSharding.get().getShard(contentTopic).valueOr:
          error "Autosharding error in REST", error = error
          continue
        let pubsubTopic = $shard

      node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr:
        error "Could not subscribe", pubsubTopic, error
        continue
        node.subscribe((kind: PubsubSub, topic: pubsubTopic), handler).isOkOr:
          error "Could not subscribe", pubsubTopic, error
          continue

    installRelayApiHandlers(router, node, cache)
  else:

@ -32,7 +32,7 @@ const NoPeerNoneFoundError = "No suitable service peer & none discovered"
proc useSelfHostedLightPush(node: WakuNode): bool =
  return node.wakuLightPush != nil and node.wakuLightPushClient == nil

proc convertErrorKindToHttpStatus(statusCode: LightpushStatusCode): HttpCode =
proc convertErrorKindToHttpStatus(statusCode: LightPushStatusCode): HttpCode =
  ## Lightpush status codes are matching HTTP status codes by design
  return toHttpCode(statusCode.int).get(Http500)

@ -66,7 +66,8 @@ proc installLightPushRequestHandler*(
    contentBody: Option[ContentBody]
  ) -> RestApiResponse:
    ## Send a request to push a waku message
    debug "post", ROUTE_LIGHTPUSH, contentBody
    debug "post received", ROUTE_LIGHTPUSH
    trace "content body", ROUTE_LIGHTPUSH, contentBody

    let req: PushRequest = decodeRequestBody[PushRequest](contentBody).valueOr:
      return

@ -272,11 +272,16 @@ proc installRelayApiHandlers*(
    var message: WakuMessage = req.toWakuMessage(version = 0).valueOr:
      return RestApiResponse.badRequest()

    let pubsubTopic = node.wakuSharding.getShard(message.contentTopic).valueOr:
      let msg = "Autosharding error: " & error
    if node.wakuAutoSharding.isNone():
      let msg = "Autosharding is disabled"
      error "publish error", err = msg
      return RestApiResponse.badRequest("Failed to publish. " & msg)

    let pubsubTopic = node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr:
      let msg = "Autosharding error: " & error
      error "publish error", err = msg
      return RestApiResponse.badRequest("Failed to publish. " & msg)

    # if RLN is mounted, append the proof to the message
    if not node.wakuRlnRelay.isNil():
      node.wakuRlnRelay.appendRLNProof(message, float64(getTime().toUnix())).isOkOr:

@ -122,6 +122,18 @@ proc parse*(
      "Invalid content topic structure. Expected either /<application>/<version>/<topic-name>/<encoding> or /<gen>/<application>/<version>/<topic-name>/<encoding>"
    return err(ParsingError.invalidFormat(errMsg))

proc parse*(
    T: type NsContentTopic, topics: seq[ContentTopic]
): ParsingResult[seq[NsContentTopic]] =
  var res: seq[NsContentTopic] = @[]
  for contentTopic in topics:
    let parseRes = NsContentTopic.parse(contentTopic)
    if parseRes.isErr():
      let error: ParsingError = parseRes.error
      return ParsingResult[seq[NsContentTopic]].err(error)
    res.add(parseRes.value)
  return ParsingResult[seq[NsContentTopic]].ok(res)

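A short sketch of the new seq overload of `NsContentTopic.parse`, which fails on the first malformed topic (illustrative, not part of the diff):

```nim
# Illustrative only: parse a batch of content topics in one call.
let parsed = NsContentTopic.parse(@["/app/1/chat/proto", "/app/1/media/proto"]).valueOr:
  quit("invalid content topic: " & $error)
assert parsed.len == 2
```
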
# Content topic compatibility
|
||||
|
||||
converter toContentTopic*(topic: NsContentTopic): ContentTopic =
|
||||
|
||||
@ -8,6 +8,7 @@ import nimcrypto, std/options, std/tables, stew/endians2, results, stew/byteutil
|
||||
|
||||
import ./content_topic, ./pubsub_topic
|
||||
|
||||
# TODO: this is autosharding, not just "sharding"
|
||||
type Sharding* = object
|
||||
clusterId*: uint16
|
||||
# TODO: generations could be stored in a table here
|
||||
@ -50,48 +51,32 @@ proc getShard*(s: Sharding, topic: ContentTopic): Result[RelayShard, string] =

ok(shard)

proc parseSharding*(
s: Sharding,
pubsubTopic: Option[PubsubTopic],
contentTopics: ContentTopic | seq[ContentTopic],
proc getShardsFromContentTopics*(
s: Sharding, contentTopics: ContentTopic | seq[ContentTopic]
): Result[Table[RelayShard, seq[NsContentTopic]], string] =
var topics: seq[ContentTopic]
when contentTopics is seq[ContentTopic]:
topics = contentTopics
else:
topics = @[contentTopics]
let topics =
when contentTopics is seq[ContentTopic]:
contentTopics
else:
@[contentTopics]

let parseRes = NsContentTopic.parse(topics)
let nsContentTopics =
if parseRes.isErr():
return err("Cannot parse content topic: " & $parseRes.error)
else:
parseRes.get()

var topicMap = initTable[RelayShard, seq[NsContentTopic]]()
for contentTopic in topics:
let parseRes = NsContentTopic.parse(contentTopic)
for content in nsContentTopics:
let shard = s.getShard(content).valueOr:
return err("Cannot deduce shard from content topic: " & $error)

let content =
if parseRes.isErr():
return err("Cannot parse content topic: " & $parseRes.error)
else:
parseRes.get()

let pubsub =
if pubsubTopic.isSome():
let parseRes = RelayShard.parse(pubsubTopic.get())

if parseRes.isErr():
return err("Cannot parse pubsub topic: " & $parseRes.error)
else:
parseRes.get()
else:
let shardsRes = s.getShard(content)

if shardsRes.isErr():
return err("Cannot autoshard content topic: " & $shardsRes.error)
else:
shardsRes.get()

if not topicMap.hasKey(pubsub):
topicMap[pubsub] = @[]
if not topicMap.hasKey(shard):
topicMap[shard] = @[]

try:
topicMap[pubsub].add(content)
topicMap[shard].add(content)
except CatchableError:
return err(getCurrentExceptionMsg())

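`getShardsFromContentTopics` replaces `parseSharding`: it no longer accepts an optional pubsub topic and derives every shard from the content topics alone. A minimal sketch of the intended call pattern, assuming a configured `Sharding` value and valid autoshardable topics; `sharding` and the topic strings are illustrative placeholders:

```nim
# Sketch only: map content topics to their autosharded relay shards.
let topics = @["/toy-app/1/chat/proto", "/toy-app/1/status/proto"]
let mappingRes = sharding.getShardsFromContentTopics(topics)
if mappingRes.isErr():
  echo "cannot map content topics to shards: ", mappingRes.error
else:
  for shard, contentTopics in mappingRes.get().pairs:
    echo $shard, " <- ", contentTopics.len, " content topic(s)"
```
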
@ -44,10 +44,10 @@ proc getRelayPushHandler*(
): Future[WakuLightPushResult] {.async.} =
# append RLN proof
let msgWithProof = checkAndGenerateRLNProof(rlnPeer, message).valueOr:
return lighpushErrorResult(OUT_OF_RLN_PROOF, error)
return lighpushErrorResult(LightPushErrorCode.OUT_OF_RLN_PROOF, error)

(await wakuRelay.validateMessage(pubSubTopic, msgWithProof)).isOkOr:
return lighpushErrorResult(INVALID_MESSAGE_ERROR, $error)
return lighpushErrorResult(LightPushErrorCode.INVALID_MESSAGE, $error)

let publishedResult = await wakuRelay.publish(pubsubTopic, msgWithProof)

@ -35,7 +35,8 @@ proc sendPushRequest(
let connection = (await wl.peerManager.dialPeer(peer, WakuLightPushCodec)).valueOr:
waku_lightpush_v3_errors.inc(labelValues = [dialFailure])
return lighpushErrorResult(
NO_PEERS_TO_RELAY, dialFailure & ": " & $peer & " is not accessible"
LightPushErrorCode.NO_PEERS_TO_RELAY,
dialFailure & ": " & $peer & " is not accessible",
)

await connection.writeLP(req.encode().buffer)

@ -44,7 +45,7 @@ proc sendPushRequest(
try:
buffer = await connection.readLp(DefaultMaxRpcSize.int)
except LPStreamRemoteClosedError:
error "Failed to read responose from peer", error = getCurrentExceptionMsg()
error "Failed to read response from peer", error = getCurrentExceptionMsg()
return lightpushResultInternalError(
"Failed to read response from peer: " & getCurrentExceptionMsg()
)

@ -55,7 +56,7 @@ proc sendPushRequest(
return lightpushResultInternalError(decodeRpcFailure)

if response.requestId != req.requestId and
response.statusCode != TOO_MANY_REQUESTS.uint32:
response.statusCode != LightPushErrorCode.TOO_MANY_REQUESTS:
error "response failure, requestId mismatch",
requestId = req.requestId, responseRequestId = response.requestId
return lightpushResultInternalError("response failure, requestId mismatch")

@ -105,7 +106,9 @@ proc publishToAny*(

let peer = wl.peerManager.selectPeer(WakuLightPushCodec).valueOr:
# TODO: check if it is matches the situation - shall we distinguish client side missing peers from server side?
return lighpushErrorResult(NO_PEERS_TO_RELAY, "no suitable remote peers")
return lighpushErrorResult(
LightPushErrorCode.NO_PEERS_TO_RELAY, "no suitable remote peers"
)

let pushRequest = LightpushRequest(
requestId: generateRequestId(wl.rng),

@ -5,18 +5,21 @@ import ../waku_core, ./rpc, ../waku_relay/protocol

from ../waku_core/codecs import WakuLightPushCodec
export WakuLightPushCodec
export LightPushStatusCode

type LightpushStatusCode* = enum
SUCCESS = uint32(200)
BAD_REQUEST = uint32(400)
PAYLOAD_TOO_LARGE = uint32(413)
INVALID_MESSAGE_ERROR = uint32(420)
UNSUPPORTED_PUBSUB_TOPIC = uint32(421)
TOO_MANY_REQUESTS = uint32(429)
INTERNAL_SERVER_ERROR = uint32(500)
SERVICE_NOT_AVAILABLE = uint32(503)
OUT_OF_RLN_PROOF = uint32(504)
NO_PEERS_TO_RELAY = uint32(505)
const LightPushSuccessCode* = (SUCCESS: LightPushStatusCode(200))

const LightPushErrorCode* = (
BAD_REQUEST: LightPushStatusCode(400),
PAYLOAD_TOO_LARGE: LightPushStatusCode(413),
INVALID_MESSAGE: LightPushStatusCode(420),
UNSUPPORTED_PUBSUB_TOPIC: LightPushStatusCode(421),
TOO_MANY_REQUESTS: LightPushStatusCode(429),
INTERNAL_SERVER_ERROR: LightPushStatusCode(500),
SERVICE_NOT_AVAILABLE: LightPushStatusCode(503),
OUT_OF_RLN_PROOF: LightPushStatusCode(504),
NO_PEERS_TO_RELAY: LightPushStatusCode(505),
)

type ErrorStatus* = tuple[code: LightpushStatusCode, desc: Option[string]]
type WakuLightPushResult* = Result[uint32, ErrorStatus]

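The `LightpushStatusCode` enum is replaced by named-tuple constants over the distinct `LightPushStatusCode` type defined in rpc.nim further down, so call sites compare against `LightPushErrorCode.X` or `LightPushSuccessCode.SUCCESS` instead of enum members. A hedged sketch of how an `ErrorStatus` might be built and checked under this scheme, assuming the constants above and the borrowed `==`/`$` from rpc.nim are in scope:

```nim
# Sketch only: illustrative error status using the constants defined above.
let failure: ErrorStatus =
  (code: LightPushErrorCode.NO_PEERS_TO_RELAY, desc: some("no suitable remote peers"))
if failure.code == LightPushErrorCode.NO_PEERS_TO_RELAY:
  echo "lightpush failed with status ", $failure.code  # prints "505" via the borrowed `$`
```
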
@ -28,25 +31,25 @@ type PushMessageHandler* = proc(
const TooManyRequestsMessage* = "Request rejected due to too many requests"

func isSuccess*(response: LightPushResponse): bool =
return response.statusCode == LightpushStatusCode.SUCCESS.uint32
return response.statusCode == LightPushSuccessCode.SUCCESS

func toPushResult*(response: LightPushResponse): WakuLightPushResult =
if isSuccess(response):
return ok(response.relayPeerCount.get(0))
else:
return err((response.statusCode.LightpushStatusCode, response.statusDesc))
return err((response.statusCode, response.statusDesc))

func lightpushSuccessResult*(relayPeerCount: uint32): WakuLightPushResult =
return ok(relayPeerCount)

func lightpushResultInternalError*(msg: string): WakuLightPushResult =
return err((LightpushStatusCode.INTERNAL_SERVER_ERROR, some(msg)))
return err((LightPushErrorCode.INTERNAL_SERVER_ERROR, some(msg)))

func lightpushResultBadRequest*(msg: string): WakuLightPushResult =
return err((LightpushStatusCode.BAD_REQUEST, some(msg)))
return err((LightPushErrorCode.BAD_REQUEST, some(msg)))

func lightpushResultServiceUnavailable*(msg: string): WakuLightPushResult =
return err((LightpushStatusCode.SERVICE_NOT_AVAILABLE, some(msg)))
return err((LightPushErrorCode.SERVICE_NOT_AVAILABLE, some(msg)))

func lighpushErrorResult*(
statusCode: LightpushStatusCode, desc: Option[string]

@ -63,24 +66,22 @@ func mapPubishingErrorToPushResult*(
): WakuLightPushResult =
case publishOutcome
of NoTopicSpecified:
return err(
(LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Empty topic, skipping publish"))
)
return
err((LightPushErrorCode.INVALID_MESSAGE, some("Empty topic, skipping publish")))
of DuplicateMessage:
return err(
(LightpushStatusCode.INVALID_MESSAGE_ERROR, some("Dropping already-seen message"))
)
return
err((LightPushErrorCode.INVALID_MESSAGE, some("Dropping already-seen message")))
of NoPeersToPublish:
return err(
(
LightpushStatusCode.NO_PEERS_TO_RELAY,
LightPushErrorCode.NO_PEERS_TO_RELAY,
some("No peers for topic, skipping publish"),
)
)
of CannotGenerateMessageId:
return err(
(
LightpushStatusCode.INTERNAL_SERVER_ERROR,
LightPushErrorCode.INTERNAL_SERVER_ERROR,
some("Error generating message id, skipping publish"),
)
)

@ -26,96 +26,87 @@ type WakuLightPush* = ref object of LPProtocol
peerManager*: PeerManager
pushHandler*: PushMessageHandler
requestRateLimiter*: RequestRateLimiter
sharding: Sharding
autoSharding: Option[Sharding]

proc handleRequest(
wl: WakuLightPush, peerId: PeerId, pushRequest: LightpushRequest
): Future[WakuLightPushResult] {.async.} =
let pubsubTopic = pushRequest.pubSubTopic.valueOr:
if wl.autoSharding.isNone():
let msg = "Pubsub topic must be specified when static sharding is enabled"
error "lightpush request handling error", error = msg
return WakuLightPushResult.err(
(code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg))
)

let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr:
let msg = "Invalid content-topic:" & $error
error "lightpush request handling error", error = msg
return WakuLightPushResult.err(
(code: LightPushErrorCode.INVALID_MESSAGE, desc: some(msg))
)

wl.autoSharding.get().getShard(parsedTopic).valueOr:
let msg = "Auto-sharding error: " & error
error "lightpush request handling error", error = msg
return WakuLightPushResult.err(
(code: LightPushErrorCode.INTERNAL_SERVER_ERROR, desc: some(msg))
)

# ensure checking topic will not cause error at gossipsub level
if pubsubTopic.isEmptyOrWhitespace():
let msg = "topic must not be empty"
error "lightpush request handling error", error = msg
return
WakuLightPushResult.err((code: LightPushErrorCode.BAD_REQUEST, desc: some(msg)))

waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"])

let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex()
notice "handling lightpush request",
my_peer_id = wl.peerManager.switch.peerInfo.peerId,
peer_id = peerId,
requestId = pushRequest.requestId,
pubsubTopic = pushRequest.pubsubTopic,
msg_hash = msg_hash,
receivedTime = getNowInNanosecondTime()

let res = (await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)).valueOr:
return err((code: error.code, desc: error.desc))
return ok(res)

proc handleRequest*(
wl: WakuLightPush, peerId: PeerId, buffer: seq[byte]
): Future[LightPushResponse] {.async.} =
let reqDecodeRes = LightpushRequest.decode(buffer)
var isSuccess = false
var pushResponse: LightpushResponse

if reqDecodeRes.isErr():
pushResponse = LightpushResponse(
let pushRequest = LightPushRequest.decode(buffer).valueOr:
let desc = decodeRpcFailure & ": " & $error
error "failed to push message", error = desc
let errorCode = LightPushErrorCode.BAD_REQUEST
waku_lightpush_v3_errors.inc(labelValues = [$errorCode])
return LightPushResponse(
requestId: "N/A", # due to decode failure we don't know requestId
statusCode: LightpushStatusCode.BAD_REQUEST.uint32,
statusDesc: some(decodeRpcFailure & ": " & $reqDecodeRes.error),
)
else:
let pushRequest = reqDecodeRes.get()

let pubsubTopic = pushRequest.pubSubTopic.valueOr:
let parsedTopic = NsContentTopic.parse(pushRequest.message.contentTopic).valueOr:
let msg = "Invalid content-topic:" & $error
error "lightpush request handling error", error = msg
return LightpushResponse(
requestId: pushRequest.requestId,
statusCode: LightpushStatusCode.INVALID_MESSAGE_ERROR.uint32,
statusDesc: some(msg),
)

wl.sharding.getShard(parsedTopic).valueOr:
let msg = "Autosharding error: " & error
error "lightpush request handling error", error = msg
return LightpushResponse(
requestId: pushRequest.requestId,
statusCode: LightpushStatusCode.INTERNAL_SERVER_ERROR.uint32,
statusDesc: some(msg),
)

# ensure checking topic will not cause error at gossipsub level
if pubsubTopic.isEmptyOrWhitespace():
let msg = "topic must not be empty"
error "lightpush request handling error", error = msg
return LightPushResponse(
requestId: pushRequest.requestId,
statusCode: LightpushStatusCode.BAD_REQUEST.uint32,
statusDesc: some(msg),
)

waku_lightpush_v3_messages.inc(labelValues = ["PushRequest"])

let msg_hash = pubsubTopic.computeMessageHash(pushRequest.message).to0xHex()
notice "handling lightpush request",
my_peer_id = wl.peerManager.switch.peerInfo.peerId,
peer_id = peerId,
requestId = pushRequest.requestId,
pubsubTopic = pushRequest.pubsubTopic,
msg_hash = msg_hash,
receivedTime = getNowInNanosecondTime()

let handleRes = await wl.pushHandler(peerId, pubsubTopic, pushRequest.message)

isSuccess = handleRes.isOk()
pushResponse = LightpushResponse(
requestId: pushRequest.requestId,
statusCode:
if isSuccess:
LightpushStatusCode.SUCCESS.uint32
else:
handleRes.error.code.uint32,
statusDesc:
if isSuccess:
none[string]()
else:
handleRes.error.desc,
relayPeerCount:
if isSuccess:
some(handleRes.get())
else:
none[uint32](),
statusCode: errorCode,
statusDesc: some(desc),
)

if not isSuccess:
waku_lightpush_v3_errors.inc(
labelValues = [pushResponse.statusDesc.valueOr("unknown")]
let relayPeerCount = (await handleRequest(wl, peerId, pushRequest)).valueOr:
let desc = error.desc
waku_lightpush_v3_errors.inc(labelValues = [$error.code])
error "failed to push message", error = desc
return LightPushResponse(
requestId: pushRequest.requestId, statusCode: error.code, statusDesc: desc
)
error "failed to push message", error = pushResponse.statusDesc
return pushResponse

return LightPushResponse(
requestId: pushRequest.requestId,
statusCode: LightPushSuccessCode.SUCCESS,
statusDesc: none[string](),
relayPeerCount: some(relayPeerCount),
)

proc initProtocolHandler(wl: WakuLightPush) =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
var rpc: LightpushResponse
var rpc: LightPushResponse
wl.requestRateLimiter.checkUsageLimit(WakuLightPushCodec, conn):
var buffer: seq[byte]
try:

@ -137,12 +128,12 @@ proc initProtocolHandler(wl: WakuLightPush) =
peerId = conn.peerId, limit = $wl.requestRateLimiter.setting

rpc = static(
LightpushResponse(
LightPushResponse(
## We will not copy and decode RPC buffer from stream only for requestId
## in reject case as it is comparably too expensive and opens possible
## attack surface
requestId: "N/A",
statusCode: LightpushStatusCode.TOO_MANY_REQUESTS.uint32,
statusCode: LightPushErrorCode.TOO_MANY_REQUESTS,
statusDesc: some(TooManyRequestsMessage),
)
)

@ -152,8 +143,8 @@ proc initProtocolHandler(wl: WakuLightPush) =
except LPStreamError:
error "lightpush write stream failed", error = getCurrentExceptionMsg()

## For lightpush might not worth to measure outgoing trafic as it is only
## small respones about success/failure
## For lightpush might not worth to measure outgoing traffic as it is only
## small response about success/failure

wl.handler = handler
wl.codec = WakuLightPushCodec

@ -163,7 +154,7 @@ proc new*(
peerManager: PeerManager,
rng: ref rand.HmacDrbgContext,
pushHandler: PushMessageHandler,
sharding: Sharding,
autoSharding: Option[Sharding],
rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](),
): T =
let wl = WakuLightPush(

@ -171,7 +162,7 @@ proc new*(
peerManager: peerManager,
pushHandler: pushHandler,
requestRateLimiter: newRequestRateLimiter(rateLimitSetting),
sharding: sharding,
autoSharding: autoSharding,
)
wl.initProtocolHandler()
setServiceLimitMetric(WakuLightpushCodec, rateLimitSetting)

@ -3,6 +3,10 @@
import std/options
import ../waku_core

type LightPushStatusCode* = distinct uint32
proc `==`*(a, b: LightPushStatusCode): bool {.borrow.}
proc `$`*(code: LightPushStatusCode): string {.borrow.}

type
LightpushRequest* = object
requestId*: string

@ -11,6 +15,6 @@ type

LightPushResponse* = object
requestId*: string
statusCode*: uint32
statusCode*: LightPushStatusCode
statusDesc*: Option[string]
relayPeerCount*: Option[uint32]

@ -43,7 +43,7 @@ proc encode*(rpc: LightPushResponse): ProtoBuffer =
var pb = initProtoBuffer()

pb.write3(1, rpc.requestId)
pb.write3(10, rpc.statusCode)
pb.write3(10, rpc.statusCode.uint32)
pb.write3(11, rpc.statusDesc)
pb.write3(12, rpc.relayPeerCount)
pb.finish3()

@ -64,7 +64,7 @@ proc decode*(T: type LightPushResponse, buffer: seq[byte]): ProtobufResult[T] =
if not ?pb.getField(10, statusCode):
return err(ProtobufError.missingRequiredField("status_code"))
else:
rpc.statusCode = statusCode
rpc.statusCode = statusCode.LightPushStatusCode

var statusDesc: string
if not ?pb.getField(11, statusDesc):

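Because `LightPushStatusCode` is now a distinct uint32, the codec converts explicitly in both directions: `write3` receives the underlying uint32 and the decoder casts the raw field back. A small round-trip sketch of that conversion, assuming the types from rpc.nim above are in scope:

```nim
# Sketch only: shows the cast pair used by encode/decode above.
let original = LightPushErrorCode.TOO_MANY_REQUESTS
let onWire = original.uint32              # what pb.write3(10, ...) serialises
let restored = onWire.LightPushStatusCode # what the decoder assigns to rpc.statusCode
doAssert restored == original             # `==` is borrowed from uint32
```
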
@ -45,7 +45,8 @@ proc handleRequest*(
let msg_hash = pubsubTopic.computeMessageHash(message).to0xHex()
waku_lightpush_messages.inc(labelValues = ["PushRequest"])

notice "handling lightpush request",
notice "handling legacy lightpush request",
my_peer_id = wl.peerManager.switch.peerInfo.peerId,
peer_id = peerId,
requestId = requestId,
pubsubTopic = pubsubTopic,

@ -29,7 +29,7 @@ proc respond(
m: WakuMetadata, conn: Connection
): Future[Result[void, string]] {.async, gcsafe.} =
let response =
WakuMetadataResponse(clusterId: some(m.clusterId), shards: toSeq(m.shards))
WakuMetadataResponse(clusterId: some(m.clusterId.uint32), shards: toSeq(m.shards))

let res = catch:
await conn.writeLP(response.encode().buffer)

@ -372,6 +372,13 @@ proc getPubSubPeersInMesh*(
## Returns the list of PubSubPeers in a mesh defined by the passed pubsub topic.
## The 'mesh' atribute is defined in the GossipSub ref object.

# If pubsubTopic is empty, we return all peers in mesh for any pubsub topic
if pubsubTopic == "":
var allPeers = initHashSet[PubSubPeer]()
for topic, topicMesh in w.mesh.pairs:
allPeers = allPeers.union(topicMesh)
return ok(allPeers)

if not w.mesh.hasKey(pubsubTopic):
debug "getPubSubPeersInMesh - there is no mesh peer for the given pubsub topic",
pubsubTopic = pubsubTopic

@ -388,7 +395,7 @@ proc getPubSubPeersInMesh*(
return ok(peers)

proc getPeersInMesh*(
w: WakuRelay, pubsubTopic: PubsubTopic
w: WakuRelay, pubsubTopic: PubsubTopic = ""
): Result[seq[PeerId], string] =
## Returns the list of peerIds in a mesh defined by the passed pubsub topic.
## The 'mesh' atribute is defined in the GossipSub ref object.

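`getPeersInMesh` now defaults its pubsubTopic parameter to an empty string, and an empty topic returns the union of mesh peers across all topics. A hedged usage sketch, assuming an already-mounted and subscribed WakuRelay instance; `wakuRelay` and the shard topic are illustrative:

```nim
# Sketch only: compare the global mesh with a single shard's mesh.
let allMesh = wakuRelay.getPeersInMesh()                   # "" => union of every topic's mesh
let shardMesh = wakuRelay.getPeersInMesh("/waku/2/rs/1/0") # a single shard's mesh
if allMesh.isOk() and shardMesh.isOk():
  echo "mesh peers: total=", allMesh.get().len, " shard=", shardMesh.get().len
```
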
@ -143,9 +143,10 @@ proc needsReceiverLoop(self: SyncTransfer) {.async.} =
proc initProtocolHandler(self: SyncTransfer) =
proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
while true:
if not self.inSessions.contains(conn.peerId):
## removed DOS prototection until we can design something better
#[ if not self.inSessions.contains(conn.peerId):
error "unwanted peer, disconnecting", remote = conn.peerId
break
break ]#

let readRes = catch:
await conn.readLp(int64(DefaultMaxWakuMessageSize))