chore!: separate internal and CLI configurations (#3357)

Split the `WakuNodeConf` object for better separation of concerns and to introduce a tree-like structure for configuration; a minimal sketch of the resulting builder flow follows the change list below.

* fix: ensure twn cluster conf is still applied when clusterId=1
* test: remove usage of `WakuNodeConf`
* Remove macro, split builder files, remove `WakuNodeConf` from tests
* Remove `network_conf_builder` module as it is unused
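
A minimal sketch of the resulting builder-based flow, assembled from the tests in this commit rather than the full API surface; sub-configurations such as `discv5Conf` and `rlnRelayConf` hang off the top-level `WakuConfBuilder`:

import waku/factory/conf_builder/conf_builder

var builder = WakuConfBuilder.init()
builder.withClusterId(1)
builder.withRelay(true)
builder.discv5Conf.withEnabled(true)
builder.discv5Conf.withUdpPort(9000.Port)
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")

let conf = builder.build().valueOr: # Result[WakuConf, string]
  raiseAssert error
let vRes = conf.validate() # structural checks, e.g. shards in range
assert vRes.isOk(), $vRes.error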

---------

Co-authored-by: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com>
Co-authored-by: Ivan Folgueira Bande <ivansete@status.im>
Authored by fryorcraken on 2025-05-08 07:05:35 +10:00; committed by GitHub
Parent: 6bc05efc02
Commit: cc66c7fe78
59 changed files with 2984 additions and 933 deletions

View File

@ -557,14 +557,19 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
echo "rln-relay preparation is in progress..."
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
dynamic: conf.rlnRelayDynamic,
credIndex: conf.rlnRelayCredIndex,
chainId: conf.rlnRelayChainId,
ethContractAddress: conf.rlnRelayEthContractAddress,
ethClientAddress: string(conf.rlnRelayethClientAddress),
creds: some(
RlnRelayCreds(
path: conf.rlnRelayCredPath, password: conf.rlnRelayCredPassword
)
),
userMessageLimit: conf.rlnRelayUserMessageLimit,
epochSizeSec: conf.rlnEpochSizeSec,
treePath: conf.rlnRelayTreePath,
)
waitFor node.mountRlnRelay(rlnConf, spamHandler = some(spamHandler))
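
For reference, the `WakuRlnConfig` field renames applied throughout this commit, as derived from the hunk above (comment sketch):

# rlnRelayDynamic            -> dynamic
# rlnRelayCredIndex          -> credIndex
# rlnRelayEthContractAddress -> ethContractAddress
# rlnRelayEthClientAddress   -> ethClientAddress
# rlnRelayUserMessageLimit   -> userMessageLimit
# rlnEpochSizeSec            -> epochSizeSec
# rlnRelayTreePath           -> treePath
# rlnRelayCredPath, rlnRelayCredPassword -> creds: Option[RlnRelayCreds] (path, password)
# rlnRelayChainId (CLI flag) -> chainId (new field, added in the next file)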

View File

@ -213,6 +213,13 @@ type
name: "rln-relay"
.}: bool
rlnRelayChainId* {.
desc:
"Chain ID of the provided contract (optional, will fetch from RPC provider if not used)",
defaultValue: 0,
name: "rln-relay-chain-id"
.}: uint
rlnRelayCredPath* {.
desc: "The path for peristing rln-relay credential",
defaultValue: "",
@ -273,6 +280,12 @@ type
name: "rln-relay-epoch-sec"
.}: uint64
rlnRelayTreePath* {.
desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
defaultValue: "",
name: "rln-relay-tree-path"
.}: string
# NOTE: Keys are different in nim-libp2p
proc parseCmdArg*(T: type crypto.PrivateKey, p: string): T =
try:
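
A sketch of exercising the two new options through config loading; the `cmdLine` form mirrors this commit's tests, and the flag values are illustrative only:

let conf = WakuNodeConf.load(
  version = "",
  cmdLine =
    @[
      "--rln-relay=true", "--rln-relay-chain-id=1337",
      "--rln-relay-tree-path=/tmp/rln_tree",
    ],
)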

View File

@ -127,7 +127,7 @@ when isMainModule:
nodeHealthMonitor = WakuNodeHealthMonitor()
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
let restServer = rest_server_builder.startRestServerEsentials(
let restServer = rest_server_builder.startRestServerEssentials(
nodeHealthMonitor, wakuConf
).valueOr:
error "Starting esential REST server failed.", error = $error

View File

@ -462,7 +462,7 @@ proc initAndStartApp(
nodeBuilder.withNodeKey(key)
nodeBuilder.withRecord(record)
nodeBUilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))
nodeBuilder.withSwitchConfiguration(maxConnections = some(MaxConnectedPeers))
nodeBuilder.withPeerManagerConfig(
maxConnections = MaxConnectedPeers,
@ -635,14 +635,13 @@ when isMainModule:
if conf.rlnRelay and conf.rlnRelayEthContractAddress != "":
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: some(uint(0)),
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: "",
rlnRelayCredPassword: "",
rlnRelayTreePath: conf.rlnRelayTreePath,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
dynamic: conf.rlnRelayDynamic,
credIndex: some(uint(0)),
ethContractAddress: conf.rlnRelayEthContractAddress,
ethClientAddress: string(conf.rlnRelayethClientAddress),
treePath: conf.rlnRelayTreePath,
epochSizeSec: conf.rlnEpochSizeSec,
creds: none(RlnRelayCreds),
onFatalErrorAction: onFatalErrorAction,
)

View File

@ -195,7 +195,7 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
let netConfig = NetConfig.init(
bindIp = bindIp,
bindPort = nodeTcpPort,
wsBindPort = wsBindPort,
wsBindPort = some(wsBindPort),
wsEnabled = isWs,
wssEnabled = isWss,
)
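
`wsBindPort` is now an `Option[Port]`, so call sites wrap the port explicitly; a minimal sketch of the updated call shape (addresses and ports illustrative):

let netConfig = NetConfig.init(
  bindIp = parseIpAddress("0.0.0.0"),
  bindPort = Port(60000),
  wsBindPort = some(Port(8000)), # previously a bare Port
  wsEnabled = true,
  wssEnabled = false,
).valueOr:
  raise newException(Defect, "Invalid network configuration: " & error)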

View File

@ -38,17 +38,19 @@ when isMainModule:
const versionString = "version / git commit hash: " & waku.git_version
var conf = WakuNodeConf.load(version = versionString).valueOr:
var wakuNodeConf = WakuNodeConf.load(version = versionString).valueOr:
error "failure while loading the configuration", error = error
quit(QuitFailure)
## Also called within Waku.new. The call to startRestServerEsentials needs the following line
logging.setupLog(conf.logLevel, conf.logFormat)
## Also called within Waku.new. The call to startRestServerEssentials needs the following line
logging.setupLog(wakuNodeConf.logLevel, wakuNodeConf.logFormat)
case conf.cmd
case wakuNodeConf.cmd
of generateRlnKeystore:
let conf = wakuNodeConf.toKeystoreGeneratorConf()
doRlnKeystoreGenerator(conf)
of inspectRlnDb:
let conf = wakuNodeConf.toInspectRlnDbConf()
doInspectRlnDb(conf)
of noCommand:
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
@ -58,15 +60,20 @@ when isMainModule:
nodeHealthMonitor = WakuNodeHealthMonitor()
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
var confCopy = conf
let restServer = rest_server_builder.startRestServerEsentials(
nodeHealthMonitor, confCopy
).valueOr:
error "Starting esential REST server failed.", error = $error
let conf = wakuNodeConf.toWakuConf().valueOr:
error "Waku configuration failed", error = error
quit(QuitFailure)
var waku = Waku.new(confCopy).valueOr:
var restServer: WakuRestServerRef = nil
if conf.restServerConf.isSome():
restServer = rest_server_builder.startRestServerEssentials(
nodeHealthMonitor, conf.restServerConf.get(), conf.portsShift
).valueOr:
error "Starting essential REST server failed.", error = $error
quit(QuitFailure)
var waku = Waku.new(conf).valueOr:
error "Waku initialization failed", error = error
quit(QuitFailure)
@ -78,15 +85,27 @@ when isMainModule:
error "Starting waku failed", error = error
quit(QuitFailure)
rest_server_builder.startRestServerProtocolSupport(
restServer, waku.node, waku.wakuDiscv5, confCopy
).isOkOr:
error "Starting protocols support REST server failed.", error = $error
quit(QuitFailure)
if conf.restServerConf.isSome():
rest_server_builder.startRestServerProtocolSupport(
restServer,
waku.node,
waku.wakuDiscv5,
conf.restServerConf.get(),
conf.relay,
conf.lightPush,
conf.clusterId,
conf.shards,
conf.contentTopics,
).isOkOr:
error "Starting protocols support REST server failed.", error = $error
quit(QuitFailure)
waku.metricsServer = waku_metrics.startMetricsServerAndLogging(confCopy).valueOr:
error "Starting monitoring and external interfaces failed", error = error
quit(QuitFailure)
if conf.metricsServerConf.isSome():
waku.metricsServer = waku_metrics.startMetricsServerAndLogging(
conf.metricsServerConf.get(), conf.portsShift
).valueOr:
error "Starting monitoring and external interfaces failed", error = error
quit(QuitFailure)
nodeHealthMonitor.setOverallHealth(HealthStatus.READY)
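
Condensed, the new entrypoint flow keeps the CLI object (`WakuNodeConf`) separate from the internal one (`WakuConf`); names are taken from the hunk above:

var wakuNodeConf = WakuNodeConf.load(version = versionString).valueOr:
  quit(QuitFailure)
# Subcommands get narrow configs of their own:
#   wakuNodeConf.toKeystoreGeneratorConf(), wakuNodeConf.toInspectRlnDbConf()
let conf = wakuNodeConf.toWakuConf().valueOr: # internal node configuration
  quit(QuitFailure)
var waku = Waku.new(conf).valueOr:
  quit(QuitFailure)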

View File

@ -36,7 +36,6 @@ proc setup*(): Waku =
conf.clusterId = twnClusterConf.clusterId
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
conf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
conf.discv5Discovery = twnClusterConf.discv5Discovery
conf.discv5BootstrapNodes =
conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes

View File

@ -72,7 +72,11 @@ proc createWaku(
appCallbacks.relayHandler = nil
appCallbacks.topicHealthChangeHandler = nil
let wakuRes = Waku.new(conf, appCallbacks).valueOr:
# TODO: Convert `confJson` directly to `WakuConf`
let wakuConf = conf.toWakuConf().valueOr:
return err("Configuration error: " & $error)
let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr:
error "waku initialization failed", error = error
return err("Failed setting up Waku: " & $error)

View File

@ -108,4 +108,4 @@ import
import ./waku_rln_relay/test_all
# Node Factory
import ./factory/test_config
import ./factory/test_external_config

View File

@ -1,157 +0,0 @@
{.used.}
import
std/options,
testutils/unittests,
chronos,
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
nimcrypto/utils,
secp256k1,
confutils
import
../../waku/factory/external_config,
../../waku/factory/internal_config,
../../waku/factory/networks_config,
../../waku/common/logging
suite "Waku config - apply preset":
test "Default preset is TWN":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn")
## When
let res = applyPresetConfiguration(preConfig)
assert res.isOk(), $res.error
## Then
let conf = res.get()
assert conf.maxMessageSize == expectedConf.maxMessageSize
assert conf.clusterId == expectedConf.clusterId
assert conf.rlnRelay == expectedConf.rlnRelay
assert conf.rlnRelayEthContractAddress == expectedConf.rlnRelayEthContractAddress
assert conf.rlnRelayDynamic == expectedConf.rlnRelayDynamic
assert conf.rlnRelayChainId == expectedConf.rlnRelayChainId
assert conf.rlnRelayBandwidthThreshold == expectedConf.rlnRelayBandwidthThreshold
assert conf.rlnEpochSizeSec == expectedConf.rlnEpochSizeSec
assert conf.rlnRelayUserMessageLimit == expectedConf.rlnRelayUserMessageLimit
assert conf.numShardsInNetwork == expectedConf.numShardsInNetwork
assert conf.discv5BootstrapNodes == expectedConf.discv5BootstrapNodes
test "Subscribes to all valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let res = applyPresetConfiguration(preConfig)
assert res.isOk(), $res.error
## Then
let conf = res.get()
assert conf.shards.len == expectedConf.numShardsInNetwork.int
test "Subscribes to some valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 4, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let resConf = applyPresetConfiguration(preConfig)
let res = validateShards(resConf.get())
assert res.isOk(), $res.error
## Then
let conf = resConf.get()
assert conf.shards.len() == shards.len()
for index, shard in shards:
assert shard in conf.shards
test "Subscribes to invalid shards in twn":
## Setup
## Given
let shards: seq[uint16] = @[0, 4, 7, 10]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
let postConfig = applyPresetConfiguration(preConfig)
## When
let res = validateShards(postConfig.get())
## Then
assert res.isErr(), "Invalid shard was accepted"
suite "Waku config - node key":
test "Passed node key is used":
## Setup
let nodeKeyStr =
"0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
let nodekey = block:
let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
crypto.PrivateKey(scheme: Secp256k1, skkey: key)
## Given
let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr])
## When
let res = getNodeKey(config)
assert res.isOk(), $res.error
## Then
let resKey = res.get()
assert utils.toHex(resKey.getRawBytes().get()) ==
utils.toHex(nodekey.getRawBytes().get())
suite "Waku config - Shards":
test "Shards are valid":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 4]
let numShardsInNetwork = 5.uint32
let config = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = validateShards(config)
## Then
assert res.isOk(), $res.error
test "Shards are not in range":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 5]
let numShardsInNetwork = 5.uint32
let config = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = validateShards(config)
## Then
assert res.isErr(), "Invalid shard was accepted"
test "Shard is passed without num shards":
## Setup
## Given
let config = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
## When
let res = validateShards(config)
## Then
assert res.isOk(), $res.error

View File

@ -0,0 +1,208 @@
{.used.}
import
std/options,
testutils/unittests,
chronos,
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
nimcrypto/utils,
secp256k1,
confutils
import
../../waku/factory/external_config,
../../waku/factory/networks_config,
../../waku/factory/waku_conf,
../../waku/common/logging,
../../waku/common/utils/parse_size_units
suite "Waku config - apply preset":
test "Default preset is TWN":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(
cmd: noCommand,
preset: "twn",
relay: true,
rlnRelayEthClientAddress: "http://someaddress".EthRpcUrl,
rlnRelayTreePath: "/tmp/sometreepath",
)
## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error
## Then
let conf = res.get()
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(expectedConf.maxMessageSize))
check conf.clusterId == expectedConf.clusterId
check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay
if conf.rlnRelayConf.isSome():
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
test "Subscribes to all valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error
## Then
let conf = res.get()
check conf.shards.len == expectedConf.numShardsInNetwork.int
test "Subscribes to some valid shards in twn":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let shards: seq[uint16] = @[0, 4, 7]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let resConf = preConfig.toWakuConf()
assert resConf.isOk(), $resConf.error
## Then
let conf = resConf.get()
assert conf.shards.len() == shards.len()
for index, shard in shards:
assert shard in conf.shards
test "Subscribes to invalid shards in twn":
## Setup
## Given
let shards: seq[uint16] = @[0, 4, 7, 10]
let preConfig = WakuNodeConf(cmd: noCommand, preset: "twn", shards: shards)
## When
let res = preConfig.toWakuConf()
## Then
assert res.isErr(), "Invalid shard was accepted"
test "Apply TWN preset when cluster id = 1":
## Setup
let expectedConf = ClusterConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(
cmd: noCommand,
clusterId: 1.uint16,
relay: true,
rlnRelayEthClientAddress: "http://someaddress".EthRpcUrl,
rlnRelayTreePath: "/tmp/sometreepath",
)
## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error
## Then
let conf = res.get()
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(expectedConf.maxMessageSize))
check conf.clusterId == expectedConf.clusterId
check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay
if conf.rlnRelayConf.isSome():
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
suite "Waku config - node key":
test "Passed node key is used":
## Setup
let nodeKeyStr =
"0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
let nodekey = block:
let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
crypto.PrivateKey(scheme: Secp256k1, skkey: key)
## Given
let config = WakuNodeConf.load(version = "", cmdLine = @["--nodekey=" & nodeKeyStr])
## When
let res = config.toWakuConf()
assert res.isOk(), $res.error
## Then
let resKey = res.get().nodeKey
assert utils.toHex(resKey.getRawBytes().get()) ==
utils.toHex(nodekey.getRawBytes().get())
suite "Waku config - Shards":
test "Shards are valid":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 4]
let numShardsInNetwork = 5.uint32
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = wakuNodeConf.toWakuConf()
assert res.isOk(), $res.error
## Then
let wakuConf = res.get()
let vRes = wakuConf.validate()
assert vRes.isOk(), $vRes.error
test "Shards are not in range":
## Setup
## Given
let shards: seq[uint16] = @[0, 2, 5]
let numShardsInNetwork = 5.uint32
let wakuNodeConf = WakuNodeConf(
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
)
## When
let res = wakuNodeConf.toWakuConf()
## Then
assert res.isErr(), "Invalid shard was accepted"
test "Shard is passed without num shards":
## Setup
## Given
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
## When
let res = wakuNodeConf.toWakuConf()
## Then
let wakuConf = res.get()
let vRes = wakuConf.validate()
assert vRes.isOk(), $vRes.error
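
The conversion-then-validation flow these tests exercise, in minimal form (names as in the test file above):

let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
let wakuConf = wakuNodeConf.toWakuConf().valueOr: # CLI conf -> internal WakuConf
  raiseAssert $error
let vRes = wakuConf.validate() # e.g. shard-in-range checks
assert vRes.isOk(), $vRes.error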

View File

@ -2,11 +2,15 @@
import testutils/unittests, chronos, libp2p/protocols/connectivity/relay/relay
import ../testlib/wakunode, waku/factory/node_factory, waku/waku_node
import
../testlib/wakunode,
waku/factory/node_factory,
waku/waku_node,
waku/factory/conf_builder/conf_builder
suite "Node Factory":
test "Set up a node based on default configurations":
let conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@ -20,8 +24,10 @@ suite "Node Factory":
not node.wakuRendezvous.isNil()
test "Set up a node with Store enabled":
var conf = defaultTestWakuNodeConf()
conf.store = true
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.storeServiceConf.withEnabled(true)
confBuilder.storeServiceConf.withDbUrl("sqlite://store.sqlite3")
let conf = confBuilder.build().value
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@ -32,8 +38,9 @@ suite "Node Factory":
not node.wakuArchive.isNil()
test "Set up a node with Filter enabled":
var conf = defaultTestWakuNodeConf()
conf.filter = true
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.filterServiceConf.withEnabled(true)
let conf = confBuilder.build().value
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error
@ -43,7 +50,7 @@ test "Set up a node with Filter enabled":
not node.wakuFilter.isNil()
test "Start a node based on default configurations":
let conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let node = setupNode(conf, relay = Relay.new()).valueOr:
raiseAssert error

View File

@ -0,0 +1,272 @@
{.used.}
import
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
nimcrypto/utils,
std/[options, sequtils],
results,
testutils/unittests
import
waku/factory/waku_conf,
waku/factory/waku_conf_builder,
waku/factory/networks_config,
waku/common/utils/parse_size_units
suite "Waku Conf - build with cluster conf":
test "Cluster Conf is passed and relay is enabled":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.discv5Conf.withUdpPort(9000)
builder.withRelayServiceRatio("50:50")
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
if clusterConf.rlnRelay:
assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled"
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress.string ==
clusterConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
test "Cluster Conf is passed, but relay is disabled":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.withRelayServiceRatio("50:50")
builder.discv5Conf.withUdpPort(9000)
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withRelay(false)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
assert conf.rlnRelayConf.isNone
test "Cluster Conf is passed, but rln relay is disabled":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let # Mount all shards in network
expectedShards = toSeq[0.uint16 .. 7.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.rlnRelayConf.withEnabled(false)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
assert conf.rlnRelayConf.isNone
test "Cluster Conf is passed and valid shards are specified":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let shards = @[2.uint16, 3.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withShards(shards)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == shards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
test "Cluster Conf is passed and invalid shards are specified":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
let shards = @[2.uint16, 10.uint16]
## Given
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
builder.withClusterConf(clusterConf)
builder.withShards(shards)
## When
let resConf = builder.build()
## Then
assert resConf.isErr(), "Invalid shard was accepted"
test "Cluster Conf is passed and RLN contract is overridden":
## Setup
let clusterConf = ClusterConf.TheWakuNetworkConf()
var builder = WakuConfBuilder.init()
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
let contractAddress = "0x0123456789ABCDEF"
## Given
builder.rlnRelayConf.withEthContractAddress(contractAddress)
builder.withClusterConf(clusterConf)
builder.withRelay(true)
builder.rlnRelayConf.withTreePath("/tmp/test")
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check conf.clusterId == clusterConf.clusterId
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
check conf.shards == expectedShards
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
check conf.discv5Conf.isSome == clusterConf.discv5Discovery
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
if clusterConf.rlnRelay:
assert conf.rlnRelayConf.isSome
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress.string == contractAddress
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
suite "Waku Conf - node key":
test "Node key is generated":
## Setup
var builder = WakuConfBuilder.init()
builder.withClusterId(1)
## Given
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
let pubkey = getPublicKey(conf.nodeKey)
assert pubkey.isOk()
test "Passed node key is used":
## Setup
let nodeKeyStr =
"0011223344556677889900aabbccddeeff0011223344556677889900aabbccddeeff"
let nodeKey = block:
let key = SkPrivateKey.init(utils.fromHex(nodeKeyStr)).tryGet()
crypto.PrivateKey(scheme: Secp256k1, skkey: key)
var builder = WakuConfBuilder.init()
builder.withClusterId(1)
## Given
builder.withNodeKey(nodeKey)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
assert utils.toHex(conf.nodeKey.getRawBytes().get()) ==
utils.toHex(nodeKey.getRawBytes().get()),
"Passed node key isn't in config:" & $nodeKey & $conf.nodeKey
suite "Waku Conf - extMultiaddrs":
test "Valid multiaddresses are passed and accepted":
## Setup
var builder = WakuConfBuilder.init()
builder.withClusterId(1)
## Given
let multiaddrs =
@["/ip4/127.0.0.1/udp/9090/quic", "/ip6/::1/tcp/3217", "/dns4/foo.com/tcp/80"]
builder.withExtMultiAddrs(multiaddrs)
## When
let resConf = builder.build()
assert resConf.isOk(), $resConf.error
let conf = resConf.get()
## Then
let resValidate = conf.validate()
assert resValidate.isOk(), $resValidate.error
check multiaddrs.len == conf.networkConf.extMultiAddrs.len
let resMultiaddrs = conf.networkConf.extMultiAddrs.map(
proc(m: MultiAddress): string =
$m
)
for m in multiaddrs:
check m in resMultiaddrs

View File

@ -134,11 +134,11 @@ suite "RLN Proofs as a Lightpush Service":
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode"),
)
await allFutures(server.start(), client.start())

View File

@ -128,11 +128,11 @@ suite "RLN Proofs as a Lightpush Service":
# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode"),
)
await allFutures(server.start(), client.start())

View File

@ -83,16 +83,15 @@ proc getWakuRlnConfigOnChain*(
ethClientAddress: Option[string] = none(string),
): WakuRlnConfig =
return WakuRlnConfig(
rlnRelayDynamic: true,
rlnRelayCredIndex: some(credIndex),
rlnRelayEthContractAddress: rlnRelayEthContractAddress,
rlnRelayEthClientAddress: ethClientAddress.get(EthClient),
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $credIndex),
rlnEpochSizeSec: 1,
dynamic: true,
credIndex: some(credIndex),
ethContractAddress: rlnRelayEthContractAddress,
ethClientAddress: ethClientAddress.get(EthClient),
treePath: genTempPath("rln_tree", "wakunode_" & $credIndex),
epochSizeSec: 1,
onFatalErrorAction: fatalErrorHandler.get(fatalErrorVoidHandler),
# If these are used, initialisation fails with "failed to mount WakuRlnRelay: could not initialize the group manager: the commitment does not have a membership"
rlnRelayCredPath: keystorePath,
rlnRelayCredPassword: password,
creds: some(RlnRelayCreds(path: keystorePath, password: password)),
)
proc setupRelayWithOnChainRln*(
@ -227,13 +226,13 @@ suite "Waku RlnRelay - End to End - Static":
let contractAddress = await uploadRLNContract(EthClient)
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: true,
rlnRelayCredIndex: some(0.uint),
rlnRelayUserMessageLimit: 111,
rlnRelayTreepath: genTempPath("rln_tree", "wakunode_0"),
rlnRelayEthClientAddress: EthClient,
rlnRelayEthContractAddress: $contractAddress,
rlnRelayChainId: 1337,
dynamic: true,
credIndex: some(0.uint),
userMessageLimit: 111,
treepath: genTempPath("rln_tree", "wakunode_0"),
ethClientAddress: EthClient,
ethContractAddress: $contractAddress,
chainId: 1337,
onFatalErrorAction: proc(errStr: string) =
raiseAssert errStr
,

View File

@ -4,7 +4,7 @@ import chronos, confutils/toml/std/net, libp2p/multiaddress, testutils/unittests
import ./testlib/wakunode, waku/waku_enr/capabilities
include waku/node/config
include waku/node/net_config
proc defaultTestWakuFlags(): CapabilitiesBitfield =
CapabilitiesBitfield.init(
@ -13,19 +13,27 @@ proc defaultTestWakuFlags(): CapabilitiesBitfield =
suite "Waku NetConfig":
asyncTest "Create NetConfig with default values":
let conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let wakuFlags = defaultTestWakuFlags()
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extIp = none(IpAddress),
extPort = none(Port),
extMultiAddrs = @[],
wsBindPort = conf.websocketPort,
wsEnabled = conf.websocketSupport,
wssEnabled = conf.websocketSecureSupport,
wsBindPort =
if conf.webSocketConf.isSome():
some(conf.webSocketConf.get().port)
else:
none(Port),
wsEnabled = conf.webSocketConf.isSome(),
wssEnabled =
if conf.webSocketConf.isSome():
conf.webSocketConf.get().secureConf.isSome()
else:
false,
dns4DomainName = none(string),
discv5UdpPort = none(Port),
wakuFlags = some(wakuFlags),
@ -35,10 +43,11 @@ suite "Waku NetConfig":
netConfigRes.isOk()
asyncTest "AnnouncedAddresses contains only bind address when no external addresses are provided":
let conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let netConfigRes =
NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort)
let netConfigRes = NetConfig.init(
bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
)
assert netConfigRes.isOk(), $netConfigRes.error
@ -47,17 +56,19 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 1 # Only bind address should be present
netConfig.announcedAddresses[0] ==
formatListenAddress(ip4TcpEndPoint(conf.listenAddress, conf.tcpPort))
formatListenAddress(
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.networkConf.p2pTcpPort)
)
asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
)
@ -72,13 +83,13 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses contains dns4DomainName if provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extPort = Port(1234)
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
)
@ -93,14 +104,14 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses includes extMultiAddrs when provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)]
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
)
@ -114,14 +125,14 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses uses dns4DomainName over extIp when both are provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extIp = some(extIp),
extPort = some(extPort),
@ -137,12 +148,12 @@ suite "Waku NetConfig":
asyncTest "AnnouncedAddresses includes WebSocket addresses when enabled":
var
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
wssEnabled = false
var netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
wsEnabled = true,
wssEnabled = wssEnabled,
)
@ -153,16 +164,18 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
netConfig.announcedAddresses[1] ==
(ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled))
netConfig.announcedAddresses[1] == (
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.webSocketConf.get().port) &
wsFlag(wssEnabled)
)
## Now try the same for the case of wssEnabled = true
wssEnabled = true
netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
wsEnabled = true,
wssEnabled = wssEnabled,
)
@ -173,19 +186,21 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
netConfig.announcedAddresses[1] ==
(ip4TcpEndPoint(conf.listenAddress, conf.websocketPort) & wsFlag(wssEnabled))
netConfig.announcedAddresses[1] == (
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.websocketConf.get().port) &
wsFlag(wssEnabled)
)
asyncTest "Announced WebSocket address contains external IP if provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
wssEnabled = false
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
wsEnabled = true,
@ -199,18 +214,18 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # External address + wsHostAddress
netConfig.announcedAddresses[1] ==
(ip4TcpEndPoint(extIp, conf.websocketPort) & wsFlag(wssEnabled))
(ip4TcpEndPoint(extIp, conf.websocketConf.get().port) & wsFlag(wssEnabled))
asyncTest "Announced WebSocket address contains dns4DomainName if provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extPort = Port(1234)
wssEnabled = false
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
wsEnabled = true,
@ -223,20 +238,22 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
netConfig.announcedAddresses[1] ==
(dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled))
netConfig.announcedAddresses[1] == (
dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) &
wsFlag(wssEnabled)
)
asyncTest "Announced WebSocket address contains dns4DomainName if provided alongside extIp":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
wssEnabled = false
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extIp = some(extIp),
extPort = some(extPort),
@ -251,32 +268,35 @@ suite "Waku NetConfig":
check:
netConfig.announcedAddresses.len == 2 # DNS address + wsHostAddress
netConfig.announcedAddresses[0] == dns4TcpEndPoint(dns4DomainName, extPort)
netConfig.announcedAddresses[1] ==
(dns4TcpEndPoint(dns4DomainName, conf.websocketPort) & wsFlag(wssEnabled))
netConfig.announcedAddresses[1] == (
dns4TcpEndPoint(dns4DomainName, conf.webSocketConf.get().port) &
wsFlag(wssEnabled)
)
asyncTest "ENR is set with bindIp/Port if no extIp/Port are provided":
let conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let netConfigRes =
NetConfig.init(bindIp = conf.listenAddress, bindPort = conf.tcpPort)
let netConfigRes = NetConfig.init(
bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
)
assert netConfigRes.isOk(), $netConfigRes.error
let netConfig = netConfigRes.get()
check:
netConfig.enrIp.get() == conf.listenAddress
netConfig.enrPort.get() == conf.tcpPort
netConfig.enrIp.get() == conf.networkConf.p2pListenAddress
netConfig.enrPort.get() == conf.networkConf.p2pTcpPort
asyncTest "ENR is set with extIp/Port if provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
extIp = parseIpAddress("1.2.3.4")
extPort = Port(1234)
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extIp = some(extIp),
extPort = some(extPort),
)
@ -291,13 +311,13 @@ suite "Waku NetConfig":
asyncTest "ENR is set with dns4DomainName if provided":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
dns4DomainName = "example.com"
extPort = Port(1234)
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
dns4DomainName = some(dns4DomainName),
extPort = some(extPort),
)
@ -311,7 +331,7 @@ suite "Waku NetConfig":
asyncTest "wsHostAddress is not announced if a WS/WSS address is provided in extMultiAddrs":
var
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
extAddIp = parseIpAddress("1.2.3.4")
extAddPort = Port(1234)
wsEnabled = true
@ -319,8 +339,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
var netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
wsEnabled = wsEnabled,
)
@ -338,8 +358,8 @@ suite "Waku NetConfig":
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
wssEnabled = wssEnabled,
)
@ -354,14 +374,14 @@ suite "Waku NetConfig":
asyncTest "Only extMultiAddrs are published when enabling extMultiAddrsOnly flag":
let
conf = defaultTestWakuNodeConf()
conf = defaultTestWakuConf()
extAddIp = parseIpAddress("1.2.3.4")
extAddPort = Port(1234)
extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)]
let netConfigRes = NetConfig.init(
bindIp = conf.listenAddress,
bindPort = conf.tcpPort,
bindIp = conf.networkConf.p2pListenAddress,
bindPort = conf.networkConf.p2pTcpPort,
extMultiAddrs = extMultiAddrs,
extMultiAddrsOnly = true,
)

View File

@ -2,7 +2,6 @@ import
std/[options, times],
results,
stew/byteutils,
stew/shims/net,
chronos,
libp2p/switch,
libp2p/builders,

View File

@ -15,35 +15,41 @@ import
node/peer_manager,
waku_enr,
discovery/waku_discv5,
factory/external_config,
factory/internal_config,
factory/waku_conf,
factory/conf_builder/conf_builder,
factory/builder,
],
./common
# Waku node
proc defaultTestWakuNodeConf*(): WakuNodeConf =
## set cluster-id == 0 to not use TWN as that needs a background blockchain (e.g. anvil)
## running because RLN is mounted if TWN (cluster-id == 1) is configured.
WakuNodeConf(
cmd: noCommand,
tcpPort: Port(60000),
websocketPort: Port(8000),
listenAddress: parseIpAddress("0.0.0.0"),
restAddress: parseIpAddress("127.0.0.1"),
metricsServerAddress: parseIpAddress("127.0.0.1"),
dnsAddrsNameServers: @[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")],
nat: "any",
maxConnections: 50,
relayServiceRatio: "60:40",
maxMessageSize: "1024 KiB",
clusterId: DefaultClusterId,
shards: @[DefaultShardId],
relay: true,
rendezvous: true,
storeMessageDbUrl: "sqlite://store.sqlite3",
# TODO: migrate to usage of a test cluster conf
proc defaultTestWakuConfBuilder*(): WakuConfBuilder =
var builder = WakuConfBuilder.init()
builder.withP2pTcpPort(Port(60000))
builder.withP2pListenAddress(parseIpAddress("0.0.0.0"))
builder.restServerConf.withListenAddress(parseIpAddress("127.0.0.1"))
builder.withDnsAddrsNameServers(
@[parseIpAddress("1.1.1.1"), parseIpAddress("1.0.0.1")]
)
builder.withNatStrategy("any")
builder.withMaxConnections(50)
builder.withRelayServiceRatio("60:40")
builder.withMaxMessageSize("1024 KiB")
builder.withClusterId(DefaultClusterId)
builder.withShards(@[DefaultShardId])
builder.withRelay(true)
builder.withRendezvous(true)
builder.storeServiceConf.withDbMigration(false)
builder.storeServiceConf.withSupportV2(false)
builder.webSocketConf.withWebSocketPort(Port(8000))
builder.webSocketConf.withEnabled(true)
return builder
proc defaultTestWakuConf*(): WakuConf =
var builder = defaultTestWakuConfBuilder()
return builder.build().value
proc newTestWakuNode*(
nodeKey: crypto.PrivateKey,
@ -78,31 +84,31 @@ proc newTestWakuNode*(
else:
extPort
var conf = defaultTestWakuNodeConf()
var conf = defaultTestWakuConf()
conf.clusterId = clusterId
conf.shards = shards
if dns4DomainName.isSome() and extIp.isNone():
# If there's an error resolving the IP, an exception is thrown and test fails
let dns = (waitFor dnsResolve(dns4DomainName.get(), conf)).valueOr:
let dns = (waitFor dnsResolve(dns4DomainName.get(), conf.dnsAddrsNameServers)).valueOr:
raise newException(Defect, error)
resolvedExtIp = some(parseIpAddress(dns))
let netConf = NetConfig.init(
bindIp = bindIp,
clusterId = conf.clusterId,
bindIp = bindIp,
bindPort = bindPort,
extIp = resolvedExtIp,
extPort = extPort,
extMultiAddrs = extMultiAddrs,
wsBindPort = wsBindPort,
wsBindPort = some(wsBindPort),
wsEnabled = wsEnabled,
wssEnabled = wssEnabled,
wakuFlags = wakuFlags,
dns4DomainName = dns4DomainName,
discv5UdpPort = discv5UdpPort,
wakuFlags = wakuFlags,
).valueOr:
raise newException(Defect, "Invalid network configuration: " & error)
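
Per-test overrides now go through the shared builder instead of mutating flat fields; a sketch drawn from the helpers above and the updated node-factory tests:

var confBuilder = defaultTestWakuConfBuilder()
confBuilder.storeServiceConf.withEnabled(true)
confBuilder.storeServiceConf.withDbUrl("sqlite://store.sqlite3")
let conf = confBuilder.build().value # build() returns a Result; .value asserts success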

View File

@ -1,27 +1,33 @@
{.used.}
import
std/[sequtils, algorithm],
std/[sequtils, algorithm, options, net],
results,
stew/shims/net,
chronos,
chronicles,
testutils/unittests,
libp2p/crypto/crypto as libp2p_keys,
eth/keys as eth_keys,
eth/p2p/discoveryv5/enr as ethEnr,
libp2p/crypto/secp,
libp2p/protocols/rendezvous
import
waku/[waku_core/topics, waku_enr, discovery/waku_discv5, waku_enr/capabilities],
waku/[
waku_core/topics,
waku_core/codecs,
waku_enr,
discovery/waku_discv5,
waku_enr/capabilities,
factory/conf_builder/conf_builder,
factory/waku,
node/waku_node,
node/peer_manager,
],
../testlib/[wakucore, testasync, assertions, futures, wakunode, testutils],
../waku_enr/utils,
./utils as discv5_utils
import eth/p2p/discoveryv5/enr as ethEnr
include waku/factory/waku
suite "Waku Discovery v5":
const validEnr =
"enr:-K64QGAvsATunmvMT5c3LFjKS0tG39zlQ1195Z2pWu6RoB5fWP3EXz9QPlRXN" &
@ -360,7 +366,7 @@ suite "Waku Discovery v5":
# Cleanup
await allFutures(node1.stop(), node2.stop(), node3.stop(), node4.stop())
suite "addBoostrapNode":
suite "addBootstrapNode":
asyncTest "address is valid":
# Given an empty list of enrs
var enrs: seq[Record] = @[]
@ -413,25 +419,32 @@ suite "Waku Discovery v5":
suite "waku discv5 initialization":
asyncTest "Start waku and check discv5 discovered peers":
let myRng = crypto.newRng()
var conf = defaultTestWakuNodeConf()
let myRng = libp2p_keys.newRng()
var confBuilder = defaultTestWakuConfBuilder()
conf.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[])
conf.discv5Discovery = true
conf.discv5UdpPort = Port(9000)
confBuilder.withNodeKey(libp2p_keys.PrivateKey.random(Secp256k1, myRng[])[])
confBuilder.discv5Conf.withEnabled(true)
confBuilder.discv5Conf.withUdpPort(9000.Port)
let conf = confBuilder.build().valueOr:
raiseAssert error
let waku0 = Waku.new(conf).valueOr:
raiseAssert error
(waitFor startWaku(addr waku0)).isOkOr:
raiseAssert error
conf.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[])
conf.discv5BootstrapNodes = @[waku0.node.enr.toURI()]
conf.discv5Discovery = true
conf.discv5UdpPort = Port(9001)
conf.tcpPort = Port(60001)
confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[])
confBuilder.discv5Conf.withBootstrapNodes(@[waku0.node.enr.toURI()])
confBuilder.discv5Conf.withEnabled(true)
confBuilder.discv5Conf.withUdpPort(9001.Port)
confBuilder.withP2pTcpPort(60001.Port)
confBuilder.websocketConf.withEnabled(false)
let waku1 = Waku.new(conf).valueOr:
let conf1 = confBuilder.build().valueOr:
raiseAssert error
let waku1 = Waku.new(conf1).valueOr:
raiseAssert error
(waitFor startWaku(addr waku1)).isOkOr:
raiseAssert error
@ -439,12 +452,14 @@ suite "Waku Discovery v5":
await waku1.node.mountPeerExchange()
await waku1.node.mountRendezvous()
var conf2 = conf
conf2.discv5BootstrapNodes = @[waku1.node.enr.toURI()]
conf2.discv5Discovery = true
conf2.tcpPort = Port(60003)
conf2.discv5UdpPort = Port(9003)
conf2.nodekey = some(crypto.PrivateKey.random(Secp256k1, myRng[])[])
confBuilder.discv5Conf.withBootstrapNodes(@[waku1.node.enr.toURI()])
confBuilder.withP2pTcpPort(60003.Port)
confBuilder.discv5Conf.withUdpPort(9003.Port)
confBuilder.withNodeKey(crypto.PrivateKey.random(Secp256k1, myRng[])[])
confBuilder.websocketConf.withEnabled(false)
let conf2 = confBuilder.build().valueOr:
raiseAssert error
let waku2 = Waku.new(conf2).valueOr:
raiseAssert error
@ -470,16 +485,26 @@ suite "Waku Discovery v5":
assert r.isSome(), "could not retrieve peer mounting RendezVousCodec"
asyncTest "Discv5 bootstrap nodes should be added to the peer store":
var conf = defaultTestWakuNodeConf()
conf.discv5BootstrapNodes = @[validEnr]
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.discv5Conf.withEnabled(true)
confBuilder.discv5Conf.withUdpPort(9003.Port)
confBuilder.discv5Conf.withBootstrapNodes(@[validEnr])
let conf = confBuilder.build().valueOr:
raiseAssert error
let waku = Waku.new(conf).valueOr:
raiseAssert error
discard setupDiscoveryV5(
waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue,
waku.conf, waku.dynamicBootstrapNodes, waku.rng, waku.key,
waku.node.enr,
waku.node.peerManager,
waku.node.topicSubscriptionQueue,
waku.conf.discv5Conf.get(),
waku.dynamicBootstrapNodes,
waku.rng,
waku.conf.nodeKey,
waku.conf.networkConf.p2pListenAddress,
waku.conf.portsShift,
)
check:
@ -488,18 +513,29 @@ suite "Waku Discovery v5":
)
asyncTest "Invalid discv5 bootstrap node ENRs are ignored":
var conf = defaultTestWakuNodeConf()
var confBuilder = defaultTestWakuConfBuilder()
confBuilder.discv5Conf.withEnabled(true)
confBuilder.discv5Conf.withUdpPort(9004.Port)
let invalidEnr = "invalid-enr"
conf.discv5BootstrapNodes = @[invalidEnr]
confBuilder.discv5Conf.withBootstrapNodes(@[invalidEnr])
let conf = confBuilder.build().valueOr:
raiseAssert error
let waku = Waku.new(conf).valueOr:
raiseAssert error
discard setupDiscoveryV5(
waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue,
waku.conf, waku.dynamicBootstrapNodes, waku.rng, waku.key,
waku.node.enr,
waku.node.peerManager,
waku.node.topicSubscriptionQueue,
conf.discv5Conf.get(),
waku.dynamicBootstrapNodes,
waku.rng,
waku.conf.nodeKey,
waku.conf.networkConf.p2pListenAddress,
waku.conf.portsShift,
)
check:

View File

@ -44,10 +44,10 @@ proc newTestWakuRelay*(switch = newTestSwitch()): Future[WakuRelay] {.async.} =
proc setupRln*(node: WakuNode, identifier: uint) {.async.} =
await node.mountRlnRelay(
WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(identifier),
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier),
rlnEpochSizeSec: 1,
dynamic: false,
credIndex: some(identifier),
treePath: genTempPath("rln_tree", "wakunode_" & $identifier),
epochSizeSec: 1,
)
)

View File

@ -690,11 +690,11 @@ suite "Waku rln relay":
let index = MembershipIndex(5)
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_2"),
dynamic: false,
credIndex: some(index),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "waku_rln_relay_2"),
)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:
@ -741,22 +741,22 @@ suite "Waku rln relay":
let index2 = MembershipIndex(6)
let rlnConf1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index1),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_3"),
dynamic: false,
credIndex: some(index1),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "waku_rln_relay_3"),
)
let wakuRlnRelay1 = (await WakuRlnRelay.new(rlnConf1)).valueOr:
raiseAssert "failed to create waku rln relay: " & $error
let rlnConf2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index2),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
dynamic: false,
credIndex: some(index2),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "waku_rln_relay_4"),
)
let wakuRlnRelay2 = (await WakuRlnRelay.new(rlnConf2)).valueOr:
@ -893,11 +893,11 @@ suite "Waku rln relay":
proc runTestForEpochSizeSec(rlnEpochSizeSec: uint) {.async.} =
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: rlnEpochSizeSec,
rlnRelayTreePath: genTempPath("rln_tree", "waku_rln_relay_4"),
dynamic: false,
credIndex: some(index),
userMessageLimit: 1,
epochSizeSec: rlnEpochSizeSec,
treePath: genTempPath("rln_tree", "waku_rln_relay_4"),
)
let wakuRlnRelay = (await WakuRlnRelay.new(wakuRlnConfig)).valueOr:

View File

@ -25,11 +25,11 @@ proc buildWakuRlnConfig(
let treePath = genTempPath("rln_tree", treeFilename)
# Off-chain
return WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(credIndex.uint),
rlnRelayUserMessageLimit: userMessageLimit,
rlnEpochSizeSec: epochSizeSec,
rlnRelayTreePath: treePath,
dynamic: false,
credIndex: some(credIndex.uint),
userMessageLimit: userMessageLimit,
epochSizeSec: epochSizeSec,
treePath: treePath,
)
proc waitForNullifierLog(node: WakuNode, expectedLen: int): Future[bool] {.async.} =
@ -63,11 +63,11 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
@ -79,11 +79,11 @@ procSuite "WakuNode - RLN relay":
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_2"),
dynamic: false,
credIndex: some(2.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_2"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
@ -95,11 +95,11 @@ procSuite "WakuNode - RLN relay":
assert false, "Failed to mount relay"
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_3"),
dynamic: false,
credIndex: some(3.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_3"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
@ -174,11 +174,11 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
for index, node in nodes:
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(index.uint + 1),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
dynamic: false,
credIndex: some(index.uint + 1),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_" & $(index + 1)),
)
await node.mountRlnRelay(wakuRlnConfig)
@ -278,11 +278,11 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_4"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_4"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
@ -294,11 +294,11 @@ procSuite "WakuNode - RLN relay":
assert false, "Failed to mount relay"
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_5"),
dynamic: false,
credIndex: some(2.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_5"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
@ -310,11 +310,11 @@ procSuite "WakuNode - RLN relay":
assert false, "Failed to mount relay"
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_6"),
dynamic: false,
credIndex: some(3.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_6"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
@ -403,11 +403,11 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
let wakuRlnConfig1 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_7"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_7"),
)
await node1.mountRlnRelay(wakuRlnConfig1)
@ -420,11 +420,11 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
let wakuRlnConfig2 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(2.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_8"),
dynamic: false,
credIndex: some(2.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_8"),
)
await node2.mountRlnRelay(wakuRlnConfig2)
@ -436,11 +436,11 @@ procSuite "WakuNode - RLN relay":
# mount rlnrelay in off-chain mode
let wakuRlnConfig3 = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(3.uint),
rlnRelayUserMessageLimit: 1,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_9"),
dynamic: false,
credIndex: some(3.uint),
userMessageLimit: 1,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_9"),
)
await node3.mountRlnRelay(wakuRlnConfig3)
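For reference, the renames above map one-to-one onto the old `rlnRelay*`/`rln*` names now that the fields live on a dedicated RLN config object. A minimal off-chain sketch of the new shape, mirroring the test values above (the node variable, index and tree-path label are illustrative):
# assumes std/options plus the RLN relay module and genTempPath from the test helpers
let exampleRlnConfig = WakuRlnConfig(
  dynamic: false, # was rlnRelayDynamic
  credIndex: some(1.uint), # was rlnRelayCredIndex
  userMessageLimit: 1, # was rlnRelayUserMessageLimit
  epochSizeSec: 1, # was rlnEpochSizeSec
  treePath: genTempPath("rln_tree", "example"), # was rlnRelayTreePath
)
# then, inside an async proc: await node.mountRlnRelay(exampleRlnConfig)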

View File

@ -25,10 +25,10 @@ proc setupStaticRln*(
) {.async.} =
await node.mountRlnRelay(
WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(identifier),
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_" & $identifier),
rlnEpochSizeSec: 1,
dynamic: false,
credIndex: some(identifier),
treePath: genTempPath("rln_tree", "wakunode_" & $identifier),
epochSizeSec: 1,
)
)

View File

@ -9,15 +9,14 @@ import
libp2p/crypto/secp,
libp2p/multiaddress,
libp2p/switch
import
../testlib/common, ../testlib/wakucore, ../testlib/wakunode, waku/node/waku_metrics
import ../testlib/wakucore, ../testlib/wakunode
include waku/factory/waku
include waku/factory/waku, waku/common/enr/typed_record
suite "Wakunode2 - Waku":
test "compilation version should be reported":
## Given
var conf = defaultTestWakuNodeConf()
let conf = defaultTestWakuConf()
let waku = Waku.new(conf).valueOr:
raiseAssert error
@ -32,7 +31,7 @@ suite "Wakunode2 - Waku":
suite "Wakunode2 - Waku initialization":
test "peer persistence setup should be successfully mounted":
## Given
var conf = defaultTestWakuNodeConf()
var conf = defaultTestWakuConf()
conf.peerPersistence = true
let waku = Waku.new(conf).valueOr:
@ -43,7 +42,7 @@ suite "Wakunode2 - Waku initialization":
test "node setup is successful with default configuration":
## Given
var conf = defaultTestWakuNodeConf()
var conf = defaultTestWakuConf()
## When
var waku = Waku.new(conf).valueOr:
@ -52,9 +51,6 @@ suite "Wakunode2 - Waku initialization":
(waitFor startWaku(addr waku)).isOkOr:
raiseAssert error
waku.metricsServer = waku_metrics.startMetricsServerAndLogging(conf).valueOr:
raiseAssert error
## Then
let node = waku.node
check:
@ -69,8 +65,8 @@ suite "Wakunode2 - Waku initialization":
test "app properly handles dynamic port configuration":
## Given
var conf = defaultTestWakuNodeConf()
conf.tcpPort = Port(0)
var conf = defaultTestWakuConf()
conf.networkConf.p2pTcpPort = Port(0)
## When
var waku = Waku.new(conf).valueOr:
@ -82,9 +78,12 @@ suite "Wakunode2 - Waku initialization":
## Then
let
node = waku.node
typedNodeEnr = node.enr.toTypedRecord()
typedNodeEnr = node.enr.toTyped()
assert typedNodeEnr.isOk(), $typedNodeEnr.error
let tcpPort = typedNodeEnr.value.tcp()
assert tcpPort.isSome()
check tcpPort.get() != 0
check:
# Waku started properly

View File

@ -69,10 +69,10 @@ suite "Waku v2 REST API - health":
# now kick in rln (currently the only check for health)
await node.mountRlnRelay(
WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode"),
dynamic: false,
credIndex: some(1.uint),
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode"),
)
)
healthMonitor.setNode(node)

View File

@ -226,11 +226,11 @@ suite "Waku v2 Rest API - Relay":
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 20,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 20,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@ -456,11 +456,11 @@ suite "Waku v2 Rest API - Relay":
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 20,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 20,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@ -510,11 +510,11 @@ suite "Waku v2 Rest API - Relay":
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 20,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 20,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@ -561,11 +561,11 @@ suite "Waku v2 Rest API - Relay":
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 20,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 20,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)
@ -618,11 +618,11 @@ suite "Waku v2 Rest API - Relay":
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
let wakuRlnConfig = WakuRlnConfig(
rlnRelayDynamic: false,
rlnRelayCredIndex: some(1.uint),
rlnRelayUserMessageLimit: 20,
rlnEpochSizeSec: 1,
rlnRelayTreePath: genTempPath("rln_tree", "wakunode_1"),
dynamic: false,
credIndex: some(1.uint),
userMessageLimit: 20,
epochSizeSec: 1,
treePath: genTempPath("rln_tree", "wakunode_1"),
)
await node.mountRlnRelay(wakuRlnConfig)

View File

@ -5,13 +5,15 @@ else:
import chronicles, sequtils, results
import
waku/[waku_rln_relay/rln, waku_rln_relay/conversion_utils, factory/external_config]
import waku/[waku_rln_relay/rln, waku_rln_relay/conversion_utils]
logScope:
topics = "rln_db_inspector"
proc doInspectRlnDb*(conf: WakuNodeConf) =
type InspectRlnDbConf* = object
treePath*: string
proc doInspectRlnDb*(conf: InspectRlnDbConf) =
# 1. load configuration
trace "configuration", conf = $conf

View File

@ -11,13 +11,22 @@ import
waku_rln_relay/rln,
waku_rln_relay/conversion_utils,
waku_rln_relay/group_manager/on_chain,
factory/external_config,
]
logScope:
topics = "rln_keystore_generator"
proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
type RlnKeystoreGeneratorConf* = object
execute*: bool
ethContractAddress*: string
ethClientAddress*: string
chainId*: uint
credPath*: string
credPassword*: string
userMessageLimit*: uint64
ethPrivateKey*: string
proc doRlnKeystoreGenerator*(conf: RlnKeystoreGeneratorConf) =
# 1. load configuration
trace "configuration", conf = $conf
@ -56,13 +65,13 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
# 4. initialize OnchainGroupManager
let groupManager = OnchainGroupManager(
ethClientUrl: string(conf.rlnRelayethClientAddress),
chainId: conf.rlnRelayChainId,
ethContractAddress: conf.rlnRelayEthContractAddress,
ethClientUrl: string(conf.ethClientAddress),
chainId: conf.chainId,
ethContractAddress: conf.ethContractAddress,
rlnInstance: rlnInstance,
keystorePath: none(string),
keystorePassword: none(string),
ethPrivateKey: some(conf.rlnRelayEthPrivateKey),
ethPrivateKey: some(conf.ethPrivateKey),
onFatalErrorAction: onFatalErrorAction,
)
try:
@ -77,7 +86,7 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
# 5. register on-chain
try:
waitFor groupManager.register(credential, conf.rlnRelayUserMessageLimit)
waitFor groupManager.register(credential, conf.userMessageLimit)
except Exception, CatchableError:
error "failure while registering credentials on-chain",
error = getCurrentExceptionMsg()
@ -87,28 +96,27 @@ proc doRlnKeystoreGenerator*(conf: WakuNodeConf) =
info "Your membership has been registered on-chain.",
chainId = $groupManager.chainId,
contractAddress = conf.rlnRelayEthContractAddress,
contractAddress = conf.ethContractAddress,
membershipIndex = groupManager.membershipIndex.get()
info "Your user message limit is", userMessageLimit = conf.rlnRelayUserMessageLimit
info "Your user message limit is", userMessageLimit = conf.userMessageLimit
# 6. write to keystore
let keystoreCred = KeystoreMembership(
membershipContract: MembershipContract(
chainId: $groupManager.chainId, address: conf.rlnRelayEthContractAddress
chainId: $groupManager.chainId, address: conf.ethContractAddress
),
treeIndex: groupManager.membershipIndex.get(),
identityCredential: credential,
userMessageLimit: conf.rlnRelayUserMessageLimit,
userMessageLimit: conf.userMessageLimit,
)
let persistRes = addMembershipCredentials(
conf.rlnRelayCredPath, keystoreCred, conf.rlnRelayCredPassword, RLNAppInfo
)
let persistRes =
addMembershipCredentials(conf.credPath, keystoreCred, conf.credPassword, RLNAppInfo)
if persistRes.isErr():
error "failed to persist credentials", error = persistRes.error
quit(1)
info "credentials persisted", path = conf.rlnRelayCredPath
info "credentials persisted", path = conf.credPath
try:
waitFor groupManager.stop()

View File

@ -8,13 +8,14 @@ logScope:
## Due to the design of nim-eth/nat module we must ensure it is only initialized once.
## see: https://github.com/waku-org/nwaku/issues/2628
## Details: nim-eth/nat module starts a meaintenance thread for refreshing the NAT mappings, but everything in the module is global,
## Details: nim-eth/nat module starts a maintenance thread for refreshing the NAT mappings, but everything in the module is global,
## there is no room to store multiple configurations.
## Exact meaning: redirectPorts cannot be called twice in a program lifetime.
## During waku tests we happen to start several node instances in parallel, resulting in multiple NAT configurations and multiple threads.
## Those threads will deadlock each other during teardown.
var singletonNat: bool = false
# TODO: pass `NatStrategy`, not a string
proc setupNat*(
natConf, clientId: string, tcpPort, udpPort: Port
): Result[

View File

@ -10,11 +10,7 @@ import
eth/keys as eth_keys,
eth/p2p/discoveryv5/node,
eth/p2p/discoveryv5/protocol
import
../node/peer_manager/peer_manager,
../waku_core,
../waku_enr,
../factory/external_config
import ../node/peer_manager/peer_manager, ../waku_core, ../waku_enr
export protocol, waku_enr
@ -26,6 +22,18 @@ logScope:
## Config
# TODO: merge both conf
type Discv5Conf* {.requiresInit.} = object
# TODO: This should probably be an Option on the builder,
# translated to a plain `false` on the config when unset
discv5Only*: bool
bootstrapNodes*: seq[string]
udpPort*: Port
tableIpLimit*: uint
bucketIpLimit*: uint
bitsPerHop*: int
enrAutoUpdate*: bool
type WakuDiscoveryV5Config* = object
discv5Config*: Option[DiscoveryConfig]
address*: IpAddress
@ -383,10 +391,12 @@ proc setupDiscoveryV5*(
myENR: enr.Record,
nodePeerManager: PeerManager,
nodeTopicSubscriptionQueue: AsyncEventQueue[SubscriptionEvent],
conf: WakuNodeConf,
conf: Discv5Conf,
dynamicBootstrapNodes: seq[RemotePeerInfo],
rng: ref HmacDrbgContext,
key: crypto.PrivateKey,
p2pListenAddress: IpAddress,
portsShift: uint16,
): WakuDiscoveryV5 =
let dynamicBootstrapEnrs =
dynamicBootstrapNodes.filterIt(it.hasUdpPort()).mapIt(it.enr.get())
@ -394,7 +404,7 @@ proc setupDiscoveryV5*(
var discv5BootstrapEnrs: seq[enr.Record]
# parse enrURIs from the configuration and add the resulting ENRs to the discv5BootstrapEnrs seq
for enrUri in conf.discv5BootstrapNodes:
for enrUri in conf.bootstrapNodes:
addBootstrapNode(enrUri, discv5BootstrapEnrs)
for enr in discv5BootstrapEnrs:
@ -407,19 +417,18 @@ proc setupDiscoveryV5*(
discv5BootstrapEnrs.add(dynamicBootstrapEnrs)
let discv5Config = DiscoveryConfig.init(
conf.discv5TableIpLimit, conf.discv5BucketIpLimit, conf.discv5BitsPerHop
)
let discv5Config =
DiscoveryConfig.init(conf.tableIpLimit, conf.bucketIpLimit, conf.bitsPerHop)
let discv5UdpPort = Port(uint16(conf.discv5UdpPort) + conf.portsShift)
let discv5UdpPort = Port(uint16(conf.udpPort) + portsShift)
let discv5Conf = WakuDiscoveryV5Config(
discv5Config: some(discv5Config),
address: conf.listenAddress,
address: p2pListenAddress,
port: discv5UdpPort,
privateKey: eth_keys.PrivateKey(key.skkey),
bootstrapRecords: discv5BootstrapEnrs,
autoupdateRecord: conf.discv5EnrAutoUpdate,
autoupdateRecord: conf.enrAutoUpdate,
)
WakuDiscoveryV5.new(

View File

@ -89,7 +89,7 @@ proc withNetworkConfigurationDetails*(
extIp = extIp,
extPort = extPort,
extMultiAddrs = extMultiAddrs,
wsBindPort = wsBindPort,
wsBindPort = some(wsBindPort),
wsEnabled = wsEnabled,
wssEnabled = wssEnabled,
wakuFlags = wakuFlags,

View File

@ -0,0 +1,17 @@
import
./waku_conf_builder,
./filter_service_conf_builder,
./store_sync_conf_builder,
./store_service_conf_builder,
./rest_server_conf_builder,
./dns_discovery_conf_builder,
./discv5_conf_builder,
./web_socket_conf_builder,
./metrics_server_conf_builder,
./rln_relay_conf_builder
export
waku_conf_builder, filter_service_conf_builder, store_sync_conf_builder,
store_service_conf_builder, rest_server_conf_builder, dns_discovery_conf_builder,
discv5_conf_builder, web_socket_conf_builder, metrics_server_conf_builder,
rln_relay_conf_builder
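This umbrella module means a single import exposes every sub-builder. A short sketch of the intended usage (the import path is assumed from this PR's layout):
import std/net
import waku/factory/conf_builder/conf_builder # path assumed

var builder = WakuConfBuilder.init()
builder.withRelay(true)
builder.restServerConf.withEnabled(true)
builder.restServerConf.withListenAddress(parseIpAddress("127.0.0.1"))
builder.restServerConf.withPort(8645'u16)
builder.restServerConf.withRelayCacheCapacity(30'u32)
# builder.build() then assembles the nested WakuConf tree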

View File

@ -0,0 +1,65 @@
import chronicles, std/[net, options, sequtils], results
import ../waku_conf
logScope:
topics = "waku conf builder discv5"
###########################
## Discv5 Config Builder ##
###########################
type Discv5ConfBuilder* = object
enabled*: Option[bool]
bootstrapNodes*: seq[string]
bitsPerHop*: Option[int]
bucketIpLimit*: Option[uint]
discv5Only*: Option[bool]
enrAutoUpdate*: Option[bool]
tableIpLimit*: Option[uint]
udpPort*: Option[Port]
proc init*(T: type Discv5ConfBuilder): Discv5ConfBuilder =
Discv5ConfBuilder()
proc withEnabled*(b: var Discv5ConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withBitsPerHop*(b: var Discv5ConfBuilder, bitsPerHop: int) =
b.bitsPerHop = some(bitsPerHop)
proc withBucketIpLimit*(b: var Discv5ConfBuilder, bucketIpLimit: uint) =
b.bucketIpLimit = some(bucketIpLimit)
proc withDiscv5Only*(b: var Discv5ConfBuilder, discv5Only: bool) =
b.discv5Only = some(discv5Only)
proc withEnrAutoUpdate*(b: var Discv5ConfBuilder, enrAutoUpdate: bool) =
b.enrAutoUpdate = some(enrAutoUpdate)
proc withTableIpLimit*(b: var Discv5ConfBuilder, tableIpLimit: uint) =
b.tableIpLimit = some(tableIpLimit)
proc withUdpPort*(b: var Discv5ConfBuilder, udpPort: Port) =
b.udpPort = some(udpPort)
proc withBootstrapNodes*(b: var Discv5ConfBuilder, bootstrapNodes: seq[string]) =
# TODO: validate ENRs?
b.bootstrapNodes = concat(b.bootstrapNodes, bootstrapNodes)
proc build*(b: Discv5ConfBuilder): Result[Option[Discv5Conf], string] =
if not b.enabled.get(false):
return ok(none(Discv5Conf))
return ok(
some(
Discv5Conf(
bootstrapNodes: b.bootstrapNodes,
bitsPerHop: b.bitsPerHop.get(1),
bucketIpLimit: b.bucketIpLimit.get(2),
discv5Only: b.discv5Only.get(false),
enrAutoUpdate: b.enrAutoUpdate.get(true),
tableIpLimit: b.tableIpLimit.get(10),
udpPort: b.udpPort.get(9000.Port),
)
)
)
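Note the enable-gate: `build()` yields `ok(none(Discv5Conf))` unless `withEnabled(true)` was called, and once enabled any unset field falls back to the defaults above (bitsPerHop 1, bucketIpLimit 2, tableIpLimit 10, UDP port 9000). A small sketch (the ENR URI is a placeholder):
var discv5Builder = Discv5ConfBuilder.init()
doAssert discv5Builder.build().value.isNone() # disabled by default

discv5Builder.withEnabled(true)
discv5Builder.withBootstrapNodes(@["enr:-placeholder"])
let discv5Conf = discv5Builder.build().value.get()
doAssert discv5Conf.udpPort == 9000.Port # default applied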

View File

@ -0,0 +1,38 @@
import chronicles, std/[net, options, sequtils], results
import ../waku_conf
logScope:
topics = "waku conf builder dns discovery"
##################################
## DNS Discovery Config Builder ##
##################################
type DnsDiscoveryConfBuilder* = object
enabled*: Option[bool]
enrTreeUrl*: Option[string]
nameServers*: seq[IpAddress]
proc init*(T: type DnsDiscoveryConfBuilder): DnsDiscoveryConfBuilder =
DnsDiscoveryConfBuilder()
proc withEnabled*(b: var DnsDiscoveryConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withEnrTreeUrl*(b: var DnsDiscoveryConfBuilder, enrTreeUrl: string) =
b.enrTreeUrl = some(enrTreeUrl)
proc withNameServers*(b: var DnsDiscoveryConfBuilder, nameServers: seq[IpAddress]) =
b.nameServers = concat(b.nameServers, nameServers)
proc build*(b: DnsDiscoveryConfBuilder): Result[Option[DnsDiscoveryConf], string] =
if not b.enabled.get(false):
return ok(none(DnsDiscoveryConf))
if b.nameServers.len == 0:
return err("dnsDiscovery.nameServers is not specified")
if b.enrTreeUrl.isNone():
return err("dnsDiscovery.enrTreeUrl is not specified")
return ok(
some(DnsDiscoveryConf(nameServers: b.nameServers, enrTreeUrl: b.enrTreeUrl.get()))
)

View File

@ -0,0 +1,45 @@
import chronicles, std/options, results
import ../waku_conf
logScope:
topics = "waku conf builder filter service"
###################################
## Filter Service Config Builder ##
###################################
type FilterServiceConfBuilder* = object
enabled*: Option[bool]
maxPeersToServe*: Option[uint32]
subscriptionTimeout*: Option[uint16]
maxCriteria*: Option[uint32]
proc init*(T: type FilterServiceConfBuilder): FilterServiceConfBuilder =
FilterServiceConfBuilder()
proc withEnabled*(b: var FilterServiceConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withMaxPeersToServe*(b: var FilterServiceConfBuilder, maxPeersToServe: uint32) =
b.maxPeersToServe = some(maxPeersToServe)
proc withSubscriptionTimeout*(
b: var FilterServiceConfBuilder, subscriptionTimeout: uint16
) =
b.subscriptionTimeout = some(subscriptionTimeout)
proc withMaxCriteria*(b: var FilterServiceConfBuilder, maxCriteria: uint32) =
b.maxCriteria = some(maxCriteria)
proc build*(b: FilterServiceConfBuilder): Result[Option[FilterServiceConf], string] =
if not b.enabled.get(false):
return ok(none(FilterServiceConf))
return ok(
some(
FilterServiceConf(
maxPeersToServe: b.maxPeersToServe.get(500),
subscriptionTimeout: b.subscriptionTimeout.get(300),
maxCriteria: b.maxCriteria.get(1000),
)
)
)

View File

@ -0,0 +1,47 @@
import chronicles, std/[net, options], results
import ../waku_conf
logScope:
topics = "waku conf builder metrics server"
###################################
## Metrics Server Config Builder ##
###################################
type MetricsServerConfBuilder* = object
enabled*: Option[bool]
httpAddress*: Option[IpAddress]
httpPort*: Option[Port]
logging*: Option[bool]
proc init*(T: type MetricsServerConfBuilder): MetricsServerConfBuilder =
MetricsServerConfBuilder()
proc withEnabled*(b: var MetricsServerConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withHttpAddress*(b: var MetricsServerConfBuilder, httpAddress: IpAddress) =
b.httpAddress = some(httpAddress)
proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: Port) =
b.httpPort = some(httpPort)
proc withHttpPort*(b: var MetricsServerConfBuilder, httpPort: uint16) =
b.httpPort = some(Port(httpPort))
proc withLogging*(b: var MetricsServerConfBuilder, logging: bool) =
b.logging = some(logging)
proc build*(b: MetricsServerConfBuilder): Result[Option[MetricsServerConf], string] =
if not b.enabled.get(false):
return ok(none(MetricsServerConf))
return ok(
some(
MetricsServerConf(
httpAddress: b.httpAddress.get(static parseIpAddress("127.0.0.1")),
httpPort: b.httpPort.get(8008.Port),
logging: b.logging.get(false),
)
)
)

View File

@ -0,0 +1,64 @@
import chronicles, std/[net, options, sequtils], results
import ../waku_conf
logScope:
topics = "waku conf builder rest server"
################################
## REST Server Config Builder ##
################################
type RestServerConfBuilder* = object
enabled*: Option[bool]
allowOrigin*: seq[string]
listenAddress*: Option[IpAddress]
port*: Option[Port]
admin*: Option[bool]
relayCacheCapacity*: Option[uint32]
proc init*(T: type RestServerConfBuilder): RestServerConfBuilder =
RestServerConfBuilder()
proc withEnabled*(b: var RestServerConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withAllowOrigin*(b: var RestServerConfBuilder, allowOrigin: seq[string]) =
b.allowOrigin = concat(b.allowOrigin, allowOrigin)
proc withListenAddress*(b: var RestServerConfBuilder, listenAddress: IpAddress) =
b.listenAddress = some(listenAddress)
proc withPort*(b: var RestServerConfBuilder, port: Port) =
b.port = some(port)
proc withPort*(b: var RestServerConfBuilder, port: uint16) =
b.port = some(Port(port))
proc withAdmin*(b: var RestServerConfBuilder, admin: bool) =
b.admin = some(admin)
proc withRelayCacheCapacity*(b: var RestServerConfBuilder, relayCacheCapacity: uint32) =
b.relayCacheCapacity = some(relayCacheCapacity)
proc build*(b: RestServerConfBuilder): Result[Option[RestServerConf], string] =
if not b.enabled.get(false):
return ok(none(RestServerConf))
if b.listenAddress.isNone():
return err("restServer.listenAddress is not specified")
if b.port.isNone():
return err("restServer.port is not specified")
if b.relayCacheCapacity.isNone():
return err("restServer.relayCacheCapacity is not specified")
return ok(
some(
RestServerConf(
allowOrigin: b.allowOrigin,
listenAddress: b.listenAddress.get(),
port: b.port.get(),
admin: b.admin.get(false),
relayCacheCapacity: b.relayCacheCapacity.get(),
)
)
)

View File

@ -0,0 +1,104 @@
import chronicles, std/options, results
import ../waku_conf
logScope:
topics = "waku conf builder rln relay"
##############################
## RLN Relay Config Builder ##
##############################
type RlnRelayConfBuilder* = object
enabled*: Option[bool]
chainId*: Option[uint]
ethClientAddress*: Option[string]
ethContractAddress*: Option[string]
credIndex*: Option[uint]
credPassword*: Option[string]
credPath*: Option[string]
dynamic*: Option[bool]
epochSizeSec*: Option[uint64]
userMessageLimit*: Option[uint64]
treePath*: Option[string]
proc init*(T: type RlnRelayConfBuilder): RlnRelayConfBuilder =
RlnRelayConfBuilder()
proc withEnabled*(b: var RlnRelayConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withChainId*(b: var RlnRelayConfBuilder, chainId: uint) =
b.chainId = some(chainId)
proc withCredIndex*(b: var RlnRelayConfBuilder, credIndex: uint) =
b.credIndex = some(credIndex)
proc withCredPassword*(b: var RlnRelayConfBuilder, credPassword: string) =
b.credPassword = some(credPassword)
proc withCredPath*(b: var RlnRelayConfBuilder, credPath: string) =
b.credPath = some(credPath)
proc withDynamic*(b: var RlnRelayConfBuilder, dynamic: bool) =
b.dynamic = some(dynamic)
proc withEthClientAddress*(b: var RlnRelayConfBuilder, ethClientAddress: string) =
b.ethClientAddress = some(ethClientAddress)
proc withEthContractAddress*(b: var RlnRelayConfBuilder, ethContractAddress: string) =
b.ethContractAddress = some(ethContractAddress)
proc withEpochSizeSec*(b: var RlnRelayConfBuilder, epochSizeSec: uint64) =
b.epochSizeSec = some(epochSizeSec)
proc withUserMessageLimit*(b: var RlnRelayConfBuilder, userMessageLimit: uint64) =
b.userMessageLimit = some(userMessageLimit)
proc withTreePath*(b: var RlnRelayConfBuilder, treePath: string) =
b.treePath = some(treePath)
proc build*(b: RlnRelayConfBuilder): Result[Option[RlnRelayConf], string] =
if not b.enabled.get(false):
return ok(none(RlnRelayConf))
if b.chainId.isNone():
return err("RLN Relay Chain Id is not specified")
let creds =
if b.credPath.isSome() and b.credPassword.isSome():
some(RlnRelayCreds(path: b.credPath.get(), password: b.credPassword.get()))
elif b.credPath.isSome() and b.credPassword.isNone():
return err("RLN Relay Credential Password is not specified but path is")
elif b.credPath.isNone() and b.credPassword.isSome():
return err("RLN Relay Credential Path is not specified but password is")
else:
none(RlnRelayCreds)
if b.dynamic.isNone():
return err("rlnRelay.dynamic is not specified")
if b.ethClientAddress.get("") == "":
return err("rlnRelay.ethClientAddress is not specified")
if b.ethContractAddress.get("") == "":
return err("rlnRelay.ethContractAddress is not specified")
if b.epochSizeSec.isNone():
return err("rlnRelay.epochSizeSec is not specified")
if b.userMessageLimit.isNone():
return err("rlnRelay.userMessageLimit is not specified")
if b.treePath.isNone():
return err("rlnRelay.treePath is not specified")
return ok(
some(
RlnRelayConf(
chainId: b.chainId.get(),
credIndex: b.credIndex,
creds: creds,
dynamic: b.dynamic.get(),
ethClientAddress: b.ethClientAddress.get(),
ethContractAddress: b.ethContractAddress.get(),
epochSizeSec: b.epochSizeSec.get(),
userMessageLimit: b.userMessageLimit.get(),
treePath: b.treePath.get(),
)
)
)
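The credential checks enforce pairing: a path without a password (or vice versa) fails the build rather than producing half-configured credentials. A sketch of a valid dynamic setup, with placeholder endpoint, contract and chain id:
var rlnBuilder = RlnRelayConfBuilder.init()
rlnBuilder.withEnabled(true)
rlnBuilder.withDynamic(true)
rlnBuilder.withChainId(1) # placeholder
rlnBuilder.withEthClientAddress("https://rpc.example.org") # placeholder
rlnBuilder.withEthContractAddress("0x0000000000000000000000000000000000000000") # placeholder
rlnBuilder.withEpochSizeSec(600)
rlnBuilder.withUserMessageLimit(20)
rlnBuilder.withTreePath("/tmp/rln_tree")
rlnBuilder.withCredPath("./rlnKeystore.json") # must be paired with a password,
rlnBuilder.withCredPassword("password") # otherwise build() returns an err
let rlnConfOpt = rlnBuilder.build().valueOr:
  raiseAssert error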

View File

@ -0,0 +1,74 @@
import chronicles, std/options, results, chronos
import ../waku_conf, ./store_sync_conf_builder
logScope:
topics = "waku conf builder store service"
##################################
## Store Service Config Builder ##
##################################
type StoreServiceConfBuilder* = object
enabled*: Option[bool]
dbMigration*: Option[bool]
dbUrl*: Option[string]
dbVacuum*: Option[bool]
supportV2*: Option[bool]
maxNumDbConnections*: Option[int]
retentionPolicy*: Option[string]
resume*: Option[bool]
storeSyncConf*: StoreSyncConfBuilder
proc init*(T: type StoreServiceConfBuilder): StoreServiceConfBuilder =
StoreServiceConfBuilder(storeSyncConf: StoreSyncConfBuilder.init())
proc withEnabled*(b: var StoreServiceConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withDbMigration*(b: var StoreServiceConfBuilder, dbMigration: bool) =
b.dbMigration = some(dbMigration)
proc withDbUrl*(b: var StoreServiceConfBuilder, dbUrl: string) =
b.dbUrl = some(dbUrl)
proc withDbVacuum*(b: var StoreServiceConfBuilder, dbVacuum: bool) =
b.dbVacuum = some(dbVacuum)
proc withSupportV2*(b: var StoreServiceConfBuilder, supportV2: bool) =
b.supportV2 = some(supportV2)
proc withMaxNumDbConnections*(
b: var StoreServiceConfBuilder, maxNumDbConnections: int
) =
b.maxNumDbConnections = some(maxNumDbConnections)
proc withRetentionPolicy*(b: var StoreServiceConfBuilder, retentionPolicy: string) =
b.retentionPolicy = some(retentionPolicy)
proc withResume*(b: var StoreServiceConfBuilder, resume: bool) =
b.resume = some(resume)
proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string] =
if not b.enabled.get(false):
return ok(none(StoreServiceConf))
if b.dbUrl.get("") == "":
return err "store.dbUrl is not specified"
let storeSyncConf = b.storeSyncConf.build().valueOr:
return err("Store Sync Conf failed to build")
return ok(
some(
StoreServiceConf(
dbMigration: b.dbMigration.get(true),
dbUrl: b.dbUrl.get(),
dbVacuum: b.dbVacuum.get(false),
supportV2: b.supportV2.get(true),
maxNumDbConnections: b.maxNumDbConnections.get(50),
retentionPolicy: b.retentionPolicy.get("time:" & $2.days.seconds),
resume: b.resume.get(false),
storeSyncConf: storeSyncConf,
)
)
)

View File

@ -0,0 +1,51 @@
import chronicles, std/options, results
import ../waku_conf
logScope:
topics = "waku conf builder store sync"
##################################
## Store Sync Config Builder ##
##################################
type StoreSyncConfBuilder* = object
enabled*: Option[bool]
rangeSec*: Option[uint32]
intervalSec*: Option[uint32]
relayJitterSec*: Option[uint32]
proc init*(T: type StoreSyncConfBuilder): StoreSyncConfBuilder =
StoreSyncConfBuilder()
proc withEnabled*(b: var StoreSyncConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withRangeSec*(b: var StoreSyncConfBuilder, rangeSec: uint32) =
b.rangeSec = some(rangeSec)
proc withIntervalSec*(b: var StoreSyncConfBuilder, intervalSec: uint32) =
b.intervalSec = some(intervalSec)
proc withRelayJitterSec*(b: var StoreSyncConfBuilder, relayJitterSec: uint32) =
b.relayJitterSec = some(relayJitterSec)
proc build*(b: StoreSyncConfBuilder): Result[Option[StoreSyncConf], string] =
if not b.enabled.get(false):
return ok(none(StoreSyncConf))
if b.rangeSec.isNone():
return err "storeSync.rangeSec is not specified"
if b.intervalSec.isNone():
return err "storeSync.intervalSec is not specified"
if b.relayJitterSec.isNone():
return err "storeSync.relayJitterSec is not specified"
return ok(
some(
StoreSyncConf(
rangeSec: b.rangeSec.get(),
intervalSec: b.intervalSec.get(),
relayJitterSec: b.relayJitterSec.get(),
)
)
)

View File

@ -0,0 +1,649 @@
import
libp2p/crypto/crypto,
libp2p/multiaddress,
std/[net, options, sequtils, strutils],
chronicles,
chronos,
results
import
../waku_conf,
../networks_config,
../../common/logging,
../../common/utils/parse_size_units,
../../waku_enr/capabilities
import
./filter_service_conf_builder,
./store_sync_conf_builder,
./store_service_conf_builder,
./rest_server_conf_builder,
./dns_discovery_conf_builder,
./discv5_conf_builder,
./web_socket_conf_builder,
./metrics_server_conf_builder,
./rln_relay_conf_builder
logScope:
topics = "waku conf builder"
type MaxMessageSizeKind* = enum
mmskNone
mmskStr
mmskInt
type MaxMessageSize* = object
case kind*: MaxMessageSizeKind
of mmskNone:
discard
of mmskStr:
str*: string
of mmskInt:
bytes*: uint64
## `WakuConfBuilder` is a convenient tool to accumulate
## Config parameters to build a `WakuConfig`.
## It provides some type conversion, as well as applying
## defaults in an agnostic manner (for any usage of Waku node)
#
# TODO: A sub-protocol builder (eg `StoreServiceConfBuilder`)
# is better defined in the protocol module (eg store),
# applying good defaults from that protocol's PoV and making the
# decision when the dev must specify a value vs when a default
# is fine to have.
#
# TODO: Add defaults to most values so that when a developer uses
# the builder, it works out-of-the-box
type WakuConfBuilder* = object
nodeKey: Option[crypto.PrivateKey]
clusterId: Option[uint16]
numShardsInNetwork: Option[uint32]
shards: Option[seq[uint16]]
protectedShards: Option[seq[ProtectedShard]]
contentTopics: Option[seq[string]]
# Conf builders
dnsDiscoveryConf*: DnsDiscoveryConfBuilder
discv5Conf*: Discv5ConfBuilder
filterServiceConf*: FilterServiceConfBuilder
metricsServerConf*: MetricsServerConfBuilder
restServerConf*: RestServerConfBuilder
rlnRelayConf*: RlnRelayConfBuilder
storeServiceConf*: StoreServiceConfBuilder
webSocketConf*: WebSocketConfBuilder
# End conf builders
relay: Option[bool]
lightPush: Option[bool]
peerExchange: Option[bool]
storeSync: Option[bool]
relayPeerExchange: Option[bool]
# TODO: move within a relayConf
rendezvous: Option[bool]
discv5Only: Option[bool]
clusterConf: Option[ClusterConf]
staticNodes: seq[string]
remoteStoreNode: Option[string]
remoteLightPushNode: Option[string]
remoteFilterNode: Option[string]
remotePeerExchangeNode: Option[string]
maxMessageSize: MaxMessageSize
logLevel: Option[logging.LogLevel]
logFormat: Option[logging.LogFormat]
natStrategy: Option[string]
p2pTcpPort: Option[Port]
p2pListenAddress: Option[IpAddress]
portsShift: Option[uint16]
dns4DomainName: Option[string]
extMultiAddrs: seq[string]
extMultiAddrsOnly: Option[bool]
dnsAddrs: Option[bool]
dnsAddrsNameServers: seq[IpAddress]
peerPersistence: Option[bool]
peerStoreCapacity: Option[int]
maxConnections: Option[int]
colocationLimit: Option[int]
agentString: Option[string]
rateLimits: Option[seq[string]]
maxRelayPeers: Option[int]
relayShardedPeerManagement: Option[bool]
relayServiceRatio: Option[string]
circuitRelayClient: Option[bool]
keepAlive: Option[bool]
p2pReliability: Option[bool]
proc init*(T: type WakuConfBuilder): WakuConfBuilder =
WakuConfBuilder(
dnsDiscoveryConf: DnsDiscoveryConfBuilder.init(),
discv5Conf: Discv5ConfBuilder.init(),
filterServiceConf: FilterServiceConfBuilder.init(),
metricsServerConf: MetricsServerConfBuilder.init(),
restServerConf: RestServerConfBuilder.init(),
rlnRelayConf: RlnRelayConfBuilder.init(),
storeServiceConf: StoreServiceConfBuilder.init(),
webSocketConf: WebSocketConfBuilder.init(),
)
proc withClusterConf*(b: var WakuConfBuilder, clusterConf: ClusterConf) =
b.clusterConf = some(clusterConf)
proc withNodeKey*(b: var WakuConfBuilder, nodeKey: crypto.PrivateKey) =
b.nodeKey = some(nodeKey)
proc withClusterId*(b: var WakuConfBuilder, clusterId: uint16) =
b.clusterId = some(clusterId)
proc withNumShardsInNetwork*(b: var WakuConfBuilder, numShardsInNetwork: uint32) =
b.numShardsInNetwork = some(numShardsInNetwork)
proc withShards*(b: var WakuConfBuilder, shards: seq[uint16]) =
b.shards = some(shards)
proc withProtectedShards*(
b: var WakuConfBuilder, protectedShards: seq[ProtectedShard]
) =
b.protectedShards = some(protectedShards)
proc withContentTopics*(b: var WakuConfBuilder, contentTopics: seq[string]) =
b.contentTopics = some(contentTopics)
proc withRelay*(b: var WakuConfBuilder, relay: bool) =
b.relay = some(relay)
proc withLightPush*(b: var WakuConfBuilder, lightPush: bool) =
b.lightPush = some(lightPush)
proc withStoreSync*(b: var WakuConfBuilder, storeSync: bool) =
b.storeSync = some(storeSync)
proc withPeerExchange*(b: var WakuConfBuilder, peerExchange: bool) =
b.peerExchange = some(peerExchange)
proc withRelayPeerExchange*(b: var WakuConfBuilder, relayPeerExchange: bool) =
b.relayPeerExchange = some(relayPeerExchange)
proc withRendezvous*(b: var WakuConfBuilder, rendezvous: bool) =
b.rendezvous = some(rendezvous)
proc withRemoteStoreNode*(b: var WakuConfBuilder, remoteStoreNode: string) =
b.remoteStoreNode = some(remoteStoreNode)
proc withRemoteLightPushNode*(b: var WakuConfBuilder, remoteLightPushNode: string) =
b.remoteLightPushNode = some(remoteLightPushNode)
proc withRemoteFilterNode*(b: var WakuConfBuilder, remoteFilterNode: string) =
b.remoteFilterNode = some(remoteFilterNode)
proc withRemotePeerExchangeNode*(
b: var WakuConfBuilder, remotePeerExchangeNode: string
) =
b.remotePeerExchangeNode = some(remotePeerExchangeNode)
proc withDnsAddrs*(b: var WakuConfBuilder, dnsAddrs: bool) =
b.dnsAddrs = some(dnsAddrs)
proc withPeerPersistence*(b: var WakuConfBuilder, peerPersistence: bool) =
b.peerPersistence = some(peerPersistence)
proc withPeerStoreCapacity*(b: var WakuConfBuilder, peerStoreCapacity: int) =
b.peerStoreCapacity = some(peerStoreCapacity)
proc withMaxConnections*(b: var WakuConfBuilder, maxConnections: int) =
b.maxConnections = some(maxConnections)
proc withDnsAddrsNameServers*(
b: var WakuConfBuilder, dnsAddrsNameServers: seq[IpAddress]
) =
b.dnsAddrsNameServers = concat(b.dnsAddrsNameServers, dnsAddrsNameServers)
proc withLogLevel*(b: var WakuConfBuilder, logLevel: logging.LogLevel) =
b.logLevel = some(logLevel)
proc withLogFormat*(b: var WakuConfBuilder, logFormat: logging.LogFormat) =
b.logFormat = some(logFormat)
proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: Port) =
b.p2pTcpPort = some(p2pTcpPort)
proc withP2pTcpPort*(b: var WakuConfBuilder, p2pTcpPort: uint16) =
b.p2pTcpPort = some(Port(p2pTcpPort))
proc withPortsShift*(b: var WakuConfBuilder, portsShift: uint16) =
b.portsShift = some(portsShift)
proc withP2pListenAddress*(b: var WakuConfBuilder, p2pListenAddress: IpAddress) =
b.p2pListenAddress = some(p2pListenAddress)
proc withExtMultiAddrsOnly*(b: var WakuConfBuilder, extMultiAddrsOnly: bool) =
b.extMultiAddrsOnly = some(extMultiAddrsOnly)
proc withDns4DomainName*(b: var WakuConfBuilder, dns4DomainName: string) =
b.dns4DomainName = some(dns4DomainName)
proc withNatStrategy*(b: var WakuConfBuilder, natStrategy: string) =
b.natStrategy = some(natStrategy)
proc withAgentString*(b: var WakuConfBuilder, agentString: string) =
b.agentString = some(agentString)
proc withColocationLimit*(b: var WakuConfBuilder, colocationLimit: int) =
b.colocationLimit = some(colocationLimit)
proc withRateLimits*(b: var WakuConfBuilder, rateLimits: seq[string]) =
b.rateLimits = some(rateLimits)
proc withMaxRelayPeers*(b: var WakuConfBuilder, maxRelayPeers: int) =
b.maxRelayPeers = some(maxRelayPeers)
proc withRelayServiceRatio*(b: var WakuConfBuilder, relayServiceRatio: string) =
b.relayServiceRatio = some(relayServiceRatio)
proc withCircuitRelayClient*(b: var WakuConfBuilder, circuitRelayClient: bool) =
b.circuitRelayClient = some(circuitRelayClient)
proc withRelayShardedPeerManagement*(
b: var WakuConfBuilder, relayShardedPeerManagement: bool
) =
b.relayShardedPeerManagement = some(relayShardedPeerManagement)
proc withKeepAlive*(b: var WakuConfBuilder, keepAlive: bool) =
b.keepAlive = some(keepAlive)
proc withP2pReliability*(b: var WakuConfBuilder, p2pReliability: bool) =
b.p2pReliability = some(p2pReliability)
proc withExtMultiAddrs*(builder: var WakuConfBuilder, extMultiAddrs: seq[string]) =
builder.extMultiAddrs = concat(builder.extMultiAddrs, extMultiAddrs)
proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSizeBytes: uint64) =
builder.maxMessageSize = MaxMessageSize(kind: mmskInt, bytes: maxMessageSizeBytes)
proc withMaxMessageSize*(builder: var WakuConfBuilder, maxMessageSize: string) =
builder.maxMessageSize = MaxMessageSize(kind: mmskStr, str: maxMessageSize)
proc withStaticNodes*(builder: var WakuConfBuilder, staticNodes: seq[string]) =
builder.staticNodes = concat(builder.staticNodes, staticNodes)
proc nodeKey(
builder: WakuConfBuilder, rng: ref HmacDrbgContext
): Result[crypto.PrivateKey, string] =
if builder.nodeKey.isSome():
return ok(builder.nodeKey.get())
else:
warn "missing node key, generating new set"
let nodeKey = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
error "Failed to generate key", error = error
return err("Failed to generate key: " & $error)
return ok(nodeKey)
proc applyClusterConf(builder: var WakuConfBuilder) =
# Apply cluster conf, overrides most values passed individually
# If you want to tweak values, don't use clusterConf
if builder.clusterConf.isNone:
return
let clusterConf = builder.clusterConf.get()
if builder.clusterId.isSome():
warn "Cluster id was provided alongside a cluster conf",
used = clusterConf.clusterId, discarded = builder.clusterId.get()
builder.clusterId = some(clusterConf.clusterId)
# Apply relay parameters
if builder.relay.get(false) and clusterConf.rlnRelay:
if builder.rlnRelayConf.enabled.isSome():
warn "RLN Relay was provided alongside a cluster conf",
used = clusterConf.rlnRelay, discarded = builder.rlnRelayConf.enabled
builder.rlnRelayConf.withEnabled(true)
if builder.rlnRelayConf.ethContractAddress.get("") != "":
warn "RLN Relay ETH Contract Address was provided alongside a cluster conf",
used = clusterConf.rlnRelayEthContractAddress.string,
discarded = builder.rlnRelayConf.ethContractAddress.get().string
builder.rlnRelayConf.withEthContractAddress(clusterConf.rlnRelayEthContractAddress)
if builder.rlnRelayConf.chainId.isSome():
warn "RLN Relay Chain Id was provided alongside a cluster conf",
used = clusterConf.rlnRelayChainId, discarded = builder.rlnRelayConf.chainId
builder.rlnRelayConf.withChainId(clusterConf.rlnRelayChainId)
if builder.rlnRelayConf.dynamic.isSome():
warn "RLN Relay Dynamic was provided alongside a cluster conf",
used = clusterConf.rlnRelayDynamic, discarded = builder.rlnRelayConf.dynamic
builder.rlnRelayConf.withDynamic(clusterConf.rlnRelayDynamic)
if builder.rlnRelayConf.epochSizeSec.isSome():
warn "RLN Epoch Size in Seconds was provided alongside a cluster conf",
used = clusterConf.rlnEpochSizeSec,
discarded = builder.rlnRelayConf.epochSizeSec
builder.rlnRelayConf.withEpochSizeSec(clusterConf.rlnEpochSizeSec)
if builder.rlnRelayConf.userMessageLimit.isSome():
warn "RLN Relay Dynamic was provided alongside a cluster conf",
used = clusterConf.rlnRelayUserMessageLimit,
discarded = builder.rlnRelayConf.userMessageLimit
builder.rlnRelayConf.withUserMessageLimit(clusterConf.rlnRelayUserMessageLimit)
# End Apply relay parameters
case builder.maxMessageSize.kind
of mmskNone:
discard
of mmskStr, mmskInt:
warn "Max Message Size was provided alongside a cluster conf",
used = clusterConf.maxMessageSize, discarded = $builder.maxMessageSize
builder.withMaxMessageSize(parseCorrectMsgSize(clusterConf.maxMessageSize))
if builder.numShardsInNetwork.isSome():
warn "Num Shards In Network was provided alongside a cluster conf",
used = clusterConf.numShardsInNetwork, discarded = builder.numShardsInNetwork
builder.numShardsInNetwork = some(clusterConf.numShardsInNetwork)
if clusterConf.discv5Discovery:
if builder.discv5Conf.enabled.isNone:
builder.discv5Conf.withEnabled(clusterConf.discv5Discovery)
if builder.discv5Conf.bootstrapNodes.len == 0 and
clusterConf.discv5BootstrapNodes.len > 0:
warn "Discv5 Boostrap nodes were provided alongside a cluster conf",
used = clusterConf.discv5BootstrapNodes,
discarded = builder.discv5Conf.bootstrapNodes
builder.discv5Conf.withBootstrapNodes(clusterConf.discv5BootstrapNodes)
proc build*(
builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng()
): Result[WakuConf, string] =
## Returns a WakuConf that contains all mandatory parameters.
## Applies some sane defaults that are applicable across any usage
## of libwaku. It aims to be agnostic, so it does not apply a
## default when the choice is opinionated.
applyClusterConf(builder)
let relay =
if builder.relay.isSome():
builder.relay.get()
else:
warn "whether to mount relay is not specified, defaulting to not mounting"
false
let lightPush =
if builder.lightPush.isSome():
builder.lightPush.get()
else:
warn "whether to mount lightPush is not specified, defaulting to not mounting"
false
let peerExchange =
if builder.peerExchange.isSome():
builder.peerExchange.get()
else:
warn "whether to mount peerExchange is not specified, defaulting to not mounting"
false
let storeSync =
if builder.storeSync.isSome():
builder.storeSync.get()
else:
warn "whether to mount storeSync is not specified, defaulting to not mounting"
false
let rendezvous =
if builder.rendezvous.isSome():
builder.rendezvous.get()
else:
warn "whether to mount rendezvous is not specified, defaulting to not mounting"
false
let relayPeerExchange = builder.relayPeerExchange.get(false)
let nodeKey = ?nodeKey(builder, rng)
let clusterId =
if builder.clusterId.isNone():
# TODO: ClusterId should never be defaulted, instead, presets
# should be defined and used
warn("Cluster Id was not specified, defaulting to 0")
0.uint16
else:
builder.clusterId.get()
let numShardsInNetwork =
if builder.numShardsInNetwork.isSome():
builder.numShardsInNetwork.get()
else:
warn "Number of shards in network not specified, defaulting to zero (improve is wip)"
0
let shards =
if builder.shards.isSome():
builder.shards.get()
else:
warn "shards not specified, defaulting to all shards in network"
# TODO: conversion should not be needed; also guard against
# underflow when numShardsInNetwork is 0
let upperShard: uint16 = uint16(numShardsInNetwork - 1)
toSeq(0.uint16 .. upperShard)
let protectedShards = builder.protectedShards.get(@[])
let maxMessageSizeBytes =
case builder.maxMessageSize.kind
of mmskInt:
builder.maxMessageSize.bytes
of mmskStr:
?parseMsgSize(builder.maxMessageSize.str)
else:
warn "Max Message Size not specified, defaulting to 150KiB"
parseCorrectMsgSize("150KiB")
let contentTopics = builder.contentTopics.get(@[])
# Build sub-configs
let discv5Conf = builder.discv5Conf.build().valueOr:
return err("Discv5 Conf building failed: " & $error)
let dnsDiscoveryConf = builder.dnsDiscoveryConf.build().valueOr:
return err("DNS Discovery Conf building failed: " & $error)
let filterServiceConf = builder.filterServiceConf.build().valueOr:
return err("Filter Service Conf building failed: " & $error)
let metricsServerConf = builder.metricsServerConf.build().valueOr:
return err("Metrics Server Conf building failed: " & $error)
let restServerConf = builder.restServerConf.build().valueOr:
return err("REST Server Conf building failed: " & $error)
let rlnRelayConf = builder.rlnRelayConf.build().valueOr:
return err("RLN Relay Conf building failed: " & $error)
let storeServiceConf = builder.storeServiceConf.build().valueOr:
return err("Store Conf building failed: " & $error)
let webSocketConf = builder.webSocketConf.build().valueOr:
return err("WebSocket Conf building failed: " & $error)
# End - Build sub-configs
let logLevel =
if builder.logLevel.isSome():
builder.logLevel.get()
else:
warn "Log Level not specified, defaulting to INFO"
logging.LogLevel.INFO
let logFormat =
if builder.logFormat.isSome():
builder.logFormat.get()
else:
warn "Log Format not specified, defaulting to TEXT"
logging.LogFormat.TEXT
let natStrategy =
if builder.natStrategy.isSome():
builder.natStrategy.get()
else:
warn "Nat Strategy is not specified, defaulting to none"
"none"
let p2pTcpPort =
if builder.p2pTcpPort.isSome():
builder.p2pTcpPort.get()
else:
warn "P2P Listening TCP Port is not specified, listening on 60000"
60000.Port
let p2pListenAddress =
if builder.p2pListenAddress.isSome():
builder.p2pListenAddress.get()
else:
warn "P2P listening address not specified, listening on 0.0.0.0"
(static parseIpAddress("0.0.0.0"))
let portsShift =
if builder.portsShift.isSome():
builder.portsShift.get()
else:
warn "Ports Shift is not specified, defaulting to 0"
0.uint16
let dns4DomainName =
if builder.dns4DomainName.isSome():
let d = builder.dns4DomainName.get()
if d.string != "":
some(d)
else:
none(string)
else:
none(string)
var extMultiAddrs: seq[MultiAddress] = @[]
for s in builder.extMultiAddrs:
let m = MultiAddress.init(s).valueOr:
return err("Invalid multiaddress provided: " & s)
extMultiAddrs.add(m)
let extMultiAddrsOnly =
if builder.extMultiAddrsOnly.isSome():
builder.extMultiAddrsOnly.get()
else:
warn "Whether to only announce external multiaddresses is not specified, defaulting to false"
false
let dnsAddrs =
if builder.dnsAddrs.isSome():
builder.dnsAddrs.get()
else:
warn "Whether to resolve DNS multiaddresses was not specified, defaulting to false."
false
let dnsAddrsNameServers =
if builder.dnsAddrsNameServers.len != 0:
builder.dnsAddrsNameServers
else:
warn "DNS name servers IPs not provided, defaulting to Cloudflare's."
@[static parseIpAddress("1.1.1.1"), static parseIpAddress("1.0.0.1")]
let peerPersistence =
if builder.peerPersistence.isSome():
builder.peerPersistence.get()
else:
warn "Peer persistence not specified, defaulting to false"
false
let maxConnections =
if builder.maxConnections.isSome():
builder.maxConnections.get()
else:
warn "Max Connections was not specified, defaulting to 300"
300
# TODO: Do the git version thing here
let agentString = builder.agentString.get("nwaku")
# TODO: use `DefaultColocationLimit`. the user of this value should
# probably be defining a config object
let colocationLimit = builder.colocationLimit.get(5)
let rateLimits = builder.rateLimits.get(newSeq[string](0))
# TODO: is there a strategy for experimental features? delete vs promote
let relayShardedPeerManagement = builder.relayShardedPeerManagement.get(false)
let wakuFlags = CapabilitiesBitfield.init(
lightpush = lightPush,
filter = filterServiceConf.isSome,
store = storeServiceConf.isSome,
relay = relay,
sync = storeServiceConf.isSome() and storeServiceConf.get().storeSyncConf.isSome,
)
let wakuConf = WakuConf(
# confs
storeServiceConf: storeServiceConf,
filterServiceConf: filterServiceConf,
discv5Conf: discv5Conf,
rlnRelayConf: rlnRelayConf,
metricsServerConf: metricsServerConf,
restServerConf: restServerConf,
dnsDiscoveryConf: dnsDiscoveryConf,
# end confs
nodeKey: nodeKey,
clusterId: clusterId,
numShardsInNetwork: numShardsInNetwork,
contentTopics: contentTopics,
shards: shards,
protectedShards: protectedShards,
relay: relay,
lightPush: lightPush,
peerExchange: peerExchange,
rendezvous: rendezvous,
remoteStoreNode: builder.remoteStoreNode,
remoteLightPushNode: builder.remoteLightPushNode,
remoteFilterNode: builder.remoteFilterNode,
remotePeerExchangeNode: builder.remotePeerExchangeNode,
relayPeerExchange: relayPeerExchange,
maxMessageSizeBytes: maxMessageSizeBytes,
logLevel: logLevel,
logFormat: logFormat,
# TODO: Separate builders
networkConf: NetworkConfig(
natStrategy: natStrategy,
p2pTcpPort: p2pTcpPort,
dns4DomainName: dns4DomainName,
p2pListenAddress: p2pListenAddress,
extMultiAddrs: extMultiAddrs,
extMultiAddrsOnly: extMultiAddrsOnly,
),
portsShift: portsShift,
webSocketConf: webSocketConf,
dnsAddrs: dnsAddrs,
dnsAddrsNameServers: dnsAddrsNameServers,
peerPersistence: peerPersistence,
peerStoreCapacity: builder.peerStoreCapacity,
maxConnections: maxConnections,
agentString: agentString,
colocationLimit: colocationLimit,
maxRelayPeers: builder.maxRelayPeers,
relayServiceRatio: builder.relayServiceRatio.get("60:40"),
rateLimits: rateLimits,
circuitRelayClient: builder.circuitRelayClient.get(false),
keepAlive: builder.keepAlive.get(true),
staticNodes: builder.staticNodes,
relayShardedPeerManagement: relayShardedPeerManagement,
p2pReliability: builder.p2pReliability.get(false),
wakuFlags: wakuFlags,
)
?wakuConf.validate()
return ok(wakuConf)
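End-to-end, the builder logs a warn for every field it defaults and fails fast when a sub-builder rejects its inputs. A minimal sketch of the flow (values illustrative; `validate()` may impose further requirements not shown here):
var builder = WakuConfBuilder.init()
builder.withClusterId(0)
builder.withNumShardsInNetwork(8)
builder.withRelay(true)
builder.withMaxMessageSize("150KiB")

let wakuConf = builder.build().valueOr: # warns once per defaulted field
  raiseAssert "failed to build WakuConf: " & error
doAssert wakuConf.maxConnections == 300 # agnostic default applied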

View File

@ -0,0 +1,68 @@
import chronicles, std/[net, options], results
import ../network_conf
logScope:
topics = "waku conf builder websocket"
##############################
## WebSocket Config Builder ##
##############################
type WebSocketConfBuilder* = object
enabled*: Option[bool]
webSocketPort*: Option[Port]
secureEnabled*: Option[bool]
keyPath*: Option[string]
certPath*: Option[string]
proc init*(T: type WebSocketConfBuilder): WebSocketConfBuilder =
WebSocketConfBuilder()
proc withEnabled*(b: var WebSocketConfBuilder, enabled: bool) =
b.enabled = some(enabled)
proc withSecureEnabled*(b: var WebSocketConfBuilder, secureEnabled: bool) =
b.secureEnabled = some(secureEnabled)
proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: Port) =
b.webSocketPort = some(webSocketPort)
proc withWebSocketPort*(b: var WebSocketConfBuilder, webSocketPort: uint16) =
b.webSocketPort = some(Port(webSocketPort))
proc withKeyPath*(b: var WebSocketConfBuilder, keyPath: string) =
b.keyPath = some(keyPath)
proc withCertPath*(b: var WebSocketConfBuilder, certPath: string) =
b.certPath = some(certPath)
proc build*(b: WebSocketConfBuilder): Result[Option[WebSocketConf], string] =
if not b.enabled.get(false):
return ok(none(WebSocketConf))
if b.webSocketPort.isNone():
return err("websocket.port is not specified")
if not b.secureEnabled.get(false):
return ok(
some(
WebSocketConf(
port: b.websocketPort.get(), secureConf: none(WebSocketSecureConf)
)
)
)
if b.keyPath.get("") == "":
return err("WebSocketSecure enabled but key path is not specified")
if b.certPath.get("") == "":
return err("WebSocketSecure enabled but cert path is not specified")
return ok(
some(
WebSocketConf(
port: b.webSocketPort.get(),
secureConf: some(
WebSocketSecureConf(keyPath: b.keyPath.get(), certPath: b.certPath.get())
),
)
)
)
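As with the other builders, secure websockets are all-or-nothing: enabling `secureEnabled` without both a key path and a cert path is a build error. A short sketch with placeholder paths:
var wsBuilder = WebSocketConfBuilder.init()
wsBuilder.withEnabled(true)
wsBuilder.withWebSocketPort(8000'u16)
wsBuilder.withSecureEnabled(true)
wsBuilder.withKeyPath("/etc/waku/ws.key") # placeholder; required together
wsBuilder.withCertPath("/etc/waku/ws.crt") # with the key once secure is on
let wsConfOpt = wsBuilder.build().valueOr:
  raiseAssert error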

View File

@ -1,6 +1,7 @@
import
std/[strutils, strformat],
results,
chronicles,
chronos,
regex,
confutils,
@ -14,17 +15,26 @@ import
nimcrypto/utils,
secp256k1,
json
import
./waku_conf,
./conf_builder/conf_builder,
./networks_config,
../common/confutils/envvar/defs as confEnvvarDefs,
../common/confutils/envvar/std/net as confEnvvarNet,
../common/logging,
../waku_enr,
../node/peer_manager,
../waku_core/topics/pubsub_topic
../waku_core/topics/pubsub_topic,
../../tools/rln_keystore_generator/rln_keystore_generator,
../../tools/rln_db_inspector/rln_db_inspector
include ../waku_core/message/default_values
export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet
export confTomlDefs, confTomlNet, confEnvvarDefs, confEnvvarNet, ProtectedShard
logScope:
topics = "waku external config"
# Git version in git describe format (defined at compile time)
const git_version* {.strdefine.} = "n/a"
@ -33,10 +43,6 @@ type ConfResult*[T] = Result[T, string]
type EthRpcUrl* = distinct string
type ProtectedShard* = object
shard*: uint16
key*: secp256k1.SkPublicKey
type StartUpCommand* = enum
noCommand # default, runs waku
generateRlnKeystore # generates a new RLN keystore
@ -148,7 +154,7 @@ type WakuNodeConf* = object
## General node config
preset* {.
desc:
"Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1).",
"Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). Overrides other values.",
defaultValue: "",
name: "preset"
.}: string
@ -196,7 +202,7 @@ type WakuNodeConf* = object
.}: seq[string]
extMultiAddrsOnly* {.
desc: "Only announce external multiaddresses",
desc: "Only announce external multiaddresses setup with --ext-multiaddr",
defaultValue: false,
name: "ext-multiaddr-only"
.}: bool
@ -300,31 +306,12 @@ hence would have reachability issues.""",
name: "rln-relay-dynamic"
.}: bool
rlnRelayIdKey* {.
desc: "Rln relay identity secret key as a Hex string",
defaultValue: "",
name: "rln-relay-id-key"
.}: string
rlnRelayIdCommitmentKey* {.
desc: "Rln relay identity commitment key as a Hex string",
defaultValue: "",
name: "rln-relay-id-commitment-key"
.}: string
rlnRelayTreePath* {.
desc: "Path to the RLN merkle tree sled db (https://github.com/spacejam/sled)",
defaultValue: "",
name: "rln-relay-tree-path"
.}: string
rlnRelayBandwidthThreshold* {.
desc:
"Message rate in bytes/sec after which verification of proofs should happen.",
defaultValue: 0, # to maintain backwards compatibility
name: "rln-relay-bandwidth-threshold"
.}: int
staticnodes* {.
desc: "Peer multiaddr to directly connect with. Argument may be repeated.",
name: "staticnode"
@ -372,7 +359,7 @@ hence would have reachability issues.""",
.}: bool
legacyStore* {.
desc: "Enable/disable waku store legacy mode",
desc: "Enable/disable support of Waku Store v2 as a service",
defaultValue: true,
name: "legacy-store"
.}: bool
@ -432,28 +419,20 @@ hence would have reachability issues.""",
desc: "Interval between store sync attempts. In seconds.",
defaultValue: 300, # 5 minutes
name: "store-sync-interval"
.}: int64
.}: uint32
storeSyncRange* {.
desc: "Amount of time to sync. In seconds.",
defaultValue: 3600, # 1 hours
name: "store-sync-range"
.}: int64
.}: uint32
storeSyncRelayJitter* {.
hidden,
desc: "Time offset to account for message propagation jitter. In seconds.",
defaultValue: 20,
name: "store-sync-relay-jitter"
.}: int64
storeSyncMaxPayloadSize* {.
hidden,
desc:
"Max size in bytes of the inner negentropy payload. Cannot be less than 5K, 0 is unlimited.",
defaultValue: 0,
name: "store-sync-max-payload-size"
.}: int64
.}: uint32
## Filter config
filter* {.
@ -471,7 +450,7 @@ hence would have reachability issues.""",
"Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.",
defaultValue: 300, # 5 minutes
name: "filter-subscription-timeout"
.}: int64
.}: uint16
filterMaxPeersToServe* {.
desc: "Maximum number of peers to serve at a time. Only for v2 filter protocol.",
@ -594,9 +573,9 @@ with the drawback of consuming some more bandwidth.""",
## Discovery v5 config
discv5Discovery* {.
desc: "Enable discovering nodes via Node Discovery v5.",
defaultValue: false,
defaultValue: none(bool),
name: "discv5-discovery"
.}: bool
.}: Option[bool]
discv5UdpPort* {.
desc: "Listening UDP port for Node Discovery v5.",
@ -774,8 +753,7 @@ proc completeCmdArg*(T: type IpAddress, val: string): seq[string] =
return @[]
proc defaultListenAddress*(): IpAddress =
# TODO: How should we select between IPv4 and IPv6
# Maybe there should be a config option for this.
# TODO: Should probably listen on both ipv4 and ipv6 by default.
(static parseIpAddress("0.0.0.0"))
proc defaultColocationLimit*(): int =
@ -884,3 +862,188 @@ proc defaultWakuNodeConf*(): ConfResult[WakuNodeConf] =
return ok(conf)
except CatchableError:
return err("exception in defaultWakuNodeConf: " & getCurrentExceptionMsg())
proc toKeystoreGeneratorConf*(n: WakuNodeConf): RlnKeystoreGeneratorConf =
RlnKeystoreGeneratorConf(
execute: n.execute,
chainId: n.rlnRelayChainId,
ethClientAddress: n.rlnRelayEthClientAddress.string,
ethContractAddress: n.rlnRelayEthContractAddress,
userMessageLimit: n.rlnRelayUserMessageLimit,
ethPrivateKey: n.rlnRelayEthPrivateKey,
credPath: n.rlnRelayCredPath,
credPassword: n.rlnRelayCredPassword,
)
proc toInspectRlnDbConf*(n: WakuNodeConf): InspectRlnDbConf =
return InspectRlnDbConf(treePath: n.rlnRelayTreePath)
proc toClusterConf(
preset: string, clusterId: Option[uint16]
): ConfResult[Option[ClusterConf]] =
var lcPreset = toLowerAscii(preset)
if clusterId.isSome() and clusterId.get() == 1:
warn(
"TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead."
)
lcPreset = "twn"
case lcPreset
of "":
ok(none(ClusterConf))
of "twn":
ok(some(ClusterConf.TheWakuNetworkConf()))
else:
err("Invalid --preset value passed: " & lcPreset)
proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
var b = WakuConfBuilder.init()
b.withLogLevel(n.logLevel)
b.withLogFormat(n.logFormat)
b.rlnRelayConf.withEnabled(n.rlnRelay)
if n.rlnRelayCredPath != "":
b.rlnRelayConf.withCredPath(n.rlnRelayCredPath)
if n.rlnRelayCredPassword != "":
b.rlnRelayConf.withCredPassword(n.rlnRelayCredPassword)
if n.rlnRelayEthClientAddress.string != "":
b.rlnRelayConf.withEthClientAddress(n.rlnRelayEthClientAddress.string)
if n.rlnRelayEthContractAddress != "":
b.rlnRelayConf.withEthContractAddress(n.rlnRelayEthContractAddress)
if n.rlnRelayChainId != 0:
b.rlnRelayConf.withChainId(n.rlnRelayChainId)
b.rlnRelayConf.withUserMessageLimit(n.rlnRelayUserMessageLimit)
b.rlnRelayConf.withEpochSizeSec(n.rlnEpochSizeSec)
if n.rlnRelayCredIndex.isSome():
b.rlnRelayConf.withCredIndex(n.rlnRelayCredIndex.get())
b.rlnRelayConf.withDynamic(n.rlnRelayDynamic)
b.rlnRelayConf.withTreePath(n.rlnRelayTreePath)
if n.maxMessageSize != "":
b.withMaxMessageSize(n.maxMessageSize)
b.withProtectedShards(n.protectedShards)
b.withClusterId(n.clusterId)
let clusterConf = toClusterConf(n.preset, some(n.clusterId)).valueOr:
return err("Error determining cluster from preset: " & $error)
if clusterConf.isSome():
b.withClusterConf(clusterConf.get())
b.withAgentString(n.agentString)
if n.nodeKey.isSome():
b.withNodeKey(n.nodeKey.get())
b.withP2pListenAddress(n.listenAddress)
b.withP2pTcpPort(n.tcpPort)
b.withPortsShift(n.portsShift)
b.withNatStrategy(n.nat)
b.withExtMultiAddrs(n.extMultiAddrs)
b.withExtMultiAddrsOnly(n.extMultiAddrsOnly)
b.withMaxConnections(n.maxConnections)
if n.maxRelayPeers.isSome():
b.withMaxRelayPeers(n.maxRelayPeers.get())
if n.relayServiceRatio != "":
b.withRelayServiceRatio(n.relayServiceRatio)
b.withColocationLimit(n.colocationLimit)
if n.peerStoreCapacity.isSome:
b.withPeerStoreCapacity(n.peerStoreCapacity.get())
b.withPeerPersistence(n.peerPersistence)
b.withDnsAddrs(n.dnsAddrs)
b.withDnsAddrsNameServers(n.dnsAddrsNameServers)
b.withDns4DomainName(n.dns4DomainName)
b.withCircuitRelayClient(n.isRelayClient)
b.withRelay(n.relay)
b.withRelayPeerExchange(n.relayPeerExchange)
b.withRelayShardedPeerManagement(n.relayShardedPeerManagement)
b.withStaticNodes(n.staticNodes)
b.withKeepAlive(n.keepAlive)
if n.numShardsInNetwork != 0:
b.withNumShardsInNetwork(n.numShardsInNetwork)
b.withShards(n.shards)
b.withContentTopics(n.contentTopics)
b.storeServiceConf.withEnabled(n.store)
b.storeServiceConf.withSupportV2(n.legacyStore)
b.storeServiceConf.withRetentionPolicy(n.storeMessageRetentionPolicy)
b.storeServiceConf.withDbUrl(n.storeMessageDbUrl)
b.storeServiceConf.withDbVacuum(n.storeMessageDbVacuum)
b.storeServiceConf.withDbMigration(n.storeMessageDbMigration)
b.storeServiceConf.withMaxNumDbConnections(n.storeMaxNumDbConnections)
b.storeServiceConf.withResume(n.storeResume)
# TODO: can we just use `Option` on the CLI?
if n.storenode != "":
b.withRemoteStoreNode(n.storenode)
if n.filternode != "":
b.withRemoteFilterNode(n.filternode)
if n.lightpushnode != "":
b.withRemoteLightPushNode(n.lightpushnode)
if n.peerExchangeNode != "":
b.withRemotePeerExchangeNode(n.peerExchangeNode)
b.storeServiceConf.storeSyncConf.withEnabled(n.storeSync)
b.storeServiceConf.storeSyncConf.withIntervalSec(n.storeSyncInterval)
b.storeServiceConf.storeSyncConf.withRangeSec(n.storeSyncRange)
b.storeServiceConf.storeSyncConf.withRelayJitterSec(n.storeSyncRelayJitter)
b.filterServiceConf.withEnabled(n.filter)
b.filterServiceConf.withSubscriptionTimeout(n.filterSubscriptionTimeout)
b.filterServiceConf.withMaxPeersToServe(n.filterMaxPeersToServe)
b.filterServiceConf.withMaxCriteria(n.filterMaxCriteria)
b.withLightPush(n.lightpush)
b.withP2pReliability(n.reliabilityEnabled)
b.restServerConf.withEnabled(n.rest)
b.restServerConf.withListenAddress(n.restAddress)
b.restServerConf.withPort(n.restPort)
b.restServerConf.withRelayCacheCapacity(n.restRelayCacheCapacity)
b.restServerConf.withAdmin(n.restAdmin)
b.restServerConf.withAllowOrigin(n.restAllowOrigin)
b.metricsServerConf.withEnabled(n.metricsServer)
b.metricsServerConf.withHttpAddress(n.metricsServerAddress)
b.metricsServerConf.withHttpPort(n.metricsServerPort)
b.metricsServerConf.withLogging(n.metricsLogging)
b.dnsDiscoveryConf.withEnabled(n.dnsDiscovery)
b.dnsDiscoveryConf.withEnrTreeUrl(n.dnsDiscoveryUrl)
b.dnsDiscoveryConf.withNameServers(n.dnsDiscoveryNameServers)
if n.discv5Discovery.isSome():
b.discv5Conf.withEnabled(n.discv5Discovery.get())
b.discv5Conf.withUdpPort(n.discv5UdpPort)
b.discv5Conf.withBootstrapNodes(n.discv5BootstrapNodes)
b.discv5Conf.withEnrAutoUpdate(n.discv5EnrAutoUpdate)
b.discv5Conf.withTableIpLimit(n.discv5TableIpLimit)
b.discv5Conf.withBucketIpLimit(n.discv5BucketIpLimit)
b.discv5Conf.withBitsPerHop(n.discv5BitsPerHop)
b.discv5Conf.withDiscv5Only(n.discv5Only)
b.withPeerExchange(n.peerExchange)
b.withRendezvous(n.rendezvous)
b.webSocketConf.withEnabled(n.websocketSupport)
b.webSocketConf.withWebSocketPort(n.websocketPort)
b.webSocketConf.withSecureEnabled(n.websocketSecureSupport)
b.webSocketConf.withKeyPath(n.websocketSecureKeyPath)
b.webSocketConf.withCertPath(n.websocketSecureCertPath)
b.withRateLimits(n.rateLimits)
return b.build()
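A minimal sketch of the intended call pattern, assuming the usual confutils `load` entry point: the CLI object is parsed once, flattened into the `WakuConf` tree via `toWakuConf`, and only the tree is passed on.

  let nodeConf = WakuNodeConf.load() # parse CLI flags (confutils)
  let wakuConf = toWakuConf(nodeConf).valueOr:
    error "Failed to build configuration", error = error
    quit(QuitFailure)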

View File

@ -4,21 +4,20 @@ import
libp2p/crypto/crypto,
libp2p/multiaddress,
libp2p/nameresolving/dnsresolver,
std/[options, sequtils, strutils, net],
std/[options, sequtils, net],
results
import
./external_config,
../common/utils/nat,
../node/config,
../waku_enr/capabilities,
../node/net_config,
../waku_enr,
../waku_core,
./networks_config
./waku_conf,
./network_conf
proc enrConfiguration*(
conf: WakuNodeConf, netConfig: NetConfig, key: crypto.PrivateKey
conf: WakuConf, netConfig: NetConfig
): Result[enr.Record, string] =
var enrBuilder = EnrBuilder.init(key)
var enrBuilder = EnrBuilder.init(conf.nodeKey)
enrBuilder.withIpAddressAndPorts(
netConfig.enrIp, netConfig.enrPort, netConfig.discv5UdpPort
@ -44,19 +43,12 @@ proc enrConfiguration*(
return ok(record)
proc validateExtMultiAddrs*(vals: seq[string]): Result[seq[MultiAddress], string] =
var multiaddrs: seq[MultiAddress]
for val in vals:
let multiaddr = ?MultiAddress.init(val)
multiaddrs.add(multiaddr)
return ok(multiaddrs)
proc dnsResolve*(
domain: string, conf: WakuNodeConf
domain: string, dnsAddrsNameServers: seq[IpAddress]
): Future[Result[string, string]] {.async.} =
# Use the configured DNS servers
var nameServers: seq[TransportAddress]
for ip in conf.dnsAddrsNameServers:
for ip in dnsAddrsNameServers:
nameServers.add(initTAddress(ip, Port(53))) # Assume all servers use port 53
let dnsResolver = DnsResolver.new(nameServers)
@ -69,14 +61,24 @@ proc dnsResolve*(
else:
return err("Could not resolve IP from DNS: empty response")
proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResult =
# TODO: Reduce number of parameters, can be done once the same is done on Netconfig.init
proc networkConfiguration*(
clusterId: uint16,
conf: NetworkConfig,
discv5Conf: Option[Discv5Conf],
webSocketConf: Option[WebSocketConf],
wakuFlags: CapabilitiesBitfield,
dnsAddrsNameServers: seq[IpAddress],
portsShift: uint16,
clientId: string,
): NetConfigResult =
## `udpPort` is only supplied to satisfy underlying APIs; UDP itself is not
## a supported transport for libp2p traffic.
let natRes = setupNat(
conf.nat,
conf.natStrategy.string,
clientId,
Port(uint16(conf.tcpPort) + conf.portsShift),
Port(uint16(conf.tcpPort) + conf.portsShift),
Port(uint16(conf.p2pTcpPort) + portsShift),
Port(uint16(conf.p2pTcpPort) + portsShift),
)
if natRes.isErr():
return err("failed to setup NAT: " & $natRes.error)
@ -84,15 +86,9 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul
var (extIp, extTcpPort, _) = natRes.get()
let
dns4DomainName =
if conf.dns4DomainName != "":
some(conf.dns4DomainName)
else:
none(string)
discv5UdpPort =
if conf.discv5Discovery:
some(Port(uint16(conf.discv5UdpPort) + conf.portsShift))
if discv5Conf.isSome():
some(Port(uint16(discv5Conf.get().udpPort) + portsShift))
else:
none(Port)
@ -101,34 +97,15 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul
## extPort as well. The following heuristic assumes that, in absence of
## manual config, the external port is the same as the bind port.
extPort =
if (extIp.isSome() or dns4DomainName.isSome()) and extTcpPort.isNone():
some(Port(uint16(conf.tcpPort) + conf.portsShift))
if (extIp.isSome() or conf.dns4DomainName.isSome()) and extTcpPort.isNone():
some(Port(uint16(conf.p2pTcpPort) + portsShift))
else:
extTcpPort
extMultiAddrs =
if (conf.extMultiAddrs.len > 0):
let extMultiAddrsValidationRes = validateExtMultiAddrs(conf.extMultiAddrs)
if extMultiAddrsValidationRes.isErr():
return
err("invalid external multiaddress: " & $extMultiAddrsValidationRes.error)
else:
extMultiAddrsValidationRes.get()
else:
@[]
wakuFlags = CapabilitiesBitfield.init(
lightpush = conf.lightpush,
filter = conf.filter,
store = conf.store,
relay = conf.relay,
sync = conf.storeSync,
)
# Resolve and use DNS domain IP
if dns4DomainName.isSome() and extIp.isNone():
if conf.dns4DomainName.isSome() and extIp.isNone():
try:
let dnsRes = waitFor dnsResolve(conf.dns4DomainName, conf)
let dnsRes = waitFor dnsResolve(conf.dns4DomainName.get(), dnsAddrsNameServers)
if dnsRes.isErr():
return err($dnsRes.error) # Pass error down the stack
@ -138,92 +115,38 @@ proc networkConfiguration*(conf: WakuNodeConf, clientId: string): NetConfigResul
return
err("Could not update extIp to resolved DNS IP: " & getCurrentExceptionMsg())
let (wsEnabled, wsBindPort, wssEnabled) =
if webSocketConf.isSome:
let wsConf = webSocketConf.get()
(true, some(Port(wsConf.port.uint16 + portsShift)), wsConf.secureConf.isSome)
else:
(false, none(Port), false)
# Wrap in none because NetConfig does not have a default constructor
# TODO: We could change bindIp in NetConfig to be something less restrictive
# than IpAddress, which doesn't allow default construction
let netConfigRes = NetConfig.init(
clusterId = conf.clusterId,
bindIp = conf.listenAddress,
bindPort = Port(uint16(conf.tcpPort) + conf.portsShift),
clusterId = clusterId,
bindIp = conf.p2pListenAddress,
bindPort = Port(uint16(conf.p2pTcpPort) + portsShift),
extIp = extIp,
extPort = extPort,
extMultiAddrs = extMultiAddrs,
extMultiAddrs = conf.extMultiAddrs,
extMultiAddrsOnly = conf.extMultiAddrsOnly,
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
wsEnabled = conf.websocketSupport,
wssEnabled = conf.websocketSecureSupport,
dns4DomainName = dns4DomainName,
wsBindPort = wsBindPort,
wsEnabled = wsEnabled,
wssEnabled = wssEnabled,
dns4DomainName = conf.dns4DomainName,
discv5UdpPort = discv5UdpPort,
wakuFlags = some(wakuFlags),
dnsNameServers = conf.dnsAddrsNameServers,
)
return netConfigRes
proc applyPresetConfiguration*(srcConf: WakuNodeConf): Result[WakuNodeConf, string] =
var resConf = srcConf
if resConf.clusterId == 1:
warn(
"TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead."
)
resConf.preset = "twn"
case toLowerAscii(resConf.preset)
of "twn":
let twnClusterConf = ClusterConf.TheWakuNetworkConf()
# Override configuration
resConf.maxMessageSize = twnClusterConf.maxMessageSize
resConf.clusterId = twnClusterConf.clusterId
resConf.rlnRelay = twnClusterConf.rlnRelay
resConf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
resConf.rlnRelayChainId = twnClusterConf.rlnRelayChainId
resConf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
resConf.rlnRelayBandwidthThreshold = twnClusterConf.rlnRelayBandwidthThreshold
resConf.discv5Discovery = twnClusterConf.discv5Discovery
resConf.discv5BootstrapNodes =
resConf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
resConf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
resConf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
resConf.numShardsInNetwork = twnClusterConf.numShardsInNetwork
if resConf.relay:
resConf.rlnRelay = twnClusterConf.rlnRelay
else:
discard
return ok(resConf)
# TODO: numShardsInNetwork should be mandatory with autosharding, and unneeded otherwise
proc getNumShardsInNetwork*(conf: WakuNodeConf): uint32 =
proc getNumShardsInNetwork*(conf: WakuConf): uint32 =
if conf.numShardsInNetwork != 0:
return conf.numShardsInNetwork
# If conf.numShardsInNetwork is not set, use 1024 - the maximum possible as per the static sharding spec
# https://github.com/waku-org/specs/blob/master/standards/core/relay-sharding.md#static-sharding
return uint32(MaxShardIndex + 1)
proc validateShards*(conf: WakuNodeConf): Result[void, string] =
let numShardsInNetwork = getNumShardsInNetwork(conf)
for shard in conf.shards:
if shard >= numShardsInNetwork:
let msg =
"validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
$numShardsInNetwork # fmt doesn't work
error "validateShards failed", error = msg
return err(msg)
return ok()
proc getNodeKey*(
conf: WakuNodeConf, rng: ref HmacDrbgContext = crypto.newRng()
): Result[PrivateKey, string] =
if conf.nodekey.isSome():
return ok(conf.nodekey.get())
warn "missing node key, generating new set"
let key = crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
error "Failed to generate key", error = error
return err("Failed to generate key: " & $error)
return ok(key)

View File

@ -0,0 +1,34 @@
import std/[net, options, strutils]
import results
import libp2p/multiaddress
type WebSocketSecureConf* {.requiresInit.} = object
keyPath*: string
certPath*: string
type WebSocketConf* = object
port*: Port
secureConf*: Option[WebSocketSecureConf]
type NetworkConf* = object
natStrategy*: string # TODO: make enum
p2pTcpPort*: Port
dns4DomainName*: Option[string]
p2pListenAddress*: IpAddress
extMultiAddrs*: seq[MultiAddress]
extMultiAddrsOnly*: bool
webSocketConf*: Option[WebSocketConf]
proc validateNoEmptyStrings(networkConf: NetworkConf): Result[void, string] =
if networkConf.dns4DomainName.isSome() and
isEmptyOrWhiteSpace(networkConf.dns4DomainName.get().string):
return err("dns4DomainName is an empty string, set it to none(string) instead")
if networkConf.webSocketConf.isSome() and
networkConf.webSocketConf.get().secureConf.isSome():
let secureConf = networkConf.webSocketConf.get().secureConf.get()
if isEmptyOrWhiteSpace(secureConf.keyPath):
return err("websocket.secureConf.keyPath is an empty string")
if isEmptyOrWhiteSpace(secureConf.certPath):
return err("websocket.secureConf.certPath is an empty string")
return ok()
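As a sketch of the intent (module-internal, since the validator is not exported): a disabled DNS name or WebSocket transport is expressed with `none`, never with empty strings, so validation passes.

  let netConf = NetworkConf(
    natStrategy: "any",
    p2pTcpPort: Port(60000),
    dns4DomainName: none(string), # not "", empty strings fail validation
    p2pListenAddress: parseIpAddress("0.0.0.0"),
    extMultiAddrs: @[],
    extMultiAddrsOnly: false,
    webSocketConf: none(WebSocketConf),
  )
  doAssert netConf.validateNoEmptyStrings().isOk()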

View File

@ -1,15 +1,17 @@
{.push raises: [].}
# TODO: Rename this type to match file name
type ClusterConf* = object
maxMessageSize*: string
maxMessageSize*: string # TODO: static convert to a uint64
clusterId*: uint16
rlnRelay*: bool
rlnRelayEthContractAddress*: string
rlnRelayChainId*: uint
rlnRelayDynamic*: bool
rlnRelayBandwidthThreshold*: int
rlnEpochSizeSec*: uint64
rlnRelayUserMessageLimit*: uint64
# TODO: should be uint16 like the `shards` parameter
numShardsInNetwork*: uint32
discv5Discovery*: bool
discv5BootstrapNodes*: seq[string]
@ -25,12 +27,10 @@ proc TheWakuNetworkConf*(T: type ClusterConf): ClusterConf =
rlnRelayEthContractAddress: "0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8",
rlnRelayDynamic: true,
rlnRelayChainId: 11155111,
rlnRelayBandwidthThreshold: 0,
rlnEpochSizeSec: 600,
rlnRelayUserMessageLimit: 100,
numShardsInNetwork: 8,
discv5Discovery: true,
# TODO: Why is this part of the conf? eg an edge node would not have this
discv5BootstrapNodes:
@[
"enr:-QESuED0qW1BCmF-oH_ARGPr97Nv767bl_43uoy70vrbah3EaCAdK3Q0iRQ6wkSTTpdrg_dU_NC2ydO8leSlRpBX4pxiAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOTd-h5owwj-cx7xrmbvQKU8CV3Fomfdvcv1MBc-67T5oN0Y3CCdl-DdWRwgiMohXdha3UyDw",

View File

@ -10,7 +10,7 @@ import
import
./internal_config,
./external_config,
./waku_conf,
./builder,
./validator_signed,
../waku_enr/sharding,
@ -35,7 +35,6 @@ import
../node/peer_manager/peer_store/waku_peer_storage,
../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations,
../waku_lightpush_legacy/common,
../common/utils/parse_size_units,
../common/rate_limit/setting,
../common/databases/dburl
@ -56,10 +55,9 @@ proc setupPeerStorage(): Result[Option[WakuPeerStorage], string] =
## Init waku node instance
proc initNode(
conf: WakuNodeConf,
conf: WakuConf,
netConfig: NetConfig,
rng: ref HmacDrbgContext,
nodeKey: crypto.PrivateKey,
record: enr.Record,
peerStore: Option[WakuPeerStorage],
relay: Relay,
@ -86,17 +84,24 @@ proc initNode(
else:
peerStore.get()
let (secureKey, secureCert) =
if conf.webSocketConf.isSome() and conf.webSocketConf.get().secureConf.isSome():
let wssConf = conf.webSocketConf.get().secureConf.get()
(some(wssConf.keyPath), some(wssConf.certPath))
else:
(none(string), none(string))
# Build waku node instance
var builder = WakuNodeBuilder.init()
builder.withRng(rng)
builder.withNodeKey(nodekey)
builder.withNodeKey(conf.nodeKey)
builder.withRecord(record)
builder.withNetworkConfiguration(netConfig)
builder.withPeerStorage(pStorage, capacity = conf.peerStoreCapacity)
builder.withSwitchConfiguration(
maxConnections = some(conf.maxConnections.int),
secureKey = some(conf.websocketSecureKeyPath),
secureCert = some(conf.websocketSecureCertPath),
secureKey = secureKey,
secureCert = secureCert,
nameResolver = dnsResolver,
sendSignedPeerRecord = conf.relayPeerExchange,
# We send our own signed peer record when peer exchange enabled
@ -148,13 +153,13 @@ proc getAutoshards*(
return ok(autoshards)
proc setupProtocols(
node: WakuNode, conf: WakuNodeConf, nodeKey: crypto.PrivateKey
node: WakuNode, conf: WakuConf
): Future[Result[void, string]] {.async.} =
## Setup configured protocols on an existing Waku v2 node.
## Optionally include persistent message storage.
## No protocols are started yet.
if conf.discv5Only:
if conf.discv5Conf.isSome() and conf.discv5Conf.get().discv5Only:
notice "Running node only with Discv5, not mounting additional protocols"
return ok()
@ -167,11 +172,12 @@ proc setupProtocols(
error "Unrecoverable error occurred", error = msg
quit(QuitFailure)
if conf.store:
if conf.legacyStore:
if conf.storeServiceConf.isSome():
let storeServiceConf = conf.storeServiceConf.get()
if storeServiceConf.supportV2:
let archiveDriverRes = waitFor legacy_driver.ArchiveDriver.new(
conf.storeMessageDbUrl, conf.storeMessageDbVacuum, conf.storeMessageDbMigration,
conf.storeMaxNumDbConnections, onFatalErrorAction,
storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration,
storeServiceConf.maxNumDbConnections, onFatalErrorAction,
)
if archiveDriverRes.isErr():
return err("failed to setup legacy archive driver: " & archiveDriverRes.error)
@ -191,26 +197,26 @@ proc setupProtocols(
## So for now, we need to make sure that when legacy store is enabled and we use sqlite
## that we migrate our db according to legacy store's schema to have the extra field
let engineRes = dburl.getDbEngine(conf.storeMessageDbUrl)
let engineRes = dburl.getDbEngine(storeServiceConf.dbUrl)
if engineRes.isErr():
return err("error getting db engine in setupProtocols: " & engineRes.error)
let engine = engineRes.get()
let migrate =
if engine == "sqlite" and conf.legacyStore:
if engine == "sqlite" and storeServiceConf.supportV2:
false
else:
conf.storeMessageDbMigration
storeServiceConf.dbMigration
let archiveDriverRes = waitFor driver.ArchiveDriver.new(
conf.storeMessageDbUrl, conf.storeMessageDbVacuum, migrate,
conf.storeMaxNumDbConnections, onFatalErrorAction,
storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate,
storeServiceConf.maxNumDbConnections, onFatalErrorAction,
)
if archiveDriverRes.isErr():
return err("failed to setup archive driver: " & archiveDriverRes.error)
let retPolicyRes = policy.RetentionPolicy.new(conf.storeMessageRetentionPolicy)
let retPolicyRes = policy.RetentionPolicy.new(storeServiceConf.retentionPolicy)
if retPolicyRes.isErr():
return err("failed to create retention policy: " & retPolicyRes.error)
@ -218,7 +224,7 @@ proc setupProtocols(
if mountArcRes.isErr():
return err("failed to mount waku archive protocol: " & mountArcRes.error)
if conf.legacyStore:
if storeServiceConf.supportV2:
# Store legacy setup
try:
await mountLegacyStore(node, node.rateLimitSettings.getSetting(STOREV2))
@ -232,17 +238,28 @@ proc setupProtocols(
except CatchableError:
return err("failed to mount waku store protocol: " & getCurrentExceptionMsg())
if storeServiceConf.storeSyncConf.isSome():
let confStoreSync = storeServiceConf.storeSyncConf.get()
(
await node.mountStoreSync(
confStoreSync.rangeSec, confStoreSync.intervalSec,
confStoreSync.relayJitterSec,
)
).isOkOr:
return err("failed to mount waku store sync protocol: " & $error)
mountStoreClient(node)
if conf.storenode != "":
let storeNode = parsePeerInfo(conf.storenode)
if conf.remoteStoreNode.isSome():
let storeNode = parsePeerInfo(conf.remoteStoreNode.get())
if storeNode.isOk():
node.peerManager.addServicePeer(storeNode.value, store_common.WakuStoreCodec)
else:
return err("failed to set node waku store peer: " & storeNode.error)
mountLegacyStoreClient(node)
if conf.storenode != "":
let storeNode = parsePeerInfo(conf.storenode)
if conf.remoteStoreNode.isSome():
let storeNode = parsePeerInfo(conf.remoteStoreNode.get())
if storeNode.isOk():
node.peerManager.addServicePeer(
storeNode.value, legacy_common.WakuLegacyStoreCodec
@ -250,7 +267,7 @@ proc setupProtocols(
else:
return err("failed to set node waku legacy store peer: " & storeNode.error)
if conf.store and conf.storeResume:
if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume:
node.setupStoreResume()
# If conf.numShardsInNetwork is not set, use the number of shards configured as numShardsInNetwork
@ -296,14 +313,14 @@ proc setupProtocols(
let shards = confShards & autoShards
if conf.relay:
let parsedMaxMsgSize = parseMsgSize(conf.maxMessageSize).valueOr:
return err("failed to parse 'max-num-bytes-msg-size' param: " & $error)
debug "Setting max message size", num_bytes = parsedMaxMsgSize
debug "Setting max message size", num_bytes = conf.maxMessageSizeBytes
(
await mountRelay(
node, shards, peerExchangeHandler = peerExchangeHandler, int(parsedMaxMsgSize)
node,
shards,
peerExchangeHandler = peerExchangeHandler,
int(conf.maxMessageSizeBytes),
)
).isOkOr:
return err("failed to mount waku relay protocol: " & $error)
@ -330,18 +347,18 @@ proc setupProtocols(
except CatchableError:
return err("failed to mount libp2p ping protocol: " & getCurrentExceptionMsg())
if conf.rlnRelay:
if conf.rlnRelayConf.isSome():
let rlnRelayConf = conf.rlnRelayConf.get()
let rlnConf = WakuRlnConfig(
rlnRelayDynamic: conf.rlnRelayDynamic,
rlnRelayCredIndex: conf.rlnRelayCredIndex,
rlnRelayEthContractAddress: conf.rlnRelayEthContractAddress,
rlnRelayChainId: conf.rlnRelayChainId,
rlnRelayEthClientAddress: string(conf.rlnRelayethClientAddress),
rlnRelayCredPath: conf.rlnRelayCredPath,
rlnRelayCredPassword: conf.rlnRelayCredPassword,
rlnRelayTreePath: conf.rlnRelayTreePath,
rlnRelayUserMessageLimit: conf.rlnRelayUserMessageLimit,
rlnEpochSizeSec: conf.rlnEpochSizeSec,
dynamic: rlnRelayConf.dynamic,
credIndex: rlnRelayConf.credIndex,
ethContractAddress: rlnRelayConf.ethContractAddress,
chainId: rlnRelayConf.chainId,
ethClientAddress: rlnRelayConf.ethClientAddress,
creds: rlnRelayConf.creds,
treePath: rlnRelayConf.treePath,
userMessageLimit: rlnRelayConf.userMessageLimit,
epochSizeSec: rlnRelayConf.epochSizeSec,
onFatalErrorAction: onFatalErrorAction,
)
@ -351,7 +368,7 @@ proc setupProtocols(
return err("failed to mount waku RLN relay protocol: " & getCurrentExceptionMsg())
# NOTE Must be mounted after relay
if conf.lightpush:
if conf.lightPush:
try:
await mountLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))
await mountLegacyLightPush(node, node.rateLimitSettings.getSetting(LIGHTPUSH))
@ -360,8 +377,8 @@ proc setupProtocols(
mountLightPushClient(node)
mountLegacyLightPushClient(node)
if conf.lightpushnode != "":
let lightPushNode = parsePeerInfo(conf.lightpushnode)
if conf.remoteLightPushNode.isSome():
let lightPushNode = parsePeerInfo(conf.remoteLightPushNode.get())
if lightPushNode.isOk():
node.peerManager.addServicePeer(lightPushNode.value, WakuLightPushCodec)
node.peerManager.addServicePeer(lightPushNode.value, WakuLegacyLightPushCodec)
@ -369,21 +386,22 @@ proc setupProtocols(
return err("failed to set node waku lightpush peer: " & lightPushNode.error)
# Filter setup. NOTE Must be mounted after relay
if conf.filter:
if conf.filterServiceConf.isSome():
let confFilter = conf.filterServiceConf.get()
try:
await mountFilter(
node,
subscriptionTimeout = chronos.seconds(conf.filterSubscriptionTimeout),
maxFilterPeers = conf.filterMaxPeersToServe,
maxFilterCriteriaPerPeer = conf.filterMaxCriteria,
subscriptionTimeout = chronos.seconds(confFilter.subscriptionTimeout),
maxFilterPeers = confFilter.maxPeersToServe,
maxFilterCriteriaPerPeer = confFilter.maxCriteria,
rateLimitSetting = node.rateLimitSettings.getSetting(FILTER),
)
except CatchableError:
return err("failed to mount waku filter protocol: " & getCurrentExceptionMsg())
await node.mountFilterClient()
if conf.filternode != "":
let filterNode = parsePeerInfo(conf.filternode)
if conf.remoteFilterNode.isSome():
let filterNode = parsePeerInfo(conf.remoteFilterNode.get())
if filterNode.isOk():
try:
node.peerManager.addServicePeer(filterNode.value, WakuFilterSubscribeCodec)
@ -394,14 +412,6 @@ proc setupProtocols(
else:
return err("failed to set node waku filter peer: " & filterNode.error)
if conf.storeSync:
(
await node.mountStoreSync(
conf.storeSyncRange, conf.storeSyncInterval, conf.storeSyncRelayJitter
)
).isOkOr:
return err("failed to mount waku store sync protocol: " & $error)
# waku peer exchange setup
if conf.peerExchange:
try:
@ -412,8 +422,8 @@ proc setupProtocols(
return
err("failed to mount waku peer-exchange protocol: " & getCurrentExceptionMsg())
if conf.peerExchangeNode != "":
let peerExchangeNode = parsePeerInfo(conf.peerExchangeNode)
if conf.remotePeerExchangeNode.isSome():
let peerExchangeNode = parsePeerInfo(conf.remotePeerExchangeNode.get())
if peerExchangeNode.isOk():
node.peerManager.addServicePeer(peerExchangeNode.value, WakuPeerExchangeCodec)
else:
@ -425,7 +435,7 @@ proc setupProtocols(
## Start node
proc startNode*(
node: WakuNode, conf: WakuNodeConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[]
node: WakuNode, conf: WakuConf, dynamicBootstrapNodes: seq[RemotePeerInfo] = @[]
): Future[Result[void, string]] {.async: (raises: []).} =
## Start a configured node and all mounted protocols.
## Connect to static nodes and start
@ -438,9 +448,9 @@ proc startNode*(
return err("failed to start waku node: " & getCurrentExceptionMsg())
# Connect to configured static nodes
if conf.staticnodes.len > 0:
if conf.staticNodes.len > 0:
try:
await connectToNodes(node, conf.staticnodes, "static")
await connectToNodes(node, conf.staticNodes, "static")
except CatchableError:
return err("failed to connect to static nodes: " & getCurrentExceptionMsg())
@ -453,16 +463,18 @@ proc startNode*(
err("failed to connect to dynamic bootstrap nodes: " & getCurrentExceptionMsg())
# retrieve px peers and add them to the peer store
if conf.peerExchangeNode != "":
if conf.remotePeerExchangeNode.isSome():
var desiredOutDegree = DefaultPXNumPeersReq
if not node.wakuRelay.isNil() and node.wakuRelay.parameters.d.uint64() > 0:
desiredOutDegree = node.wakuRelay.parameters.d.uint64()
(await node.fetchPeerExchangePeers(desiredOutDegree)).isOkOr:
error "error while fetching peers from peer exchange", error = error
# TODO: behavior described by comment is undesired. PX as client should be used in tandem with discv5.
#
# Use px to periodically get peers if discv5 is disabled, as discv5 nodes have their own
# periodic loop to find peers and px returned peers actually come from discv5
if conf.peerExchange and not conf.discv5Discovery:
if conf.peerExchange and not conf.discv5Conf.isSome():
node.startPeerExchangeLoop()
# Start keepalive, if enabled
@ -476,27 +488,21 @@ proc startNode*(
return ok()
proc setupNode*(
conf: WakuNodeConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay
wakuConf: WakuConf, rng: ref HmacDrbgContext = crypto.newRng(), relay: Relay
): Result[WakuNode, string] =
# Use provided key only if corresponding rng is also provided
let key =
if conf.nodeKey.isSome():
conf.nodeKey.get()
else:
warn "missing key, generating new"
crypto.PrivateKey.random(Secp256k1, rng[]).valueOr:
error "Failed to generate key", error = error
return err("Failed to generate key: " & $error)
let netConfig = networkConfiguration(conf, clientId).valueOr:
let netConfig = networkConfiguration(
wakuConf.clusterId, wakuConf.networkConf, wakuConf.discv5Conf,
wakuConf.webSocketConf, wakuConf.wakuFlags, wakuConf.dnsAddrsNameServers,
wakuConf.portsShift, clientId,
).valueOr:
error "failed to create internal config", error = error
return err("failed to create internal config: " & error)
let record = enrConfiguration(conf, netConfig, key).valueOr:
let record = enrConfiguration(wakuConf, netConfig).valueOr:
error "failed to create record", error = error
return err("failed to create record: " & error)
if isClusterMismatched(record, conf.clusterId):
if isClusterMismatched(record, wakuConf.clusterId):
error "cluster id mismatch configured shards"
return err("cluster id mismatch configured shards")
@ -504,21 +510,21 @@ proc setupNode*(
## Peer persistence
var peerStore: Option[WakuPeerStorage]
if conf.peerPersistence:
if wakuConf.peerPersistence:
peerStore = setupPeerStorage().valueOr:
error "Setting up storage failed", error = "failed to setup peer store " & error
return err("Setting up storage failed: " & error)
debug "Initializing node"
let node = initNode(conf, netConfig, rng, key, record, peerStore, relay).valueOr:
let node = initNode(wakuConf, netConfig, rng, record, peerStore, relay).valueOr:
error "Initializing node failed", error = error
return err("Initializing node failed: " & error)
debug "Mounting protocols"
try:
(waitFor node.setupProtocols(conf, key)).isOkOr:
(waitFor node.setupProtocols(wakuConf)).isOkOr:
error "Mounting protocols failed", error = error
return err("Mounting protocols failed: " & error)
except CatchableError:

View File

@ -13,7 +13,7 @@ import
const MessageWindowInSec = 5 * 60 # +- 5 minutes
import ./external_config, ../waku_relay/protocol, ../waku_core
import ./waku_conf, ../waku_relay/protocol, ../waku_core
declarePublicCounter waku_msg_validator_signed_outcome,
"number of messages for each validation outcome", ["result"]

View File

@ -42,7 +42,8 @@ import
../factory/internal_config,
../factory/external_config,
../factory/app_callbacks,
../waku_enr/multiaddr
../waku_enr/multiaddr,
./waku_conf
logScope:
topics = "wakunode waku"
@ -52,12 +53,13 @@ const git_version* {.strdefine.} = "n/a"
type Waku* = ref object
version: string
conf: WakuNodeConf
rng: ref HmacDrbgContext
conf*: WakuConf
rng*: ref HmacDrbgContext
key: crypto.PrivateKey
wakuDiscv5*: WakuDiscoveryV5
dynamicBootstrapNodes: seq[RemotePeerInfo]
dynamicBootstrapNodes*: seq[RemotePeerInfo]
dnsRetryLoopHandle: Future[void]
networkConnLoopHandle: Future[void]
discoveryMngr: DiscoveryManager
@ -70,37 +72,11 @@ type Waku* = ref object
metricsServer*: MetricsHttpServerRef
appCallbacks*: AppCallbacks
proc logConfig(conf: WakuNodeConf) =
info "Configuration: Enabled protocols",
relay = conf.relay,
rlnRelay = conf.rlnRelay,
store = conf.store,
filter = conf.filter,
lightpush = conf.lightpush,
peerExchange = conf.peerExchange
info "Configuration. Network", cluster = conf.clusterId
for shard in conf.shards:
info "Configuration. Shards", shard = shard
for i in conf.discv5BootstrapNodes:
info "Configuration. Bootstrap nodes", node = i
if conf.rlnRelay and conf.rlnRelayDynamic:
info "Configuration. Validation",
mechanism = "onchain rln",
contract = conf.rlnRelayEthContractAddress,
maxMessageSize = conf.maxMessageSize,
rlnEpochSizeSec = conf.rlnEpochSizeSec,
rlnRelayUserMessageLimit = conf.rlnRelayUserMessageLimit,
rlnRelayEthClientAddress = string(conf.rlnRelayEthClientAddress)
func version*(waku: Waku): string =
waku.version
proc setupSwitchServices(
waku: Waku, conf: WakuNodeConf, circuitRelay: Relay, rng: ref HmacDrbgContext
waku: Waku, conf: WakuConf, circuitRelay: Relay, rng: ref HmacDrbgContext
) =
proc onReservation(addresses: seq[MultiAddress]) {.gcsafe, raises: [].} =
debug "circuit relay handler new reserve event",
@ -116,7 +92,7 @@ proc setupSwitchServices(
error "failed to update announced multiaddress", error = $error
let autonatService = getAutonatService(rng)
if conf.isRelayClient:
if conf.circuitRelayClient:
## The node is considered to be behind a NAT or firewall and then it
## should struggle to be reachable and establish connections to other nodes
const MaxNumRelayServers = 2
@ -131,12 +107,13 @@ proc setupSwitchServices(
## Initialisation
proc newCircuitRelay(isRelayClient: bool): Relay =
# TODO: Does it mean it's a circuit-relay server when it's false?
if isRelayClient:
return RelayClient.new()
return Relay.new()
proc setupAppCallbacks(
node: WakuNode, conf: WakuNodeConf, appCallbacks: AppCallbacks
node: WakuNode, conf: WakuConf, appCallbacks: AppCallbacks
): Result[void, string] =
if appCallbacks.isNil():
info "No external callbacks to be set"
@ -171,52 +148,36 @@ proc setupAppCallbacks(
return ok()
proc new*(
T: type Waku, confCopy: var WakuNodeConf, appCallbacks: AppCallbacks = nil
T: type Waku, wakuConf: WakuConf, appCallbacks: AppCallbacks = nil
): Result[Waku, string] =
let rng = crypto.newRng()
logging.setupLog(confCopy.logLevel, confCopy.logFormat)
logging.setupLog(wakuConf.logLevel, wakuConf.logFormat)
confCopy = block:
let res = applyPresetConfiguration(confCopy)
if res.isErr():
error "Failed to complete the config", error = res.error
return err("Failed to complete the config:" & $res.error)
res.get()
?wakuConf.validate()
logConfig(confCopy)
wakuConf.logConf()
info "Running nwaku node", version = git_version
let validateShardsRes = validateShards(confCopy)
if validateShardsRes.isErr():
error "Failed validating shards", error = $validateShardsRes.error
return err("Failed validating shards: " & $validateShardsRes.error)
var relay = newCircuitRelay(wakuConf.circuitRelayClient)
let keyRes = getNodeKey(confCopy, rng)
if keyRes.isErr():
error "Failed to generate key", error = $keyRes.error
return err("Failed to generate key: " & $keyRes.error)
confCopy.nodeKey = some(keyRes.get())
var relay = newCircuitRelay(confCopy.isRelayClient)
let nodeRes = setupNode(confCopy, rng, relay)
let nodeRes = setupNode(wakuConf, rng, relay)
if nodeRes.isErr():
error "Failed setting up node", error = nodeRes.error
return err("Failed setting up node: " & nodeRes.error)
let node = nodeRes.get()
node.setupAppCallbacks(confCopy, appCallbacks).isOkOr:
node.setupAppCallbacks(wakuConf, appCallbacks).isOkOr:
error "Failed setting up app callbacks", error = error
return err("Failed setting up app callbacks: " & $error)
## Delivery Monitor
var deliveryMonitor: DeliveryMonitor
if confCopy.reliabilityEnabled:
if confCopy.storenode == "":
return err("A storenode should be set when reliability mode is on")
if wakuConf.p2pReliability:
if wakuConf.remoteStoreNode.isNone():
return err("A remoteStoreNode should be set when reliability mode is on")
let deliveryMonitorRes = DeliveryMonitor.new(
node.wakuStoreClient, node.wakuRelay, node.wakuLightpushClient,
@ -228,16 +189,15 @@ proc new*(
var waku = Waku(
version: git_version,
# TODO: WakuNodeConf is re-used for too many context, `conf` here should be a dedicated subtype
conf: confCopy,
conf: wakuConf,
rng: rng,
key: confCopy.nodekey.get(),
key: wakuConf.nodeKey,
node: node,
deliveryMonitor: deliveryMonitor,
appCallbacks: appCallbacks,
)
waku.setupSwitchServices(confCopy, relay, rng)
waku.setupSwitchServices(wakuConf, relay, rng)
ok(waku)
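A minimal sketch of the new entry-point flow (the wiring below assumes the usual wakunode2-style caller): a validated `WakuConf` is built up front, so `Waku.new` no longer mutates a config copy.

  var waku = Waku.new(wakuConf).valueOr:
    error "Waku initialization failed", error = error
    quit(QuitFailure)
  (waitFor startWaku(addr waku)).isOkOr:
    error "Starting waku failed", error = error
    quit(QuitFailure)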
@ -265,13 +225,16 @@ proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
return err("Could not retrieve ports " & error)
if tcpPort.isSome():
conf.tcpPort = tcpPort.get()
conf.networkConf.p2pTcpPort = tcpPort.get()
if websocketPort.isSome():
conf.websocketPort = websocketPort.get()
if websocketPort.isSome() and conf.webSocketConf.isSome():
conf.webSocketConf.get().port = websocketPort.get()
# Rebuild NetConfig with bound port values
let netConf = networkConfiguration(conf, clientId).valueOr:
let netConf = networkConfiguration(
conf.clusterId, conf.networkConf, conf.discv5Conf, conf.webSocketConf,
conf.wakuFlags, conf.dnsAddrsNameServers, conf.portsShift, clientId,
).valueOr:
return err("Could not update NetConfig: " & error)
return ok(netConf)
@ -279,8 +242,7 @@ proc getRunningNetConfig(waku: ptr Waku): Result[NetConfig, string] =
proc updateEnr(waku: ptr Waku): Result[void, string] =
let netConf: NetConfig = getRunningNetConfig(waku).valueOr:
return err("error calling updateNetConfig: " & $error)
let record = enrConfiguration(waku[].conf, netConf, waku[].key).valueOr:
let record = enrConfiguration(waku[].conf, netConf).valueOr:
return err("ENR setup failed: " & error)
if isClusterMismatched(record, waku[].conf.clusterId):
@ -319,7 +281,9 @@ proc updateAddressInENR(waku: ptr Waku): Result[void, string] =
return ok()
proc updateWaku(waku: ptr Waku): Result[void, string] =
if waku[].conf.tcpPort == Port(0) or waku[].conf.websocketPort == Port(0):
let conf = waku[].conf
if conf.networkConf.p2pTcpPort == Port(0) or
(conf.websocketConf.isSome() and conf.websocketConf.get.port == Port(0)):
updateEnr(waku).isOkOr:
return err("error calling updateEnr: " & $error)
@ -332,15 +296,17 @@ proc updateWaku(waku: ptr Waku): Result[void, string] =
proc startDnsDiscoveryRetryLoop(waku: ptr Waku): Future[void] {.async.} =
while true:
await sleepAsync(30.seconds)
let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers
)
if dynamicBootstrapNodesRes.isErr():
error "Retrieving dynamic bootstrap nodes failed",
error = dynamicBootstrapNodesRes.error
continue
if waku.conf.dnsDiscoveryConf.isSome():
let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get()
let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
)
if dynamicBootstrapNodesRes.isErr():
error "Retrieving dynamic bootstrap nodes failed",
error = dynamicBootstrapNodesRes.error
continue
waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
if not waku[].wakuDiscv5.isNil():
let dynamicBootstrapEnrs = waku[].dynamicBootstrapNodes
@ -375,20 +341,23 @@ proc startNetworkConnectivityLoop(waku: Waku): Future[void] {.async.} =
proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
debug "Retrieve dynamic bootstrap nodes"
let conf = waku[].conf
let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
waku.conf.dnsDiscoveryUrl, waku.conf.dnsDiscoveryNameServers
)
if conf.dnsDiscoveryConf.isSome():
let dnsDiscoveryConf = waku.conf.dnsDiscoveryConf.get()
let dynamicBootstrapNodesRes = await waku_dnsdisc.retrieveDynamicBootstrapNodes(
dnsDiscoveryConf.enrTreeUrl, dnsDiscoveryConf.nameServers
)
if dynamicBootstrapNodesRes.isErr():
error "Retrieving dynamic bootstrap nodes failed",
error = dynamicBootstrapNodesRes.error
# Start Dns Discovery retry loop
waku[].dnsRetryLoopHandle = waku.startDnsDiscoveryRetryLoop()
else:
waku[].dynamicBootstrapNodes = dynamicBootstrapNodesRes.get()
if not waku[].conf.discv5Only:
if conf.discv5Conf.isNone or not conf.discv5Conf.get().discv5Only:
(await startNode(waku.node, waku.conf, waku.dynamicBootstrapNodes)).isOkOr:
return err("error while calling startNode: " & $error)
@ -397,10 +366,17 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async.} =
return err("Error in updateApp: " & $error)
## Discv5
if waku[].conf.discv5Discovery or waku[].conf.discv5Only:
if conf.discv5Conf.isSome:
waku[].wakuDiscV5 = waku_discv5.setupDiscoveryV5(
waku.node.enr, waku.node.peerManager, waku.node.topicSubscriptionQueue, waku.conf,
waku.dynamicBootstrapNodes, waku.rng, waku.key,
waku.node.enr,
waku.node.peerManager,
waku.node.topicSubscriptionQueue,
conf.discv5Conf.get(),
waku.dynamicBootstrapNodes,
waku.rng,
conf.nodeKey,
conf.networkConf.p2pListenAddress,
conf.portsShift,
)
(await waku.wakuDiscV5.start()).isOkOr:

waku/factory/waku_conf.nim (new file, 249 lines)
View File

@ -0,0 +1,249 @@
import
std/[net, options, strutils],
chronicles,
libp2p/crypto/crypto,
libp2p/multiaddress,
secp256k1,
results
import
../waku_rln_relay/rln_relay,
../waku_api/rest/builder,
../discovery/waku_discv5,
../node/waku_metrics,
../common/logging,
../waku_enr/capabilities,
./network_conf
export RlnRelayConf, RlnRelayCreds, RestServerConf, Discv5Conf, MetricsServerConf
logScope:
topics = "waku conf"
# TODO: should be defined in validator_signed.nim and imported here
type ProtectedShard* {.requiresInit.} = object
shard*: uint16
key*: secp256k1.SkPublicKey
type DnsDiscoveryConf* {.requiresInit.} = object
enrTreeUrl*: string
# TODO: should probably only have one set of name servers (see dnsaddrs)
nameServers*: seq[IpAddress]
type StoreSyncConf* {.requiresInit.} = object
rangeSec*: uint32
intervalSec*: uint32
relayJitterSec*: uint32
type StoreServiceConf* {.requiresInit.} = object
dbMigration*: bool
dbUrl*: string
dbVacuum*: bool
supportV2*: bool
maxNumDbConnections*: int
retentionPolicy*: string
resume*: bool
storeSyncConf*: Option[StoreSyncConf]
type FilterServiceConf* {.requiresInit.} = object
maxPeersToServe*: uint32
subscriptionTimeout*: uint16
maxCriteria*: uint32
type NetworkConfig* = object
natStrategy*: string # TODO: make enum
p2pTcpPort*: Port
dns4DomainName*: Option[string]
p2pListenAddress*: IpAddress
extMultiAddrs*: seq[MultiAddress]
extMultiAddrsOnly*: bool
## `WakuConf` is a valid configuration for a Waku node.
## All information needed by a Waku node should be contained
## in this object. A convenient `validate` method enables
## sanity checks beyond type enforcement.
## If an `Option` field is `some`, the related protocol is enabled.
type WakuConf* {.requiresInit.} = ref object
# ref because `getRunningNetConfig` modifies it
nodeKey*: crypto.PrivateKey
clusterId*: uint16
shards*: seq[uint16]
protectedShards*: seq[ProtectedShard]
# TODO: move to an autoShardingConf
numShardsInNetwork*: uint32
contentTopics*: seq[string]
relay*: bool
lightPush*: bool
peerExchange*: bool
# TODO: remove relay peer exchange
relayPeerExchange*: bool
rendezvous*: bool
circuitRelayClient*: bool
keepAlive*: bool
discv5Conf*: Option[Discv5Conf]
dnsDiscoveryConf*: Option[DnsDiscoveryConf]
filterServiceConf*: Option[FilterServiceConf]
storeServiceConf*: Option[StoreServiceConf]
rlnRelayConf*: Option[RlnRelayConf]
restServerConf*: Option[RestServerConf]
metricsServerConf*: Option[MetricsServerConf]
webSocketConf*: Option[WebSocketConf]
portsShift*: uint16
dnsAddrs*: bool
dnsAddrsNameServers*: seq[IpAddress]
networkConf*: NetworkConfig
wakuFlags*: CapabilitiesBitfield
# TODO: could probably make it a `RemotePeerInfo`
staticNodes*: seq[string]
remoteStoreNode*: Option[string]
remoteLightPushNode*: Option[string]
remoteFilterNode*: Option[string]
remotePeerExchangeNode*: Option[string]
maxMessageSizeBytes*: uint64
logLevel*: logging.LogLevel
logFormat*: logging.LogFormat
peerPersistence*: bool
# TODO: should clearly be a uint
peerStoreCapacity*: Option[int]
# TODO: should clearly be a uint
maxConnections*: int
agentString*: string
colocationLimit*: int
# TODO: use proper type
rateLimits*: seq[string]
# TODO: those could be in a relay conf object
maxRelayPeers*: Option[int]
relayShardedPeerManagement*: bool
# TODO: use proper type
relayServiceRatio*: string
p2pReliability*: bool
proc logConf*(conf: WakuConf) =
info "Configuration: Enabled protocols",
relay = conf.relay,
rlnRelay = conf.rlnRelayConf.isSome(),
store = conf.storeServiceConf.isSome(),
filter = conf.filterServiceConf.isSome(),
lightPush = conf.lightPush,
peerExchange = conf.peerExchange
info "Configuration. Network", cluster = conf.clusterId
for shard in conf.shards:
info "Configuration. Shards", shard = shard
if conf.discv5Conf.isSome():
for i in conf.discv5Conf.get().bootstrapNodes:
info "Configuration. Bootstrap nodes", node = i.string
if conf.rlnRelayConf.isSome():
var rlnRelayConf = conf.rlnRelayConf.get()
if rlnRelayConf.dynamic:
info "Configuration. Validation",
mechanism = "onchain rln",
contract = rlnRelayConf.ethContractAddress.string,
maxMessageSize = conf.maxMessageSizeBytes,
rlnEpochSizeSec = rlnRelayConf.epochSizeSec,
rlnRelayUserMessageLimit = rlnRelayConf.userMessageLimit,
rlnRelayEthClientAddress = string(rlnRelayConf.ethClientAddress)
proc validateNodeKey(wakuConf: WakuConf): Result[void, string] =
wakuConf.nodeKey.getPublicKey().isOkOr:
return err("Node key is invalid")
return ok()
proc validateShards(wakuConf: WakuConf): Result[void, string] =
let numShardsInNetwork = wakuConf.numShardsInNetwork
# TODO: fix up this behaviour
if numShardsInNetwork == 0:
return ok()
for shard in wakuConf.shards:
if shard >= numShardsInNetwork:
let msg =
"validateShards invalid shard: " & $shard & " when numShardsInNetwork: " &
$numShardsInNetwork # fmt doesn't work
error "validateShards failed", error = msg
return err(msg)
return ok()
proc validateNoEmptyStrings(wakuConf: WakuConf): Result[void, string] =
if wakuConf.networkConf.dns4DomainName.isSome() and
isEmptyOrWhiteSpace(wakuConf.networkConf.dns4DomainName.get().string):
return err("dns4DomainName is an empty string, set it to none(string) instead")
if isEmptyOrWhiteSpace(wakuConf.relayServiceRatio):
return err("relayServiceRatio is an empty string")
for sn in wakuConf.staticNodes:
if isEmptyOrWhiteSpace(sn):
return err("staticNodes contain an empty string")
if wakuConf.remoteStoreNode.isSome() and
isEmptyOrWhiteSpace(wakuConf.remoteStoreNode.get()):
return err("remoteStoreNode is an empty string, set it to none(string) instead")
if wakuConf.remoteLightPushNode.isSome() and
isEmptyOrWhiteSpace(wakuConf.remoteLightPushNode.get()):
return err("remoteLightPushNode is an empty string, set it to none(string) instead")
if wakuConf.remotePeerExchangeNode.isSome() and
isEmptyOrWhiteSpace(wakuConf.remotePeerExchangeNode.get()):
return
err("remotePeerExchangeNode is an empty string, set it to none(string) instead")
if wakuConf.remoteFilterNode.isSome() and
isEmptyOrWhiteSpace(wakuConf.remoteFilterNode.get()):
return
err("remotePeerExchangeNode is an empty string, set it to none(string) instead")
if wakuConf.dnsDiscoveryConf.isSome() and
isEmptyOrWhiteSpace(wakuConf.dnsDiscoveryConf.get().enrTreeUrl):
return err("dnsDiscoveryConf.enrTreeUrl is an empty string")
# TODO: rln relay config should validate itself
if wakuConf.rlnRelayConf.isSome():
let rlnRelayConf = wakuConf.rlnRelayConf.get()
if isEmptyOrWhiteSpace(rlnRelayConf.treePath):
return err("rlnRelayConf.treepath is an empty string")
if isEmptyOrWhiteSpace(rlnRelayConf.ethClientAddress):
return err("rlnRelayConf.ethClientAddress is an empty string")
if isEmptyOrWhiteSpace(rlnRelayConf.ethContractAddress):
return err("rlnRelayConf.ethContractAddress is an empty string")
if rlnRelayConf.creds.isSome():
let creds = rlnRelayConf.creds.get()
if isEmptyOrWhiteSpace(creds.path):
return err(
"rlnRelayConf.creds.path is an empty string, set rlnRelayConf.creds to none instead"
)
if isEmptyOrWhiteSpace(creds.password):
return err(
"rlnRelayConf.creds.password is an empty string, set rlnRelayConf.creds to none instead"
)
return ok()
proc validate*(wakuConf: WakuConf): Result[void, string] =
?wakuConf.validateNodeKey()
?wakuConf.validateShards()
?wakuConf.validateNoEmptyStrings()
return ok()

View File

@ -61,6 +61,8 @@ proc isWsAddress*(ma: MultiAddress): bool =
proc containsWsAddress(extMultiAddrs: seq[MultiAddress]): bool =
return extMultiAddrs.filterIt(it.isWsAddress()).len > 0
const DefaultWsBindPort = static(Port(8000))
# TODO: migrate to builder pattern with nested configs
proc init*(
T: type NetConfig,
bindIp: IpAddress,
@ -69,7 +71,7 @@ proc init*(
extPort = none(Port),
extMultiAddrs = newSeq[MultiAddress](),
extMultiAddrsOnly: bool = false,
wsBindPort: Port = Port(8000),
wsBindPort: Option[Port] = some(DefaultWsBindPort),
wsEnabled: bool = false,
wssEnabled: bool = false,
dns4DomainName = none(string),
@ -86,7 +88,9 @@ proc init*(
var wsHostAddress = none(MultiAddress)
if wsEnabled or wssEnabled:
try:
wsHostAddress = some(ip4TcpEndPoint(bindIp, wsbindPort) & wsFlag(wssEnabled))
wsHostAddress = some(
ip4TcpEndPoint(bindIp, wsbindPort.get(DefaultWsBindPort)) & wsFlag(wssEnabled)
)
except CatchableError:
return err(getCurrentExceptionMsg())
@ -113,8 +117,10 @@ proc init*(
if wsHostAddress.isSome():
try:
wsExtAddress =
some(dns4TcpEndPoint(dns4DomainName.get(), wsBindPort) & wsFlag(wssEnabled))
wsExtAddress = some(
dns4TcpEndPoint(dns4DomainName.get(), wsBindPort.get(DefaultWsBindPort)) &
wsFlag(wssEnabled)
)
except CatchableError:
return err(getCurrentExceptionMsg())
else:
@ -124,8 +130,10 @@ proc init*(
if wsHostAddress.isSome():
try:
wsExtAddress =
some(ip4TcpEndPoint(extIp.get(), wsBindPort) & wsFlag(wssEnabled))
wsExtAddress = some(
ip4TcpEndPoint(extIp.get(), wsBindPort.get(DefaultWsBindPort)) &
wsFlag(wssEnabled)
)
except CatchableError:
return err(getCurrentExceptionMsg())
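A hedged sketch of the new call shape (parameter names as in this diff, remaining parameters left at their defaults): with `wsBindPort` now an `Option[Port]`, callers that do not enable WebSocket simply pass `none(Port)`, and `DefaultWsBindPort` (8000) is only applied when a ws/wss multiaddress is actually built.

  let netConfRes = NetConfig.init(
    clusterId = 0,
    bindIp = parseIpAddress("0.0.0.0"),
    bindPort = Port(60000),
    wsBindPort = none(Port), # no WebSocket transport configured
    wsEnabled = false,
  )
  doAssert netConfRes.isOk()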

View File

@ -5,14 +5,18 @@ import
../waku_rln_relay/protocol_metrics as rln_metrics,
../utils/collector,
./peer_manager,
./waku_node,
../factory/external_config
./waku_node
const LogInterval = 10.minutes
logScope:
topics = "waku node metrics"
type MetricsServerConf* = object
httpAddress*: IpAddress
httpPort*: Port
logging*: bool
proc startMetricsLog*() =
var logMetrics: CallbackFunc
@ -70,17 +74,15 @@ proc startMetricsServer(
return ok(server)
proc startMetricsServerAndLogging*(
conf: WakuNodeConf
conf: MetricsServerConf, portsShift: uint16
): Result[MetricsHttpServerRef, string] =
var metricsServer: MetricsHttpServerRef
if conf.metricsServer:
metricsServer = startMetricsServer(
conf.metricsServerAddress, Port(conf.metricsServerPort + conf.portsShift)
).valueOr:
return
err("Starting metrics server failed. Continuing in current state:" & $error)
metricsServer = startMetricsServer(
conf.httpAddress, Port(conf.httpPort.uint16 + portsShift)
).valueOr:
return err("Starting metrics server failed. Continuing in current state:" & $error)
if conf.metricsLogging:
if conf.logging:
startMetricsLog()
return ok(metricsServer)
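For illustration, a hedged sketch of the new call shape: the caller passes the nested `MetricsServerConf` plus the global port shift, instead of the whole node config.

  let metricsConf = MetricsServerConf(
    httpAddress: parseIpAddress("127.0.0.1"), httpPort: Port(8008), logging: true
  )
  let metricsServer = startMetricsServerAndLogging(metricsConf, portsShift = 0).valueOr:
    error "Starting metrics server failed", error = error
    quit(QuitFailure)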

View File

@ -46,7 +46,7 @@ import
../waku_enr,
../waku_peer_exchange,
../waku_rln_relay,
./config,
./net_config,
./peer_manager,
../common/rate_limit/setting
@ -207,9 +207,9 @@ proc mountSharding*(
proc mountStoreSync*(
node: WakuNode,
storeSyncRange = 3600,
storeSyncInterval = 300,
storeSyncRelayJitter = 20,
storeSyncRange = 3600.uint32,
storeSyncInterval = 300.uint32,
storeSyncRelayJitter = 20.uint32,
): Future[Result[void, string]] {.async.} =
let idsChannel = newAsyncQueue[SyncID](0)
let wantsChannel = newAsyncQueue[PeerId](0)
@ -1231,8 +1231,7 @@ proc mountRlnRelay*(
raise
newException(CatchableError, "failed to mount WakuRlnRelay: " & rlnRelayRes.error)
let rlnRelay = rlnRelayRes.get()
if (rlnConf.rlnRelayUserMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit):
if (rlnConf.userMessageLimit > rlnRelay.groupManager.rlnRelayMaxMessageLimit):
error "rln-relay-user-message-limit can't exceed the MAX_MESSAGE_LIMIT in the rln contract"
let validator = generateRlnValidator(rlnRelay, spamHandler)

View File

@ -22,9 +22,19 @@ template parseAndAccumulate*(collector: Collector, cumulativeValue: float64): fl
cumulativeValue = total
freshCount
template parseAndAccumulate*(
collector: typedesc[IgnoredCollector], cumulativeValue: float64
): float64 =
## Used when metrics are disabled (undefined `metrics` compilation flag)
0.0
template collectorAsF64*(collector: Collector): float64 =
## This template is used to get a metric's total value (accumulated from 0)
## Serves as a wrapper for parseCollectorIntoF64 which is gcsafe
{.gcsafe.}:
let total = parseCollectorIntoF64(collector)
total
template collectorAsF64*(collector: typedesc[IgnoredCollector]): float64 =
## Used when metrics are disabled (undefined `metrics` compilation flag)
0.0
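A small usage sketch (hypothetical counter name): the `IgnoredCollector` overloads let the same call sites compile when nwaku is built without the `metrics` flag.

  declarePublicCounter waku_example_events, "number of example events"
  waku_example_events.inc(3)
  let totalSoFar = collectorAsF64(waku_example_events) # 3.0 with metrics enabled, 0.0 otherwise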

View File

@ -5,7 +5,6 @@ import presto
import
waku/waku_node,
waku/discovery/waku_discv5,
waku/factory/external_config,
waku/waku_api/message_cache,
waku/waku_api/handlers,
waku/waku_api/rest/server,
@ -31,12 +30,18 @@ import
var restServerNotInstalledTab {.threadvar.}: TableRef[string, string]
restServerNotInstalledTab = newTable[string, string]()
proc startRestServerEsentials*(
nodeHealthMonitor: WakuNodeHealthMonitor, conf: WakuNodeConf
): Result[WakuRestServerRef, string] =
if not conf.rest:
return ok(nil)
export WakuRestServerRef
type RestServerConf* = object
allowOrigin*: seq[string]
listenAddress*: IpAddress
port*: Port
admin*: bool
relayCacheCapacity*: uint32
proc startRestServerEssentials*(
nodeHealthMonitor: WakuNodeHealthMonitor, conf: RestServerConf, portsShift: uint16
): Result[WakuRestServerRef, string] =
let requestErrorHandler: RestRequestErrorHandler = proc(
error: RestRequestError, request: HttpRequestRef
): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =
@ -72,13 +77,13 @@ proc startRestServerEsentials*(
return defaultResponse()
let allowedOrigin =
if len(conf.restAllowOrigin) > 0:
some(conf.restAllowOrigin.join(","))
if len(conf.allowOrigin) > 0:
some(conf.allowOrigin.join(","))
else:
none(string)
let address = conf.restAddress
let port = Port(conf.restPort + conf.portsShift)
let address = conf.listenAddress
let port = Port(conf.port.uint16 + portsShift)
let server =
?newRestHttpServer(
address,
@ -112,14 +117,16 @@ proc startRestServerProtocolSupport*(
restServer: WakuRestServerRef,
node: WakuNode,
wakuDiscv5: WakuDiscoveryV5,
conf: WakuNodeConf,
conf: RestServerConf,
relayEnabled: bool,
lightPushEnabled: bool,
clusterId: uint16,
shards: seq[uint16],
contentTopics: seq[string],
): Result[void, string] =
if not conf.rest:
return ok()
var router = restServer.router
## Admin REST API
if conf.restAdmin:
if conf.admin:
installAdminApiHandlers(router, node)
else:
restServerNotInstalledTab["admin"] =
@ -129,22 +136,23 @@ proc startRestServerProtocolSupport*(
installDebugApiHandlers(router, node)
## Relay REST API
if conf.relay:
if relayEnabled:
## This MessageCache is used, f.e., in js-waku<>nwaku interop tests.
## js-waku tests asks nwaku-docker through REST whether a message is properly received.
let cache = MessageCache.init(int(conf.restRelayCacheCapacity))
const RestRelayCacheCapacity = 50
let cache = MessageCache.init(int(RestRelayCacheCapacity))
let handler: WakuRelayHandler = messageCacheHandler(cache)
for shard in conf.shards:
let pubsubTopic = $RelayShard(clusterId: conf.clusterId, shardId: shard)
for shard in shards:
let pubsubTopic = $RelayShard(clusterId: clusterId, shardId: shard)
cache.pubsubSubscribe(pubsubTopic)
## TODO: remove this line. use observer-observable pattern
## within waku_node::registerRelayDefaultHandler
discard node.wakuRelay.subscribe(pubsubTopic, handler)
for contentTopic in conf.contentTopics:
for contentTopic in contentTopics:
cache.contentSubscribe(contentTopic)
let shard = node.wakuSharding.getShard(contentTopic).valueOr:
@ -192,7 +200,7 @@ proc startRestServerProtocolSupport*(
## or install it to be used with self-hosted lightpush service
## We either get the lightpush service node from config, discover it, or serve it ourselves
if (node.wakuLegacyLightpushClient != nil) or
(conf.lightpush and node.wakuLegacyLightPush != nil and node.wakuRelay != nil):
(lightPushEnabled and node.wakuLegacyLightPush != nil and node.wakuRelay != nil):
let lightDiscoHandler =
if not wakuDiscv5.isNil():
some(defaultDiscoveryHandler(wakuDiscv5, Lightpush))

View File

@ -23,7 +23,7 @@ type
### Configuration
type RestServerConf* = object
type RestServerConf* {.requiresInit.} = object
cacheSize*: Natural
## \
## The maximum number of recently accessed states that are kept in \

View File

@ -1,7 +1,7 @@
import
./node/config,
./node/net_config,
./node/waku_switch as switch,
./node/waku_node as node,
./node/health_monitor as health_monitor
export config, switch, node, health_monitor
export net_config, switch, node, health_monitor

View File

@ -32,18 +32,26 @@ import
logScope:
topics = "waku rln_relay"
type WakuRlnConfig* = object
rlnRelayDynamic*: bool
rlnRelayCredIndex*: Option[uint]
rlnRelayEthContractAddress*: string
rlnRelayEthClientAddress*: string
rlnRelayChainId*: uint
rlnRelayCredPath*: string
rlnRelayCredPassword*: string
rlnRelayTreePath*: string
rlnEpochSizeSec*: uint64
type RlnRelayCreds* {.requiresInit.} = object
path*: string
password*: string
type RlnRelayConf* = object of RootObj
# TODO: several parameters are only needed when it's dynamic
# change the config to either nest or use enum/type variant so it's obvious
# and then it can be set to `requiresInit`
dynamic*: bool
credIndex*: Option[uint]
ethContractAddress*: string
ethClientAddress*: string
chainId*: uint
creds*: Option[RlnRelayCreds]
treePath*: string
epochSizeSec*: uint64
userMessageLimit*: uint64
type WakuRlnConfig* = object of RlnRelayConf
onFatalErrorAction*: OnFatalErrorHandler
rlnRelayUserMessageLimit*: uint64
proc createMembershipList*(
rln: ptr RLN, n: int
@ -421,10 +429,10 @@ proc mount(
groupManager: GroupManager
wakuRlnRelay: WakuRLNRelay
# create an RLN instance
let rlnInstance = createRLNInstance(tree_path = conf.rlnRelayTreePath).valueOr:
let rlnInstance = createRLNInstance(tree_path = conf.treePath).valueOr:
return err("could not create RLN instance: " & $error)
if not conf.rlnRelayDynamic:
if not conf.dynamic:
# static setup
let parsedGroupKeys = StaticGroupKeys.toIdentityCredentials().valueOr:
return err("could not parse static group keys: " & $error)
@ -432,31 +440,27 @@ proc mount(
groupManager = StaticGroupManager(
groupSize: StaticGroupSize,
groupKeys: parsedGroupKeys,
membershipIndex: conf.rlnRelayCredIndex,
membershipIndex: conf.credIndex,
rlnInstance: rlnInstance,
onFatalErrorAction: conf.onFatalErrorAction,
)
# we don't persist credentials in static mode since they exist in ./constants.nim
else:
# dynamic setup
proc useValueOrNone(s: string): Option[string] =
if s == "":
none(string)
let (rlnRelayCredPath, rlnRelayCredPassword) =
if conf.creds.isSome:
(some(conf.creds.get().path), some(conf.creds.get().password))
else:
some(s)
(none(string), none(string))
let
rlnRelayCredPath = useValueOrNone(conf.rlnRelayCredPath)
rlnRelayCredPassword = useValueOrNone(conf.rlnRelayCredPassword)
groupManager = OnchainGroupManager(
ethClientUrl: string(conf.rlnRelayethClientAddress),
ethContractAddress: $conf.rlnRelayEthContractAddress,
chainId: conf.rlnRelayChainId,
ethClientUrl: string(conf.ethClientAddress),
ethContractAddress: $conf.ethContractAddress,
chainId: conf.chainId,
rlnInstance: rlnInstance,
registrationHandler: registrationHandler,
keystorePath: rlnRelayCredPath,
keystorePassword: rlnRelayCredPassword,
membershipIndex: conf.rlnRelayCredIndex,
membershipIndex: conf.credIndex,
onFatalErrorAction: conf.onFatalErrorAction,
)
@ -469,10 +473,9 @@ proc mount(
wakuRlnRelay = WakuRLNRelay(
groupManager: groupManager,
nonceManager:
NonceManager.init(conf.rlnRelayUserMessageLimit, conf.rlnEpochSizeSec.float),
rlnEpochSizeSec: conf.rlnEpochSizeSec,
rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.rlnEpochSizeSec)), 1),
nonceManager: NonceManager.init(conf.userMessageLimit, conf.epochSizeSec.float),
rlnEpochSizeSec: conf.epochSizeSec,
rlnMaxEpochGap: max(uint64(MaxClockGapSeconds / float64(conf.epochSizeSec)), 1),
onFatalErrorAction: conf.onFatalErrorAction,
)
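A small sketch of the new credential handling (hypothetical keystore path): path and password now travel together as one optional `RlnRelayCreds` record, so a missing keystore is `none` rather than a pair of empty strings, and the destructuring above maps it back to the two `Option[string]` fields the group manager expects.

  let creds = some(RlnRelayCreds(path: "/keystore/keystore.json", password: "hunter2"))
  let (keystorePath, keystorePassword) =
    if creds.isSome():
      (some(creds.get().path), some(creds.get().password))
    else:
      (none(string), none(string))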