Chore: adapt cli args for delivery api (#3744)

* LogosDeliveryAPI: NodeConfig -> WakuNodeConf + mode selector and logos.dev preset

* Adjustment made on test, logos.dev preset

* change default agentString from nwaku to logos-delivery

* Add p2pReliability switch to presets and make it default to true.

* Borrow the entryNode idea from NodeConfig into WakuNodeConf, to ease shortcutting among the different bootstrap node lists, which all require different formats

* Fix rateLimit assignment for builder

* Remove Core mode default, as we already have a default; the user must override it explicitly

* Removed obsolete API createNode with NodeConfig - tests are refactored for WakuNodeConf usage

* Fix failing test caused by the twn preset (cluster-id 1) default overwriting maxMessageSize. Fix readme.
This commit is contained in:
NagyZoltanPeter 2026-03-03 19:17:54 +01:00 committed by GitHub
parent 09618a2656
commit 1f9c4cb8cc
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 641 additions and 1092 deletions

View File

@ -59,19 +59,24 @@ when isMainModule:
echo "Starting Waku node..."
let config =
if (args.ethRpcEndpoint == ""):
# Create a basic configuration for the Waku node
# No RLN as we don't have an ETH RPC Endpoint
NodeConfig.init(
protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 42)
)
else:
# Connect to TWN, use ETH RPC Endpoint for RLN
NodeConfig.init(mode = WakuMode.Core, ethRpcEndpoints = @[args.ethRpcEndpoint])
# Use WakuNodeConf (the CLI configuration type) for node setup
var conf = defaultWakuNodeConf().valueOr:
echo "Failed to create default config: ", error
quit(QuitFailure)
if args.ethRpcEndpoint == "":
# Create a basic configuration for the Waku node
# No RLN as we don't have an ETH RPC Endpoint
conf.mode = Core
conf.preset = "logos.dev"
else:
# Connect to TWN, use ETH RPC Endpoint for RLN
conf.mode = Core
conf.preset = "twn"
conf.ethClientUrls = @[EthRpcUrl(args.ethRpcEndpoint)]
# Create the node using the library API's createNode function
let node = (waitFor createNode(config)).valueOr:
let node = (waitFor createNode(conf)).valueOr:
echo "Failed to create node: ", error
quit(QuitFailure)

View File

@ -32,18 +32,17 @@ void *logosdelivery_create_node(
```json
{
"mode": "Core",
"clusterId": 1,
"entryNodes": [
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
],
"networkingConfig": {
"listenIpv4": "0.0.0.0",
"p2pTcpPort": 60000,
"discv5UdpPort": 9000
}
"preset": "logos.dev",
"listenAddress": "0.0.0.0",
"tcpPort": 60000,
"discv5UdpPort": 9000
}
```
Configuration uses flat field names matching `WakuNodeConf` in `tools/confutils/cli_args.nim`.
Use `"preset"` to select a network preset (e.g., `"twn"`, `"logos.dev"`) which auto-configures
entry nodes, cluster ID, sharding, and other network-specific settings.
#### `logosdelivery_start_node`
Starts the node.
@ -207,8 +206,9 @@ void callback(int ret, const char *msg, size_t len, void *userData) {
int main() {
const char *config = "{"
"\"logLevel\": \"INFO\","
"\"mode\": \"Core\","
"\"clusterId\": 1"
"\"preset\": \"logos.dev\""
"}";
// Create node

View File

@ -61,7 +61,7 @@ void event_callback(int ret, const char *msg, size_t len, void *userData) {
char messageHash[128];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
printf("📤 [EVENT] Message sent - RequestID: %s, Hash: %s\n", requestId, messageHash);
printf("[EVENT] Message sent - RequestID: %s, Hash: %s\n", requestId, messageHash);
} else if (strcmp(eventType, "message_error") == 0) {
char requestId[128];
@ -70,7 +70,7 @@ void event_callback(int ret, const char *msg, size_t len, void *userData) {
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
extract_json_field(eventJson, "error", error, sizeof(error));
printf("[EVENT] Message error - RequestID: %s, Hash: %s, Error: %s\n",
printf("[EVENT] Message error - RequestID: %s, Hash: %s, Error: %s\n",
requestId, messageHash, error);
} else if (strcmp(eventType, "message_propagated") == 0) {
@ -78,10 +78,15 @@ void event_callback(int ret, const char *msg, size_t len, void *userData) {
char messageHash[128];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
printf("✅ [EVENT] Message propagated - RequestID: %s, Hash: %s\n", requestId, messageHash);
printf("[EVENT] Message propagated - RequestID: %s, Hash: %s\n", requestId, messageHash);
} else if (strcmp(eventType, "connection_status_change") == 0) {
char connectionStatus[256];
extract_json_field(eventJson, "connectionStatus", connectionStatus, sizeof(connectionStatus));
printf("[EVENT] Connection status change - Status: %s\n", connectionStatus);
} else {
printf(" [EVENT] Unknown event type: %s\n", eventType);
printf("[EVENT] Unknown event type: %s\n", eventType);
}
free(eventJson);
@ -109,23 +114,12 @@ void simple_callback(int ret, const char *msg, size_t len, void *userData) {
int main() {
printf("=== Logos Messaging API (LMAPI) Example ===\n\n");
// Configuration JSON for creating a node
// Configuration JSON using WakuNodeConf field names (flat structure).
// Field names match Nim identifiers from WakuNodeConf in tools/confutils/cli_args.nim.
const char *config = "{"
"\"logLevel\": \"DEBUG\","
// "\"mode\": \"Edge\","
"\"logLevel\": \"INFO\","
"\"mode\": \"Core\","
"\"protocolsConfig\": {"
"\"entryNodes\": [\"/dns4/node-01.do-ams3.misc.logos-chat.status.im/tcp/30303/p2p/16Uiu2HAkxoqUTud5LUPQBRmkeL2xP4iKx2kaABYXomQRgmLUgf78\"],"
"\"clusterId\": 42,"
"\"autoShardingConfig\": {"
"\"numShardsInCluster\": 8"
"}"
"},"
"\"networkingConfig\": {"
"\"listenIpv4\": \"0.0.0.0\","
"\"p2pTcpPort\": 60000,"
"\"discv5UdpPort\": 9000"
"}"
"\"preset\": \"logos.dev\""
"}";
printf("1. Creating node...\n");
@ -152,7 +146,7 @@ int main() {
logosdelivery_start_node(ctx, simple_callback, (void *)"start_node");
// Wait for node to start
sleep(2);
sleep(10);
printf("\n4. Subscribing to content topic...\n");
const char *contentTopic = "/example/1/chat/proto";

View File

@ -22,7 +22,9 @@ extern "C"
// Creates a new instance of the node from the given configuration JSON.
// Returns a pointer to the Context needed by the rest of the API functions.
// Configuration should be in JSON format following the NodeConfig structure.
// Configuration should be in JSON format using WakuNodeConf field names.
// Field names match Nim identifiers from WakuNodeConf (camelCase).
// Example: {"mode": "Core", "clusterId": 42, "relay": true}
void *logosdelivery_create_node(
const char *configJson,
FFICallBack callback,

View File

@ -1,10 +1,11 @@
import std/json
import chronos, results, ffi
import std/[json, strutils]
import chronos, chronicles, results, confutils, confutils/std/net, ffi
import
waku/factory/waku,
waku/node/waku_node,
waku/api/[api, api_conf, types],
waku/api/[api, types],
waku/events/[message_events, health_events],
tools/confutils/cli_args,
../declare_lib,
../json_event
@ -14,15 +15,32 @@ proc `%`*(id: RequestId): JsonNode =
registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]):
proc(configJson: cstring): Future[Result[string, string]] {.async.} =
## Parse the JSON configuration and create a node
let nodeConfig =
try:
decodeNodeConfigFromJson($configJson)
except SerializationError as e:
return err("Failed to parse config JSON: " & e.msg)
## Parse the JSON configuration using fieldPairs approach (WakuNodeConf)
var conf = defaultWakuNodeConf().valueOr:
return err("Failed creating default conf: " & error)
var jsonNode: JsonNode
try:
jsonNode = parseJson($configJson)
except Exception:
return err(
"Failed to parse config JSON: " & getCurrentExceptionMsg() &
" configJson string: " & $configJson
)
for confField, confValue in fieldPairs(conf):
if jsonNode.contains(confField):
let formattedString = ($jsonNode[confField]).strip(chars = {'\"'})
try:
confValue = parseCmdArg(typeof(confValue), formattedString)
except Exception:
return err(
"Failed to parse field '" & confField & "': " &
getCurrentExceptionMsg() & ". Value: " & formattedString
)
# Create the node
ctx.myLib[] = (await api.createNode(nodeConfig)).valueOr:
ctx.myLib[] = (await api.createNode(conf)).valueOr:
let errMsg = $error
chronicles.error "CreateNodeRequest failed", err = errMsg
return err(errMsg)

View File

@ -13,9 +13,10 @@ import
waku/events/health_events,
waku/common/waku_protocol,
waku/factory/waku_conf
import tools/confutils/cli_args
const TestTimeout = chronos.seconds(10)
const DefaultShard = PubsubTopic("/waku/2/rs/1/0")
const DefaultShard = PubsubTopic("/waku/2/rs/3/0")
const TestContentTopic = ContentTopic("/waku/2/default-content/proto")
proc dummyHandler(
@ -80,7 +81,7 @@ suite "LM API health checking":
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
(await serviceNode.mountRelay()).isOkOr:
raiseAssert error
serviceNode.mountMetadata(1, @[0'u16]).isOkOr:
serviceNode.mountMetadata(3, @[0'u16]).isOkOr:
raiseAssert error
await serviceNode.mountLibp2pPing()
await serviceNode.start()
@ -89,16 +90,15 @@ suite "LM API health checking":
serviceNode.wakuRelay.subscribe(DefaultShard, dummyHandler)
lockNewGlobalBrokerContext:
let conf = NodeConfig.init(
mode = WakuMode.Core,
networkingConfig =
NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0),
protocolsConfig = ProtocolsConfig.init(
entryNodes = @[],
clusterId = 1'u16,
autoShardingConfig = AutoShardingConfig(numShardsInCluster: 1),
),
)
var conf = defaultWakuNodeConf().valueOr:
raiseAssert error
conf.mode = Core
conf.listenAddress = parseIpAddress("0.0.0.0")
conf.tcpPort = Port(0)
conf.discv5UdpPort = Port(0)
conf.clusterId = 3'u16
conf.numShardsInNetwork = 1
conf.rest = false
client = (await createNode(conf)).valueOr:
raiseAssert error
@ -267,17 +267,15 @@ suite "LM API health checking":
var edgeWaku: Waku
lockNewGlobalBrokerContext:
let edgeConf = NodeConfig.init(
mode = WakuMode.Edge,
networkingConfig =
NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0),
protocolsConfig = ProtocolsConfig.init(
entryNodes = @[],
clusterId = 1'u16,
messageValidation =
MessageValidation(maxMessageSize: "150 KiB", rlnConfig: none(RlnConfig)),
),
)
var edgeConf = defaultWakuNodeConf().valueOr:
raiseAssert error
edgeConf.mode = Edge
edgeConf.listenAddress = parseIpAddress("0.0.0.0")
edgeConf.tcpPort = Port(0)
edgeConf.discv5UdpPort = Port(0)
edgeConf.clusterId = 3'u16
edgeConf.maxMessageSize = "150 KiB"
edgeConf.rest = false
edgeWaku = (await createNode(edgeConf)).valueOr:
raiseAssert "Failed to create edge node: " & error

View File

@ -6,7 +6,8 @@ import ../testlib/[common, wakucore, wakunode, testasync]
import ../waku_archive/archive_utils
import
waku, waku/[waku_node, waku_core, waku_relay/protocol, common/broker/broker_context]
import waku/api/api_conf, waku/factory/waku_conf
import waku/factory/waku_conf
import tools/confutils/cli_args
type SendEventOutcome {.pure.} = enum
Sent
@ -116,20 +117,18 @@ proc validate(
for requestId in manager.errorRequestIds:
check requestId == expectedRequestId
proc createApiNodeConf(mode: WakuMode = WakuMode.Core): NodeConfig =
# allocate random ports to avoid port-already-in-use errors
let netConf = NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0)
result = NodeConfig.init(
mode = mode,
protocolsConfig = ProtocolsConfig.init(
entryNodes = @[],
clusterId = 1,
autoShardingConfig = AutoShardingConfig(numShardsInCluster: 1),
),
networkingConfig = netConf,
p2pReliability = true,
)
proc createApiNodeConf(mode: cli_args.WakuMode = cli_args.WakuMode.Core): WakuNodeConf =
var conf = defaultWakuNodeConf().valueOr:
raiseAssert error
conf.mode = mode
conf.listenAddress = parseIpAddress("0.0.0.0")
conf.tcpPort = Port(0)
conf.discv5UdpPort = Port(0)
conf.clusterId = 3'u16
conf.numShardsInNetwork = 1
conf.reliabilityEnabled = true
conf.rest = false
result = conf
suite "Waku API - Send":
var
@ -153,7 +152,7 @@ suite "Waku API - Send":
lockNewGlobalBrokerContext:
relayNode1 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
relayNode1.mountMetadata(1, @[0'u16]).isOkOr:
relayNode1.mountMetadata(3, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await relayNode1.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
@ -163,7 +162,7 @@ suite "Waku API - Send":
lockNewGlobalBrokerContext:
relayNode2 =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
relayNode2.mountMetadata(1, @[0'u16]).isOkOr:
relayNode2.mountMetadata(3, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await relayNode2.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
@ -173,7 +172,7 @@ suite "Waku API - Send":
lockNewGlobalBrokerContext:
lightpushNode =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushNode.mountMetadata(1, @[0'u16]).isOkOr:
lightpushNode.mountMetadata(3, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await lightpushNode.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
@ -185,7 +184,7 @@ suite "Waku API - Send":
lockNewGlobalBrokerContext:
storeNode =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
storeNode.mountMetadata(1, @[0'u16]).isOkOr:
storeNode.mountMetadata(3, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await storeNode.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
@ -210,7 +209,7 @@ suite "Waku API - Send":
storeNodePeerId = storeNode.peerInfo.peerId
# Subscribe all relay nodes to the default shard topic
const testPubsubTopic = PubsubTopic("/waku/2/rs/1/0")
const testPubsubTopic = PubsubTopic("/waku/2/rs/3/0")
proc dummyHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
@ -387,7 +386,7 @@ suite "Waku API - Send":
lockNewGlobalBrokerContext:
fakeLightpushNode =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
fakeLightpushNode.mountMetadata(1, @[0'u16]).isOkOr:
fakeLightpushNode.mountMetadata(3, @[0'u16]).isOkOr:
raiseAssert "Failed to mount metadata: " & error
(await fakeLightpushNode.mountRelay()).isOkOr:
raiseAssert "Failed to mount relay"
@ -402,13 +401,13 @@ suite "Waku API - Send":
discard
fakeLightpushNode.subscribe(
(kind: PubsubSub, topic: PubsubTopic("/waku/2/rs/1/0")), dummyHandler
(kind: PubsubSub, topic: PubsubTopic("/waku/2/rs/3/0")), dummyHandler
).isOkOr:
raiseAssert "Failed to subscribe fakeLightpushNode: " & error
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(createApiNodeConf(WakuMode.Edge))).valueOr:
node = (await createNode(createApiNodeConf(cli_args.WakuMode.Edge))).valueOr:
raiseAssert error
(await startWaku(addr node)).isOkOr:
raiseAssert "Failed to start Waku node: " & error

View File

@ -14,7 +14,8 @@ import
events/message_events,
waku_relay/protocol,
]
import waku/api/api_conf, waku/factory/waku_conf
import waku/factory/waku_conf
import tools/confutils/cli_args
# TODO: Edge testing (after MAPI edge support is completed)
@ -64,21 +65,21 @@ type TestNetwork = ref object
publisherPeerInfo: RemotePeerInfo
proc createApiNodeConf(
mode: WakuMode = WakuMode.Core, numShards: uint16 = 1
): NodeConfig =
let netConf = NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0)
result = NodeConfig.init(
mode = mode,
protocolsConfig = ProtocolsConfig.init(
entryNodes = @[],
clusterId = 1,
autoShardingConfig = AutoShardingConfig(numShardsInCluster: numShards),
),
networkingConfig = netConf,
p2pReliability = true,
)
mode: cli_args.WakuMode = cli_args.WakuMode.Core, numShards: uint16 = 1
): WakuNodeConf =
var conf = defaultWakuNodeConf().valueOr:
raiseAssert error
conf.mode = mode
conf.listenAddress = parseIpAddress("0.0.0.0")
conf.tcpPort = Port(0)
conf.discv5UdpPort = Port(0)
conf.clusterId = 3'u16
conf.numShardsInNetwork = numShards
conf.reliabilityEnabled = true
conf.rest = false
result = conf
proc setupSubscriberNode(conf: NodeConfig): Future[Waku] {.async.} =
proc setupSubscriberNode(conf: WakuNodeConf): Future[Waku] {.async.} =
var node: Waku
lockNewGlobalBrokerContext:
node = (await createNode(conf)).expect("Failed to create subscriber node")
@ -86,14 +87,14 @@ proc setupSubscriberNode(conf: NodeConfig): Future[Waku] {.async.} =
return node
proc setupNetwork(
numShards: uint16 = 1, mode: WakuMode = WakuMode.Core
numShards: uint16 = 1, mode: cli_args.WakuMode = cli_args.WakuMode.Core
): Future[TestNetwork] {.async.} =
var net = TestNetwork()
lockNewGlobalBrokerContext:
net.publisher =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
net.publisher.mountMetadata(1, @[0'u16]).expect("Failed to mount metadata")
net.publisher.mountMetadata(3, @[0'u16]).expect("Failed to mount metadata")
(await net.publisher.mountRelay()).expect("Failed to mount relay")
await net.publisher.mountLibp2pPing()
await net.publisher.start()
@ -108,7 +109,7 @@ proc setupNetwork(
# that changes, this will be needed to cause the publisher to have shard interest
# for any shards the subscriber may want to use, which is required for waitForMesh to work.
for i in 0 ..< numShards.int:
let shard = PubsubTopic("/waku/2/rs/1/" & $i)
let shard = PubsubTopic("/waku/2/rs/3/" & $i)
net.publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub publisher"
)

View File

@ -2,7 +2,7 @@
import std/options, results, testutils/unittests
import waku/api/entry_nodes
import tools/confutils/entry_nodes
# Since classifyEntryNode is internal, we test it indirectly through processEntryNodes behavior
# The enum is exported so we can test against it

File diff suppressed because it is too large Load Diff

View File

@ -3,49 +3,49 @@
import chronos, testutils/unittests, std/options
import waku
import tools/confutils/cli_args
suite "Waku API - Create node":
asyncTest "Create node with minimal configuration":
## Given
let nodeConfig = NodeConfig.init(
protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1)
)
var nodeConf = defaultWakuNodeConf().valueOr:
raiseAssert error
nodeConf.mode = Core
nodeConf.clusterId = 3'u16
nodeConf.rest = false
# This is the actual minimal config but as the node auto-start, it is not suitable for tests
# NodeConfig.init(ethRpcEndpoints = @["http://someaddress"])
## When
let node = (await createNode(nodeConfig)).valueOr:
let node = (await createNode(nodeConf)).valueOr:
raiseAssert error
## Then
check:
not node.isNil()
node.conf.clusterId == 1
node.conf.clusterId == 3
node.conf.relay == true
asyncTest "Create node with full configuration":
## Given
let nodeConfig = NodeConfig.init(
mode = Core,
protocolsConfig = ProtocolsConfig.init(
entryNodes =
@[
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g"
],
staticStoreNodes =
@[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
],
clusterId = 99,
autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16),
messageValidation =
MessageValidation(maxMessageSize: "1024 KiB", rlnConfig: none(RlnConfig)),
),
)
var nodeConf = defaultWakuNodeConf().valueOr:
raiseAssert error
nodeConf.mode = Core
nodeConf.clusterId = 99'u16
nodeConf.rest = false
nodeConf.numShardsInNetwork = 16
nodeConf.maxMessageSize = "1024 KiB"
nodeConf.entryNodes =
@[
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g"
]
nodeConf.staticnodes =
@[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
]
## When
let node = (await createNode(nodeConfig)).valueOr:
let node = (await createNode(nodeConf)).valueOr:
raiseAssert error
## Then
@ -62,20 +62,19 @@ suite "Waku API - Create node":
asyncTest "Create node with mixed entry nodes (enrtree, multiaddr)":
## Given
let nodeConfig = NodeConfig.init(
mode = Core,
protocolsConfig = ProtocolsConfig.init(
entryNodes =
@[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
],
clusterId = 42,
),
)
var nodeConf = defaultWakuNodeConf().valueOr:
raiseAssert error
nodeConf.mode = Core
nodeConf.clusterId = 42'u16
nodeConf.rest = false
nodeConf.entryNodes =
@[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
]
## When
let node = (await createNode(nodeConfig)).valueOr:
let node = (await createNode(nodeConf)).valueOr:
raiseAssert error
## Then

View File

@ -30,7 +30,8 @@ import
waku_core/message/default_values,
waku_mix,
],
../../tools/rln_keystore_generator/rln_keystore_generator
../../tools/rln_keystore_generator/rln_keystore_generator,
./entry_nodes
import ./envvar as confEnvvarDefs, ./envvar_net as confEnvvarNet
@ -52,6 +53,11 @@ type StartUpCommand* = enum
noCommand # default, runs waku
generateRlnKeystore # generates a new RLN keystore
type WakuMode* {.pure.} = enum
noMode # default - use explicit CLI flags as-is
Core # full service node
Edge # client-only node
type WakuNodeConf* = object
configFile* {.
desc: "Loads configuration from a TOML file (cmd-line parameters take precedence)",
@ -150,9 +156,16 @@ type WakuNodeConf* = object
.}: seq[ProtectedShard]
## General node config
mode* {.
desc:
"Node operation mode. 'Core' enables relay+service protocols. 'Edge' enables client-only protocols. Default: explicit CLI flags used.",
defaultValue: WakuMode.noMode,
name: "mode"
.}: WakuMode
preset* {.
desc:
"Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). Overrides other values.",
"Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). 'logos.dev' is the Logos Dev Network (cluster 2). Overrides other values.",
defaultValue: "",
name: "preset"
.}: string
@ -165,7 +178,7 @@ type WakuNodeConf* = object
.}: uint16
agentString* {.
defaultValue: "nwaku-" & cli_args.git_version,
defaultValue: "logos-delivery-" & cli_args.git_version,
desc: "Node agent string which is used as identifier in network",
name: "agent-string"
.}: string
@ -293,6 +306,14 @@ hence would have reachability issues.""",
name: "rln-relay-dynamic"
.}: bool
entryNodes* {.
desc:
"Entry node address (enrtree:, enr:, or multiaddr). " &
"Automatically classified and distributed to DNS discovery, discv5 bootstrap, " &
"and static nodes. Argument may be repeated.",
name: "entry-node"
.}: seq[string]
staticnodes* {.
desc: "Peer multiaddr to directly connect with. Argument may be repeated.",
name: "staticnode"
@ -453,7 +474,7 @@ hence would have reachability issues.""",
desc:
"""Adds an extra effort in the delivery/reception of messages by leveraging store-v3 requests.
with the drawback of consuming some more bandwidth.""",
defaultValue: false,
defaultValue: true,
name: "reliability"
.}: bool
@ -907,12 +928,19 @@ proc toNetworkConf(
"TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead."
)
lcPreset = "twn"
if clusterId.isSome() and clusterId.get() == 2:
warn(
"Logos.dev - Logos.dev configuration will not be applied when `--cluster-id=2` is passed in future releases. Use `--preset=logos.dev` instead."
)
lcPreset = "logos.dev"
case lcPreset
of "":
ok(none(NetworkConf))
of "twn":
ok(some(NetworkConf.TheWakuNetworkConf()))
of "logos.dev", "logosdev":
ok(some(NetworkConf.LogosDevConf()))
else:
err("Invalid --preset value passed: " & lcPreset)
@ -982,6 +1010,26 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withRelayShardedPeerManagement(n.relayShardedPeerManagement)
b.withStaticNodes(n.staticNodes)
# Process entry nodes - supports enrtree:, enr:, and multiaddress formats
if n.entryNodes.len > 0:
let (enrTreeUrls, bootstrapEnrs, staticNodesFromEntry) = processEntryNodes(
n.entryNodes
).valueOr:
return err("Failed to process entry nodes: " & error)
# Set ENRTree URLs for DNS discovery
if enrTreeUrls.len > 0:
for url in enrTreeUrls:
b.dnsDiscoveryConf.withEnrTreeUrl(url)
# Set ENR records as bootstrap nodes for discv5
if bootstrapEnrs.len > 0:
b.discv5Conf.withBootstrapNodes(bootstrapEnrs)
# Add static nodes (multiaddrs and those extracted from ENR entries)
if staticNodesFromEntry.len > 0:
b.withStaticNodes(staticNodesFromEntry)
if n.numShardsInNetwork != 0:
b.withNumShardsInCluster(n.numShardsInNetwork)
b.withShardingConf(AutoSharding)
@ -1069,9 +1117,31 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.webSocketConf.withKeyPath(n.websocketSecureKeyPath)
b.webSocketConf.withCertPath(n.websocketSecureCertPath)
b.rateLimitConf.withRateLimits(n.rateLimits)
if n.rateLimits.len > 0:
b.rateLimitConf.withRateLimits(n.rateLimits)
b.kademliaDiscoveryConf.withEnabled(n.enableKadDiscovery)
b.kademliaDiscoveryConf.withBootstrapNodes(n.kadBootstrapNodes)
# Mode-driven configuration overrides
case n.mode
of WakuMode.Core:
b.withRelay(true)
b.filterServiceConf.withEnabled(true)
b.withLightPush(true)
b.discv5Conf.withEnabled(true)
b.withPeerExchange(true)
b.withRendezvous(true)
b.rateLimitConf.withRateLimitsIfNotAssigned(
@["filter:100/1s", "lightpush:5/1s", "px:5/1s"]
)
of WakuMode.Edge:
b.withPeerExchange(true)
b.withRelay(false)
b.filterServiceConf.withEnabled(false)
b.withLightPush(false)
b.storeServiceConf.withEnabled(false)
of WakuMode.noMode:
discard # use explicit CLI flags as-is
return b.build()

View File

@ -1,4 +1,5 @@
import ./api/[api, api_conf, entry_nodes]
import ./api/[api, api_conf]
import ./events/message_events
import tools/confutils/entry_nodes
export api, api_conf, entry_nodes, message_events

View File

@ -1,18 +1,20 @@
import chronicles, chronos, results, std/strutils
import chronicles, chronos, results
import waku/factory/waku
import waku/[requests/health_requests, waku_core, waku_node]
import waku/node/delivery_service/send_service
import waku/node/delivery_service/subscription_manager
import libp2p/peerid
import ../../tools/confutils/cli_args
import ./[api_conf, types]
export cli_args
logScope:
topics = "api"
# TODO: Specs says it should return a `WakuNode`. As `send` and other APIs are defined, we can align.
proc createNode*(config: NodeConfig): Future[Result[Waku, string]] {.async.} =
let wakuConf = toWakuConf(config).valueOr:
proc createNode*(conf: WakuNodeConf): Future[Result[Waku, string]] {.async.} =
let wakuConf = conf.toWakuConf().valueOr:
return err("Failed to handle the configuration: " & error)
## We are not defining app callbacks at node creation

View File

@ -9,7 +9,7 @@ import
waku/factory/waku_conf,
waku/factory/conf_builder/conf_builder,
waku/factory/networks_config,
./entry_nodes
tools/confutils/entry_nodes
export json_serialization, json_options
@ -85,7 +85,9 @@ type WakuMode* {.pure.} = enum
Edge
Core
type NodeConfig* {.requiresInit.} = object
type NodeConfig* {.
requiresInit, deprecated: "Use WakuNodeConf from tools/confutils/cli_args instead"
.} = object
mode: WakuMode
protocolsConfig: ProtocolsConfig
networkingConfig: NetworkingConfig
@ -154,7 +156,9 @@ proc logLevel*(c: NodeConfig): LogLevel =
proc logFormat*(c: NodeConfig): LogFormat =
c.logFormat
proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
proc toWakuConf*(
nodeConfig: NodeConfig
): Result[WakuConf, string] {.deprecated: "Use WakuNodeConf.toWakuConf instead".} =
var b = WakuConfBuilder.init()
# Apply log configuration
@ -516,7 +520,10 @@ proc readValue*(
proc decodeNodeConfigFromJson*(
jsonStr: string
): NodeConfig {.raises: [SerializationError].} =
): NodeConfig {.
raises: [SerializationError],
deprecated: "Use WakuNodeConf with fieldPairs-based JSON parsing instead"
.} =
var val = NodeConfig.init() # default-initialized
try:
var stream = unsafeMemoryInput(jsonStr)

View File

@ -22,6 +22,12 @@ proc withEnabled*(b: var FilterServiceConfBuilder, enabled: bool) =
proc withMaxPeersToServe*(b: var FilterServiceConfBuilder, maxPeersToServe: uint32) =
b.maxPeersToServe = some(maxPeersToServe)
proc withMaxPeersToServeIfNotAssigned*(
b: var FilterServiceConfBuilder, maxPeersToServe: uint32
) =
if b.maxPeersToServe.isNone():
b.maxPeersToServe = some(maxPeersToServe)
proc withSubscriptionTimeout*(
b: var FilterServiceConfBuilder, subscriptionTimeout: uint16
) =

View File

@ -14,6 +14,12 @@ proc init*(T: type RateLimitConfBuilder): RateLimitConfBuilder =
proc withRateLimits*(b: var RateLimitConfBuilder, rateLimits: seq[string]) =
b.strValue = some(rateLimits)
proc withRateLimitsIfNotAssigned*(
b: var RateLimitConfBuilder, rateLimits: seq[string]
) =
if b.strValue.isNone() or b.strValue.get().len == 0:
b.strValue = some(rateLimits)
proc build*(b: RateLimitConfBuilder): Result[ProtocolRateLimitSettings, string] =
if b.strValue.isSome() and b.objValue.isSome():
return err("Rate limits conf must only be set once on the builder")

View File

@ -12,7 +12,8 @@ import
../networks_config,
../../common/logging,
../../common/utils/parse_size_units,
../../waku_enr/capabilities
../../waku_enr/capabilities,
tools/confutils/entry_nodes
import
./filter_service_conf_builder,
@ -393,6 +394,42 @@ proc applyNetworkConf(builder: var WakuConfBuilder) =
discarded = builder.discv5Conf.bootstrapNodes
builder.discv5Conf.withBootstrapNodes(networkConf.discv5BootstrapNodes)
if networkConf.enableKadDiscovery:
if not builder.kademliaDiscoveryConf.enabled:
builder.kademliaDiscoveryConf.withEnabled(networkConf.enableKadDiscovery)
if builder.kademliaDiscoveryConf.bootstrapNodes.len == 0 and
networkConf.kadBootstrapNodes.len > 0:
builder.kademliaDiscoveryConf.withBootstrapNodes(networkConf.kadBootstrapNodes)
if networkConf.mix:
if builder.mix.isNone:
builder.mix = some(networkConf.mix)
if builder.p2pReliability.isNone:
builder.withP2pReliability(networkConf.p2pReliability)
# Process entry nodes from network config - classify and distribute
if networkConf.entryNodes.len > 0:
let processed = processEntryNodes(networkConf.entryNodes)
if processed.isOk():
let (enrTreeUrls, bootstrapEnrs, staticNodesFromEntry) = processed.get()
# Set ENRTree URLs for DNS discovery
if enrTreeUrls.len > 0:
for url in enrTreeUrls:
builder.dnsDiscoveryConf.withEnrTreeUrl(url)
# Set ENR records as bootstrap nodes for discv5
if bootstrapEnrs.len > 0:
builder.discv5Conf.withBootstrapNodes(bootstrapEnrs)
# Add static nodes (multiaddrs and those extracted from ENR entries)
if staticNodesFromEntry.len > 0:
builder.withStaticNodes(staticNodesFromEntry)
else:
warn "Failed to process entry nodes from network conf", error = processed.error()
proc build*(
builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng()
): Result[WakuConf, string] =
@ -606,7 +643,7 @@ proc build*(
provided = maxConnections, recommended = DefaultMaxConnections
# TODO: Do the git version thing here
let agentString = builder.agentString.get("nwaku")
let agentString = builder.agentString.get("logos-delivery")
# TODO: use `DefaultColocationLimit`. the user of this value should
# probably be defining a config object

View File

@ -29,6 +29,11 @@ type NetworkConf* = object
shardingConf*: ShardingConf
discv5Discovery*: bool
discv5BootstrapNodes*: seq[string]
enableKadDiscovery*: bool
kadBootstrapNodes*: seq[string]
entryNodes*: seq[string]
mix*: bool
p2pReliability*: bool
# cluster-id=1 (aka The Waku Network)
# Cluster configuration corresponding to The Waku Network. Note that it
@ -45,6 +50,11 @@ proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf =
rlnEpochSizeSec: 600,
rlnRelayUserMessageLimit: 100,
shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8),
enableKadDiscovery: false,
kadBootstrapNodes: @[],
entryNodes: @[],
mix: false,
p2pReliability: false,
discv5Discovery: true,
discv5BootstrapNodes:
@[
@ -54,6 +64,36 @@ proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf =
],
)
# cluster-id=2 (Logos Dev Network)
# Cluster configuration for the Logos Dev Network.
proc LogosDevConf*(T: type NetworkConf): NetworkConf =
const ZeroChainId = 0'u256
return NetworkConf(
maxMessageSize: "150KiB",
clusterId: 2,
rlnRelay: false,
rlnRelayEthContractAddress: "",
rlnRelayDynamic: false,
rlnRelayChainId: ZeroChainId,
rlnEpochSizeSec: 0,
rlnRelayUserMessageLimit: 0,
shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8),
enableKadDiscovery: true,
mix: true,
p2pReliability: true,
discv5Discovery: true,
discv5BootstrapNodes: @[],
entryNodes:
@[
"/dns4/delivery-01.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmTUbnxLGT9JvV6mu9oPyDjqHK4Phs1VDJNUgESgNSkuby",
"/dns4/delivery-02.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmMK7PYygBtKUQ8EHp7EfaD3bCEsJrkFooK8RQ2PVpJprH",
"/dns4/delivery-01.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm4S1JYkuzDKLKQvwgAhZKs9otxXqt8SCGtB4hoJP1S397",
"/dns4/delivery-02.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8Y9kgBNtjxvCnf1X6gnZJW5EGE4UwwCL3CCm55TwqBiH",
"/dns4/delivery-01.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8YokiNun9BkeA1ZRmhLbtNUvcwRr64F69tYj9fkGyuEP",
"/dns4/delivery-02.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAkvwhGHKNry6LACrB8TmEFoCJKEX29XR5dDUzk3UT3UNSE",
],
)
proc validateShards*(
shardingConf: ShardingConf, shards: seq[uint16]
): Result[void, string] =

View File

@ -28,7 +28,6 @@ import
# It will always be called from main thread anyway.
# Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety
var restServerNotInstalledTab {.threadvar.}: TableRef[string, string]
restServerNotInstalledTab = newTable[string, string]()
export WakuRestServerRef
@ -42,6 +41,9 @@ type RestServerConf* = object
proc startRestServerEssentials*(
nodeHealthMonitor: NodeHealthMonitor, conf: RestServerConf, portsShift: uint16
): Result[WakuRestServerRef, string] =
if restServerNotInstalledTab.isNil:
restServerNotInstalledTab = newTable[string, string]()
let requestErrorHandler: RestRequestErrorHandler = proc(
error: RestRequestError, request: HttpRequestRef
): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =