From 1f9c4cb8cc4cac2347d19bac6eecb169775ca366 Mon Sep 17 00:00:00 2001 From: NagyZoltanPeter <113987313+NagyZoltanPeter@users.noreply.github.com> Date: Tue, 3 Mar 2026 19:17:54 +0100 Subject: [PATCH] Chore: adapt cli args for delivery api (#3744) * LogosDeliveryAPI: NodeConfig -> WakuNodeConf + mode selector and logos.dev preset * Adjustment made on test, logos.dev preset * change default agentString from nwaku to logos-delivery * Add p2pReliability switch to presets and make it default to true. * Borrow entryNode idea from NodeConfig to WakuNodeConf to provide an easy shortcut among different bootstrap node lists that all need different formats * Fix rateLimit assignment for builder * Remove Core mode default as we already have a default, user must override * Removed obsolete API createNode with NodeConfig - tests are refactored for WakuNodeConf usage * Fix failing test due to twn-clusterid(1) default having an override for maxMessageSize. Fix readme. --- examples/api_example/api_example.nim | 27 +- liblogosdelivery/README.md | 20 +- .../examples/logosdelivery_example.c | 34 +- liblogosdelivery/liblogosdelivery.h | 4 +- .../logos_delivery_api/node_api.nim | 38 +- tests/api/test_api_health.nim | 44 +- tests/api/test_api_send.nim | 45 +- tests/api/test_api_subscription.nim | 37 +- tests/api/test_entry_nodes.nim | 2 +- tests/api/test_node_conf.nim | 1206 ++++------------- tests/test_waku.nim | 71 +- tools/confutils/cli_args.nim | 80 +- {waku/api => tools/confutils}/entry_nodes.nim | 0 waku/api.nim | 3 +- waku/api/api.nim | 10 +- waku/api/api_conf.nim | 15 +- .../filter_service_conf_builder.nim | 6 + .../conf_builder/rate_limit_conf_builder.nim | 6 + .../conf_builder/waku_conf_builder.nim | 41 +- waku/factory/networks_config.nim | 40 + waku/rest_api/endpoint/builder.nim | 4 +- 21 files changed, 641 insertions(+), 1092 deletions(-) rename {waku/api => tools/confutils}/entry_nodes.nim (100%) diff --git a/examples/api_example/api_example.nim b/examples/api_example/api_example.nim index 
37dd5d34b..4a7cde5db 100644 --- a/examples/api_example/api_example.nim +++ b/examples/api_example/api_example.nim @@ -59,19 +59,24 @@ when isMainModule: echo "Starting Waku node..." - let config = - if (args.ethRpcEndpoint == ""): - # Create a basic configuration for the Waku node - # No RLN as we don't have an ETH RPC Endpoint - NodeConfig.init( - protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 42) - ) - else: - # Connect to TWN, use ETH RPC Endpoint for RLN - NodeConfig.init(mode = WakuMode.Core, ethRpcEndpoints = @[args.ethRpcEndpoint]) + # Use WakuNodeConf (the CLI configuration type) for node setup + var conf = defaultWakuNodeConf().valueOr: + echo "Failed to create default config: ", error + quit(QuitFailure) + + if args.ethRpcEndpoint == "": + # Create a basic configuration for the Waku node + # No RLN as we don't have an ETH RPC Endpoint + conf.mode = Core + conf.preset = "logos.dev" + else: + # Connect to TWN, use ETH RPC Endpoint for RLN + conf.mode = Core + conf.preset = "twn" + conf.ethClientUrls = @[EthRpcUrl(args.ethRpcEndpoint)] # Create the node using the library API's createNode function - let node = (waitFor createNode(config)).valueOr: + let node = (waitFor createNode(conf)).valueOr: echo "Failed to create node: ", error quit(QuitFailure) diff --git a/liblogosdelivery/README.md b/liblogosdelivery/README.md index f9909dd3d..e8352c611 100644 --- a/liblogosdelivery/README.md +++ b/liblogosdelivery/README.md @@ -32,18 +32,17 @@ void *logosdelivery_create_node( ```json { "mode": "Core", - "clusterId": 1, - "entryNodes": [ - "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" - ], - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } + "preset": "logos.dev", + "listenAddress": "0.0.0.0", + "tcpPort": 60000, + "discv5UdpPort": 9000 } ``` +Configuration uses flat field names matching `WakuNodeConf` in `tools/confutils/cli_args.nim`. 
+Use `"preset"` to select a network preset (e.g., `"twn"`, `"logos.dev"`) which auto-configures +entry nodes, cluster ID, sharding, and other network-specific settings. + #### `logosdelivery_start_node` Starts the node. @@ -207,8 +206,9 @@ void callback(int ret, const char *msg, size_t len, void *userData) { int main() { const char *config = "{" + "\"logLevel\": \"INFO\"," "\"mode\": \"Core\"," - "\"clusterId\": 1" + "\"preset\": \"logos.dev\"" "}"; // Create node diff --git a/liblogosdelivery/examples/logosdelivery_example.c b/liblogosdelivery/examples/logosdelivery_example.c index c0929e650..61333f84d 100644 --- a/liblogosdelivery/examples/logosdelivery_example.c +++ b/liblogosdelivery/examples/logosdelivery_example.c @@ -61,7 +61,7 @@ void event_callback(int ret, const char *msg, size_t len, void *userData) { char messageHash[128]; extract_json_field(eventJson, "requestId", requestId, sizeof(requestId)); extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash)); - printf("📤 [EVENT] Message sent - RequestID: %s, Hash: %s\n", requestId, messageHash); + printf("[EVENT] Message sent - RequestID: %s, Hash: %s\n", requestId, messageHash); } else if (strcmp(eventType, "message_error") == 0) { char requestId[128]; @@ -70,7 +70,7 @@ void event_callback(int ret, const char *msg, size_t len, void *userData) { extract_json_field(eventJson, "requestId", requestId, sizeof(requestId)); extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash)); extract_json_field(eventJson, "error", error, sizeof(error)); - printf("❌ [EVENT] Message error - RequestID: %s, Hash: %s, Error: %s\n", + printf("[EVENT] Message error - RequestID: %s, Hash: %s, Error: %s\n", requestId, messageHash, error); } else if (strcmp(eventType, "message_propagated") == 0) { @@ -78,10 +78,15 @@ void event_callback(int ret, const char *msg, size_t len, void *userData) { char messageHash[128]; extract_json_field(eventJson, "requestId", requestId, sizeof(requestId)); 
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash)); - printf("✅ [EVENT] Message propagated - RequestID: %s, Hash: %s\n", requestId, messageHash); + printf("[EVENT] Message propagated - RequestID: %s, Hash: %s\n", requestId, messageHash); + + } else if (strcmp(eventType, "connection_status_change") == 0) { + char connectionStatus[256]; + extract_json_field(eventJson, "connectionStatus", connectionStatus, sizeof(connectionStatus)); + printf("[EVENT] Connection status change - Status: %s\n", connectionStatus); } else { - printf("â„šī¸ [EVENT] Unknown event type: %s\n", eventType); + printf("[EVENT] Unknown event type: %s\n", eventType); } free(eventJson); @@ -109,23 +114,12 @@ void simple_callback(int ret, const char *msg, size_t len, void *userData) { int main() { printf("=== Logos Messaging API (LMAPI) Example ===\n\n"); - // Configuration JSON for creating a node + // Configuration JSON using WakuNodeConf field names (flat structure). + // Field names match Nim identifiers from WakuNodeConf in tools/confutils/cli_args.nim. const char *config = "{" - "\"logLevel\": \"DEBUG\"," - // "\"mode\": \"Edge\"," + "\"logLevel\": \"INFO\"," "\"mode\": \"Core\"," - "\"protocolsConfig\": {" - "\"entryNodes\": [\"/dns4/node-01.do-ams3.misc.logos-chat.status.im/tcp/30303/p2p/16Uiu2HAkxoqUTud5LUPQBRmkeL2xP4iKx2kaABYXomQRgmLUgf78\"]," - "\"clusterId\": 42," - "\"autoShardingConfig\": {" - "\"numShardsInCluster\": 8" - "}" - "}," - "\"networkingConfig\": {" - "\"listenIpv4\": \"0.0.0.0\"," - "\"p2pTcpPort\": 60000," - "\"discv5UdpPort\": 9000" - "}" + "\"preset\": \"logos.dev\"" "}"; printf("1. Creating node...\n"); @@ -152,7 +146,7 @@ int main() { logosdelivery_start_node(ctx, simple_callback, (void *)"start_node"); // Wait for node to start - sleep(2); + sleep(10); printf("\n4. 
Subscribing to content topic...\n"); const char *contentTopic = "/example/1/chat/proto"; diff --git a/liblogosdelivery/liblogosdelivery.h b/liblogosdelivery/liblogosdelivery.h index 0d318b691..5092db9f2 100644 --- a/liblogosdelivery/liblogosdelivery.h +++ b/liblogosdelivery/liblogosdelivery.h @@ -22,7 +22,9 @@ extern "C" // Creates a new instance of the node from the given configuration JSON. // Returns a pointer to the Context needed by the rest of the API functions. - // Configuration should be in JSON format following the NodeConfig structure. + // Configuration should be in JSON format using WakuNodeConf field names. + // Field names match Nim identifiers from WakuNodeConf (camelCase). + // Example: {"mode": "Core", "clusterId": 42, "relay": true} void *logosdelivery_create_node( const char *configJson, FFICallBack callback, diff --git a/liblogosdelivery/logos_delivery_api/node_api.nim b/liblogosdelivery/logos_delivery_api/node_api.nim index 2d6c8d0de..1835f75b5 100644 --- a/liblogosdelivery/logos_delivery_api/node_api.nim +++ b/liblogosdelivery/logos_delivery_api/node_api.nim @@ -1,10 +1,11 @@ -import std/json -import chronos, results, ffi +import std/[json, strutils] +import chronos, chronicles, results, confutils, confutils/std/net, ffi import waku/factory/waku, waku/node/waku_node, - waku/api/[api, api_conf, types], + waku/api/[api, types], waku/events/[message_events, health_events], + tools/confutils/cli_args, ../declare_lib, ../json_event @@ -14,15 +15,32 @@ proc `%`*(id: RequestId): JsonNode = registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]): proc(configJson: cstring): Future[Result[string, string]] {.async.} = - ## Parse the JSON configuration and create a node - let nodeConfig = - try: - decodeNodeConfigFromJson($configJson) - except SerializationError as e: - return err("Failed to parse config JSON: " & e.msg) + ## Parse the JSON configuration using fieldPairs approach (WakuNodeConf) + var conf = defaultWakuNodeConf().valueOr: + return 
err("Failed creating default conf: " & error) + + var jsonNode: JsonNode + try: + jsonNode = parseJson($configJson) + except Exception: + return err( + "Failed to parse config JSON: " & getCurrentExceptionMsg() & + " configJson string: " & $configJson + ) + + for confField, confValue in fieldPairs(conf): + if jsonNode.contains(confField): + let formattedString = ($jsonNode[confField]).strip(chars = {'\"'}) + try: + confValue = parseCmdArg(typeof(confValue), formattedString) + except Exception: + return err( + "Failed to parse field '" & confField & "': " & + getCurrentExceptionMsg() & ". Value: " & formattedString + ) # Create the node - ctx.myLib[] = (await api.createNode(nodeConfig)).valueOr: + ctx.myLib[] = (await api.createNode(conf)).valueOr: let errMsg = $error chronicles.error "CreateNodeRequest failed", err = errMsg return err(errMsg) diff --git a/tests/api/test_api_health.nim b/tests/api/test_api_health.nim index b7aab43f9..f3dd340af 100644 --- a/tests/api/test_api_health.nim +++ b/tests/api/test_api_health.nim @@ -13,9 +13,10 @@ import waku/events/health_events, waku/common/waku_protocol, waku/factory/waku_conf +import tools/confutils/cli_args const TestTimeout = chronos.seconds(10) -const DefaultShard = PubsubTopic("/waku/2/rs/1/0") +const DefaultShard = PubsubTopic("/waku/2/rs/3/0") const TestContentTopic = ContentTopic("/waku/2/default-content/proto") proc dummyHandler( @@ -80,7 +81,7 @@ suite "LM API health checking": newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) (await serviceNode.mountRelay()).isOkOr: raiseAssert error - serviceNode.mountMetadata(1, @[0'u16]).isOkOr: + serviceNode.mountMetadata(3, @[0'u16]).isOkOr: raiseAssert error await serviceNode.mountLibp2pPing() await serviceNode.start() @@ -89,16 +90,15 @@ suite "LM API health checking": serviceNode.wakuRelay.subscribe(DefaultShard, dummyHandler) lockNewGlobalBrokerContext: - let conf = NodeConfig.init( - mode = WakuMode.Core, - networkingConfig = - 
NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0), - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - clusterId = 1'u16, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 1), - ), - ) + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Core + conf.listenAddress = parseIpAddress("0.0.0.0") + conf.tcpPort = Port(0) + conf.discv5UdpPort = Port(0) + conf.clusterId = 3'u16 + conf.numShardsInNetwork = 1 + conf.rest = false client = (await createNode(conf)).valueOr: raiseAssert error @@ -267,17 +267,15 @@ suite "LM API health checking": var edgeWaku: Waku lockNewGlobalBrokerContext: - let edgeConf = NodeConfig.init( - mode = WakuMode.Edge, - networkingConfig = - NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0), - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - clusterId = 1'u16, - messageValidation = - MessageValidation(maxMessageSize: "150 KiB", rlnConfig: none(RlnConfig)), - ), - ) + var edgeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + edgeConf.mode = Edge + edgeConf.listenAddress = parseIpAddress("0.0.0.0") + edgeConf.tcpPort = Port(0) + edgeConf.discv5UdpPort = Port(0) + edgeConf.clusterId = 3'u16 + edgeConf.maxMessageSize = "150 KiB" + edgeConf.rest = false edgeWaku = (await createNode(edgeConf)).valueOr: raiseAssert "Failed to create edge node: " & error diff --git a/tests/api/test_api_send.nim b/tests/api/test_api_send.nim index 7343fc655..30a176119 100644 --- a/tests/api/test_api_send.nim +++ b/tests/api/test_api_send.nim @@ -6,7 +6,8 @@ import ../testlib/[common, wakucore, wakunode, testasync] import ../waku_archive/archive_utils import waku, waku/[waku_node, waku_core, waku_relay/protocol, common/broker/broker_context] -import waku/api/api_conf, waku/factory/waku_conf +import waku/factory/waku_conf +import tools/confutils/cli_args type SendEventOutcome {.pure.} = enum Sent @@ -116,20 +117,18 @@ proc validate( for requestId in 
manager.errorRequestIds: check requestId == expectedRequestId -proc createApiNodeConf(mode: WakuMode = WakuMode.Core): NodeConfig = - # allocate random ports to avoid port-already-in-use errors - let netConf = NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0) - - result = NodeConfig.init( - mode = mode, - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - clusterId = 1, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 1), - ), - networkingConfig = netConf, - p2pReliability = true, - ) +proc createApiNodeConf(mode: cli_args.WakuMode = cli_args.WakuMode.Core): WakuNodeConf = + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = mode + conf.listenAddress = parseIpAddress("0.0.0.0") + conf.tcpPort = Port(0) + conf.discv5UdpPort = Port(0) + conf.clusterId = 3'u16 + conf.numShardsInNetwork = 1 + conf.reliabilityEnabled = true + conf.rest = false + result = conf suite "Waku API - Send": var @@ -153,7 +152,7 @@ suite "Waku API - Send": lockNewGlobalBrokerContext: relayNode1 = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) - relayNode1.mountMetadata(1, @[0'u16]).isOkOr: + relayNode1.mountMetadata(3, @[0'u16]).isOkOr: raiseAssert "Failed to mount metadata: " & error (await relayNode1.mountRelay()).isOkOr: raiseAssert "Failed to mount relay" @@ -163,7 +162,7 @@ suite "Waku API - Send": lockNewGlobalBrokerContext: relayNode2 = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) - relayNode2.mountMetadata(1, @[0'u16]).isOkOr: + relayNode2.mountMetadata(3, @[0'u16]).isOkOr: raiseAssert "Failed to mount metadata: " & error (await relayNode2.mountRelay()).isOkOr: raiseAssert "Failed to mount relay" @@ -173,7 +172,7 @@ suite "Waku API - Send": lockNewGlobalBrokerContext: lightpushNode = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) - lightpushNode.mountMetadata(1, @[0'u16]).isOkOr: + lightpushNode.mountMetadata(3, 
@[0'u16]).isOkOr: raiseAssert "Failed to mount metadata: " & error (await lightpushNode.mountRelay()).isOkOr: raiseAssert "Failed to mount relay" @@ -185,7 +184,7 @@ suite "Waku API - Send": lockNewGlobalBrokerContext: storeNode = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) - storeNode.mountMetadata(1, @[0'u16]).isOkOr: + storeNode.mountMetadata(3, @[0'u16]).isOkOr: raiseAssert "Failed to mount metadata: " & error (await storeNode.mountRelay()).isOkOr: raiseAssert "Failed to mount relay" @@ -210,7 +209,7 @@ suite "Waku API - Send": storeNodePeerId = storeNode.peerInfo.peerId # Subscribe all relay nodes to the default shard topic - const testPubsubTopic = PubsubTopic("/waku/2/rs/1/0") + const testPubsubTopic = PubsubTopic("/waku/2/rs/3/0") proc dummyHandler( topic: PubsubTopic, msg: WakuMessage ): Future[void] {.async, gcsafe.} = @@ -387,7 +386,7 @@ suite "Waku API - Send": lockNewGlobalBrokerContext: fakeLightpushNode = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) - fakeLightpushNode.mountMetadata(1, @[0'u16]).isOkOr: + fakeLightpushNode.mountMetadata(3, @[0'u16]).isOkOr: raiseAssert "Failed to mount metadata: " & error (await fakeLightpushNode.mountRelay()).isOkOr: raiseAssert "Failed to mount relay" @@ -402,13 +401,13 @@ suite "Waku API - Send": discard fakeLightpushNode.subscribe( - (kind: PubsubSub, topic: PubsubTopic("/waku/2/rs/1/0")), dummyHandler + (kind: PubsubSub, topic: PubsubTopic("/waku/2/rs/3/0")), dummyHandler ).isOkOr: raiseAssert "Failed to subscribe fakeLightpushNode: " & error var node: Waku lockNewGlobalBrokerContext: - node = (await createNode(createApiNodeConf(WakuMode.Edge))).valueOr: + node = (await createNode(createApiNodeConf(cli_args.WakuMode.Edge))).valueOr: raiseAssert error (await startWaku(addr node)).isOkOr: raiseAssert "Failed to start Waku node: " & error diff --git a/tests/api/test_api_subscription.nim b/tests/api/test_api_subscription.nim index 8983c2934..6639e3dea 
100644 --- a/tests/api/test_api_subscription.nim +++ b/tests/api/test_api_subscription.nim @@ -14,7 +14,8 @@ import events/message_events, waku_relay/protocol, ] -import waku/api/api_conf, waku/factory/waku_conf +import waku/factory/waku_conf +import tools/confutils/cli_args # TODO: Edge testing (after MAPI edge support is completed) @@ -64,21 +65,21 @@ type TestNetwork = ref object publisherPeerInfo: RemotePeerInfo proc createApiNodeConf( - mode: WakuMode = WakuMode.Core, numShards: uint16 = 1 -): NodeConfig = - let netConf = NetworkingConfig(listenIpv4: "0.0.0.0", p2pTcpPort: 0, discv5UdpPort: 0) - result = NodeConfig.init( - mode = mode, - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - clusterId = 1, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: numShards), - ), - networkingConfig = netConf, - p2pReliability = true, - ) + mode: cli_args.WakuMode = cli_args.WakuMode.Core, numShards: uint16 = 1 +): WakuNodeConf = + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = mode + conf.listenAddress = parseIpAddress("0.0.0.0") + conf.tcpPort = Port(0) + conf.discv5UdpPort = Port(0) + conf.clusterId = 3'u16 + conf.numShardsInNetwork = numShards + conf.reliabilityEnabled = true + conf.rest = false + result = conf -proc setupSubscriberNode(conf: NodeConfig): Future[Waku] {.async.} = +proc setupSubscriberNode(conf: WakuNodeConf): Future[Waku] {.async.} = var node: Waku lockNewGlobalBrokerContext: node = (await createNode(conf)).expect("Failed to create subscriber node") @@ -86,14 +87,14 @@ proc setupSubscriberNode(conf: NodeConfig): Future[Waku] {.async.} = return node proc setupNetwork( - numShards: uint16 = 1, mode: WakuMode = WakuMode.Core + numShards: uint16 = 1, mode: cli_args.WakuMode = cli_args.WakuMode.Core ): Future[TestNetwork] {.async.} = var net = TestNetwork() lockNewGlobalBrokerContext: net.publisher = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0)) - 
net.publisher.mountMetadata(1, @[0'u16]).expect("Failed to mount metadata") + net.publisher.mountMetadata(3, @[0'u16]).expect("Failed to mount metadata") (await net.publisher.mountRelay()).expect("Failed to mount relay") await net.publisher.mountLibp2pPing() await net.publisher.start() @@ -108,7 +109,7 @@ proc setupNetwork( # that changes, this will be needed to cause the publisher to have shard interest # for any shards the subscriber may want to use, which is required for waitForMesh to work. for i in 0 ..< numShards.int: - let shard = PubsubTopic("/waku/2/rs/1/" & $i) + let shard = PubsubTopic("/waku/2/rs/3/" & $i) net.publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect( "Failed to sub publisher" ) diff --git a/tests/api/test_entry_nodes.nim b/tests/api/test_entry_nodes.nim index 136a49b2b..38dc38ba4 100644 --- a/tests/api/test_entry_nodes.nim +++ b/tests/api/test_entry_nodes.nim @@ -2,7 +2,7 @@ import std/options, results, testutils/unittests -import waku/api/entry_nodes +import tools/confutils/entry_nodes # Since classifyEntryNode is internal, we test it indirectly through processEntryNodes behavior # The enum is exported so we can test against it diff --git a/tests/api/test_node_conf.nim b/tests/api/test_node_conf.nim index 84bbfead3..d0b3d433c 100644 --- a/tests/api/test_node_conf.nim +++ b/tests/api/test_node_conf.nim @@ -1,36 +1,64 @@ {.used.} -import std/options, results, stint, testutils/unittests +import std/[options, json, strutils], results, stint, testutils/unittests import json_serialization -import waku/api/api_conf, waku/factory/waku_conf, waku/factory/networks_config +import confutils, confutils/std/net +import tools/confutils/cli_args +import waku/factory/waku_conf, waku/factory/networks_config import waku/common/logging -suite "LibWaku Conf - toWakuConf": - test "Minimal configuration": +# Helper: parse JSON into WakuNodeConf using fieldPairs (same as liblogosdelivery) +proc parseWakuNodeConfFromJson(jsonStr: string): 
Result[WakuNodeConf, string] = + var conf = defaultWakuNodeConf().valueOr: + return err(error) + var jsonNode: JsonNode + try: + jsonNode = parseJson(jsonStr) + except Exception: + return err("JSON parse error: " & getCurrentExceptionMsg()) + for confField, confValue in fieldPairs(conf): + if jsonNode.contains(confField): + let formattedString = ($jsonNode[confField]).strip(chars = {'\"'}) + try: + confValue = parseCmdArg(typeof(confValue), formattedString) + except Exception: + return err( + "Field '" & confField & "' parse error: " & getCurrentExceptionMsg() & + ". Value: " & formattedString + ) + return ok(conf) + +suite "WakuNodeConf - mode-driven toWakuConf": + test "Core mode enables service protocols": ## Given - let nodeConfig = NodeConfig.init(ethRpcEndpoints = @["http://someaddress"]) + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Core + conf.clusterId = 1 ## When - let wakuConfRes = toWakuConf(nodeConfig) + let wakuConfRes = conf.toWakuConf() ## Then - let wakuConf = wakuConfRes.valueOr: - raiseAssert error - wakuConf.validate().isOkOr: - raiseAssert error + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() check: + wakuConf.relay == true + wakuConf.lightPush == true + wakuConf.peerExchangeService == true + wakuConf.rendezvous == true wakuConf.clusterId == 1 - wakuConf.shardingConf.numShardsInCluster == 8 - wakuConf.staticNodes.len == 0 - test "Edge mode configuration": + test "Edge mode disables service protocols": ## Given - let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) - - let nodeConfig = NodeConfig.init(mode = Edge, protocolsConfig = protocolsConfig) + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Edge + conf.clusterId = 1 ## When - let wakuConfRes = toWakuConf(nodeConfig) + let wakuConfRes = conf.toWakuConf() ## Then require wakuConfRes.isOk() @@ -42,16 +70,175 @@ suite "LibWaku Conf - toWakuConf": 
wakuConf.filterServiceConf.isSome() == false wakuConf.storeServiceConf.isSome() == false wakuConf.peerExchangeService == true - wakuConf.clusterId == 1 - test "Core mode configuration": + test "noMode uses explicit CLI flags as-is": ## Given - let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) - - let nodeConfig = NodeConfig.init(mode = Core, protocolsConfig = protocolsConfig) + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = WakuMode.noMode + conf.relay = true + conf.lightpush = false + conf.clusterId = 5 ## When - let wakuConfRes = toWakuConf(nodeConfig) + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == true + wakuConf.lightPush == false + wakuConf.clusterId == 5 + + test "Core mode overrides individual protocol flags": + ## Given - user sets relay=false but mode=Core should override + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.mode = Core + conf.relay = false # will be overridden by Core mode + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == true # mode overrides + +suite "WakuNodeConf - JSON parsing with fieldPairs": + test "Empty JSON produces valid default conf": + ## Given / When + let confRes = parseWakuNodeConfFromJson("{}") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.mode == WakuMode.noMode + conf.clusterId == 0 + conf.logLevel == logging.LogLevel.INFO + + test "JSON with mode and clusterId": + ## Given / When + let confRes = parseWakuNodeConfFromJson("""{"mode": "Core", "clusterId": 42}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.mode == Core + conf.clusterId == 42 + + test "JSON with Edge mode": + ## Given / When + let confRes = 
parseWakuNodeConfFromJson("""{"mode": "Edge"}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.mode == Edge + + test "JSON with logLevel": + ## Given / When + let confRes = parseWakuNodeConfFromJson("""{"logLevel": "DEBUG"}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.logLevel == logging.LogLevel.DEBUG + + test "JSON with sharding config": + ## Given / When + let confRes = + parseWakuNodeConfFromJson("""{"clusterId": 99, "numShardsInNetwork": 16}""") + + ## Then + require confRes.isOk() + let conf = confRes.get() + check: + conf.clusterId == 99 + conf.numShardsInNetwork == 16 + + test "JSON with unknown fields is silently ignored": + ## Given / When + let confRes = + parseWakuNodeConfFromJson("""{"unknownField": true, "clusterId": 5}""") + + ## Then - unknown fields are just ignored (not in fieldPairs) + require confRes.isOk() + let conf = confRes.get() + check: + conf.clusterId == 5 + + test "Invalid JSON syntax returns error": + ## Given / When + let confRes = parseWakuNodeConfFromJson("{ not valid json }") + + ## Then + check confRes.isErr() + +suite "WakuNodeConf - preset integration": + test "TWN preset applies TheWakuNetworkConf": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.preset = "twn" + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.clusterId == 1 + + test "LogosDev preset applies LogosDevConf": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + conf.preset = "logosdev" + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.clusterId == 2 + + test "Invalid preset returns error": + ## Given + var conf = defaultWakuNodeConf().valueOr: + raiseAssert error + 
conf.preset = "nonexistent" + + ## When + let wakuConfRes = conf.toWakuConf() + + ## Then + check wakuConfRes.isErr() + +suite "WakuNodeConf JSON -> WakuConf integration": + test "Core mode JSON config produces valid WakuConf": + ## Given + let confRes = parseWakuNodeConfFromJson( + """{"mode": "Core", "clusterId": 55, "numShardsInNetwork": 6}""" + ) + require confRes.isOk() + let conf = confRes.get() + + ## When + let wakuConfRes = conf.toWakuConf() ## Then require wakuConfRes.isOk() @@ -61,93 +248,72 @@ suite "LibWaku Conf - toWakuConf": wakuConf.relay == true wakuConf.lightPush == true wakuConf.peerExchangeService == true - wakuConf.clusterId == 1 + wakuConf.clusterId == 55 + wakuConf.shardingConf.numShardsInCluster == 6 - test "Auto-sharding configuration": + test "Edge mode JSON config produces valid WakuConf": ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - staticStoreNodes = @[], - clusterId = 42, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16), - ), - ) + let confRes = parseWakuNodeConfFromJson("""{"mode": "Edge", "clusterId": 1}""") + require confRes.isOk() + let conf = confRes.get() ## When - let wakuConfRes = toWakuConf(nodeConfig) + let wakuConfRes = conf.toWakuConf() ## Then require wakuConfRes.isOk() let wakuConf = wakuConfRes.get() require wakuConf.validate().isOk() check: - wakuConf.clusterId == 42 - wakuConf.shardingConf.numShardsInCluster == 16 + wakuConf.relay == false + wakuConf.lightPush == false + wakuConf.peerExchangeService == true - test "Bootstrap nodes configuration": + test "JSON with preset produces valid WakuConf": ## Given - let entryNodes = - @[ - "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g", - 
"enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9MBgmlkgnY0iXNlY3AyNTZrMaEDk49D8JjMSns4p1XVNBvJquOUzT4PENSJknkROspfAFGg3RjcIJ2X4N1ZHCCd2g", - ] - let libConf = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = entryNodes, staticStoreNodes = @[], clusterId = 1 - ), - ) + let confRes = + parseWakuNodeConfFromJson("""{"mode": "Core", "preset": "logosdev"}""") + require confRes.isOk() + let conf = confRes.get() ## When - let wakuConfRes = toWakuConf(libConf) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - require wakuConf.discv5Conf.isSome() - check: - wakuConf.discv5Conf.get().bootstrapNodes == entryNodes - - test "Static store nodes configuration": - ## Given - let staticStoreNodes = - @[ - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", - "/ip4/192.168.1.1/tcp/60001/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd", - ] - let nodeConf = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], staticStoreNodes = staticStoreNodes, clusterId = 1 - ) - ) - - ## When - let wakuConfRes = toWakuConf(nodeConf) + let wakuConfRes = conf.toWakuConf() ## Then require wakuConfRes.isOk() let wakuConf = wakuConfRes.get() require wakuConf.validate().isOk() check: - wakuConf.staticNodes == staticStoreNodes + wakuConf.clusterId == 2 + wakuConf.relay == true - test "Message validation with max message size": + test "JSON with static nodes": ## Given - let nodeConfig = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - staticStoreNodes = @[], - clusterId = 1, - messageValidation = - MessageValidation(maxMessageSize: "100KiB", rlnConfig: none(RlnConfig)), - ) + let confRes = parseWakuNodeConfFromJson( + """{"mode": "Core", "clusterId": 42, "staticnodes": 
["/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"]}""" ) + require confRes.isOk() + let conf = confRes.get() ## When - let wakuConfRes = toWakuConf(nodeConfig) + let wakuConfRes = conf.toWakuConf() + + ## Then + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.staticNodes.len == 1 + + test "JSON with max message size": + ## Given + let confRes = + parseWakuNodeConfFromJson("""{"clusterId": 42, "maxMessageSize": "100KiB"}""") + require confRes.isOk() + let conf = confRes.get() + + ## When + let wakuConfRes = conf.toWakuConf() ## Then require wakuConfRes.isOk() @@ -156,853 +322,49 @@ suite "LibWaku Conf - toWakuConf": check: wakuConf.maxMessageSizeBytes == 100'u64 * 1024'u64 - test "Message validation with RLN config": - ## Given - let nodeConfig = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init( - entryNodes = @[], - clusterId = 1, - messageValidation = MessageValidation( - maxMessageSize: "150 KiB", - rlnConfig: some( - RlnConfig( - contractAddress: "0x1234567890123456789012345678901234567890", - chainId: 1'u, - epochSizeSec: 600'u64, - ) - ), - ), - ), - ethRpcEndpoints = @["http://127.0.0.1:1111"], - ) +# ---- Deprecated NodeConfig tests (kept for backward compatibility) ---- - ## When - let wakuConf = toWakuConf(nodeConfig).valueOr: - raiseAssert error +{.push warning[Deprecated]: off.} - wakuConf.validate().isOkOr: - raiseAssert error +import waku/api/api_conf - check: - wakuConf.maxMessageSizeBytes == 150'u64 * 1024'u64 - - require wakuConf.rlnRelayConf.isSome() - let rlnConf = wakuConf.rlnRelayConf.get() - check: - rlnConf.dynamic == true - rlnConf.ethContractAddress == "0x1234567890123456789012345678901234567890" - rlnConf.chainId == 1'u256 - rlnConf.epochSizeSec == 600'u64 - - test "Full Core mode configuration with all fields": - ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes 
= - @[ - "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" - ], - staticStoreNodes = - @[ - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - ], - clusterId = 99, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 12), - messageValidation = MessageValidation( - maxMessageSize: "512KiB", - rlnConfig: some( - RlnConfig( - contractAddress: "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - chainId: 5'u, # Goerli - epochSizeSec: 300'u64, - ) - ), - ), - ), - ethRpcEndpoints = @["https://127.0.0.1:8333"], - ) - - ## When - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then +suite "NodeConfig (deprecated) - toWakuConf": + test "Minimal configuration": + let nodeConfig = NodeConfig.init(ethRpcEndpoints = @["http://someaddress"]) + let wakuConfRes = api_conf.toWakuConf(nodeConfig) let wakuConf = wakuConfRes.valueOr: raiseAssert error wakuConf.validate().isOkOr: raiseAssert error + check: + wakuConf.clusterId == 1 + wakuConf.shardingConf.numShardsInCluster == 8 + wakuConf.staticNodes.len == 0 - # Check basic settings + test "Edge mode configuration": + let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) + let nodeConfig = + NodeConfig.init(mode = api_conf.WakuMode.Edge, protocolsConfig = protocolsConfig) + let wakuConfRes = api_conf.toWakuConf(nodeConfig) + require wakuConfRes.isOk() + let wakuConf = wakuConfRes.get() + require wakuConf.validate().isOk() + check: + wakuConf.relay == false + wakuConf.lightPush == false + wakuConf.peerExchangeService == true + + test "Core mode configuration": + let protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) + let nodeConfig = + NodeConfig.init(mode = api_conf.WakuMode.Core, protocolsConfig = protocolsConfig) + let wakuConfRes = api_conf.toWakuConf(nodeConfig) + require wakuConfRes.isOk() + let wakuConf = 
wakuConfRes.get() + require wakuConf.validate().isOk() check: wakuConf.relay == true wakuConf.lightPush == true wakuConf.peerExchangeService == true - wakuConf.rendezvous == true - wakuConf.clusterId == 99 - # Check sharding - check: - wakuConf.shardingConf.numShardsInCluster == 12 - - # Check bootstrap nodes - require wakuConf.discv5Conf.isSome() - check: - wakuConf.discv5Conf.get().bootstrapNodes.len == 1 - - # Check static nodes - check: - wakuConf.staticNodes.len == 1 - wakuConf.staticNodes[0] == - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - - # Check message validation - check: - wakuConf.maxMessageSizeBytes == 512'u64 * 1024'u64 - - # Check RLN config - require wakuConf.rlnRelayConf.isSome() - let rlnConf = wakuConf.rlnRelayConf.get() - check: - rlnConf.dynamic == true - rlnConf.ethContractAddress == "0xAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" - rlnConf.chainId == 5'u256 - rlnConf.epochSizeSec == 300'u64 - - test "NodeConfig with mixed entry nodes (integration test)": - ## Given - let entryNodes = - @[ - "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", - ] - - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = entryNodes, staticStoreNodes = @[], clusterId = 1 - ), - ) - - ## When - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - - # Check that ENRTree went to DNS discovery - require wakuConf.dnsDiscoveryConf.isSome() - check: - wakuConf.dnsDiscoveryConf.get().enrTreeUrl == entryNodes[0] - - # Check that multiaddr went to static nodes - check: - wakuConf.staticNodes.len == 1 - wakuConf.staticNodes[0] == entryNodes[1] - -suite "NodeConfig JSON - complete format": - test "Full NodeConfig from complete JSON with field 
validation": - ## Given - let jsonStr = - """ - { - "mode": "Core", - "protocolsConfig": { - "entryNodes": ["enrtree://TREE@nodes.example.com"], - "staticStoreNodes": ["/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"], - "clusterId": 10, - "autoShardingConfig": { - "numShardsInCluster": 4 - }, - "messageValidation": { - "maxMessageSize": "100 KiB", - "rlnConfig": null - } - }, - "networkingConfig": { - "listenIpv4": "192.168.1.1", - "p2pTcpPort": 7000, - "discv5UdpPort": 7001 - }, - "ethRpcEndpoints": ["http://localhost:8545"], - "p2pReliability": true, - "logLevel": "WARN", - "logFormat": "TEXT" - } - """ - - ## When - let config = decodeNodeConfigFromJson(jsonStr) - - ## Then — check every field - check: - config.mode == WakuMode.Core - config.ethRpcEndpoints == @["http://localhost:8545"] - config.p2pReliability == true - config.logLevel == LogLevel.WARN - config.logFormat == LogFormat.TEXT - - check: - config.networkingConfig.listenIpv4 == "192.168.1.1" - config.networkingConfig.p2pTcpPort == 7000 - config.networkingConfig.discv5UdpPort == 7001 - - let pc = config.protocolsConfig - check: - pc.entryNodes == @["enrtree://TREE@nodes.example.com"] - pc.staticStoreNodes == - @[ - "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - ] - pc.clusterId == 10 - pc.autoShardingConfig.numShardsInCluster == 4 - pc.messageValidation.maxMessageSize == "100 KiB" - pc.messageValidation.rlnConfig.isNone() - - test "Full NodeConfig with RlnConfig present": - ## Given - let jsonStr = - """ - { - "mode": "Edge", - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "messageValidation": { - "maxMessageSize": "150 KiB", - "rlnConfig": { - "contractAddress": "0x1234567890ABCDEF1234567890ABCDEF12345678", - "chainId": 5, - "epochSizeSec": 600 - } - } - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - ## When - let config = 
decodeNodeConfigFromJson(jsonStr) - - ## Then - check config.mode == WakuMode.Edge - - let mv = config.protocolsConfig.messageValidation - check: - mv.maxMessageSize == "150 KiB" - mv.rlnConfig.isSome() - let rln = mv.rlnConfig.get() - check: - rln.contractAddress == "0x1234567890ABCDEF1234567890ABCDEF12345678" - rln.chainId == 5'u - rln.epochSizeSec == 600'u64 - - test "Round-trip encode/decode preserves all fields": - ## Given - let original = NodeConfig.init( - mode = Edge, - protocolsConfig = ProtocolsConfig.init( - entryNodes = @["enrtree://TREE@example.com"], - staticStoreNodes = - @[ - "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - ], - clusterId = 42, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16), - messageValidation = MessageValidation( - maxMessageSize: "256 KiB", - rlnConfig: some( - RlnConfig( - contractAddress: "0xAABBCCDDEEFF00112233445566778899AABBCCDD", - chainId: 137, - epochSizeSec: 300, - ) - ), - ), - ), - networkingConfig = - NetworkingConfig(listenIpv4: "10.0.0.1", p2pTcpPort: 9090, discv5UdpPort: 9091), - ethRpcEndpoints = @["https://rpc.example.com"], - p2pReliability = true, - logLevel = LogLevel.DEBUG, - logFormat = LogFormat.JSON, - ) - - ## When - let decoded = decodeNodeConfigFromJson(Json.encode(original)) - - ## Then — check field by field - check: - decoded.mode == original.mode - decoded.ethRpcEndpoints == original.ethRpcEndpoints - decoded.p2pReliability == original.p2pReliability - decoded.logLevel == original.logLevel - decoded.logFormat == original.logFormat - decoded.networkingConfig.listenIpv4 == original.networkingConfig.listenIpv4 - decoded.networkingConfig.p2pTcpPort == original.networkingConfig.p2pTcpPort - decoded.networkingConfig.discv5UdpPort == original.networkingConfig.discv5UdpPort - decoded.protocolsConfig.entryNodes == original.protocolsConfig.entryNodes - decoded.protocolsConfig.staticStoreNodes == - original.protocolsConfig.staticStoreNodes - 
decoded.protocolsConfig.clusterId == original.protocolsConfig.clusterId - decoded.protocolsConfig.autoShardingConfig.numShardsInCluster == - original.protocolsConfig.autoShardingConfig.numShardsInCluster - decoded.protocolsConfig.messageValidation.maxMessageSize == - original.protocolsConfig.messageValidation.maxMessageSize - decoded.protocolsConfig.messageValidation.rlnConfig.isSome() - - let decodedRln = decoded.protocolsConfig.messageValidation.rlnConfig.get() - let originalRln = original.protocolsConfig.messageValidation.rlnConfig.get() - check: - decodedRln.contractAddress == originalRln.contractAddress - decodedRln.chainId == originalRln.chainId - decodedRln.epochSizeSec == originalRln.epochSizeSec - -suite "NodeConfig JSON - partial format with defaults": - test "Minimal NodeConfig - empty object uses all defaults": - ## Given - let config = decodeNodeConfigFromJson("{}") - let defaultConfig = NodeConfig.init() - - ## Then — compare field by field against defaults - check: - config.mode == defaultConfig.mode - config.ethRpcEndpoints == defaultConfig.ethRpcEndpoints - config.p2pReliability == defaultConfig.p2pReliability - config.logLevel == defaultConfig.logLevel - config.logFormat == defaultConfig.logFormat - config.networkingConfig.listenIpv4 == defaultConfig.networkingConfig.listenIpv4 - config.networkingConfig.p2pTcpPort == defaultConfig.networkingConfig.p2pTcpPort - config.networkingConfig.discv5UdpPort == - defaultConfig.networkingConfig.discv5UdpPort - config.protocolsConfig.entryNodes == defaultConfig.protocolsConfig.entryNodes - config.protocolsConfig.staticStoreNodes == - defaultConfig.protocolsConfig.staticStoreNodes - config.protocolsConfig.clusterId == defaultConfig.protocolsConfig.clusterId - config.protocolsConfig.autoShardingConfig.numShardsInCluster == - defaultConfig.protocolsConfig.autoShardingConfig.numShardsInCluster - config.protocolsConfig.messageValidation.maxMessageSize == - 
defaultConfig.protocolsConfig.messageValidation.maxMessageSize - config.protocolsConfig.messageValidation.rlnConfig.isSome() == - defaultConfig.protocolsConfig.messageValidation.rlnConfig.isSome() - - test "Minimal NodeConfig keeps network preset defaults": - ## Given - let config = decodeNodeConfigFromJson("{}") - - ## Then - check: - config.protocolsConfig.entryNodes == TheWakuNetworkPreset.entryNodes - config.protocolsConfig.messageValidation.rlnConfig.isSome() - - test "NodeConfig with only mode specified": - ## Given - let config = decodeNodeConfigFromJson("""{"mode": "Edge"}""") - - ## Then - check: - config.mode == WakuMode.Edge - ## Remaining fields get defaults - config.logLevel == LogLevel.INFO - config.logFormat == LogFormat.TEXT - config.p2pReliability == false - config.ethRpcEndpoints == newSeq[string]() - - test "ProtocolsConfig partial - optional fields get defaults": - ## Given — only entryNodes and clusterId provided - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": ["enrtree://X@y.com"], - "clusterId": 5 - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - ## When - let config = decodeNodeConfigFromJson(jsonStr) - - ## Then — required fields are set, optionals get defaults - check: - config.protocolsConfig.entryNodes == @["enrtree://X@y.com"] - config.protocolsConfig.clusterId == 5 - config.protocolsConfig.staticStoreNodes == newSeq[string]() - config.protocolsConfig.autoShardingConfig.numShardsInCluster == - DefaultAutoShardingConfig.numShardsInCluster - config.protocolsConfig.messageValidation.maxMessageSize == - DefaultMessageValidation.maxMessageSize - config.protocolsConfig.messageValidation.rlnConfig.isNone() - - test "MessageValidation partial - rlnConfig omitted defaults to none": - ## Given - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "messageValidation": { - "maxMessageSize": "200 KiB" - } - }, - 
"networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - ## When - let config = decodeNodeConfigFromJson(jsonStr) - - ## Then - check: - config.protocolsConfig.messageValidation.maxMessageSize == "200 KiB" - config.protocolsConfig.messageValidation.rlnConfig.isNone() - - test "logLevel and logFormat omitted use defaults": - ## Given - let jsonStr = - """ - { - "mode": "Core", - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1 - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - ## When - let config = decodeNodeConfigFromJson(jsonStr) - - ## Then - check: - config.logLevel == LogLevel.INFO - config.logFormat == LogFormat.TEXT - -suite "NodeConfig JSON - unsupported fields raise errors": - test "Unknown field at NodeConfig level raises": - let jsonStr = - """ - { - "mode": "Core", - "unknownTopLevel": true - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Typo in NodeConfig field name raises": - let jsonStr = - """ - { - "modes": "Core" - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Unknown field in ProtocolsConfig raises": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "futureField": "something" - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Unknown field in NetworkingConfig raises": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1 - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000, - 
"futureNetworkField": "value" - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Unknown field in MessageValidation raises": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "messageValidation": { - "maxMessageSize": "150 KiB", - "maxMesssageSize": "typo" - } - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Unknown field in RlnConfig raises": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "messageValidation": { - "maxMessageSize": "150 KiB", - "rlnConfig": { - "contractAddress": "0xABCDEF1234567890ABCDEF1234567890ABCDEF12", - "chainId": 1, - "epochSizeSec": 600, - "unknownRlnField": true - } - } - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Unknown field in AutoShardingConfig raises": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "autoShardingConfig": { - "numShardsInCluster": 8, - "shardPrefix": "extra" - } - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - -suite "NodeConfig JSON - missing required fields": - test "Missing 'entryNodes' in ProtocolsConfig": - let jsonStr = - """ - { - "protocolsConfig": { - "clusterId": 1 - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 
9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Missing 'clusterId' in ProtocolsConfig": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [] - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Missing required fields in NetworkingConfig": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1 - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0" - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Missing 'numShardsInCluster' in AutoShardingConfig": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "autoShardingConfig": {} - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Missing required fields in RlnConfig": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "messageValidation": { - "maxMessageSize": "150 KiB", - "rlnConfig": { - "contractAddress": "0xABCD" - } - } - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Missing 'maxMessageSize' in MessageValidation": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": 1, - "messageValidation": { - "rlnConfig": null - } - }, - 
"networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - -suite "NodeConfig JSON - invalid values": - test "Invalid enum value for mode": - var raised = false - try: - discard decodeNodeConfigFromJson("""{"mode": "InvalidMode"}""") - except SerializationError: - raised = true - check raised - - test "Invalid enum value for logLevel": - var raised = false - try: - discard decodeNodeConfigFromJson("""{"logLevel": "SUPERVERBOSE"}""") - except SerializationError: - raised = true - check raised - - test "Wrong type for clusterId (string instead of number)": - let jsonStr = - """ - { - "protocolsConfig": { - "entryNodes": [], - "clusterId": "not-a-number" - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - } - } - """ - - var raised = false - try: - discard decodeNodeConfigFromJson(jsonStr) - except SerializationError: - raised = true - check raised - - test "Completely invalid JSON syntax": - var raised = false - try: - discard decodeNodeConfigFromJson("""{ not valid json at all }""") - except SerializationError: - raised = true - check raised - -suite "NodeConfig JSON -> WakuConf integration": - test "Decoded config translates to valid WakuConf": - ## Given - let jsonStr = - """ - { - "mode": "Core", - "protocolsConfig": { - "entryNodes": [ - "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im" - ], - "staticStoreNodes": [ - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - ], - "clusterId": 55, - "autoShardingConfig": { - "numShardsInCluster": 6 - }, - "messageValidation": { - "maxMessageSize": "256 KiB", - "rlnConfig": null - } - }, - "networkingConfig": { - "listenIpv4": "0.0.0.0", - "p2pTcpPort": 60000, - "discv5UdpPort": 9000 - }, - 
"ethRpcEndpoints": ["http://localhost:8545"], - "p2pReliability": true, - "logLevel": "INFO", - "logFormat": "TEXT" - } - """ - - ## When - let nodeConfig = decodeNodeConfigFromJson(jsonStr) - let wakuConfRes = toWakuConf(nodeConfig) - - ## Then - require wakuConfRes.isOk() - let wakuConf = wakuConfRes.get() - require wakuConf.validate().isOk() - check: - wakuConf.clusterId == 55 - wakuConf.shardingConf.numShardsInCluster == 6 - wakuConf.maxMessageSizeBytes == 256'u64 * 1024'u64 - wakuConf.staticNodes.len == 1 - wakuConf.p2pReliability == true +{.pop.} diff --git a/tests/test_waku.nim b/tests/test_waku.nim index b8e2b26b1..dabd65af7 100644 --- a/tests/test_waku.nim +++ b/tests/test_waku.nim @@ -3,49 +3,49 @@ import chronos, testutils/unittests, std/options import waku +import tools/confutils/cli_args suite "Waku API - Create node": asyncTest "Create node with minimal configuration": ## Given - let nodeConfig = NodeConfig.init( - protocolsConfig = ProtocolsConfig.init(entryNodes = @[], clusterId = 1) - ) + var nodeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + nodeConf.mode = Core + nodeConf.clusterId = 3'u16 + nodeConf.rest = false # This is the actual minimal config but as the node auto-start, it is not suitable for tests - # NodeConfig.init(ethRpcEndpoints = @["http://someaddress"]) ## When - let node = (await createNode(nodeConfig)).valueOr: + let node = (await createNode(nodeConf)).valueOr: raiseAssert error ## Then check: not node.isNil() - node.conf.clusterId == 1 + node.conf.clusterId == 3 node.conf.relay == true asyncTest "Create node with full configuration": ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = - @[ - "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" - ], - staticStoreNodes = - @[ - 
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" - ], - clusterId = 99, - autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16), - messageValidation = - MessageValidation(maxMessageSize: "1024 KiB", rlnConfig: none(RlnConfig)), - ), - ) + var nodeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + nodeConf.mode = Core + nodeConf.clusterId = 99'u16 + nodeConf.rest = false + nodeConf.numShardsInNetwork = 16 + nodeConf.maxMessageSize = "1024 KiB" + nodeConf.entryNodes = + @[ + "enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g" + ] + nodeConf.staticnodes = + @[ + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc" + ] ## When - let node = (await createNode(nodeConfig)).valueOr: + let node = (await createNode(nodeConf)).valueOr: raiseAssert error ## Then @@ -62,20 +62,19 @@ suite "Waku API - Create node": asyncTest "Create node with mixed entry nodes (enrtree, multiaddr)": ## Given - let nodeConfig = NodeConfig.init( - mode = Core, - protocolsConfig = ProtocolsConfig.init( - entryNodes = - @[ - "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", - "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", - ], - clusterId = 42, - ), - ) + var nodeConf = defaultWakuNodeConf().valueOr: + raiseAssert error + nodeConf.mode = Core + nodeConf.clusterId = 42'u16 + nodeConf.rest = false + nodeConf.entryNodes = + @[ + "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im", + "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc", + ] ## When - let node = (await createNode(nodeConfig)).valueOr: + let node = (await createNode(nodeConf)).valueOr: raiseAssert error ## Then diff --git a/tools/confutils/cli_args.nim 
b/tools/confutils/cli_args.nim index 5e4adacb2..d8bd9b7f5 100644 --- a/tools/confutils/cli_args.nim +++ b/tools/confutils/cli_args.nim @@ -30,7 +30,8 @@ import waku_core/message/default_values, waku_mix, ], - ../../tools/rln_keystore_generator/rln_keystore_generator + ../../tools/rln_keystore_generator/rln_keystore_generator, + ./entry_nodes import ./envvar as confEnvvarDefs, ./envvar_net as confEnvvarNet @@ -52,6 +53,11 @@ type StartUpCommand* = enum noCommand # default, runs waku generateRlnKeystore # generates a new RLN keystore +type WakuMode* {.pure.} = enum + noMode # default - use explicit CLI flags as-is + Core # full service node + Edge # client-only node + type WakuNodeConf* = object configFile* {. desc: "Loads configuration from a TOML file (cmd-line parameters take precedence)", @@ -150,9 +156,16 @@ type WakuNodeConf* = object .}: seq[ProtectedShard] ## General node config + mode* {. + desc: + "Node operation mode. 'Core' enables relay+service protocols. 'Edge' enables client-only protocols. Default: explicit CLI flags used.", + defaultValue: WakuMode.noMode, + name: "mode" + .}: WakuMode + preset* {. desc: - "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). Overrides other values.", + "Network preset to use. 'twn' is The RLN-protected Waku Network (cluster 1). 'logos.dev' is the Logos Dev Network (cluster 2). Overrides other values.", defaultValue: "", name: "preset" .}: string @@ -165,7 +178,7 @@ type WakuNodeConf* = object .}: uint16 agentString* {. - defaultValue: "nwaku-" & cli_args.git_version, + defaultValue: "logos-delivery-" & cli_args.git_version, desc: "Node agent string which is used as identifier in network", name: "agent-string" .}: string @@ -293,6 +306,14 @@ hence would have reachability issues.""", name: "rln-relay-dynamic" .}: bool + entryNodes* {. + desc: + "Entry node address (enrtree:, enr:, or multiaddr). 
" & + "Automatically classified and distributed to DNS discovery, discv5 bootstrap, " & + "and static nodes. Argument may be repeated.", + name: "entry-node" + .}: seq[string] + staticnodes* {. desc: "Peer multiaddr to directly connect with. Argument may be repeated.", name: "staticnode" @@ -453,7 +474,7 @@ hence would have reachability issues.""", desc: """Adds an extra effort in the delivery/reception of messages by leveraging store-v3 requests. with the drawback of consuming some more bandwidth.""", - defaultValue: false, + defaultValue: true, name: "reliability" .}: bool @@ -907,12 +928,19 @@ proc toNetworkConf( "TWN - The Waku Network configuration will not be applied when `--cluster-id=1` is passed in future releases. Use `--preset=twn` instead." ) lcPreset = "twn" + if clusterId.isSome() and clusterId.get() == 2: + warn( + "Logos.dev - Logos.dev configuration will not be applied when `--cluster-id=2` is passed in future releases. Use `--preset=logos.dev` instead." + ) + lcPreset = "logos.dev" case lcPreset of "": ok(none(NetworkConf)) of "twn": ok(some(NetworkConf.TheWakuNetworkConf())) + of "logos.dev", "logosdev": + ok(some(NetworkConf.LogosDevConf())) else: err("Invalid --preset value passed: " & lcPreset) @@ -982,6 +1010,26 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = b.withRelayShardedPeerManagement(n.relayShardedPeerManagement) b.withStaticNodes(n.staticNodes) + # Process entry nodes - supports enrtree:, enr:, and multiaddress formats + if n.entryNodes.len > 0: + let (enrTreeUrls, bootstrapEnrs, staticNodesFromEntry) = processEntryNodes( + n.entryNodes + ).valueOr: + return err("Failed to process entry nodes: " & error) + + # Set ENRTree URLs for DNS discovery + if enrTreeUrls.len > 0: + for url in enrTreeUrls: + b.dnsDiscoveryConf.withEnrTreeUrl(url) + + # Set ENR records as bootstrap nodes for discv5 + if bootstrapEnrs.len > 0: + b.discv5Conf.withBootstrapNodes(bootstrapEnrs) + + # Add static nodes (multiaddrs and those extracted from 
ENR entries) + if staticNodesFromEntry.len > 0: + b.withStaticNodes(staticNodesFromEntry) + if n.numShardsInNetwork != 0: b.withNumShardsInCluster(n.numShardsInNetwork) b.withShardingConf(AutoSharding) @@ -1069,9 +1117,31 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] = b.webSocketConf.withKeyPath(n.websocketSecureKeyPath) b.webSocketConf.withCertPath(n.websocketSecureCertPath) - b.rateLimitConf.withRateLimits(n.rateLimits) + if n.rateLimits.len > 0: + b.rateLimitConf.withRateLimits(n.rateLimits) b.kademliaDiscoveryConf.withEnabled(n.enableKadDiscovery) b.kademliaDiscoveryConf.withBootstrapNodes(n.kadBootstrapNodes) + # Mode-driven configuration overrides + case n.mode + of WakuMode.Core: + b.withRelay(true) + b.filterServiceConf.withEnabled(true) + b.withLightPush(true) + b.discv5Conf.withEnabled(true) + b.withPeerExchange(true) + b.withRendezvous(true) + b.rateLimitConf.withRateLimitsIfNotAssigned( + @["filter:100/1s", "lightpush:5/1s", "px:5/1s"] + ) + of WakuMode.Edge: + b.withPeerExchange(true) + b.withRelay(false) + b.filterServiceConf.withEnabled(false) + b.withLightPush(false) + b.storeServiceConf.withEnabled(false) + of WakuMode.noMode: + discard # use explicit CLI flags as-is + return b.build() diff --git a/waku/api/entry_nodes.nim b/tools/confutils/entry_nodes.nim similarity index 100% rename from waku/api/entry_nodes.nim rename to tools/confutils/entry_nodes.nim diff --git a/waku/api.nim b/waku/api.nim index 110a8f431..a977a062a 100644 --- a/waku/api.nim +++ b/waku/api.nim @@ -1,4 +1,5 @@ -import ./api/[api, api_conf, entry_nodes] +import ./api/[api, api_conf] import ./events/message_events +import tools/confutils/entry_nodes export api, api_conf, entry_nodes, message_events diff --git a/waku/api/api.nim b/waku/api/api.nim index ba6f83b78..1eee982fd 100644 --- a/waku/api/api.nim +++ b/waku/api/api.nim @@ -1,18 +1,20 @@ -import chronicles, chronos, results, std/strutils +import chronicles, chronos, results import waku/factory/waku import 
waku/[requests/health_requests, waku_core, waku_node] import waku/node/delivery_service/send_service import waku/node/delivery_service/subscription_manager import libp2p/peerid +import ../../tools/confutils/cli_args import ./[api_conf, types] +export cli_args + logScope: topics = "api" -# TODO: Specs says it should return a `WakuNode`. As `send` and other APIs are defined, we can align. -proc createNode*(config: NodeConfig): Future[Result[Waku, string]] {.async.} = - let wakuConf = toWakuConf(config).valueOr: +proc createNode*(conf: WakuNodeConf): Future[Result[Waku, string]] {.async.} = + let wakuConf = conf.toWakuConf().valueOr: return err("Failed to handle the configuration: " & error) ## We are not defining app callbacks at node creation diff --git a/waku/api/api_conf.nim b/waku/api/api_conf.nim index 7cac66426..70bb02af3 100644 --- a/waku/api/api_conf.nim +++ b/waku/api/api_conf.nim @@ -9,7 +9,7 @@ import waku/factory/waku_conf, waku/factory/conf_builder/conf_builder, waku/factory/networks_config, - ./entry_nodes + tools/confutils/entry_nodes export json_serialization, json_options @@ -85,7 +85,9 @@ type WakuMode* {.pure.} = enum Edge Core -type NodeConfig* {.requiresInit.} = object +type NodeConfig* {. + requiresInit, deprecated: "Use WakuNodeConf from tools/confutils/cli_args instead" +.} = object mode: WakuMode protocolsConfig: ProtocolsConfig networkingConfig: NetworkingConfig @@ -154,7 +156,9 @@ proc logLevel*(c: NodeConfig): LogLevel = proc logFormat*(c: NodeConfig): LogFormat = c.logFormat -proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] = +proc toWakuConf*( + nodeConfig: NodeConfig +): Result[WakuConf, string] {.deprecated: "Use WakuNodeConf.toWakuConf instead".} = var b = WakuConfBuilder.init() # Apply log configuration @@ -516,7 +520,10 @@ proc readValue*( proc decodeNodeConfigFromJson*( jsonStr: string -): NodeConfig {.raises: [SerializationError].} = +): NodeConfig {. 
+ raises: [SerializationError], + deprecated: "Use WakuNodeConf with fieldPairs-based JSON parsing instead" +.} = var val = NodeConfig.init() # default-initialized try: var stream = unsafeMemoryInput(jsonStr) diff --git a/waku/factory/conf_builder/filter_service_conf_builder.nim b/waku/factory/conf_builder/filter_service_conf_builder.nim index a3f056b01..0a6617430 100644 --- a/waku/factory/conf_builder/filter_service_conf_builder.nim +++ b/waku/factory/conf_builder/filter_service_conf_builder.nim @@ -22,6 +22,12 @@ proc withEnabled*(b: var FilterServiceConfBuilder, enabled: bool) = proc withMaxPeersToServe*(b: var FilterServiceConfBuilder, maxPeersToServe: uint32) = b.maxPeersToServe = some(maxPeersToServe) +proc withMaxPeersToServeIfNotAssigned*( + b: var FilterServiceConfBuilder, maxPeersToServe: uint32 +) = + if b.maxPeersToServe.isNone(): + b.maxPeersToServe = some(maxPeersToServe) + proc withSubscriptionTimeout*( b: var FilterServiceConfBuilder, subscriptionTimeout: uint16 ) = diff --git a/waku/factory/conf_builder/rate_limit_conf_builder.nim b/waku/factory/conf_builder/rate_limit_conf_builder.nim index 0d466a132..b2edbef03 100644 --- a/waku/factory/conf_builder/rate_limit_conf_builder.nim +++ b/waku/factory/conf_builder/rate_limit_conf_builder.nim @@ -14,6 +14,12 @@ proc init*(T: type RateLimitConfBuilder): RateLimitConfBuilder = proc withRateLimits*(b: var RateLimitConfBuilder, rateLimits: seq[string]) = b.strValue = some(rateLimits) +proc withRateLimitsIfNotAssigned*( + b: var RateLimitConfBuilder, rateLimits: seq[string] +) = + if b.strValue.isNone() or b.strValue.get().len == 0: + b.strValue = some(rateLimits) + proc build*(b: RateLimitConfBuilder): Result[ProtocolRateLimitSettings, string] = if b.strValue.isSome() and b.objValue.isSome(): return err("Rate limits conf must only be set once on the builder") diff --git a/waku/factory/conf_builder/waku_conf_builder.nim b/waku/factory/conf_builder/waku_conf_builder.nim index e51f02dbd..2c427918d 100644 --- 
a/waku/factory/conf_builder/waku_conf_builder.nim +++ b/waku/factory/conf_builder/waku_conf_builder.nim @@ -12,7 +12,8 @@ import ../networks_config, ../../common/logging, ../../common/utils/parse_size_units, - ../../waku_enr/capabilities + ../../waku_enr/capabilities, + tools/confutils/entry_nodes import ./filter_service_conf_builder, @@ -393,6 +394,42 @@ proc applyNetworkConf(builder: var WakuConfBuilder) = discarded = builder.discv5Conf.bootstrapNodes builder.discv5Conf.withBootstrapNodes(networkConf.discv5BootstrapNodes) + if networkConf.enableKadDiscovery: + if not builder.kademliaDiscoveryConf.enabled: + builder.kademliaDiscoveryConf.withEnabled(networkConf.enableKadDiscovery) + + if builder.kademliaDiscoveryConf.bootstrapNodes.len == 0 and + networkConf.kadBootstrapNodes.len > 0: + builder.kademliaDiscoveryConf.withBootstrapNodes(networkConf.kadBootstrapNodes) + + if networkConf.mix: + if builder.mix.isNone: + builder.mix = some(networkConf.mix) + + if builder.p2pReliability.isNone: + builder.withP2pReliability(networkConf.p2pReliability) + + # Process entry nodes from network config - classify and distribute + if networkConf.entryNodes.len > 0: + let processed = processEntryNodes(networkConf.entryNodes) + if processed.isOk(): + let (enrTreeUrls, bootstrapEnrs, staticNodesFromEntry) = processed.get() + + # Set ENRTree URLs for DNS discovery + if enrTreeUrls.len > 0: + for url in enrTreeUrls: + builder.dnsDiscoveryConf.withEnrTreeUrl(url) + + # Set ENR records as bootstrap nodes for discv5 + if bootstrapEnrs.len > 0: + builder.discv5Conf.withBootstrapNodes(bootstrapEnrs) + + # Add static nodes (multiaddrs and those extracted from ENR entries) + if staticNodesFromEntry.len > 0: + builder.withStaticNodes(staticNodesFromEntry) + else: + warn "Failed to process entry nodes from network conf", error = processed.error() + proc build*( builder: var WakuConfBuilder, rng: ref HmacDrbgContext = crypto.newRng() ): Result[WakuConf, string] = @@ -606,7 +643,7 @@ proc 
build*( provided = maxConnections, recommended = DefaultMaxConnections # TODO: Do the git version thing here - let agentString = builder.agentString.get("nwaku") + let agentString = builder.agentString.get("logos-delivery") # TODO: use `DefaultColocationLimit`. the user of this value should # probably be defining a config object diff --git a/waku/factory/networks_config.nim b/waku/factory/networks_config.nim index c7193aa9c..94856fb21 100644 --- a/waku/factory/networks_config.nim +++ b/waku/factory/networks_config.nim @@ -29,6 +29,11 @@ type NetworkConf* = object shardingConf*: ShardingConf discv5Discovery*: bool discv5BootstrapNodes*: seq[string] + enableKadDiscovery*: bool + kadBootstrapNodes*: seq[string] + entryNodes*: seq[string] + mix*: bool + p2pReliability*: bool # cluster-id=1 (aka The Waku Network) # Cluster configuration corresponding to The Waku Network. Note that it @@ -45,6 +50,11 @@ proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf = rlnEpochSizeSec: 600, rlnRelayUserMessageLimit: 100, shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8), + enableKadDiscovery: false, + kadBootstrapNodes: @[], + entryNodes: @[], + mix: false, + p2pReliability: false, discv5Discovery: true, discv5BootstrapNodes: @[ @@ -54,6 +64,36 @@ proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf = ], ) +# cluster-id=2 (Logos Dev Network) +# Cluster configuration for the Logos Dev Network. 
+proc LogosDevConf*(T: type NetworkConf): NetworkConf = + const ZeroChainId = 0'u256 + return NetworkConf( + maxMessageSize: "150KiB", + clusterId: 2, + rlnRelay: false, + rlnRelayEthContractAddress: "", + rlnRelayDynamic: false, + rlnRelayChainId: ZeroChainId, + rlnEpochSizeSec: 0, + rlnRelayUserMessageLimit: 0, + shardingConf: ShardingConf(kind: AutoSharding, numShardsInCluster: 8), + enableKadDiscovery: true, + mix: true, + p2pReliability: true, + discv5Discovery: true, + discv5BootstrapNodes: @[], + entryNodes: + @[ + "/dns4/delivery-01.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmTUbnxLGT9JvV6mu9oPyDjqHK4Phs1VDJNUgESgNSkuby", + "/dns4/delivery-02.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmMK7PYygBtKUQ8EHp7EfaD3bCEsJrkFooK8RQ2PVpJprH", + "/dns4/delivery-01.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm4S1JYkuzDKLKQvwgAhZKs9otxXqt8SCGtB4hoJP1S397", + "/dns4/delivery-02.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8Y9kgBNtjxvCnf1X6gnZJW5EGE4UwwCL3CCm55TwqBiH", + "/dns4/delivery-01.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8YokiNun9BkeA1ZRmhLbtNUvcwRr64F69tYj9fkGyuEP", + "/dns4/delivery-02.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAkvwhGHKNry6LACrB8TmEFoCJKEX29XR5dDUzk3UT3UNSE", + ], + ) + proc validateShards*( shardingConf: ShardingConf, shards: seq[uint16] ): Result[void, string] = diff --git a/waku/rest_api/endpoint/builder.nim b/waku/rest_api/endpoint/builder.nim index bbd8de422..41ab7e06b 100644 --- a/waku/rest_api/endpoint/builder.nim +++ b/waku/rest_api/endpoint/builder.nim @@ -28,7 +28,6 @@ import # It will always be called from main thread anyway. 
# Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety var restServerNotInstalledTab {.threadvar.}: TableRef[string, string] -restServerNotInstalledTab = newTable[string, string]() export WakuRestServerRef @@ -42,6 +41,9 @@ type RestServerConf* = object proc startRestServerEssentials*( nodeHealthMonitor: NodeHealthMonitor, conf: RestServerConf, portsShift: uint16 ): Result[WakuRestServerRef, string] = + if restServerNotInstalledTab.isNil: + restServerNotInstalledTab = newTable[string, string]() + let requestErrorHandler: RestRequestErrorHandler = proc( error: RestRequestError, request: HttpRequestRef ): Future[HttpResponseRef] {.async: (raises: [CancelledError]).} =