feat: liblogosdelivery FFI library of new API (#3714)

* Initial for liblogosdelivery library (static & dynamic) based on current state of API. 
* nix build support added.
* logosdelivery_example
* Added support for missing logLevel/logFormat in new API create_node
* Added full JSON to NodeConfig support
* Added ctx and ctx.myLib check to avoid uninitialized calls and crash. Adjusted logosdelivery_example with proper error handling and JSON config format
* target aware install phase
* Fix base64 decode of payload
This commit is contained in:
NagyZoltanPeter 2026-02-17 10:38:35 +01:00 committed by GitHub
parent b38b5aaea1
commit 3603b838b9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
21 changed files with 2557 additions and 13 deletions

View File

@ -434,10 +434,11 @@ docker-liteprotocoltester-push:
################
## C Bindings ##
################
.PHONY: cbindings cwaku_example libwaku
.PHONY: cbindings cwaku_example libwaku liblogosdelivery liblogosdelivery_example
STATIC ?= 0
BUILD_COMMAND ?= libwakuDynamic
LIBWAKU_BUILD_COMMAND ?= libwakuDynamic
LIBLOGOSDELIVERY_BUILD_COMMAND ?= liblogosdeliveryDynamic
ifeq ($(detected_OS),Windows)
LIB_EXT_DYNAMIC = dll
@ -453,11 +454,40 @@ endif
LIB_EXT := $(LIB_EXT_DYNAMIC)
ifeq ($(STATIC), 1)
LIB_EXT = $(LIB_EXT_STATIC)
BUILD_COMMAND = libwakuStatic
LIBWAKU_BUILD_COMMAND = libwakuStatic
LIBLOGOSDELIVERY_BUILD_COMMAND = liblogosdeliveryStatic
endif
libwaku: | build deps librln
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(LIBWAKU_BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
liblogosdelivery: | build deps librln
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(LIBLOGOSDELIVERY_BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
logosdelivery_example: | build liblogosdelivery
@echo -e $(BUILD_MSG) "build/$@"
ifeq ($(detected_OS),Darwin)
gcc -o build/$@ \
liblogosdelivery/examples/logosdelivery_example.c \
-I./liblogosdelivery \
-L./build \
-llogosdelivery \
-Wl,-rpath,./build
else ifeq ($(detected_OS),Linux)
gcc -o build/$@ \
liblogosdelivery/examples/logosdelivery_example.c \
-I./liblogosdelivery \
-L./build \
-llogosdelivery \
-Wl,-rpath,'$$ORIGIN'
else ifeq ($(detected_OS),Windows)
gcc -o build/$@.exe \
liblogosdelivery/examples/logosdelivery_example.c \
-I./liblogosdelivery \
-L./build \
-llogosdelivery \
-lws2_32
endif
#####################
## Mobile Bindings ##

View File

@ -71,6 +71,13 @@
zerokitRln = zerokit.packages.${system}.rln;
};
liblogosdelivery = pkgs.callPackage ./nix/default.nix {
inherit stableSystems;
src = self;
targets = ["liblogosdelivery"];
zerokitRln = zerokit.packages.${system}.rln;
};
default = libwaku;
});

123
liblogosdelivery/BUILD.md Normal file
View File

@ -0,0 +1,123 @@
# Building liblogosdelivery and Examples
## Prerequisites
- Nim 2.x compiler
- Rust toolchain (for RLN dependencies)
- GCC or Clang compiler
- Make
## Building the Library
### Dynamic Library
```bash
make liblogosdelivery
```
This creates `build/liblogosdelivery.dylib` (macOS) or `build/liblogosdelivery.so` (Linux).
### Static Library
```bash
make liblogosdelivery STATIC=1
```
This creates `build/liblogosdelivery.a`.
## Building Examples
### liblogosdelivery Example
Compile the C example that demonstrates all library features:
```bash
# Using Make (recommended)
make logosdelivery_example
```
## Running Examples
```bash
./build/logosdelivery_example
```
The example will:
1. Create a Logos Messaging node
2. Register event callbacks for message events
3. Start the node
4. Subscribe to a content topic
5. Send a message
6. Show message delivery events (sent, propagated, or error)
7. Unsubscribe and cleanup
## Build Artifacts
After building, you'll have:
```
build/
├── liblogosdelivery.dylib # Dynamic library (34MB)
├── liblogosdelivery.dylib.dSYM/ # Debug symbols
└── logosdelivery_example # Compiled example (34KB)
```
## Library Headers
The main header file is:
- `liblogosdelivery/liblogosdelivery.h` - C API declarations
## Troubleshooting
### Library not found at runtime
If you get "library not found" errors when running the example:
**macOS:**
```bash
export DYLD_LIBRARY_PATH=/path/to/build:$DYLD_LIBRARY_PATH
./build/logosdelivery_example
```
**Linux:**
```bash
export LD_LIBRARY_PATH=/path/to/build:$LD_LIBRARY_PATH
./build/logosdelivery_example
```
## Cross-Compilation
For cross-compilation, you need to:
1. Build the Nim library for the target platform
2. Use the appropriate cross-compiler
3. Link against the target platform's liblogosdelivery
Example for Linux from macOS:
```bash
# Build library for Linux (requires Docker or cross-compilation setup)
# Then compile with cross-compiler
```
## Integration with Your Project
### CMake
```cmake
find_library(LOGOSDELIVERY_LIBRARY NAMES logosdelivery PATHS ${PROJECT_SOURCE_DIR}/build)
include_directories(${PROJECT_SOURCE_DIR}/liblogosdelivery)
target_link_libraries(your_target ${LOGOSDELIVERY_LIBRARY})
```
### Makefile
```makefile
CFLAGS += -I/path/to/liblogosdelivery
LDFLAGS += -L/path/to/build -llogosdelivery -Wl,-rpath,/path/to/build
your_program: your_program.c
$(CC) $(CFLAGS) $< -o $@ $(LDFLAGS)
```
## API Documentation
See:
- [liblogosdelivery.h](liblogosdelivery/liblogosdelivery.h) - API function declarations
- [MESSAGE_EVENTS.md](liblogosdelivery/MESSAGE_EVENTS.md) - Message event handling guide

View File

@ -0,0 +1,148 @@
# Message Event Handling in LMAPI
## Overview
The liblogosdelivery library emits three types of message delivery events that clients can listen to by registering an event callback using `logosdelivery_set_event_callback()`.
## Event Types
### 1. message_sent
Emitted when a message is successfully accepted by the send service and queued for delivery.
**JSON Structure:**
```json
{
"eventType": "message_sent",
"requestId": "unique-request-id",
"messageHash": "0x..."
}
```
**Fields:**
- `eventType`: Always "message_sent"
- `requestId`: Request ID returned from the send operation
- `messageHash`: Hash of the message that was sent
### 2. message_propagated
Emitted when a message has been successfully propagated to neighboring nodes on the network.
**JSON Structure:**
```json
{
"eventType": "message_propagated",
"requestId": "unique-request-id",
"messageHash": "0x..."
}
```
**Fields:**
- `eventType`: Always "message_propagated"
- `requestId`: Request ID from the send operation
- `messageHash`: Hash of the message that was propagated
### 3. message_error
Emitted when an error occurs during message sending or propagation.
**JSON Structure:**
```json
{
"eventType": "message_error",
"requestId": "unique-request-id",
"messageHash": "0x...",
"error": "error description"
}
```
**Fields:**
- `eventType`: Always "message_error"
- `requestId`: Request ID from the send operation
- `messageHash`: Hash of the message that failed
- `error`: Description of what went wrong
## Usage
### 1. Define an Event Callback
```c
void event_callback(int ret, const char *msg, size_t len, void *userData) {
if (ret != RET_OK || msg == NULL || len == 0) {
return;
}
// Parse the JSON message
// Extract eventType field
// Handle based on event type
if (eventType == "message_sent") {
// Handle message sent
} else if (eventType == "message_propagated") {
// Handle message propagated
} else if (eventType == "message_error") {
// Handle message error
}
}
```
### 2. Register the Callback
```c
void *ctx = logosdelivery_create_node(config, callback, userData);
logosdelivery_set_event_callback(ctx, event_callback, NULL);
```
### 3. Start the Node
Once the node is started, events will be delivered to your callback:
```c
logosdelivery_start_node(ctx, callback, userData);
```
## Event Flow
For a typical successful message send:
1. **send** → Returns request ID
2. **message_sent** → Message accepted and queued
3. **message_propagated** → Message delivered to peers
For a failed message send:
1. **send** → Returns request ID
2. **message_sent** → Message accepted and queued
3. **message_error** → Delivery failed with error description
## Important Notes
1. **Thread Safety**: The event callback is invoked from the FFI worker thread. Ensure your callback is thread-safe if it accesses shared state.
2. **Non-Blocking**: Keep the callback fast and non-blocking. Do not perform long-running operations in the callback.
3. **JSON Parsing**: The example uses a simple string-based parser. For production, use a proper JSON library like:
- [cJSON](https://github.com/DaveGamble/cJSON)
- [json-c](https://github.com/json-c/json-c)
- [Jansson](https://github.com/akheron/jansson)
4. **Memory Management**: The message buffer is owned by the library. Copy any data you need to retain.
5. **Event Order**: Events are delivered in the order they occur, but timing depends on network conditions.
## Example Implementation
See `examples/logosdelivery_example.c` for a complete working example that:
- Registers an event callback
- Sends a message
- Receives and prints all three event types
- Properly parses the JSON event structure
## Debugging Events
To see all events during development:
```c
void debug_event_callback(int ret, const char *msg, size_t len, void *userData) {
printf("Event received: %.*s\n", (int)len, msg);
}
```
This will print the raw JSON for all events, helping you understand the event structure.

262
liblogosdelivery/README.md Normal file
View File

@ -0,0 +1,262 @@
# Logos Messaging API (LMAPI) Library
A C FFI library providing a simplified interface to Logos Messaging functionality.
## Overview
This library wraps the high-level API functions from `waku/api/api.nim` and exposes them via a C FFI interface, making them accessible from C, C++, and other languages that support C FFI.
## API Functions
### Node Lifecycle
#### `logosdelivery_create_node`
Creates a new instance of the node from the given configuration JSON.
```c
void *logosdelivery_create_node(
const char *configJson,
FFICallBack callback,
void *userData
);
```
**Parameters:**
- `configJson`: JSON string containing node configuration
- `callback`: Callback function to receive the result
- `userData`: User data passed to the callback
**Returns:** Pointer to the context needed by other API functions, or NULL on error.
**Example configuration JSON:**
```json
{
"mode": "Core",
"clusterId": 1,
"entryNodes": [
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
],
"networkingConfig": {
"listenIpv4": "0.0.0.0",
"p2pTcpPort": 60000,
"discv5UdpPort": 9000
}
}
```
#### `logosdelivery_start_node`
Starts the node.
```c
int logosdelivery_start_node(
void *ctx,
FFICallBack callback,
void *userData
);
```
#### `logosdelivery_stop_node`
Stops the node.
```c
int logosdelivery_stop_node(
void *ctx,
FFICallBack callback,
void *userData
);
```
#### `logosdelivery_destroy`
Destroys a node instance and frees resources.
```c
int logosdelivery_destroy(
void *ctx,
FFICallBack callback,
void *userData
);
```
### Messaging
#### `logosdelivery_subscribe`
Subscribe to a content topic to receive messages.
```c
int logosdelivery_subscribe(
void *ctx,
FFICallBack callback,
void *userData,
const char *contentTopic
);
```
**Parameters:**
- `ctx`: Context pointer from `logosdelivery_create_node`
- `callback`: Callback function to receive the result
- `userData`: User data passed to the callback
- `contentTopic`: Content topic string (e.g., "/myapp/1/chat/proto")
#### `logosdelivery_unsubscribe`
Unsubscribe from a content topic.
```c
int logosdelivery_unsubscribe(
void *ctx,
FFICallBack callback,
void *userData,
const char *contentTopic
);
```
#### `logosdelivery_send`
Send a message.
```c
int logosdelivery_send(
void *ctx,
FFICallBack callback,
void *userData,
const char *messageJson
);
```
**Parameters:**
- `messageJson`: JSON string containing the message
**Example message JSON:**
```json
{
"contentTopic": "/myapp/1/chat/proto",
"payload": "SGVsbG8gV29ybGQ=",
"ephemeral": false
}
```
Note: The `payload` field should be base64-encoded.
**Returns:** Request ID in the callback message that can be used to track message delivery.
### Events
#### `logosdelivery_set_event_callback`
Sets a callback that will be invoked whenever an event occurs (e.g., message received).
```c
void logosdelivery_set_event_callback(
void *ctx,
FFICallBack callback,
void *userData
);
```
**Important:** The callback should be fast, non-blocking, and thread-safe.
## Building
The library follows the same build system as the main Logos Messaging project.
### Build the library
```bash
make liblogosdeliveryStatic # Build static library
# or
make liblogosdeliveryDynamic # Build dynamic library
```
## Return Codes
All functions that return `int` use the following return codes:
- `RET_OK` (0): Success
- `RET_ERR` (1): Error
- `RET_MISSING_CALLBACK` (2): Missing callback function
## Callback Function
All API functions use the following callback signature:
```c
typedef void (*FFICallBack)(
int callerRet,
const char *msg,
size_t len,
void *userData
);
```
**Parameters:**
- `callerRet`: Return code (RET_OK, RET_ERR, etc.)
- `msg`: Response message (may be empty for success)
- `len`: Length of the message
- `userData`: User data passed in the original call
## Example Usage
```c
#include "liblogosdelivery.h"
#include <stdio.h>
void callback(int ret, const char *msg, size_t len, void *userData) {
if (ret == RET_OK) {
printf("Success: %.*s\n", (int)len, msg);
} else {
printf("Error: %.*s\n", (int)len, msg);
}
}
int main() {
const char *config = "{"
"\"mode\": \"Core\","
"\"clusterId\": 1"
"}";
// Create node
void *ctx = logosdelivery_create_node(config, callback, NULL);
if (ctx == NULL) {
return 1;
}
// Start node
logosdelivery_start_node(ctx, callback, NULL);
// Subscribe to a topic
logosdelivery_subscribe(ctx, callback, NULL, "/myapp/1/chat/proto");
// Send a message
const char *msg = "{"
"\"contentTopic\": \"/myapp/1/chat/proto\","
"\"payload\": \"SGVsbG8gV29ybGQ=\","
"\"ephemeral\": false"
"}";
logosdelivery_send(ctx, callback, NULL, msg);
// Clean up
logosdelivery_stop_node(ctx, callback, NULL);
logosdelivery_destroy(ctx, callback, NULL);
return 0;
}
```
## Architecture
The library is structured as follows:
- `liblogosdelivery.h`: C header file with function declarations
- `liblogosdelivery.nim`: Main library entry point
- `declare_lib.nim`: Library declaration and initialization
- `logos_delivery_api/node_api.nim`: Node lifecycle API implementation
- `logos_delivery_api/messaging_api.nim`: Subscribe/send API implementation
The library uses the nim-ffi framework for FFI infrastructure, which handles:
- Thread-safe request processing
- Async operation management
- Memory management between C and Nim
- Callback marshaling
## See Also
- Main API documentation: `waku/api/api.nim`
- Original libwaku library: `library/libwaku.nim`
- nim-ffi framework: `vendor/nim-ffi/`

View File

@ -0,0 +1,24 @@
import ffi
import waku/factory/waku

declareLibrary("logosdelivery")

template requireInitializedNode*(
    ctx: ptr FFIContext[Waku], opName: string, onError: untyped
) =
  ## Guard used at the top of API procs: runs `onError` when `ctx` is nil
  ## or when the node stored inside the context has not been created yet
  ## (i.e. logosdelivery_create_node was not called or failed).
  ## A descriptive `errMsg` is injected into the caller's scope so the
  ## `onError` body can return/report it.
  if isNil(ctx):
    let errMsg {.inject.} = opName & " failed: invalid context"
    onError
  elif isNil(ctx.myLib) or isNil(ctx.myLib[]):
    let errMsg {.inject.} = opName & " failed: node is not initialized"
    onError
proc logosdelivery_set_event_callback(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.dynlib, exportc, cdecl.} =
  ## Registers the callback invoked for every emitted event
  ## (message_sent / message_propagated / message_error, see MESSAGE_EVENTS.md).
  ## The callback must be fast, non-blocking and thread-safe: it is invoked
  ## from the FFI worker thread, not from the caller's thread.
  if isNil(ctx):
    echo "error: invalid context in logosdelivery_set_event_callback"
    return
  # Stored as a raw pointer; the FFI layer casts it back when dispatching events.
  ctx[].eventCallback = cast[pointer](callback)
  ctx[].eventUserData = userData

View File

@ -0,0 +1,193 @@
#include "../liblogosdelivery.h"
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
static int create_node_ok = -1;
// Helper function to extract a JSON string field value
// Very basic parser - for production use a proper JSON library
// Extract the string value of a top-level JSON field into `buffer`.
// Very basic parser: expects the exact byte pattern "field":"value" (no
// whitespace around the colon, no escaped quotes inside the value).
// For production use a proper JSON library.
//
// Returns `buffer` on success, NULL when the field is missing or any
// argument is unusable. The value is truncated to bufSize - 1 characters
// and the result is always NUL-terminated on success.
const char* extract_json_field(const char *json, const char *field, char *buffer, size_t bufSize) {
    if (json == NULL || field == NULL || buffer == NULL || bufSize == 0) {
        // bufSize == 0 would underflow `bufSize - 1` below (SIZE_MAX memcpy);
        // bail out before touching the buffer.
        return NULL;
    }
    char searchStr[256];
    snprintf(searchStr, sizeof(searchStr), "\"%s\":\"", field);
    const char *start = strstr(json, searchStr);
    if (!start) {
        return NULL;
    }
    start += strlen(searchStr);
    const char *end = strchr(start, '"');
    if (!end) {
        return NULL;
    }
    size_t len = end - start;
    if (len >= bufSize) {
        len = bufSize - 1;  // truncate oversized values, keep room for NUL
    }
    memcpy(buffer, start, len);
    buffer[len] = '\0';
    return buffer;
}
// Event callback that handles message events
void event_callback(int ret, const char *msg, size_t len, void *userData) {
if (ret != RET_OK || msg == NULL || len == 0) {
return;
}
// Create null-terminated string for easier parsing
char *eventJson = malloc(len + 1);
if (!eventJson) {
return;
}
memcpy(eventJson, msg, len);
eventJson[len] = '\0';
// Extract eventType
char eventType[64];
if (!extract_json_field(eventJson, "eventType", eventType, sizeof(eventType))) {
free(eventJson);
return;
}
// Handle different event types
if (strcmp(eventType, "message_sent") == 0) {
char requestId[128];
char messageHash[128];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
printf("📤 [EVENT] Message sent - RequestID: %s, Hash: %s\n", requestId, messageHash);
} else if (strcmp(eventType, "message_error") == 0) {
char requestId[128];
char messageHash[128];
char error[256];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
extract_json_field(eventJson, "error", error, sizeof(error));
printf("❌ [EVENT] Message error - RequestID: %s, Hash: %s, Error: %s\n",
requestId, messageHash, error);
} else if (strcmp(eventType, "message_propagated") == 0) {
char requestId[128];
char messageHash[128];
extract_json_field(eventJson, "requestId", requestId, sizeof(requestId));
extract_json_field(eventJson, "messageHash", messageHash, sizeof(messageHash));
printf("✅ [EVENT] Message propagated - RequestID: %s, Hash: %s\n", requestId, messageHash);
} else {
printf(" [EVENT] Unknown event type: %s\n", eventType);
}
free(eventJson);
}
// Simple callback that prints results of API calls.
// userData carries the operation name (a static string literal passed by
// main); for "create_node" it also records success/failure in the global
// create_node_ok so main() can abort early when node creation fails.
void simple_callback(int ret, const char *msg, size_t len, void *userData) {
    const char *operation = (const char *)userData;
    if (operation != NULL && strcmp(operation, "create_node") == 0) {
        create_node_ok = (ret == RET_OK) ? 1 : 0;
    }
    if (operation == NULL) {
        // printf("%s", NULL) is undefined behavior; substitute a placeholder
        // so a caller passing no userData still gets readable output.
        operation = "unknown";
    }
    if (ret == RET_OK) {
        if (len > 0) {
            printf("[%s] Success: %.*s\n", operation, (int)len, msg);
        } else {
            printf("[%s] Success\n", operation);
        }
    } else {
        printf("[%s] Error: %.*s\n", operation, (int)len, msg);
    }
}
// End-to-end walkthrough of the liblogosdelivery C API: create a node, wire
// up the event callback, start the node, subscribe, send one message, then
// tear everything down. The API is asynchronous/callback-based, so fixed
// sleep()s are used here to give each operation time to complete.
int main() {
    printf("=== Logos Messaging API (LMAPI) Example ===\n\n");
    // Configuration JSON for creating a node
    const char *config = "{"
        "\"logLevel\": \"DEBUG\","
        // "\"mode\": \"Edge\","
        "\"mode\": \"Core\","
        "\"protocolsConfig\": {"
        "\"entryNodes\": [\"/dns4/node-01.do-ams3.misc.logos-chat.status.im/tcp/30303/p2p/16Uiu2HAkxoqUTud5LUPQBRmkeL2xP4iKx2kaABYXomQRgmLUgf78\"],"
        "\"clusterId\": 42,"
        "\"autoShardingConfig\": {"
        "\"numShardsInCluster\": 8"
        "}"
        "},"
        "\"networkingConfig\": {"
        "\"listenIpv4\": \"0.0.0.0\","
        "\"p2pTcpPort\": 60000,"
        "\"discv5UdpPort\": 9000"
        "}"
        "}";
    printf("1. Creating node...\n");
    // NULL means the context could not even be allocated; creation errors
    // are otherwise reported asynchronously via simple_callback.
    void *ctx = logosdelivery_create_node(config, simple_callback, (void *)"create_node");
    if (ctx == NULL) {
        printf("Failed to create node\n");
        return 1;
    }
    // Wait a bit for the callback
    sleep(1);
    // create_node_ok is set by simple_callback; -1 means no callback arrived.
    if (create_node_ok != 1) {
        printf("Create node failed, stopping example early.\n");
        logosdelivery_destroy(ctx, simple_callback, (void *)"destroy");
        return 1;
    }
    printf("\n2. Setting up event callback...\n");
    // Must be registered before start so no delivery events are missed.
    logosdelivery_set_event_callback(ctx, event_callback, NULL);
    printf("Event callback registered for message events\n");
    printf("\n3. Starting node...\n");
    logosdelivery_start_node(ctx, simple_callback, (void *)"start_node");
    // Wait for node to start
    sleep(2);
    printf("\n4. Subscribing to content topic...\n");
    const char *contentTopic = "/example/1/chat/proto";
    logosdelivery_subscribe(ctx, simple_callback, (void *)"subscribe", contentTopic);
    // Wait for subscription
    sleep(1);
    printf("\n5. Sending a message...\n");
    printf("Watch for message events (sent, propagated, or error):\n");
    // Create base64-encoded payload: "Hello, Logos Messaging!"
    const char *message = "{"
        "\"contentTopic\": \"/example/1/chat/proto\","
        "\"payload\": \"SGVsbG8sIExvZ29zIE1lc3NhZ2luZyE=\","
        "\"ephemeral\": false"
        "}";
    logosdelivery_send(ctx, simple_callback, (void *)"send", message);
    // Wait for message events to arrive
    printf("Waiting for message delivery events...\n");
    sleep(60);
    printf("\n6. Unsubscribing from content topic...\n");
    logosdelivery_unsubscribe(ctx, simple_callback, (void *)"unsubscribe", contentTopic);
    sleep(1);
    printf("\n7. Stopping node...\n");
    logosdelivery_stop_node(ctx, simple_callback, (void *)"stop_node");
    sleep(1);
    printf("\n8. Destroying context...\n");
    logosdelivery_destroy(ctx, simple_callback, (void *)"destroy");
    printf("\n=== Example completed ===\n");
    return 0;
}

View File

@ -0,0 +1,27 @@
import std/[json, macros]
type JsonEvent*[T] = ref object
  ## Generic wrapper pairing an event-type tag with an arbitrary payload.
  eventType*: string # discriminator, emitted as the "eventType" JSON field
  payload*: T # payload object; its fields are flattened into the JSON

macro toFlatJson*(event: JsonEvent): JsonNode =
  ## Serializes JsonEvent[T] to flat JSON with eventType first,
  ## followed by all fields from T's payload
  # The quoted block is spliced at the call site, so `%`event`.payload`
  # resolves against whatever `%` overloads are visible there.
  result = quote:
    var jsonObj = newJObject()
    jsonObj["eventType"] = %`event`.eventType
    # Serialize payload fields into the same object (flattening)
    let payloadJson = %`event`.payload
    for key, val in payloadJson.pairs:
      jsonObj[key] = val
    jsonObj

proc `$`*[T](event: JsonEvent[T]): string =
  ## Renders the event as its flattened JSON string.
  $toFlatJson(event)

proc newJsonEvent*[T](eventType: string, payload: T): JsonEvent[T] =
  ## Creates a new JsonEvent with the given eventType and payload.
  ## The payload's fields will be flattened into the JSON output.
  JsonEvent[T](eventType: eventType, payload: payload)

View File

@ -0,0 +1,82 @@
// Generated manually and inspired by libwaku.h
// Header file for Logos Messaging API (LMAPI) library
#pragma once
#ifndef __liblogosdelivery__
#define __liblogosdelivery__
#include <stddef.h>
#include <stdint.h>
// The possible returned values for the functions that return int
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2
#ifdef __cplusplus
extern "C"
{
#endif
typedef void (*FFICallBack)(int callerRet, const char *msg, size_t len, void *userData);
// Creates a new instance of the node from the given configuration JSON.
// Returns a pointer to the Context needed by the rest of the API functions.
// Configuration should be in JSON format following the NodeConfig structure.
void *logosdelivery_create_node(
const char *configJson,
FFICallBack callback,
void *userData);
// Starts the node.
int logosdelivery_start_node(void *ctx,
FFICallBack callback,
void *userData);
// Stops the node.
int logosdelivery_stop_node(void *ctx,
FFICallBack callback,
void *userData);
// Destroys an instance of a node created with logosdelivery_create_node
int logosdelivery_destroy(void *ctx,
FFICallBack callback,
void *userData);
// Subscribe to a content topic.
// contentTopic: string representing the content topic (e.g., "/myapp/1/chat/proto")
int logosdelivery_subscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *contentTopic);
// Unsubscribe from a content topic.
int logosdelivery_unsubscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *contentTopic);
// Send a message.
// messageJson: JSON string with the following structure:
// {
// "contentTopic": "/myapp/1/chat/proto",
// "payload": "base64-encoded-payload",
// "ephemeral": false
// }
// Returns a request ID that can be used to track the message delivery.
int logosdelivery_send(void *ctx,
FFICallBack callback,
void *userData,
const char *messageJson);
// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
void logosdelivery_set_event_callback(void *ctx,
FFICallBack callback,
void *userData);
#ifdef __cplusplus
}
#endif
#endif /* __liblogosdelivery__ */

View File

@ -0,0 +1,29 @@
import std/[atomics, options]
import chronicles, chronos, chronos/threadsync, ffi
import waku/factory/waku, waku/node/waku_node, ./declare_lib
################################################################################
## Include different APIs, i.e. all procs with {.ffi.} pragma
include ./logos_delivery_api/node_api, ./logos_delivery_api/messaging_api
################################################################################
### Exported procs
proc logosdelivery_destroy(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
): cint {.dynlib, exportc, cdecl.} =
  ## Destroys a context created by logosdelivery_create_node and frees its
  ## FFI resources. Returns RET_OK/RET_ERR; the outcome is also reported
  ## through `callback`. The context must not be used after this call.
  initializeLibrary()
  # checkParams returns RET_MISSING_CALLBACK / RET_ERR early on bad args.
  checkParams(ctx, callback, userData)
  ffi.destroyFFIContext(ctx).isOkOr:
    let msg = "liblogosdelivery error: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return RET_ERR
  ## always need to invoke the callback although we don't retrieve value to the caller
  callback(RET_OK, nil, 0, userData)
  return RET_OK
# ### End of exported procs
# ################################################################################

View File

@ -0,0 +1,91 @@
import std/[json]
import chronos, results, ffi
import stew/byteutils
import
waku/common/base64,
waku/factory/waku,
waku/waku_core/topics/content_topic,
waku/api/[api, types],
../declare_lib
proc logosdelivery_subscribe(
    ctx: ptr FFIContext[Waku],
    callback: FFICallBack,
    userData: pointer,
    contentTopicStr: cstring,
) {.ffi.} =
  ## Subscribes the node to `contentTopicStr` so matching messages are
  ## delivered via the registered event callback. The {.ffi.} pragma runs
  ## this body on the FFI worker thread; ok/err results reach the C caller
  ## through `callback`.
  requireInitializedNode(ctx, "Subscribe"):
    return err(errMsg)
  # ContentTopic is just a string type alias
  let contentTopic = ContentTopic($contentTopicStr)
  (await api.subscribe(ctx.myLib[], contentTopic)).isOkOr:
    let errMsg = $error
    return err("Subscribe failed: " & errMsg)
  return ok("")
proc logosdelivery_unsubscribe(
    ctx: ptr FFIContext[Waku],
    callback: FFICallBack,
    userData: pointer,
    contentTopicStr: cstring,
) {.ffi.} =
  ## Removes a subscription previously created with logosdelivery_subscribe.
  ## NOTE(review): unlike subscribe, api.unsubscribe is called without
  ## `await` — presumably it is synchronous; confirm against waku/api/api.nim.
  requireInitializedNode(ctx, "Unsubscribe"):
    return err(errMsg)
  # ContentTopic is just a string type alias
  let contentTopic = ContentTopic($contentTopicStr)
  api.unsubscribe(ctx.myLib[], contentTopic).isOkOr:
    let errMsg = $error
    return err("Unsubscribe failed: " & errMsg)
  return ok("")
proc logosdelivery_send(
    ctx: ptr FFIContext[Waku],
    callback: FFICallBack,
    userData: pointer,
    messageJson: cstring,
) {.ffi.} =
  ## Sends a message described by `messageJson`:
  ##   {"contentTopic": "...", "payload": "<base64>", "ephemeral": bool}
  ## On success the request id is returned (as a string) via the callback;
  ## it correlates with later message_sent/_propagated/_error events.
  requireInitializedNode(ctx, "Send"):
    return err(errMsg)
  ## Parse the message JSON and send the message
  var jsonNode: JsonNode
  try:
    jsonNode = parseJson($messageJson)
  except CatchableError as e:
    # Catch only recoverable errors (parseJson raises JsonParsingError,
    # a CatchableError). A bare `except Exception` would also swallow
    # Defects — programmer bugs that must not be masked as bad input.
    return err("Failed to parse message JSON: " & e.msg)
  # Extract content topic
  if not jsonNode.hasKey("contentTopic"):
    return err("Missing contentTopic field")
  # ContentTopic is just a string type alias
  let contentTopic = ContentTopic(jsonNode["contentTopic"].getStr())
  # Extract payload (expect base64 encoded string)
  if not jsonNode.hasKey("payload"):
    return err("Missing payload field")
  let payloadStr = jsonNode["payload"].getStr()
  let payload = base64.decode(Base64String(payloadStr)).valueOr:
    return err("invalid payload format: " & error)
  # Extract ephemeral flag (defaults to false when absent)
  let ephemeral = jsonNode.getOrDefault("ephemeral").getBool(false)
  # Create message envelope
  let envelope = MessageEnvelope.init(
    contentTopic = contentTopic, payload = payload, ephemeral = ephemeral
  )
  # Send the message
  let requestId = (await api.send(ctx.myLib[], envelope)).valueOr:
    let errMsg = $error
    return err("Send failed: " & errMsg)
  return ok($requestId)

View File

@ -0,0 +1,111 @@
import std/json
import chronos, results, ffi
import
waku/factory/waku,
waku/node/waku_node,
waku/api/[api, api_conf, types],
waku/events/message_events,
../declare_lib,
../json_event
# Add JSON serialization for RequestId
proc `%`*(id: RequestId): JsonNode =
  ## Serializes a RequestId as its string form so events embedding it can
  ## be flattened to JSON by toFlatJson.
  %($id)
# Request handler executed on the FFI worker thread when
# logosdelivery_create_node enqueues a CreateNodeRequest. On success the
# created Waku node is stored in the context (ctx.myLib[]) so all later
# API calls can reach it; the ok/err result is relayed to the C callback.
registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]):
  proc(configJson: cstring): Future[Result[string, string]] {.async.} =
    ## Parse the JSON configuration and create a node
    let nodeConfig =
      try:
        decodeNodeConfigFromJson($configJson)
      except SerializationError as e:
        return err("Failed to parse config JSON: " & e.msg)
    # Create the node
    ctx.myLib[] = (await api.createNode(nodeConfig)).valueOr:
      let errMsg = $error
      chronicles.error "CreateNodeRequest failed", err = errMsg
      return err(errMsg)
    return ok("")
proc logosdelivery_create_node(
    configJson: cstring, callback: FFICallBack, userData: pointer
): pointer {.dynlib, exportc, cdecl.} =
  ## Creates the FFI context and schedules node creation (CreateNodeRequest)
  ## on the FFI worker thread. Returns the context pointer required by every
  ## other API call, or nil on failure; the creation result itself arrives
  ## asynchronously through `callback`.
  ## Uses the canonical `FFICallBack` spelling for consistency with the rest
  ## of the API (Nim identifier style-insensitivity makes it the same type
  ## as the previous `FFICallback`, so the interface is unchanged).
  initializeLibrary()
  if isNil(callback):
    echo "error: missing callback in logosdelivery_create_node"
    return nil
  var ctx = ffi.createFFIContext[Waku]().valueOr:
    let msg = "Error in createFFIContext: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return nil
  ctx.userData = userData
  ffi.sendRequestToFFIThread(
    ctx, CreateNodeRequest.ffiNewReq(callback, userData, configJson)
  ).isOkOr:
    let msg = "error in sendRequestToFFIThread: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return nil
  return ctx
proc logosdelivery_start_node(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
  ## Starts the node: registers listeners for the three outgoing message
  ## events (sent / error / propagated), forwarding each to the client's
  ## event callback as flattened JSON, then boots the Waku node itself.
  ## NOTE(review): the listener handles below are not retained past this
  ## scope — presumably registration lives in brokerCtx and is torn down by
  ## dropAllListeners in logosdelivery_stop_node; confirm.
  requireInitializedNode(ctx, "START_NODE"):
    return err(errMsg)
  # setting up outgoing event listeners
  let sentListener = MessageSentEvent.listen(
    ctx.myLib[].brokerCtx,
    proc(event: MessageSentEvent) {.async: (raises: []).} =
      callEventCallback(ctx, "onMessageSent"):
        $newJsonEvent("message_sent", event),
  ).valueOr:
    chronicles.error "MessageSentEvent.listen failed", err = $error
    return err("MessageSentEvent.listen failed: " & $error)
  let errorListener = MessageErrorEvent.listen(
    ctx.myLib[].brokerCtx,
    proc(event: MessageErrorEvent) {.async: (raises: []).} =
      callEventCallback(ctx, "onMessageError"):
        $newJsonEvent("message_error", event),
  ).valueOr:
    chronicles.error "MessageErrorEvent.listen failed", err = $error
    return err("MessageErrorEvent.listen failed: " & $error)
  let propagatedListener = MessagePropagatedEvent.listen(
    ctx.myLib[].brokerCtx,
    proc(event: MessagePropagatedEvent) {.async: (raises: []).} =
      callEventCallback(ctx, "onMessagePropagated"):
        $newJsonEvent("message_propagated", event),
  ).valueOr:
    chronicles.error "MessagePropagatedEvent.listen failed", err = $error
    return err("MessagePropagatedEvent.listen failed: " & $error)
  # Boot the node only after listeners are in place so no event is missed.
  (await startWaku(addr ctx.myLib[])).isOkOr:
    let errMsg = $error
    chronicles.error "START_NODE failed", err = errMsg
    return err("failed to start: " & errMsg)
  return ok("")
proc logosdelivery_stop_node(
    ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
  ## Stops the node: first drops the message-event listeners registered in
  ## logosdelivery_start_node (so no events fire during shutdown), then
  ## stops the underlying Waku node.
  requireInitializedNode(ctx, "STOP_NODE"):
    return err(errMsg)
  MessageErrorEvent.dropAllListeners(ctx.myLib[].brokerCtx)
  MessageSentEvent.dropAllListeners(ctx.myLib[].brokerCtx)
  MessagePropagatedEvent.dropAllListeners(ctx.myLib[].brokerCtx)
  (await ctx.myLib[].stop()).isOkOr:
    let errMsg = $error
    chronicles.error "STOP_NODE failed", err = errMsg
    return err("failed to stop: " & errMsg)
  return ok("")

27
liblogosdelivery/nim.cfg Normal file
View File

@ -0,0 +1,27 @@
# Nim configuration for liblogosdelivery
# Ensure correct compiler configuration
--gc:
refc
--threads:
on
# Include paths
--path:
"../vendor/nim-ffi"
--path:
"../"
# Optimization and debugging
--opt:
speed
--debugger:
native
# Export symbols for dynamic library
--app:
lib
--noMain
# Enable FFI macro features when needed for debugging
# --define:ffiDumpMacros

View File

@ -23,6 +23,10 @@ let
tools = pkgs.callPackage ./tools.nix {};
version = tools.findKeyValue "^version = \"([a-f0-9.-]+)\"$" ../waku.nimble;
revision = lib.substring 0 8 (src.rev or src.dirtyRev or "00000000");
copyLibwaku = lib.elem "libwaku" targets;
copyLiblogosdelivery = lib.elem "liblogosdelivery" targets;
copyWakunode2 = lib.elem "wakunode2" targets;
hasKnownInstallTarget = copyLibwaku || copyLiblogosdelivery || copyWakunode2;
in stdenv.mkDerivation {
pname = "logos-messaging-nim";
@ -91,11 +95,39 @@ in stdenv.mkDerivation {
'' else ''
mkdir -p $out/bin $out/include
# Copy library files
cp build/* $out/bin/ 2>/dev/null || true
# Copy artifacts from build directory (created by Make during buildPhase)
# Note: build/ is in the source tree, not result/ (which is a post-build symlink)
if [ -d build ]; then
${lib.optionalString copyLibwaku ''
cp build/libwaku.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true
''}
# Copy the header file
cp library/libwaku.h $out/include/
${lib.optionalString copyLiblogosdelivery ''
cp build/liblogosdelivery.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true
''}
${lib.optionalString copyWakunode2 ''
cp build/wakunode2 $out/bin/ 2>/dev/null || true
''}
${lib.optionalString (!hasKnownInstallTarget) ''
cp build/lib*.{so,dylib,dll,a,lib} $out/bin/ 2>/dev/null || true
''}
fi
# Copy header files
${lib.optionalString copyLibwaku ''
cp library/libwaku.h $out/include/ 2>/dev/null || true
''}
${lib.optionalString copyLiblogosdelivery ''
cp liblogosdelivery/liblogosdelivery.h $out/include/ 2>/dev/null || true
''}
${lib.optionalString (!hasKnownInstallTarget) ''
cp library/libwaku.h $out/include/ 2>/dev/null || true
cp liblogosdelivery/liblogosdelivery.h $out/include/ 2>/dev/null || true
''}
'';
meta = with pkgs.lib; {

247
nix/submodules.json Normal file
View File

@ -0,0 +1,247 @@
[
{
"path": "vendor/db_connector",
"url": "https://github.com/nim-lang/db_connector.git",
"rev": "74aef399e5c232f95c9fc5c987cebac846f09d62"
}
,
{
"path": "vendor/dnsclient.nim",
"url": "https://github.com/ba0f3/dnsclient.nim.git",
"rev": "23214235d4784d24aceed99bbfe153379ea557c8"
}
,
{
"path": "vendor/nim-bearssl",
"url": "https://github.com/status-im/nim-bearssl.git",
"rev": "11e798b62b8e6beabe958e048e9e24c7e0f9ee63"
}
,
{
"path": "vendor/nim-chronicles",
"url": "https://github.com/status-im/nim-chronicles.git",
"rev": "54f5b726025e8c7385e3a6529d3aa27454c6e6ff"
}
,
{
"path": "vendor/nim-chronos",
"url": "https://github.com/status-im/nim-chronos.git",
"rev": "85af4db764ecd3573c4704139560df3943216cf1"
}
,
{
"path": "vendor/nim-confutils",
"url": "https://github.com/status-im/nim-confutils.git",
"rev": "e214b3992a31acece6a9aada7d0a1ad37c928f3b"
}
,
{
"path": "vendor/nim-dnsdisc",
"url": "https://github.com/status-im/nim-dnsdisc.git",
"rev": "b71d029f4da4ec56974d54c04518bada00e1b623"
}
,
{
"path": "vendor/nim-eth",
"url": "https://github.com/status-im/nim-eth.git",
"rev": "d9135e6c3c5d6d819afdfb566aa8d958756b73a8"
}
,
{
"path": "vendor/nim-faststreams",
"url": "https://github.com/status-im/nim-faststreams.git",
"rev": "c3ac3f639ed1d62f59d3077d376a29c63ac9750c"
}
,
{
"path": "vendor/nim-ffi",
"url": "https://github.com/logos-messaging/nim-ffi",
"rev": "06111de155253b34e47ed2aaed1d61d08d62cc1b"
}
,
{
"path": "vendor/nim-http-utils",
"url": "https://github.com/status-im/nim-http-utils.git",
"rev": "79cbab1460f4c0cdde2084589d017c43a3d7b4f1"
}
,
{
"path": "vendor/nim-json-rpc",
"url": "https://github.com/status-im/nim-json-rpc.git",
"rev": "9665c265035f49f5ff94bbffdeadde68e19d6221"
}
,
{
"path": "vendor/nim-json-serialization",
"url": "https://github.com/status-im/nim-json-serialization.git",
"rev": "b65fd6a7e64c864dabe40e7dfd6c7d07db0014ac"
}
,
{
"path": "vendor/nim-jwt",
"url": "https://github.com/vacp2p/nim-jwt.git",
"rev": "18f8378de52b241f321c1f9ea905456e89b95c6f"
}
,
{
"path": "vendor/nim-libbacktrace",
"url": "https://github.com/status-im/nim-libbacktrace.git",
"rev": "d8bd4ce5c46bb6d2f984f6b3f3d7380897d95ecb"
}
,
{
"path": "vendor/nim-libp2p",
"url": "https://github.com/vacp2p/nim-libp2p.git",
"rev": "eb7e6ff89889e41b57515f891ba82986c54809fb"
}
,
{
"path": "vendor/nim-lsquic",
"url": "https://github.com/vacp2p/nim-lsquic",
"rev": "f3fe33462601ea34eb2e8e9c357c92e61f8d121b"
}
,
{
"path": "vendor/nim-metrics",
"url": "https://github.com/status-im/nim-metrics.git",
"rev": "ecf64c6078d1276d3b7d9b3d931fbdb70004db11"
}
,
{
"path": "vendor/nim-minilru",
"url": "https://github.com/status-im/nim-minilru.git",
"rev": "0c4b2bce959591f0a862e9b541ba43c6d0cf3476"
}
,
{
"path": "vendor/nim-nat-traversal",
"url": "https://github.com/status-im/nim-nat-traversal.git",
"rev": "860e18c37667b5dd005b94c63264560c35d88004"
}
,
{
"path": "vendor/nim-presto",
"url": "https://github.com/status-im/nim-presto.git",
"rev": "92b1c7ff141e6920e1f8a98a14c35c1fa098e3be"
}
,
{
"path": "vendor/nim-regex",
"url": "https://github.com/nitely/nim-regex.git",
"rev": "4593305ed1e49731fc75af1dc572dd2559aad19c"
}
,
{
"path": "vendor/nim-results",
"url": "https://github.com/arnetheduck/nim-results.git",
"rev": "df8113dda4c2d74d460a8fa98252b0b771bf1f27"
}
,
{
"path": "vendor/nim-secp256k1",
"url": "https://github.com/status-im/nim-secp256k1.git",
"rev": "9dd3df62124aae79d564da636bb22627c53c7676"
}
,
{
"path": "vendor/nim-serialization",
"url": "https://github.com/status-im/nim-serialization.git",
"rev": "6f525d5447d97256750ca7856faead03e562ed20"
}
,
{
"path": "vendor/nim-sqlite3-abi",
"url": "https://github.com/arnetheduck/nim-sqlite3-abi.git",
"rev": "bdf01cf4236fb40788f0733466cdf6708783cbac"
}
,
{
"path": "vendor/nim-stew",
"url": "https://github.com/status-im/nim-stew.git",
"rev": "e5740014961438610d336cd81706582dbf2c96f0"
}
,
{
"path": "vendor/nim-stint",
"url": "https://github.com/status-im/nim-stint.git",
"rev": "470b7892561b5179ab20bd389a69217d6213fe58"
}
,
{
"path": "vendor/nim-taskpools",
"url": "https://github.com/status-im/nim-taskpools.git",
"rev": "9e8ccc754631ac55ac2fd495e167e74e86293edb"
}
,
{
"path": "vendor/nim-testutils",
"url": "https://github.com/status-im/nim-testutils.git",
"rev": "94d68e796c045d5b37cabc6be32d7bfa168f8857"
}
,
{
"path": "vendor/nim-toml-serialization",
"url": "https://github.com/status-im/nim-toml-serialization.git",
"rev": "fea85b27f0badcf617033ca1bc05444b5fd8aa7a"
}
,
{
"path": "vendor/nim-unicodedb",
"url": "https://github.com/nitely/nim-unicodedb.git",
"rev": "66f2458710dc641dd4640368f9483c8a0ec70561"
}
,
{
"path": "vendor/nim-unittest2",
"url": "https://github.com/status-im/nim-unittest2.git",
"rev": "8b51e99b4a57fcfb31689230e75595f024543024"
}
,
{
"path": "vendor/nim-web3",
"url": "https://github.com/status-im/nim-web3.git",
"rev": "81ee8ce479d86acb73be7c4f365328e238d9b4a3"
}
,
{
"path": "vendor/nim-websock",
"url": "https://github.com/status-im/nim-websock.git",
"rev": "ebe308a79a7b440a11dfbe74f352be86a3883508"
}
,
{
"path": "vendor/nim-zlib",
"url": "https://github.com/status-im/nim-zlib.git",
"rev": "daa8723fd32299d4ca621c837430c29a5a11e19a"
}
,
{
"path": "vendor/nimbus-build-system",
"url": "https://github.com/status-im/nimbus-build-system.git",
"rev": "e6c2c9da39c2d368d9cf420ac22692e99715d22c"
}
,
{
"path": "vendor/nimcrypto",
"url": "https://github.com/cheatfate/nimcrypto.git",
"rev": "721fb99ee099b632eb86dfad1f0d96ee87583774"
}
,
{
"path": "vendor/nph",
"url": "https://github.com/arnetheduck/nph.git",
"rev": "c6e03162dc2820d3088660f644818d7040e95791"
}
,
{
"path": "vendor/waku-rlnv2-contract",
"url": "https://github.com/logos-messaging/waku-rlnv2-contract.git",
"rev": "8a338f354481e8a3f3d64a72e38fad4c62e32dcd"
}
,
{
"path": "vendor/zerokit",
"url": "https://github.com/vacp2p/zerokit.git",
"rev": "70c79fbc989d4f87d9352b2f4bddcb60ebe55b19"
}
]

View File

@ -0,0 +1,82 @@
#!/usr/bin/env bash
# Generates nix/submodules.json from .gitmodules and git ls-tree.
# This allows Nix to fetch all git submodules without requiring
# locally initialized submodules or the '?submodules=1' URI flag.
#
# Usage: ./scripts/generate_nix_submodules.sh
#
# Run this script after:
#   - Adding/removing submodules
#   - Updating submodule commits (e.g. after 'make update')
#   - Any change to .gitmodules
#
# Compatible with macOS bash 3.x (no associative arrays).

set -euo pipefail

REPO_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
OUTPUT="${REPO_ROOT}/nix/submodules.json"
cd "$REPO_ROOT"

# Literal tab character; both temp files are tab-delimited.
TAB="$(printf '\t')"

TMP_URLS=$(mktemp)
TMP_REVS=$(mktemp)
trap 'rm -f "$TMP_URLS" "$TMP_REVS"' EXIT

# Parse .gitmodules: extract (path, url) pairs.
current_path=""
while IFS= read -r line; do
  case "$line" in
    *"path = "*)
      current_path="${line#*path = }"
      ;;
    *"url = "*)
      if [ -n "$current_path" ]; then
        url="${line#*url = }"
        url="${url%/}"
        printf '%s\t%s\n' "$current_path" "$url" >> "$TMP_URLS"
        current_path=""
      fi
      ;;
  esac
done < .gitmodules

# Get pinned commit hashes from the git tree.
# ls-tree lines look like: "<mode> <type> <hash>\t<path>"; only gitlink
# entries (type "commit") are submodules.
git ls-tree HEAD vendor/ | while IFS= read -r tree_line; do
  type=$(echo "$tree_line" | awk '{print $2}')
  hash=$(echo "$tree_line" | awk '{print $3}')
  path=$(echo "$tree_line" | awk '{print $4}')
  if [ "$type" = "commit" ]; then
    path="${path%/}"
    printf '%s\t%s\n' "$path" "$hash" >> "$TMP_REVS"
  fi
done

# Generate JSON by joining urls and revs on path.
printf '[\n' > "$OUTPUT"
first=true
sort "$TMP_URLS" | while IFS="$TAB" read -r path url; do
  # TMP_REVS is TAB-separated: match the tab (not a space) so the join
  # actually succeeds, and so 'vendor/foo' cannot match 'vendor/foo-bar'.
  rev=$(grep "^${path}${TAB}" "$TMP_REVS" | cut -f2 || true)
  if [ -z "$rev" ]; then
    echo "WARNING: No commit hash found for submodule '$path', skipping" >&2
    continue
  fi
  if [ "$first" = true ]; then
    first=false
  else
    printf '  ,\n' >> "$OUTPUT"
  fi
  printf '  {\n    "path": "%s",\n    "url": "%s",\n    "rev": "%s"\n  }\n' \
    "$path" "$url" "$rev" >> "$OUTPUT"
done
printf ']\n' >> "$OUTPUT"

# grep -c prints 0 and exits non-zero on no match; '|| true' avoids a
# second "0" being appended by an 'echo' fallback.
count=$(grep -c '"path"' "$OUTPUT" || true)
echo "Generated $OUTPUT with $count submodule entries"

View File

@ -1,7 +1,9 @@
{.used.}
import std/options, results, stint, testutils/unittests
import json_serialization
import waku/api/api_conf, waku/factory/waku_conf, waku/factory/networks_config
import waku/common/logging
suite "LibWaku Conf - toWakuConf":
test "Minimal configuration":
@ -298,3 +300,709 @@ suite "LibWaku Conf - toWakuConf":
check:
wakuConf.staticNodes.len == 1
wakuConf.staticNodes[0] == entryNodes[1]
suite "NodeConfig JSON - complete format":
  test "Full NodeConfig from complete JSON with field validation":
    ## Given: a JSON document populating every NodeConfig field.
    let jsonStr =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": ["enrtree://TREE@nodes.example.com"],
          "staticStoreNodes": ["/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"],
          "clusterId": 10,
          "autoShardingConfig": {
            "numShardsInCluster": 4
          },
          "messageValidation": {
            "maxMessageSize": "100 KiB",
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "192.168.1.1",
          "p2pTcpPort": 7000,
          "discv5UdpPort": 7001
        },
        "ethRpcEndpoints": ["http://localhost:8545"],
        "p2pReliability": true,
        "logLevel": "WARN",
        "logFormat": "TEXT"
      }
      """

    ## When: the document is decoded.
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: every top-level field reflects the JSON value.
    check:
      config.mode == WakuMode.Core
      config.ethRpcEndpoints == @["http://localhost:8545"]
      config.p2pReliability == true
      config.logLevel == LogLevel.WARN
      config.logFormat == LogFormat.TEXT

    check:
      config.networkingConfig.listenIpv4 == "192.168.1.1"
      config.networkingConfig.p2pTcpPort == 7000
      config.networkingConfig.discv5UdpPort == 7001

    let protoCfg = config.protocolsConfig
    check:
      protoCfg.entryNodes == @["enrtree://TREE@nodes.example.com"]
      protoCfg.staticStoreNodes ==
        @[
          "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
        ]
      protoCfg.clusterId == 10
      protoCfg.autoShardingConfig.numShardsInCluster == 4
      protoCfg.messageValidation.maxMessageSize == "100 KiB"
      protoCfg.messageValidation.rlnConfig.isNone()

  test "Full NodeConfig with RlnConfig present":
    ## Given: a JSON document with a populated rlnConfig sub-object.
    let jsonStr =
      """
      {
        "mode": "Edge",
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0x1234567890ABCDEF1234567890ABCDEF12345678",
              "chainId": 5,
              "epochSizeSec": 600
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: rlnConfig is present and carries the JSON values.
    check config.mode == WakuMode.Edge

    let validation = config.protocolsConfig.messageValidation
    check:
      validation.maxMessageSize == "150 KiB"
      validation.rlnConfig.isSome()

    let rlnCfg = validation.rlnConfig.get()
    check:
      rlnCfg.contractAddress == "0x1234567890ABCDEF1234567890ABCDEF12345678"
      rlnCfg.chainId == 5'u
      rlnCfg.epochSizeSec == 600'u64

  test "Round-trip encode/decode preserves all fields":
    ## Given: a fully-populated NodeConfig built in code.
    let original = NodeConfig.init(
      mode = Edge,
      protocolsConfig = ProtocolsConfig.init(
        entryNodes = @["enrtree://TREE@example.com"],
        staticStoreNodes =
          @[
            "/ip4/1.2.3.4/tcp/80/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
          ],
        clusterId = 42,
        autoShardingConfig = AutoShardingConfig(numShardsInCluster: 16),
        messageValidation = MessageValidation(
          maxMessageSize: "256 KiB",
          rlnConfig: some(
            RlnConfig(
              contractAddress: "0xAABBCCDDEEFF00112233445566778899AABBCCDD",
              chainId: 137,
              epochSizeSec: 300,
            )
          ),
        ),
      ),
      networkingConfig =
        NetworkingConfig(listenIpv4: "10.0.0.1", p2pTcpPort: 9090, discv5UdpPort: 9091),
      ethRpcEndpoints = @["https://rpc.example.com"],
      p2pReliability = true,
      logLevel = LogLevel.DEBUG,
      logFormat = LogFormat.JSON,
    )

    ## When: encoded to JSON and decoded back again.
    let decoded = decodeNodeConfigFromJson(Json.encode(original))

    ## Then: every field survives the round trip.
    check:
      decoded.mode == original.mode
      decoded.ethRpcEndpoints == original.ethRpcEndpoints
      decoded.p2pReliability == original.p2pReliability
      decoded.logLevel == original.logLevel
      decoded.logFormat == original.logFormat
      decoded.networkingConfig.listenIpv4 == original.networkingConfig.listenIpv4
      decoded.networkingConfig.p2pTcpPort == original.networkingConfig.p2pTcpPort
      decoded.networkingConfig.discv5UdpPort == original.networkingConfig.discv5UdpPort
      decoded.protocolsConfig.entryNodes == original.protocolsConfig.entryNodes
      decoded.protocolsConfig.staticStoreNodes ==
        original.protocolsConfig.staticStoreNodes
      decoded.protocolsConfig.clusterId == original.protocolsConfig.clusterId
      decoded.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        original.protocolsConfig.autoShardingConfig.numShardsInCluster
      decoded.protocolsConfig.messageValidation.maxMessageSize ==
        original.protocolsConfig.messageValidation.maxMessageSize
      decoded.protocolsConfig.messageValidation.rlnConfig.isSome()

    let gotRln = decoded.protocolsConfig.messageValidation.rlnConfig.get()
    let wantRln = original.protocolsConfig.messageValidation.rlnConfig.get()
    check:
      gotRln.contractAddress == wantRln.contractAddress
      gotRln.chainId == wantRln.chainId
      gotRln.epochSizeSec == wantRln.epochSizeSec
suite "NodeConfig JSON - partial format with defaults":
  test "Minimal NodeConfig - empty object uses all defaults":
    ## Given: an empty JSON object and the programmatic default config.
    let config = decodeNodeConfigFromJson("{}")
    let defaults = NodeConfig.init()

    ## Then: each decoded field equals its programmatic default.
    check:
      config.mode == defaults.mode
      config.ethRpcEndpoints == defaults.ethRpcEndpoints
      config.p2pReliability == defaults.p2pReliability
      config.logLevel == defaults.logLevel
      config.logFormat == defaults.logFormat
      config.networkingConfig.listenIpv4 == defaults.networkingConfig.listenIpv4
      config.networkingConfig.p2pTcpPort == defaults.networkingConfig.p2pTcpPort
      config.networkingConfig.discv5UdpPort ==
        defaults.networkingConfig.discv5UdpPort
      config.protocolsConfig.entryNodes == defaults.protocolsConfig.entryNodes
      config.protocolsConfig.staticStoreNodes ==
        defaults.protocolsConfig.staticStoreNodes
      config.protocolsConfig.clusterId == defaults.protocolsConfig.clusterId
      config.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        defaults.protocolsConfig.autoShardingConfig.numShardsInCluster
      config.protocolsConfig.messageValidation.maxMessageSize ==
        defaults.protocolsConfig.messageValidation.maxMessageSize
      config.protocolsConfig.messageValidation.rlnConfig.isSome() ==
        defaults.protocolsConfig.messageValidation.rlnConfig.isSome()

  test "Minimal NodeConfig keeps network preset defaults":
    ## Given: an empty JSON object.
    let config = decodeNodeConfigFromJson("{}")

    ## Then: the network preset (entry nodes + RLN) is kept.
    check:
      config.protocolsConfig.entryNodes == TheWakuNetworkPreset.entryNodes
      config.protocolsConfig.messageValidation.rlnConfig.isSome()

  test "NodeConfig with only mode specified":
    ## Given: only 'mode' is provided.
    let config = decodeNodeConfigFromJson("""{"mode": "Edge"}""")

    ## Then: 'mode' is set, everything else falls back to defaults.
    check:
      config.mode == WakuMode.Edge
      config.logLevel == LogLevel.INFO
      config.logFormat == LogFormat.TEXT
      config.p2pReliability == false
      config.ethRpcEndpoints == newSeq[string]()

  test "ProtocolsConfig partial - optional fields get defaults":
    ## Given: only entryNodes and clusterId provided in protocolsConfig.
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": ["enrtree://X@y.com"],
          "clusterId": 5
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: required fields are set, optionals fall back to defaults.
    check:
      config.protocolsConfig.entryNodes == @["enrtree://X@y.com"]
      config.protocolsConfig.clusterId == 5
      config.protocolsConfig.staticStoreNodes == newSeq[string]()
      config.protocolsConfig.autoShardingConfig.numShardsInCluster ==
        DefaultAutoShardingConfig.numShardsInCluster
      config.protocolsConfig.messageValidation.maxMessageSize ==
        DefaultMessageValidation.maxMessageSize
      config.protocolsConfig.messageValidation.rlnConfig.isNone()

  test "MessageValidation partial - rlnConfig omitted defaults to none":
    ## Given: messageValidation without an rlnConfig key.
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "200 KiB"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: absent rlnConfig decodes as none.
    check:
      config.protocolsConfig.messageValidation.maxMessageSize == "200 KiB"
      config.protocolsConfig.messageValidation.rlnConfig.isNone()

  test "logLevel and logFormat omitted use defaults":
    ## Given: a config without logLevel/logFormat keys.
    let jsonStr =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """

    ## When
    let config = decodeNodeConfigFromJson(jsonStr)

    ## Then: logging falls back to INFO/TEXT.
    check:
      config.logLevel == LogLevel.INFO
      config.logFormat == LogFormat.TEXT
suite "NodeConfig JSON - unsupported fields raise errors":
  ## Strict decoding: any key that is not part of the schema must raise
  ## SerializationError. Uses unittest's `expect` instead of the manual
  ## try/except + flag pattern for clearer failure messages.
  test "Unknown field at NodeConfig level raises":
    let jsonStr =
      """
      {
        "mode": "Core",
        "unknownTopLevel": true
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Typo in NodeConfig field name raises":
    let jsonStr =
      """
      {
        "modes": "Core"
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in ProtocolsConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "futureField": "something"
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in NetworkingConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000,
          "futureNetworkField": "value"
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in MessageValidation raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "maxMesssageSize": "typo"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in RlnConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0xABCDEF1234567890ABCDEF1234567890ABCDEF12",
              "chainId": 1,
              "epochSizeSec": 600,
              "unknownRlnField": true
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Unknown field in AutoShardingConfig raises":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "autoShardingConfig": {
            "numShardsInCluster": 8,
            "shardPrefix": "extra"
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)
suite "NodeConfig JSON - missing required fields":
  ## Strict decoding: required fields without defaults must be present,
  ## otherwise SerializationError is raised. Uses unittest's `expect`
  ## instead of the manual try/except + flag pattern.
  test "Missing 'entryNodes' in ProtocolsConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'clusterId' in ProtocolsConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": []
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing required fields in NetworkingConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0"
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'numShardsInCluster' in AutoShardingConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "autoShardingConfig": {}
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing required fields in RlnConfig":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "maxMessageSize": "150 KiB",
            "rlnConfig": {
              "contractAddress": "0xABCD"
            }
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Missing 'maxMessageSize' in MessageValidation":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": 1,
          "messageValidation": {
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)
suite "NodeConfig JSON - invalid values":
  ## Malformed values or documents must raise SerializationError.
  ## Uses unittest's `expect` instead of the manual try/except + flag pattern.
  test "Invalid enum value for mode":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{"mode": "InvalidMode"}""")

  test "Invalid enum value for logLevel":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{"logLevel": "SUPERVERBOSE"}""")

  test "Wrong type for clusterId (string instead of number)":
    let jsonStr =
      """
      {
        "protocolsConfig": {
          "entryNodes": [],
          "clusterId": "not-a-number"
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        }
      }
      """
    expect SerializationError:
      discard decodeNodeConfigFromJson(jsonStr)

  test "Completely invalid JSON syntax":
    expect SerializationError:
      discard decodeNodeConfigFromJson("""{ not valid json at all }""")
suite "NodeConfig JSON -> WakuConf integration":
  test "Decoded config translates to valid WakuConf":
    ## Given: a complete JSON NodeConfig.
    let jsonStr =
      """
      {
        "mode": "Core",
        "protocolsConfig": {
          "entryNodes": [
            "enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
          ],
          "staticStoreNodes": [
            "/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
          ],
          "clusterId": 55,
          "autoShardingConfig": {
            "numShardsInCluster": 6
          },
          "messageValidation": {
            "maxMessageSize": "256 KiB",
            "rlnConfig": null
          }
        },
        "networkingConfig": {
          "listenIpv4": "0.0.0.0",
          "p2pTcpPort": 60000,
          "discv5UdpPort": 9000
        },
        "ethRpcEndpoints": ["http://localhost:8545"],
        "p2pReliability": true,
        "logLevel": "INFO",
        "logFormat": "TEXT"
      }
      """

    ## When: decoded and translated to the internal WakuConf.
    let nodeConfig = decodeNodeConfigFromJson(jsonStr)
    let confRes = toWakuConf(nodeConfig)

    ## Then: translation succeeds, validates, and carries the values over.
    require confRes.isOk()
    let conf = confRes.get()
    require conf.validate().isOk()

    check:
      conf.clusterId == 55
      conf.shardingConf.numShardsInCluster == 6
      conf.maxMessageSizeBytes == 256'u64 * 1024'u64
      conf.staticNodes.len == 1
      conf.p2pReliability == true

View File

@ -64,7 +64,7 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " &
srcDir & name & ".nim"
proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static") =
proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static", srcFile = "libwaku.nim", mainPrefix = "libwaku") =
if not dirExists "build":
mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
@ -73,12 +73,12 @@ proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
else:
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
proc buildMobileAndroid(srcDir = ".", params = "") =
let cpu = getEnv("CPU")
@ -400,3 +400,11 @@ task libWakuIOS, "Build the mobile bindings for iOS":
let srcDir = "./library"
let extraParams = "-d:chronicles_log_level=ERROR"
buildMobileIOS srcDir, extraParams
task liblogosdeliveryStatic, "Build the liblogosdelivery (Logos Messaging Delivery API) static library":
  ## The last CLI parameter is the output file name (e.g. liblogosdelivery.a).
  let libName = paramStr(paramCount())
  buildLibrary(
    libName, "liblogosdelivery/", chroniclesParams, "static",
    "liblogosdelivery.nim", "liblogosdelivery",
  )

task liblogosdeliveryDynamic, "Build the liblogosdelivery (Logos Messaging Delivery API) dynamic library":
  ## The last CLI parameter is the output file name (e.g. liblogosdelivery.so).
  let libName = paramStr(paramCount())
  buildLibrary(
    libName, "liblogosdelivery/", chroniclesParams, "dynamic",
    "liblogosdelivery.nim", "liblogosdelivery",
  )

View File

@ -4,6 +4,7 @@ import waku/factory/waku
import waku/[requests/health_requests, waku_core, waku_node]
import waku/node/delivery_service/send_service
import waku/node/delivery_service/subscription_service
import libp2p/peerid
import ./[api_conf, types]
logScope:

View File

@ -1,14 +1,18 @@
import std/[net, options]
import results
import json_serialization, json_serialization/std/options as json_options
import
waku/common/utils/parse_size_units,
waku/common/logging,
waku/factory/waku_conf,
waku/factory/conf_builder/conf_builder,
waku/factory/networks_config,
./entry_nodes
export json_serialization, json_options
type AutoShardingConfig* {.requiresInit.} = object
numShardsInCluster*: uint16
@ -87,6 +91,8 @@ type NodeConfig* {.requiresInit.} = object
networkingConfig: NetworkingConfig
ethRpcEndpoints: seq[string]
p2pReliability: bool
logLevel: LogLevel
logFormat: LogFormat
proc init*(
T: typedesc[NodeConfig],
@ -95,6 +101,8 @@ proc init*(
networkingConfig: NetworkingConfig = DefaultNetworkingConfig,
ethRpcEndpoints: seq[string] = @[],
p2pReliability: bool = false,
logLevel: LogLevel = LogLevel.INFO,
logFormat: LogFormat = LogFormat.TEXT,
): T =
return T(
mode: mode,
@ -102,11 +110,57 @@ proc init*(
networkingConfig: networkingConfig,
ethRpcEndpoints: ethRpcEndpoints,
p2pReliability: p2pReliability,
logLevel: logLevel,
logFormat: logFormat,
)
# -- Getters for ProtocolsConfig (private fields) - used for testing --
# Declared as `func` (noSideEffect): these are pure accessors, and the
# compiler now enforces that they stay side-effect free.

func entryNodes*(c: ProtocolsConfig): seq[string] =
  c.entryNodes

func staticStoreNodes*(c: ProtocolsConfig): seq[string] =
  c.staticStoreNodes

func clusterId*(c: ProtocolsConfig): uint16 =
  c.clusterId

func autoShardingConfig*(c: ProtocolsConfig): AutoShardingConfig =
  c.autoShardingConfig

func messageValidation*(c: ProtocolsConfig): MessageValidation =
  c.messageValidation

# -- Getters for NodeConfig (private fields) - used for testing --

func mode*(c: NodeConfig): WakuMode =
  c.mode

func protocolsConfig*(c: NodeConfig): ProtocolsConfig =
  c.protocolsConfig

func networkingConfig*(c: NodeConfig): NetworkingConfig =
  c.networkingConfig

func ethRpcEndpoints*(c: NodeConfig): seq[string] =
  c.ethRpcEndpoints

func p2pReliability*(c: NodeConfig): bool =
  c.p2pReliability

func logLevel*(c: NodeConfig): LogLevel =
  c.logLevel

func logFormat*(c: NodeConfig): LogFormat =
  c.logFormat
proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
var b = WakuConfBuilder.init()
# Apply log configuration
b.withLogLevel(nodeConfig.logLevel)
b.withLogFormat(nodeConfig.logFormat)
# Apply networking configuration
let networkingConfig = nodeConfig.networkingConfig
let ip = parseIpAddress(networkingConfig.listenIpv4)
@ -214,3 +268,260 @@ proc toWakuConf*(nodeConfig: NodeConfig): Result[WakuConf, string] =
return err("Failed to validate configuration: " & error)
return ok(wakuConf)
# ---- JSON serialization (writeValue / readValue) ----
# ---------- AutoShardingConfig ----------
proc writeValue*(w: var JsonWriter, val: AutoShardingConfig) {.raises: [IOError].} =
  ## Serializes AutoShardingConfig as {"numShardsInCluster": <uint16>}.
  w.beginRecord()
  w.writeField("numShardsInCluster", val.numShardsInCluster)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var AutoShardingConfig
) {.raises: [SerializationError, IOError].} =
  ## Strict parser for AutoShardingConfig: unknown keys raise, and
  ## 'numShardsInCluster' is mandatory.
  var shards: Option[uint16]
  for fieldName in readObjectFields(r):
    case fieldName
    of "numShardsInCluster":
      shards = some(r.readValue(uint16))
    else:
      r.raiseUnexpectedField(fieldName, "AutoShardingConfig")
  if shards.isNone():
    r.raiseUnexpectedValue("Missing required field 'numShardsInCluster'")
  val = AutoShardingConfig(numShardsInCluster: shards.get())
# ---------- RlnConfig ----------

proc writeValue*(w: var JsonWriter, val: RlnConfig) {.raises: [IOError].} =
  ## Serializes an RlnConfig as a JSON object with the "contractAddress",
  ## "chainId" and "epochSizeSec" fields, in that order.
  w.beginRecord()
  w.writeField("contractAddress", val.contractAddress)
  w.writeField("chainId", val.chainId)
  w.writeField("epochSizeSec", val.epochSizeSec)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var RlnConfig
) {.raises: [SerializationError, IOError].} =
  ## Deserializes an RlnConfig from a JSON object.
  ## All three fields — "contractAddress", "chainId", "epochSizeSec" — are
  ## mandatory; unknown fields are rejected.
  var
    address = none(string)
    chain = none(uint)
    epochSec = none(uint64)
  for key in readObjectFields(r):
    if key == "contractAddress":
      address = some(r.readValue(string))
    elif key == "chainId":
      chain = some(r.readValue(uint))
    elif key == "epochSizeSec":
      epochSec = some(r.readValue(uint64))
    else:
      r.raiseUnexpectedField(key, "RlnConfig")
  # Validate presence of every required field before constructing the result.
  for (present, fieldLabel) in [
    (address.isSome(), "contractAddress"),
    (chain.isSome(), "chainId"),
    (epochSec.isSome(), "epochSizeSec"),
  ]:
    if not present:
      r.raiseUnexpectedValue("Missing required field '" & fieldLabel & "'")
  val = RlnConfig(
    contractAddress: address.get(), chainId: chain.get(), epochSizeSec: epochSec.get()
  )
# ---------- NetworkingConfig ----------

proc writeValue*(w: var JsonWriter, val: NetworkingConfig) {.raises: [IOError].} =
  ## Serializes a NetworkingConfig as a JSON object with the "listenIpv4",
  ## "p2pTcpPort" and "discv5UdpPort" fields, in that order.
  w.beginRecord()
  w.writeField("listenIpv4", val.listenIpv4)
  w.writeField("p2pTcpPort", val.p2pTcpPort)
  w.writeField("discv5UdpPort", val.discv5UdpPort)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var NetworkingConfig
) {.raises: [SerializationError, IOError].} =
  ## Deserializes a NetworkingConfig from a JSON object.
  ## All three fields — "listenIpv4", "p2pTcpPort", "discv5UdpPort" — are
  ## mandatory; unknown fields are rejected.
  var
    ip = none(string)
    tcpPort = none(uint16)
    udpPort = none(uint16)
  for key in readObjectFields(r):
    if key == "listenIpv4":
      ip = some(r.readValue(string))
    elif key == "p2pTcpPort":
      tcpPort = some(r.readValue(uint16))
    elif key == "discv5UdpPort":
      udpPort = some(r.readValue(uint16))
    else:
      r.raiseUnexpectedField(key, "NetworkingConfig")
  # Reject the object if any required field was absent.
  template require(opt, fieldLabel) =
    if opt.isNone():
      r.raiseUnexpectedValue("Missing required field '" & fieldLabel & "'")

  require(ip, "listenIpv4")
  require(tcpPort, "p2pTcpPort")
  require(udpPort, "discv5UdpPort")
  val = NetworkingConfig(
    listenIpv4: ip.get(), p2pTcpPort: tcpPort.get(), discv5UdpPort: udpPort.get()
  )
# ---------- MessageValidation ----------

proc writeValue*(w: var JsonWriter, val: MessageValidation) {.raises: [IOError].} =
  ## Serializes a MessageValidation as a JSON object with the
  ## "maxMessageSize" and "rlnConfig" fields, in that order.
  w.beginRecord()
  w.writeField("maxMessageSize", val.maxMessageSize)
  w.writeField("rlnConfig", val.rlnConfig)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var MessageValidation
) {.raises: [SerializationError, IOError].} =
  ## Deserializes a MessageValidation from a JSON object.
  ## "maxMessageSize" is mandatory; "rlnConfig" is optional and defaults to
  ## none(RlnConfig) when absent. Unknown fields are rejected.
  var
    sizeLimit = none(string)
    rln = none(Option[RlnConfig])
  for key in readObjectFields(r):
    if key == "maxMessageSize":
      sizeLimit = some(r.readValue(string))
    elif key == "rlnConfig":
      rln = some(r.readValue(Option[RlnConfig]))
    else:
      r.raiseUnexpectedField(key, "MessageValidation")
  if sizeLimit.isNone():
    r.raiseUnexpectedValue("Missing required field 'maxMessageSize'")
  val = MessageValidation(
    maxMessageSize: sizeLimit.get(), rlnConfig: rln.get(none(RlnConfig))
  )
# ---------- ProtocolsConfig ----------

proc writeValue*(w: var JsonWriter, val: ProtocolsConfig) {.raises: [IOError].} =
  ## Serializes a ProtocolsConfig as a JSON object with the "entryNodes",
  ## "staticStoreNodes", "clusterId", "autoShardingConfig" and
  ## "messageValidation" fields, in that order.
  w.beginRecord()
  w.writeField("entryNodes", val.entryNodes)
  w.writeField("staticStoreNodes", val.staticStoreNodes)
  w.writeField("clusterId", val.clusterId)
  w.writeField("autoShardingConfig", val.autoShardingConfig)
  w.writeField("messageValidation", val.messageValidation)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var ProtocolsConfig
) {.raises: [SerializationError, IOError].} =
  ## Deserializes a ProtocolsConfig from a JSON object.
  ## "entryNodes" and "clusterId" are mandatory; "staticStoreNodes",
  ## "autoShardingConfig" and "messageValidation" fall back to their defaults
  ## when absent. Unknown fields are rejected.
  var
    entries = none(seq[string])
    storeNodes = none(seq[string])
    cluster = none(uint16)
    sharding = none(AutoShardingConfig)
    validation = none(MessageValidation)
  for key in readObjectFields(r):
    if key == "entryNodes":
      entries = some(r.readValue(seq[string]))
    elif key == "staticStoreNodes":
      storeNodes = some(r.readValue(seq[string]))
    elif key == "clusterId":
      cluster = some(r.readValue(uint16))
    elif key == "autoShardingConfig":
      sharding = some(r.readValue(AutoShardingConfig))
    elif key == "messageValidation":
      validation = some(r.readValue(MessageValidation))
    else:
      r.raiseUnexpectedField(key, "ProtocolsConfig")
  if entries.isNone():
    r.raiseUnexpectedValue("Missing required field 'entryNodes'")
  if cluster.isNone():
    r.raiseUnexpectedValue("Missing required field 'clusterId'")
  val = ProtocolsConfig.init(
    entryNodes = entries.get(),
    staticStoreNodes = storeNodes.get(@[]),
    clusterId = cluster.get(),
    autoShardingConfig = sharding.get(DefaultAutoShardingConfig),
    messageValidation = validation.get(DefaultMessageValidation),
  )
# ---------- NodeConfig ----------

proc writeValue*(w: var JsonWriter, val: NodeConfig) {.raises: [IOError].} =
  ## Serializes a NodeConfig as a JSON object with the "mode",
  ## "protocolsConfig", "networkingConfig", "ethRpcEndpoints",
  ## "p2pReliability", "logLevel" and "logFormat" fields, in that order.
  ## Nested config types use their own writeValue overloads defined above.
  w.beginRecord()
  w.writeField("mode", val.mode)
  w.writeField("protocolsConfig", val.protocolsConfig)
  w.writeField("networkingConfig", val.networkingConfig)
  w.writeField("ethRpcEndpoints", val.ethRpcEndpoints)
  w.writeField("p2pReliability", val.p2pReliability)
  w.writeField("logLevel", val.logLevel)
  w.writeField("logFormat", val.logFormat)
  w.endRecord()
proc readValue*(
    r: var JsonReader, val: var NodeConfig
) {.raises: [SerializationError, IOError].} =
  ## Deserializes a NodeConfig from a JSON object.
  ## Every field is optional; any field not present in the JSON falls back to
  ## its library default (Core mode, TheWakuNetwork preset, INFO/TEXT logging,
  ## reliability off). Unknown fields are rejected.
  var
    wakuMode = none(WakuMode)
    protocols = none(ProtocolsConfig)
    networking = none(NetworkingConfig)
    rpcEndpoints = none(seq[string])
    reliability = none(bool)
    logLvl = none(LogLevel)
    logFmt = none(LogFormat)
  for key in readObjectFields(r):
    if key == "mode":
      wakuMode = some(r.readValue(WakuMode))
    elif key == "protocolsConfig":
      protocols = some(r.readValue(ProtocolsConfig))
    elif key == "networkingConfig":
      networking = some(r.readValue(NetworkingConfig))
    elif key == "ethRpcEndpoints":
      rpcEndpoints = some(r.readValue(seq[string]))
    elif key == "p2pReliability":
      reliability = some(r.readValue(bool))
    elif key == "logLevel":
      logLvl = some(r.readValue(LogLevel))
    elif key == "logFormat":
      logFmt = some(r.readValue(LogFormat))
    else:
      r.raiseUnexpectedField(key, "NodeConfig")
  val = NodeConfig.init(
    mode = wakuMode.get(WakuMode.Core),
    protocolsConfig = protocols.get(TheWakuNetworkPreset),
    networkingConfig = networking.get(DefaultNetworkingConfig),
    ethRpcEndpoints = rpcEndpoints.get(@[]),
    p2pReliability = reliability.get(false),
    logLevel = logLvl.get(LogLevel.INFO),
    logFormat = logFmt.get(LogFormat.TEXT),
  )
# ---------- Decode helper ----------
# Json.decode returns T via `result`, which conflicts with {.requiresInit.}
# on Nim 2.x. This helper avoids the issue by using readValue into a var.
proc decodeNodeConfigFromJson*(
    jsonStr: string
): NodeConfig {.raises: [SerializationError].} =
  ## Decodes a NodeConfig from its JSON string representation.
  ## IOError from the in-memory stream is re-raised as SerializationError so
  ## callers only need to handle a single exception type.
  var cfg = NodeConfig.init() # default-initialized
  try:
    var input = unsafeMemoryInput(jsonStr)
    var jsonReader = JsonReader[DefaultFlavor].init(input)
    jsonReader.readValue(cfg)
  except IOError as err:
    raise (ref SerializationError)(msg: err.msg)
  return cfg

View File

@ -91,6 +91,7 @@ proc setupSendProcessorChain(
for i in 1 ..< processors.len:
currentProcessor.chain(processors[i])
currentProcessor = processors[i]
trace "Send processor chain", index = i, processor = type(processors[i]).name
return ok(processors[0])