Merge branch 'master' into store_query_issue

Commit 0bd5a7ba59 by Darshan, 2025-12-23 14:36:41 +05:30, committed via GitHub
58 changed files with 4256 additions and 2993 deletions

View File

@ -76,6 +76,9 @@ jobs:
.git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Make update
run: make update
- name: Build binaries
run: make V=1 QUICK_AND_DIRTY_COMPILER=1 all tools
@ -114,6 +117,9 @@ jobs:
.git/modules
key: ${{ runner.os }}-vendor-modules-${{ steps.submodules.outputs.hash }}
- name: Make update
run: make update
- name: Run tests
run: |
postgres_enabled=0
@ -132,12 +138,12 @@ jobs:
build-docker-image:
needs: changes
if: ${{ needs.changes.outputs.v2 == 'true' || needs.changes.outputs.common == 'true' || needs.changes.outputs.docker == 'true' }}
uses: logos-messaging/nwaku/.github/workflows/container-image.yml@master
uses: logos-messaging/logos-messaging-nim/.github/workflows/container-image.yml@10dc3d3eb4b6a3d4313f7b2cc4a85a925e9ce039
secrets: inherit
nwaku-nwaku-interop-tests:
needs: build-docker-image
uses: logos-messaging/logos-messaging-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_0.0.1
uses: logos-messaging/logos-messaging-interop-tests/.github/workflows/nim_waku_PR.yml@SMOKE_TEST_STABLE
with:
node_nwaku: ${{ needs.build-docker-image.outputs.image }}

View File

@ -41,7 +41,7 @@ jobs:
env:
QUAY_PASSWORD: ${{ secrets.QUAY_PASSWORD }}
QUAY_USER: ${{ secrets.QUAY_USER }}
- name: Checkout code
if: ${{ steps.secrets.outcome == 'success' }}
uses: actions/checkout@v4
@ -65,6 +65,7 @@ jobs:
id: build
if: ${{ steps.secrets.outcome == 'success' }}
run: |
make update
make -j${NPROC} V=1 QUICK_AND_DIRTY_COMPILER=1 NIMFLAGS="-d:disableMarchNative -d:postgres -d:chronicles_colors:none" wakunode2

View File

@ -41,25 +41,84 @@ jobs:
.git/modules
key: ${{ runner.os }}-${{matrix.arch}}-submodules-${{ steps.submodules.outputs.hash }}
- name: prep variables
- name: Get tag
id: version
run: |
# Use full tag, e.g., v0.37.0
echo "version=${GITHUB_REF_NAME}" >> $GITHUB_OUTPUT
- name: Prep variables
id: vars
run: |
NWAKU_ARTIFACT_NAME=$(echo "nwaku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
VERSION=${{ steps.version.outputs.version }}
echo "nwaku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
NWAKU_ARTIFACT_NAME=$(echo "waku-${{matrix.arch}}-${{runner.os}}.tar.gz" | tr "[:upper:]" "[:lower:]")
echo "waku=${NWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
- name: Install dependencies
if [[ "${{ runner.os }}" == "Linux" ]]; then
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-${{runner.os}}-linux.deb" | tr "[:upper:]" "[:lower:]")
fi
if [[ "${{ runner.os }}" == "macOS" ]]; then
LIBWAKU_ARTIFACT_NAME=$(echo "libwaku-${VERSION}-${{matrix.arch}}-macos.tar.gz" | tr "[:upper:]" "[:lower:]")
fi
echo "libwaku=${LIBWAKU_ARTIFACT_NAME}" >> $GITHUB_OUTPUT
- name: Install build dependencies
run: |
if [[ "${{ runner.os }}" == "Linux" ]]; then
sudo apt-get update && sudo apt-get install -y build-essential dpkg-dev
fi
- name: Build Waku artifacts
run: |
OS=$([[ "${{runner.os}}" == "macOS" ]] && echo "macosx" || echo "linux")
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" V=1 update
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false wakunode2
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}}" CI=false chat2
tar -cvzf ${{steps.vars.outputs.nwaku}} ./build/
tar -cvzf ${{steps.vars.outputs.waku}} ./build/
- name: Upload asset
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false libwaku
make -j${NPROC} NIMFLAGS="--parallelBuild:${NPROC} -d:disableMarchNative --os:${OS} --cpu:${{matrix.arch}} -d:postgres" CI=false STATIC=1 libwaku
- name: Create distributable libwaku package
run: |
VERSION=${{ steps.version.outputs.version }}
if [[ "${{ runner.os }}" == "Linux" ]]; then
rm -rf pkg
mkdir -p pkg/DEBIAN pkg/usr/local/lib pkg/usr/local/include
cp build/libwaku.so pkg/usr/local/lib/
cp build/libwaku.a pkg/usr/local/lib/
cp library/libwaku.h pkg/usr/local/include/
echo "Package: waku" >> pkg/DEBIAN/control
echo "Version: ${VERSION}" >> pkg/DEBIAN/control
echo "Priority: optional" >> pkg/DEBIAN/control
echo "Section: libs" >> pkg/DEBIAN/control
echo "Architecture: ${{matrix.arch}}" >> pkg/DEBIAN/control
echo "Maintainer: Waku Team <ivansete@status.im>" >> pkg/DEBIAN/control
echo "Description: Waku library" >> pkg/DEBIAN/control
dpkg-deb --build pkg ${{steps.vars.outputs.libwaku}}
fi
if [[ "${{ runner.os }}" == "macOS" ]]; then
tar -cvzf ${{steps.vars.outputs.libwaku}} ./build/libwaku.dylib ./build/libwaku.a ./library/libwaku.h
fi
- name: Upload waku artifact
uses: actions/upload-artifact@v4.4.0
with:
name: ${{steps.vars.outputs.nwaku}}
path: ${{steps.vars.outputs.nwaku}}
name: waku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
path: ${{ steps.vars.outputs.waku }}
if-no-files-found: error
- name: Upload libwaku artifact
uses: actions/upload-artifact@v4.4.0
with:
name: libwaku-${{ steps.version.outputs.version }}-${{ matrix.arch }}-${{ runner.os }}
path: ${{ steps.vars.outputs.libwaku }}
if-no-files-found: error
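
For context, a minimal sketch of how a downstream consumer might use the artifacts packaged above: the .deb installs libwaku.so and libwaku.a into /usr/local/lib and libwaku.h into /usr/local/include, so a C program only needs the header plus -lwaku at link time. The build command, the empty config string, and the callback name are illustrative assumptions, not part of this change.

/* hello_libwaku.c - hypothetical consumer of the packaged library.
 * Assumed build command: cc hello_libwaku.c -lwaku -o hello_libwaku */
#include <stdio.h>
#include <unistd.h>
#include <libwaku.h> /* installed to /usr/local/include by the .deb step above */

/* Same (callerRet, msg, len, userData) callback shape used by the C examples
 * elsewhere in this diff. */
static void on_response(int callerRet, const char *msg, size_t len, void *userData)
{
  printf("ret=%d msg=%s\n", callerRet, msg ? msg : "");
}

int main(void)
{
  /* Placeholder config: a real node needs the full JSON config
   * (clusterId, shards, ...) shown in examples/cbindings. */
  void *ctx = waku_new("{}", on_response, NULL);

  /* Calls report back asynchronously through the callback. */
  waku_version(ctx, on_response, NULL);

  sleep(1); /* crude; the repository examples wait on a condition variable */
  return 0;
}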

.gitignore (vendored): 10 lines changed
View File

@ -59,6 +59,10 @@ nimbus-build-system.paths
/examples/nodejs/build/
/examples/rust/target/
# Xcode user data
xcuserdata/
*.xcuserstate
# Coverage
coverage_html_report/
@ -79,3 +83,9 @@ waku_handler.moc.cpp
# Nix build result
result
# llms
AGENTS.md
nimble.develop
nimble.paths
nimbledeps

.gitmodules (vendored): 5 lines changed
View File

@ -184,3 +184,8 @@
url = https://github.com/logos-messaging/waku-rlnv2-contract.git
ignore = untracked
branch = master
[submodule "vendor/nim-ffi"]
path = vendor/nim-ffi
url = https://github.com/logos-messaging/nim-ffi/
ignore = untracked
branch = master

View File

@ -119,6 +119,10 @@ endif
##################
.PHONY: deps libbacktrace
FOUNDRY_VERSION := 1.5.0
PNPM_VERSION := 10.23.0
rustup:
ifeq (, $(shell which cargo))
# Install Rustup if it's not installed
@ -128,7 +132,7 @@ ifeq (, $(shell which cargo))
endif
rln-deps: rustup
./scripts/install_rln_tests_dependencies.sh
./scripts/install_rln_tests_dependencies.sh $(FOUNDRY_VERSION) $(PNPM_VERSION)
deps: | deps-common nat-libs waku.nims
@ -426,18 +430,27 @@ docker-liteprotocoltester-push:
.PHONY: cbindings cwaku_example libwaku
STATIC ?= 0
BUILD_COMMAND ?= libwakuDynamic
ifeq ($(detected_OS),Windows)
LIB_EXT_DYNAMIC = dll
LIB_EXT_STATIC = lib
else ifeq ($(detected_OS),Darwin)
LIB_EXT_DYNAMIC = dylib
LIB_EXT_STATIC = a
else ifeq ($(detected_OS),Linux)
LIB_EXT_DYNAMIC = so
LIB_EXT_STATIC = a
endif
LIB_EXT := $(LIB_EXT_DYNAMIC)
ifeq ($(STATIC), 1)
LIB_EXT = $(LIB_EXT_STATIC)
BUILD_COMMAND = libwakuStatic
endif
libwaku: | build deps librln
rm -f build/libwaku*
ifeq ($(STATIC), 1)
echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
else ifeq ($(detected_OS),Windows)
make -f scripts/libwaku_windows_setup.mk windows-setup
echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
else
echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
endif
echo -e $(BUILD_MSG) "build/$@.$(LIB_EXT)" && $(ENV_SCRIPT) nim $(BUILD_COMMAND) $(NIM_PARAMS) waku.nims $@.$(LIB_EXT)
#####################
## Mobile Bindings ##
@ -504,6 +517,51 @@ libwaku-android:
# It's likely this architecture is not used so we might just not support it.
# $(MAKE) libwaku-android-arm
#################
## iOS Bindings #
#################
.PHONY: libwaku-ios-precheck \
libwaku-ios-device \
libwaku-ios-simulator \
libwaku-ios
IOS_DEPLOYMENT_TARGET ?= 18.0
# Get SDK paths dynamically using xcrun
define get_ios_sdk_path
$(shell xcrun --sdk $(1) --show-sdk-path 2>/dev/null)
endef
libwaku-ios-precheck:
ifeq ($(detected_OS),Darwin)
@command -v xcrun >/dev/null 2>&1 || { echo "Error: Xcode command line tools not installed"; exit 1; }
else
$(error iOS builds are only supported on macOS)
endif
# Build for iOS architecture
build-libwaku-for-ios-arch:
IOS_SDK=$(IOS_SDK) IOS_ARCH=$(IOS_ARCH) IOS_SDK_PATH=$(IOS_SDK_PATH) $(ENV_SCRIPT) nim libWakuIOS $(NIM_PARAMS) waku.nims
# iOS device (arm64)
libwaku-ios-device: IOS_ARCH=arm64
libwaku-ios-device: IOS_SDK=iphoneos
libwaku-ios-device: IOS_SDK_PATH=$(call get_ios_sdk_path,iphoneos)
libwaku-ios-device: | libwaku-ios-precheck build deps
$(MAKE) build-libwaku-for-ios-arch IOS_ARCH=$(IOS_ARCH) IOS_SDK=$(IOS_SDK) IOS_SDK_PATH=$(IOS_SDK_PATH)
# iOS simulator (arm64 - Apple Silicon Macs)
libwaku-ios-simulator: IOS_ARCH=arm64
libwaku-ios-simulator: IOS_SDK=iphonesimulator
libwaku-ios-simulator: IOS_SDK_PATH=$(call get_ios_sdk_path,iphonesimulator)
libwaku-ios-simulator: | libwaku-ios-precheck build deps
$(MAKE) build-libwaku-for-ios-arch IOS_ARCH=$(IOS_ARCH) IOS_SDK=$(IOS_SDK) IOS_SDK_PATH=$(IOS_SDK_PATH)
# Build all iOS targets
libwaku-ios:
$(MAKE) libwaku-ios-device
$(MAKE) libwaku-ios-simulator
cwaku_example: | build libwaku
echo -e $(BUILD_MSG) "build/$@" && \
cc -o "build/$@" \
@ -549,4 +607,3 @@ release-notes:
sed -E 's@#([0-9]+)@[#\1](https://github.com/waku-org/nwaku/issues/\1)@g'
# I could not get the tool to replace issue ids with links, so using sed for now,
# asked here: https://github.com/bvieira/sv4git/discussions/101

View File

@ -19,283 +19,309 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
int callback_executed = 0;
void waitForCallback() {
pthread_mutex_lock(&mutex);
while (!callback_executed) {
pthread_cond_wait(&cond, &mutex);
}
callback_executed = 0;
pthread_mutex_unlock(&mutex);
void waitForCallback()
{
pthread_mutex_lock(&mutex);
while (!callback_executed)
{
pthread_cond_wait(&cond, &mutex);
}
callback_executed = 0;
pthread_mutex_unlock(&mutex);
}
#define WAKU_CALL(call) \
do { \
int ret = call; \
if (ret != 0) { \
printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \
exit(1); \
} \
waitForCallback(); \
} while (0)
#define WAKU_CALL(call) \
do \
{ \
int ret = call; \
if (ret != 0) \
{ \
printf("Failed the call to: %s. Returned code: %d\n", #call, ret); \
exit(1); \
} \
waitForCallback(); \
} while (0)
struct ConfigNode {
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
int store;
char storeNode[2048];
char storeRetentionPolicy[64];
char storeDbUrl[256];
int storeVacuum;
int storeDbMigration;
int storeMaxNumDbConnections;
struct ConfigNode
{
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
int store;
char storeNode[2048];
char storeRetentionPolicy[64];
char storeDbUrl[256];
int storeVacuum;
int storeDbMigration;
int storeMaxNumDbConnections;
};
// libwaku Context
void* ctx;
void *ctx;
// For the case of C language we don't need to store a particular userData
void* userData = NULL;
void *userData = NULL;
// Arguments parsing
static char doc[] = "\nC example that shows how to use the waku library.";
static char args_doc[] = "";
static struct argp_option options[] = {
{ "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{ "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{ "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{ "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{ "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
{"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
to. (default: \"\") e.g. \"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""},
{ 0 }
};
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
static error_t parse_opt(int key, char *arg, struct argp_state *state)
{
struct ConfigNode *cfgNode = state->input;
switch (key) {
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
argp_usage(state);
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
struct ConfigNode *cfgNode = state->input;
switch (key)
{
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
argp_usage(state);
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
return 0;
}
void signal_cond() {
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
void signal_cond()
{
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0};
void event_handler(int callerRet, const char* msg, size_t len, void* userData) {
if (callerRet == RET_ERR) {
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK) {
printf("Receiving event: %s\n", msg);
}
void event_handler(int callerRet, const char *msg, size_t len, void *userData)
{
if (callerRet == RET_ERR)
{
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK)
{
printf("Receiving event: %s\n", msg);
}
signal_cond();
signal_cond();
}
void on_event_received(int callerRet, const char* msg, size_t len, void* userData) {
if (callerRet == RET_ERR) {
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK) {
printf("Receiving event: %s\n", msg);
}
void on_event_received(int callerRet, const char *msg, size_t len, void *userData)
{
if (callerRet == RET_ERR)
{
printf("Error: %s\n", msg);
exit(1);
}
else if (callerRet == RET_OK)
{
printf("Receiving event: %s\n", msg);
}
}
char* contentTopic = NULL;
void handle_content_topic(int callerRet, const char* msg, size_t len, void* userData) {
if (contentTopic != NULL) {
free(contentTopic);
}
char *contentTopic = NULL;
void handle_content_topic(int callerRet, const char *msg, size_t len, void *userData)
{
if (contentTopic != NULL)
{
free(contentTopic);
}
contentTopic = malloc(len * sizeof(char) + 1);
strcpy(contentTopic, msg);
signal_cond();
contentTopic = malloc(len * sizeof(char) + 1);
strcpy(contentTopic, msg);
signal_cond();
}
char* publishResponse = NULL;
void handle_publish_ok(int callerRet, const char* msg, size_t len, void* userData) {
printf("Publish Ok: %s %lu\n", msg, len);
char *publishResponse = NULL;
void handle_publish_ok(int callerRet, const char *msg, size_t len, void *userData)
{
printf("Publish Ok: %s %lu\n", msg, len);
if (publishResponse != NULL) {
free(publishResponse);
}
if (publishResponse != NULL)
{
free(publishResponse);
}
publishResponse = malloc(len * sizeof(char) + 1);
strcpy(publishResponse, msg);
publishResponse = malloc(len * sizeof(char) + 1);
strcpy(publishResponse, msg);
}
#define MAX_MSG_SIZE 65535
void publish_message(const char* msg) {
char jsonWakuMsg[MAX_MSG_SIZE];
char *msgPayload = b64_encode(msg, strlen(msg));
void publish_message(const char *msg)
{
char jsonWakuMsg[MAX_MSG_SIZE];
char *msgPayload = b64_encode(msg, strlen(msg));
WAKU_CALL( waku_content_topic(ctx,
"appName",
1,
"contentTopicName",
"encoding",
handle_content_topic,
userData) );
snprintf(jsonWakuMsg,
MAX_MSG_SIZE,
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload, contentTopic);
WAKU_CALL(waku_content_topic(ctx,
handle_content_topic,
userData,
"appName",
1,
"contentTopicName",
"encoding"));
snprintf(jsonWakuMsg,
MAX_MSG_SIZE,
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload, contentTopic);
free(msgPayload);
free(msgPayload);
WAKU_CALL( waku_relay_publish(ctx,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/,
event_handler,
userData) );
WAKU_CALL(waku_relay_publish(ctx,
event_handler,
userData,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/));
}
void show_help_and_exit() {
printf("Wrong parameters\n");
exit(1);
void show_help_and_exit()
{
printf("Wrong parameters\n");
exit(1);
}
void print_default_pubsub_topic(int callerRet, const char* msg, size_t len, void* userData) {
printf("Default pubsub topic: %s\n", msg);
signal_cond();
void print_default_pubsub_topic(int callerRet, const char *msg, size_t len, void *userData)
{
printf("Default pubsub topic: %s\n", msg);
signal_cond();
}
void print_waku_version(int callerRet, const char* msg, size_t len, void* userData) {
printf("Git Version: %s\n", msg);
signal_cond();
void print_waku_version(int callerRet, const char *msg, size_t len, void *userData)
{
printf("Git Version: %s\n", msg);
signal_cond();
}
// Beginning of UI program logic
enum PROGRAM_STATE {
MAIN_MENU,
SUBSCRIBE_TOPIC_MENU,
CONNECT_TO_OTHER_NODE_MENU,
PUBLISH_MESSAGE_MENU
enum PROGRAM_STATE
{
MAIN_MENU,
SUBSCRIBE_TOPIC_MENU,
CONNECT_TO_OTHER_NODE_MENU,
PUBLISH_MESSAGE_MENU
};
enum PROGRAM_STATE current_state = MAIN_MENU;
void show_main_menu() {
printf("\nPlease, select an option:\n");
printf("\t1.) Subscribe to topic\n");
printf("\t2.) Connect to other node\n");
printf("\t3.) Publish a message\n");
void show_main_menu()
{
printf("\nPlease, select an option:\n");
printf("\t1.) Subscribe to topic\n");
printf("\t2.) Connect to other node\n");
printf("\t3.) Publish a message\n");
}
void handle_user_input() {
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
if (numRead <= 0) {
return;
}
void handle_user_input()
{
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
if (numRead <= 0)
{
return;
}
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
{
printf("Indicate the Pubsubtopic to subscribe:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
switch (atoi(cmd))
{
case SUBSCRIBE_TOPIC_MENU:
{
printf("Indicate the Pubsubtopic to subscribe:\n");
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
WAKU_CALL( waku_relay_subscribe(ctx,
pubsubTopic,
event_handler,
userData) );
printf("The subscription went well\n");
WAKU_CALL(waku_relay_subscribe(ctx,
event_handler,
userData,
pubsubTopic));
printf("The subscription went well\n");
show_main_menu();
}
show_main_menu();
}
break;
case CONNECT_TO_OTHER_NODE_MENU:
// printf("Connecting to a node. Please indicate the peer Multiaddress:\n");
// printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
// char peerAddr[512];
// scanf("%511s", peerAddr);
// WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
show_main_menu();
break;
case CONNECT_TO_OTHER_NODE_MENU:
printf("Connecting to a node. Please indicate the peer Multiaddress:\n");
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
WAKU_CALL(waku_connect(ctx, peerAddr, 10000 /* timeoutMs */, event_handler, userData));
show_main_menu();
case PUBLISH_MESSAGE_MENU:
{
printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
publish_message(msg);
show_main_menu();
}
break;
case MAIN_MENU:
break;
case PUBLISH_MESSAGE_MENU:
{
printf("Type the message to publish:\n");
char msg[1024];
scanf("%1023s", msg);
publish_message(msg);
show_main_menu();
}
break;
case MAIN_MENU:
break;
}
}
}
// End of UI program logic
int main(int argc, char** argv) {
struct ConfigNode cfgNode;
// default values
snprintf(cfgNode.host, 128, "0.0.0.0");
cfgNode.port = 60000;
cfgNode.relay = 1;
int main(int argc, char **argv)
{
struct ConfigNode cfgNode;
// default values
snprintf(cfgNode.host, 128, "0.0.0.0");
cfgNode.port = 60000;
cfgNode.relay = 1;
cfgNode.store = 0;
snprintf(cfgNode.storeNode, 2048, "");
snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000");
snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres");
cfgNode.storeVacuum = 0;
cfgNode.storeDbMigration = 0;
cfgNode.storeMaxNumDbConnections = 30;
cfgNode.store = 0;
snprintf(cfgNode.storeNode, 2048, "");
snprintf(cfgNode.storeRetentionPolicy, 64, "time:6000000");
snprintf(cfgNode.storeDbUrl, 256, "postgres://postgres:test123@localhost:5432/postgres");
cfgNode.storeVacuum = 0;
cfgNode.storeDbMigration = 0;
cfgNode.storeMaxNumDbConnections = 30;
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode)
== ARGP_ERR_UNKNOWN) {
show_help_and_exit();
}
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN)
{
show_help_and_exit();
}
char jsonConfig[5000];
snprintf(jsonConfig, 5000, "{ \
char jsonConfig[5000];
snprintf(jsonConfig, 5000, "{ \
\"clusterId\": 16, \
\"shards\": [ 1, 32, 64, 128, 256 ], \
\"numShardsInNetwork\": 257, \
@ -313,54 +339,56 @@ int main(int argc, char** argv) {
\"discv5UdpPort\": 9999, \
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
cfgNode.port,
cfgNode.relay ? "true":"false",
cfgNode.store ? "true":"false",
cfgNode.storeDbUrl,
cfgNode.storeRetentionPolicy,
cfgNode.storeMaxNumDbConnections);
}",
cfgNode.host,
cfgNode.port,
cfgNode.relay ? "true" : "false",
cfgNode.store ? "true" : "false",
cfgNode.storeDbUrl,
cfgNode.storeRetentionPolicy,
cfgNode.storeMaxNumDbConnections);
ctx = waku_new(jsonConfig, event_handler, userData);
waitForCallback();
ctx = waku_new(jsonConfig, event_handler, userData);
waitForCallback();
WAKU_CALL( waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData) );
WAKU_CALL( waku_version(ctx, print_waku_version, userData) );
WAKU_CALL(waku_default_pubsub_topic(ctx, print_default_pubsub_topic, userData));
WAKU_CALL(waku_version(ctx, print_waku_version, userData));
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES" : "NO");
waku_set_event_callback(ctx, on_event_received, userData);
set_event_callback(ctx, on_event_received, userData);
waku_start(ctx, event_handler, userData);
waitForCallback();
waku_start(ctx, event_handler, userData);
waitForCallback();
WAKU_CALL( waku_listen_addresses(ctx, event_handler, userData) );
WAKU_CALL(waku_listen_addresses(ctx, event_handler, userData));
WAKU_CALL( waku_relay_subscribe(ctx,
"/waku/2/rs/0/0",
event_handler,
userData) );
WAKU_CALL(waku_relay_subscribe(ctx,
event_handler,
userData,
"/waku/2/rs/16/32"));
WAKU_CALL( waku_discv5_update_bootnodes(ctx,
"[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]",
event_handler,
userData) );
WAKU_CALL(waku_discv5_update_bootnodes(ctx,
event_handler,
userData,
"[\"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\",\"enr:-QEkuEB3WHNS-xA3RDpfu9A2Qycr3bN3u7VoArMEiDIFZJ66F1EB3d4wxZN1hcdcOX-RfuXB-MQauhJGQbpz3qUofOtLAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPK35Nnz0cWUtSAhBp7zvHEhyU_AqeQUlqzLiLxfP2L4oN0Y3CCdl-DdWRwgiMohXdha3UyDw\"]"));
WAKU_CALL( waku_get_peerids_from_peerstore(ctx,
event_handler,
userData) );
WAKU_CALL(waku_get_peerids_from_peerstore(ctx,
event_handler,
userData));
show_main_menu();
while(1) {
handle_user_input();
show_main_menu();
while (1)
{
handle_user_input();
// Uncomment the following if need to test the metrics retrieval
// WAKU_CALL( waku_get_metrics(ctx,
// event_handler,
// userData) );
}
// Uncomment the following if need to test the metrics retrieval
// WAKU_CALL( waku_get_metrics(ctx,
// event_handler,
// userData) );
}
pthread_mutex_destroy(&mutex);
pthread_cond_destroy(&cond);
pthread_mutex_destroy(&mutex);
pthread_cond_destroy(&cond);
}
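
Beyond the brace-style reformatting, the substantive change in this example is the argument order: every libwaku call now takes the callback and userData immediately after the context, with the call-specific arguments last, and waku_set_event_callback is renamed to set_event_callback. A condensed, hypothetical sketch of that pattern follows (config abbreviated; the full JSON config from this example is needed for a working node).

/* convention_sketch.c - hypothetical; condensed from the example above. */
#include <stdio.h>
#include <pthread.h>
#include "libwaku.h"

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int callback_executed = 0;

static void on_done(int callerRet, const char *msg, size_t len, void *userData)
{
  printf("ret=%d msg=%s\n", callerRet, msg ? msg : "");
  pthread_mutex_lock(&mutex);
  callback_executed = 1;
  pthread_cond_signal(&cond);
  pthread_mutex_unlock(&mutex);
}

static void on_event(int callerRet, const char *msg, size_t len, void *userData)
{
  /* Events arrive through the callback registered via set_event_callback. */
  printf("event: %s\n", msg ? msg : "");
}

static void wait_for_callback(void)
{
  pthread_mutex_lock(&mutex);
  while (!callback_executed)
    pthread_cond_wait(&cond, &mutex);
  callback_executed = 0;
  pthread_mutex_unlock(&mutex);
}

int main(void)
{
  /* Abbreviated config; use the full JSON from this example in practice. */
  void *ctx = waku_new(
      "{ \"clusterId\": 16, \"shards\": [ 1, 32, 64, 128, 256 ], \"numShardsInNetwork\": 257 }",
      on_done, NULL);
  wait_for_callback();

  set_event_callback(ctx, on_event, NULL); /* renamed from waku_set_event_callback */

  waku_start(ctx, on_done, NULL);
  wait_for_callback();

  /* New order: ctx, callback, userData, then the call-specific arguments. */
  waku_relay_subscribe(ctx, on_done, NULL, "/waku/2/rs/16/32");
  wait_for_callback();
  return 0;
}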

View File

@ -21,37 +21,43 @@ pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
int callback_executed = 0;
void waitForCallback() {
void waitForCallback()
{
pthread_mutex_lock(&mutex);
while (!callback_executed) {
while (!callback_executed)
{
pthread_cond_wait(&cond, &mutex);
}
callback_executed = 0;
pthread_mutex_unlock(&mutex);
}
void signal_cond() {
void signal_cond()
{
pthread_mutex_lock(&mutex);
callback_executed = 1;
pthread_cond_signal(&cond);
pthread_mutex_unlock(&mutex);
}
#define WAKU_CALL(call) \
do { \
int ret = call; \
if (ret != 0) { \
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
} \
waitForCallback(); \
} while (0)
#define WAKU_CALL(call) \
do \
{ \
int ret = call; \
if (ret != 0) \
{ \
std::cout << "Failed the call to: " << #call << ". Code: " << ret << "\n"; \
} \
waitForCallback(); \
} while (0)
struct ConfigNode {
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
struct ConfigNode
{
char host[128];
int port;
char key[128];
int relay;
char peers[2048];
};
// Arguments parsing
@ -59,70 +65,76 @@ static char doc[] = "\nC example that shows how to use the waku library.";
static char args_doc[] = "";
static struct argp_option options[] = {
{ "host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{ "port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{ "key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{ "relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{ "peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
{"host", 'h', "HOST", 0, "IP to listen for for LibP2P traffic. (default: \"0.0.0.0\")"},
{"port", 'p', "PORT", 0, "TCP listening port. (default: \"60000\")"},
{"key", 'k', "KEY", 0, "P2P node private key as 64 char hex string."},
{"relay", 'r', "RELAY", 0, "Enable relay protocol: 1 or 0. (default: 1)"},
{"peers", 'a', "PEERS", 0, "Comma-separated list of peer-multiaddress to connect\
to. (default: \"\") e.g. \"/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\""},
{ 0 }
};
{0}};
static error_t parse_opt(int key, char *arg, struct argp_state *state) {
static error_t parse_opt(int key, char *arg, struct argp_state *state)
{
struct ConfigNode *cfgNode = (ConfigNode *) state->input;
switch (key) {
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
struct ConfigNode *cfgNode = (ConfigNode *)state->input;
switch (key)
{
case 'h':
snprintf(cfgNode->host, 128, "%s", arg);
break;
case 'p':
cfgNode->port = atoi(arg);
break;
case 'k':
snprintf(cfgNode->key, 128, "%s", arg);
break;
case 'r':
cfgNode->relay = atoi(arg);
break;
case 'a':
snprintf(cfgNode->peers, 2048, "%s", arg);
break;
case ARGP_KEY_ARG:
if (state->arg_num >= 1) /* Too many arguments. */
argp_usage(state);
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
break;
case ARGP_KEY_END:
break;
default:
return ARGP_ERR_UNKNOWN;
}
return 0;
}
void event_handler(const char* msg, size_t len) {
void event_handler(const char *msg, size_t len)
{
printf("Receiving event: %s\n", msg);
}
void handle_error(const char* msg, size_t len) {
void handle_error(const char *msg, size_t len)
{
printf("handle_error: %s\n", msg);
exit(1);
}
template <class F>
auto cify(F&& f) {
static F fn = std::forward<F>(f);
return [](int callerRet, const char* msg, size_t len, void* userData) {
signal_cond();
return fn(msg, len);
};
auto cify(F &&f)
{
static F fn = std::forward<F>(f);
return [](int callerRet, const char *msg, size_t len, void *userData)
{
signal_cond();
return fn(msg, len);
};
}
static struct argp argp = { options, parse_opt, args_doc, doc, 0, 0, 0 };
static struct argp argp = {options, parse_opt, args_doc, doc, 0, 0, 0};
// Beginning of UI program logic
enum PROGRAM_STATE {
enum PROGRAM_STATE
{
MAIN_MENU,
SUBSCRIBE_TOPIC_MENU,
CONNECT_TO_OTHER_NODE_MENU,
@ -131,18 +143,21 @@ enum PROGRAM_STATE {
enum PROGRAM_STATE current_state = MAIN_MENU;
void show_main_menu() {
void show_main_menu()
{
printf("\nPlease, select an option:\n");
printf("\t1.) Subscribe to topic\n");
printf("\t2.) Connect to other node\n");
printf("\t3.) Publish a message\n");
}
void handle_user_input(void* ctx) {
void handle_user_input(void *ctx)
{
char cmd[1024];
memset(cmd, 0, 1024);
int numRead = read(0, cmd, 1024);
if (numRead <= 0) {
if (numRead <= 0)
{
return;
}
@ -154,12 +169,11 @@ void handle_user_input(void* ctx) {
char pubsubTopic[128];
scanf("%127s", pubsubTopic);
WAKU_CALL( waku_relay_subscribe(ctx,
pubsubTopic,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
WAKU_CALL(waku_relay_subscribe(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
pubsubTopic));
printf("The subscription went well\n");
show_main_menu();
@ -171,15 +185,14 @@ void handle_user_input(void* ctx) {
printf("e.g.: /ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\n");
char peerAddr[512];
scanf("%511s", peerAddr);
WAKU_CALL( waku_connect(ctx,
peerAddr,
10000 /* timeoutMs */,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr));
WAKU_CALL(waku_connect(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
peerAddr,
10000 /* timeoutMs */));
show_main_menu();
break;
break;
case PUBLISH_MESSAGE_MENU:
{
@ -193,28 +206,26 @@ void handle_user_input(void* ctx) {
std::string contentTopic;
waku_content_topic(ctx,
cify([&contentTopic](const char *msg, size_t len)
{ contentTopic = msg; }),
nullptr,
"appName",
1,
"contentTopicName",
"encoding",
cify([&contentTopic](const char* msg, size_t len) {
contentTopic = msg;
}),
nullptr);
1,
"contentTopicName",
"encoding");
snprintf(jsonWakuMsg,
2048,
"{\"payload\":\"%s\",\"contentTopic\":\"%s\"}",
msgPayload.data(), contentTopic.c_str());
WAKU_CALL( waku_relay_publish(ctx,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
WAKU_CALL(waku_relay_publish(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
"/waku/2/rs/16/32",
jsonWakuMsg,
10000 /*timeout ms*/));
show_main_menu();
}
@ -227,12 +238,14 @@ void handle_user_input(void* ctx) {
// End of UI program logic
void show_help_and_exit() {
void show_help_and_exit()
{
printf("Wrong parameters\n");
exit(1);
}
int main(int argc, char** argv) {
int main(int argc, char **argv)
{
struct ConfigNode cfgNode;
// default values
snprintf(cfgNode.host, 128, "0.0.0.0");
@ -241,8 +254,8 @@ int main(int argc, char** argv) {
cfgNode.port = 60000;
cfgNode.relay = 1;
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode)
== ARGP_ERR_UNKNOWN) {
if (argp_parse(&argp, argc, argv, 0, 0, &cfgNode) == ARGP_ERR_UNKNOWN)
{
show_help_and_exit();
}
@ -260,72 +273,64 @@ int main(int argc, char** argv) {
\"discv5UdpPort\": 9999, \
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
}", cfgNode.host,
cfgNode.port);
}",
cfgNode.host,
cfgNode.port);
void* ctx =
void *ctx =
waku_new(jsonConfig,
cify([](const char* msg, size_t len) {
std::cout << "waku_new feedback: " << msg << std::endl;
}
),
nullptr
);
cify([](const char *msg, size_t len)
{ std::cout << "waku_new feedback: " << msg << std::endl; }),
nullptr);
waitForCallback();
// example on how to retrieve a value from the `libwaku` callback.
std::string defaultPubsubTopic;
WAKU_CALL(
waku_default_pubsub_topic(
ctx,
cify([&defaultPubsubTopic](const char* msg, size_t len) {
defaultPubsubTopic = msg;
}
),
nullptr));
ctx,
cify([&defaultPubsubTopic](const char *msg, size_t len)
{ defaultPubsubTopic = msg; }),
nullptr));
std::cout << "Default pubsub topic: " << defaultPubsubTopic << std::endl;
WAKU_CALL(waku_version(ctx,
cify([&](const char* msg, size_t len) {
std::cout << "Git Version: " << msg << std::endl;
}),
WAKU_CALL(waku_version(ctx,
cify([&](const char *msg, size_t len)
{ std::cout << "Git Version: " << msg << std::endl; }),
nullptr));
printf("Bind addr: %s:%u\n", cfgNode.host, cfgNode.port);
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES": "NO");
printf("Waku Relay enabled: %s\n", cfgNode.relay == 1 ? "YES" : "NO");
std::string pubsubTopic;
WAKU_CALL(waku_pubsub_topic(ctx,
"example",
cify([&](const char* msg, size_t len) {
pubsubTopic = msg;
}),
nullptr));
WAKU_CALL(waku_pubsub_topic(ctx,
cify([&](const char *msg, size_t len)
{ pubsubTopic = msg; }),
nullptr,
"example"));
std::cout << "Custom pubsub topic: " << pubsubTopic << std::endl;
waku_set_event_callback(ctx,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr);
set_event_callback(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr);
WAKU_CALL( waku_start(ctx,
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr));
WAKU_CALL(waku_start(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr));
WAKU_CALL( waku_relay_subscribe(ctx,
defaultPubsubTopic.c_str(),
cify([&](const char* msg, size_t len) {
event_handler(msg, len);
}),
nullptr) );
WAKU_CALL(waku_relay_subscribe(ctx,
cify([&](const char *msg, size_t len)
{ event_handler(msg, len); }),
nullptr,
defaultPubsubTopic.c_str()));
show_main_menu();
while(1) {
while (1)
{
handle_user_input(ctx);
}
}

View File

@ -71,32 +71,32 @@ package main
static void* cGoWakuNew(const char* configJson, void* resp) {
// We pass NULL because we are not interested in retrieving data from this callback
void* ret = waku_new(configJson, (WakuCallBack) callback, resp);
void* ret = waku_new(configJson, (FFICallBack) callback, resp);
return ret;
}
static void cGoWakuStart(void* wakuCtx, void* resp) {
WAKU_CALL(waku_start(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_start(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuStop(void* wakuCtx, void* resp) {
WAKU_CALL(waku_stop(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_stop(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuDestroy(void* wakuCtx, void* resp) {
WAKU_CALL(waku_destroy(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_destroy(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuStartDiscV5(void* wakuCtx, void* resp) {
WAKU_CALL(waku_start_discv5(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_start_discv5(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuStopDiscV5(void* wakuCtx, void* resp) {
WAKU_CALL(waku_stop_discv5(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_stop_discv5(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuVersion(void* wakuCtx, void* resp) {
WAKU_CALL(waku_version(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL(waku_version(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuSetEventCallback(void* wakuCtx) {
@ -112,7 +112,7 @@ package main
// This technique is needed because cgo only allows to export Go functions and not methods.
waku_set_event_callback(wakuCtx, (WakuCallBack) globalEventCallback, wakuCtx);
set_event_callback(wakuCtx, (FFICallBack) globalEventCallback, wakuCtx);
}
static void cGoWakuContentTopic(void* wakuCtx,
@ -123,20 +123,21 @@ package main
void* resp) {
WAKU_CALL( waku_content_topic(wakuCtx,
(FFICallBack) callback,
resp,
appName,
appVersion,
contentTopicName,
encoding,
(WakuCallBack) callback,
resp) );
encoding
) );
}
static void cGoWakuPubsubTopic(void* wakuCtx, char* topicName, void* resp) {
WAKU_CALL( waku_pubsub_topic(wakuCtx, topicName, (WakuCallBack) callback, resp) );
WAKU_CALL( waku_pubsub_topic(wakuCtx, (FFICallBack) callback, resp, topicName) );
}
static void cGoWakuDefaultPubsubTopic(void* wakuCtx, void* resp) {
WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (WakuCallBack) callback, resp));
WAKU_CALL (waku_default_pubsub_topic(wakuCtx, (FFICallBack) callback, resp));
}
static void cGoWakuRelayPublish(void* wakuCtx,
@ -146,34 +147,36 @@ package main
void* resp) {
WAKU_CALL (waku_relay_publish(wakuCtx,
(FFICallBack) callback,
resp,
pubSubTopic,
jsonWakuMessage,
timeoutMs,
(WakuCallBack) callback,
resp));
timeoutMs
));
}
static void cGoWakuRelaySubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
WAKU_CALL ( waku_relay_subscribe(wakuCtx,
pubSubTopic,
(WakuCallBack) callback,
resp) );
(FFICallBack) callback,
resp,
pubSubTopic) );
}
static void cGoWakuRelayUnsubscribe(void* wakuCtx, char* pubSubTopic, void* resp) {
WAKU_CALL ( waku_relay_unsubscribe(wakuCtx,
pubSubTopic,
(WakuCallBack) callback,
resp) );
(FFICallBack) callback,
resp,
pubSubTopic) );
}
static void cGoWakuConnect(void* wakuCtx, char* peerMultiAddr, int timeoutMs, void* resp) {
WAKU_CALL( waku_connect(wakuCtx,
(FFICallBack) callback,
resp,
peerMultiAddr,
timeoutMs,
(WakuCallBack) callback,
resp) );
timeoutMs
) );
}
static void cGoWakuDialPeerById(void* wakuCtx,
@ -183,42 +186,44 @@ package main
void* resp) {
WAKU_CALL( waku_dial_peer_by_id(wakuCtx,
(FFICallBack) callback,
resp,
peerId,
protocol,
timeoutMs,
(WakuCallBack) callback,
resp) );
timeoutMs
) );
}
static void cGoWakuDisconnectPeerById(void* wakuCtx, char* peerId, void* resp) {
WAKU_CALL( waku_disconnect_peer_by_id(wakuCtx,
peerId,
(WakuCallBack) callback,
resp) );
(FFICallBack) callback,
resp,
peerId
) );
}
static void cGoWakuListenAddresses(void* wakuCtx, void* resp) {
WAKU_CALL (waku_listen_addresses(wakuCtx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_listen_addresses(wakuCtx, (FFICallBack) callback, resp) );
}
static void cGoWakuGetMyENR(void* ctx, void* resp) {
WAKU_CALL (waku_get_my_enr(ctx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_get_my_enr(ctx, (FFICallBack) callback, resp) );
}
static void cGoWakuGetMyPeerId(void* ctx, void* resp) {
WAKU_CALL (waku_get_my_peerid(ctx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_get_my_peerid(ctx, (FFICallBack) callback, resp) );
}
static void cGoWakuListPeersInMesh(void* ctx, char* pubSubTopic, void* resp) {
WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, pubSubTopic, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_relay_get_num_peers_in_mesh(ctx, (FFICallBack) callback, resp, pubSubTopic) );
}
static void cGoWakuGetNumConnectedPeers(void* ctx, char* pubSubTopic, void* resp) {
WAKU_CALL (waku_relay_get_num_connected_peers(ctx, pubSubTopic, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_relay_get_num_connected_peers(ctx, (FFICallBack) callback, resp, pubSubTopic) );
}
static void cGoWakuGetPeerIdsFromPeerStore(void* wakuCtx, void* resp) {
WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (WakuCallBack) callback, resp) );
WAKU_CALL (waku_get_peerids_from_peerstore(wakuCtx, (FFICallBack) callback, resp) );
}
static void cGoWakuLightpushPublish(void* wakuCtx,
@ -227,10 +232,11 @@ package main
void* resp) {
WAKU_CALL (waku_lightpush_publish(wakuCtx,
(FFICallBack) callback,
resp,
pubSubTopic,
jsonWakuMessage,
(WakuCallBack) callback,
resp));
jsonWakuMessage
));
}
static void cGoWakuStoreQuery(void* wakuCtx,
@ -240,11 +246,12 @@ package main
void* resp) {
WAKU_CALL (waku_store_query(wakuCtx,
(FFICallBack) callback,
resp,
jsonQuery,
peerAddr,
timeoutMs,
(WakuCallBack) callback,
resp));
timeoutMs
));
}
static void cGoWakuPeerExchangeQuery(void* wakuCtx,
@ -252,9 +259,10 @@ package main
void* resp) {
WAKU_CALL (waku_peer_exchange_request(wakuCtx,
numPeers,
(WakuCallBack) callback,
resp));
(FFICallBack) callback,
resp,
numPeers
));
}
static void cGoWakuGetPeerIdsByProtocol(void* wakuCtx,
@ -262,9 +270,10 @@ package main
void* resp) {
WAKU_CALL (waku_get_peerids_by_protocol(wakuCtx,
protocol,
(WakuCallBack) callback,
resp));
(FFICallBack) callback,
resp,
protocol
));
}
*/
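
The comment at the top of this block notes that NULL is passed as resp when the caller does not need the response; when it does, the resp pointer is handed back to the callback as its userData argument. Below is a hypothetical C sketch of that round-trip under the new argument order; the FFICallBack type is assumed to have the (int, const char*, size_t, void*) shape used by the callbacks elsewhere in this diff, and the config string is a placeholder.

/* resp_roundtrip.c - hypothetical sketch, not part of this change. */
#include <stdio.h>
#include <unistd.h>
#include "libwaku.h"

/* The callback receives whatever pointer the caller passed as resp/userData. */
static void copy_into_resp(int callerRet, const char *msg, size_t len, void *userData)
{
  char *out = (char *)userData;
  snprintf(out, 256, "%s", msg ? msg : ""); /* 256 matches `response` below */
}

int main(void)
{
  char response[256] = {0};

  /* Placeholder config; see examples/cbindings for a complete one. */
  void *ctx = waku_new("{}", copy_into_resp, response);
  sleep(1); /* crude wait; the examples use a condition variable instead */

  /* New order: context, callback, resp/userData, then call-specific
   * arguments (waku_default_pubsub_topic has none). */
  waku_default_pubsub_topic(ctx, copy_into_resp, response);
  sleep(1);

  printf("last response: %s\n", response);
  return 0;
}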

View File

@ -0,0 +1,331 @@
// !$*UTF8*$!
{
archiveVersion = 1;
classes = {
};
objectVersion = 63;
objects = {
/* Begin PBXBuildFile section */
45714AF6D1D12AF5C36694FB /* WakuExampleApp.swift in Sources */ = {isa = PBXBuildFile; fileRef = 0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */; };
6468FA3F5F760D3FCAD6CDBF /* ContentView.swift in Sources */ = {isa = PBXBuildFile; fileRef = 7D8744E36DADC11F38A1CC99 /* ContentView.swift */; };
C4EA202B782038F96336401F /* WakuNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = 638A565C495A63CFF7396FBC /* WakuNode.swift */; };
/* End PBXBuildFile section */
/* Begin PBXFileReference section */
0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WakuExampleApp.swift; sourceTree = "<group>"; };
31BE20DB2755A11000723420 /* libwaku.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = libwaku.h; sourceTree = "<group>"; };
5C5AAC91E0166D28BFA986DB /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist; path = Info.plist; sourceTree = "<group>"; };
638A565C495A63CFF7396FBC /* WakuNode.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = WakuNode.swift; sourceTree = "<group>"; };
7D8744E36DADC11F38A1CC99 /* ContentView.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ContentView.swift; sourceTree = "<group>"; };
A8655016B3DF9B0877631CE5 /* WakuExample-Bridging-Header.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = "WakuExample-Bridging-Header.h"; sourceTree = "<group>"; };
CFBE844B6E18ACB81C65F83B /* WakuExample.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = WakuExample.app; sourceTree = BUILT_PRODUCTS_DIR; };
/* End PBXFileReference section */
/* Begin PBXGroup section */
34547A6259485BD047D6375C /* Products */ = {
isa = PBXGroup;
children = (
CFBE844B6E18ACB81C65F83B /* WakuExample.app */,
);
name = Products;
sourceTree = "<group>";
};
4F76CB85EC44E951B8E75522 /* WakuExample */ = {
isa = PBXGroup;
children = (
7D8744E36DADC11F38A1CC99 /* ContentView.swift */,
5C5AAC91E0166D28BFA986DB /* Info.plist */,
31BE20DB2755A11000723420 /* libwaku.h */,
A8655016B3DF9B0877631CE5 /* WakuExample-Bridging-Header.h */,
0671AF6DCB0D788B0C1E9C8B /* WakuExampleApp.swift */,
638A565C495A63CFF7396FBC /* WakuNode.swift */,
);
path = WakuExample;
sourceTree = "<group>";
};
D40CD2446F177CAABB0A747A = {
isa = PBXGroup;
children = (
4F76CB85EC44E951B8E75522 /* WakuExample */,
34547A6259485BD047D6375C /* Products */,
);
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXNativeTarget section */
F751EF8294AD21F713D47FDA /* WakuExample */ = {
isa = PBXNativeTarget;
buildConfigurationList = 757FA0123629BD63CB254113 /* Build configuration list for PBXNativeTarget "WakuExample" */;
buildPhases = (
D3AFD8C4DA68BF5C4F7D8E10 /* Sources */,
);
buildRules = (
);
dependencies = (
);
name = WakuExample;
packageProductDependencies = (
);
productName = WakuExample;
productReference = CFBE844B6E18ACB81C65F83B /* WakuExample.app */;
productType = "com.apple.product-type.application";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
4FF82F0F4AF8E1E34728F150 /* Project object */ = {
isa = PBXProject;
attributes = {
BuildIndependentTargetsInParallel = YES;
LastUpgradeCheck = 1500;
};
buildConfigurationList = B3A4F48294254543E79767C4 /* Build configuration list for PBXProject "WakuExample" */;
compatibilityVersion = "Xcode 14.0";
developmentRegion = en;
hasScannedForEncodings = 0;
knownRegions = (
Base,
en,
);
mainGroup = D40CD2446F177CAABB0A747A;
minimizedProjectReferenceProxies = 1;
projectDirPath = "";
projectRoot = "";
targets = (
F751EF8294AD21F713D47FDA /* WakuExample */,
);
};
/* End PBXProject section */
/* Begin PBXSourcesBuildPhase section */
D3AFD8C4DA68BF5C4F7D8E10 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
6468FA3F5F760D3FCAD6CDBF /* ContentView.swift in Sources */,
45714AF6D1D12AF5C36694FB /* WakuExampleApp.swift in Sources */,
C4EA202B782038F96336401F /* WakuNode.swift in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin XCBuildConfiguration section */
36939122077C66DD94082311 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CODE_SIGN_IDENTITY = "iPhone Developer";
DEVELOPMENT_TEAM = 2Q52K2W84K;
HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/WakuExample";
INFOPLIST_FILE = WakuExample/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
LD_RUNPATH_SEARCH_PATHS = (
"$(inherited)",
"@executable_path/Frameworks",
);
"LIBRARY_SEARCH_PATHS[sdk=iphoneos*]" = "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64";
"LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]" = "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64";
MACOSX_DEPLOYMENT_TARGET = 15.6;
OTHER_LDFLAGS = (
"-lc++",
"-force_load",
"$(PROJECT_DIR)/../../build/ios/iphoneos-arm64/libwaku.a",
"-lsqlite3",
"-lz",
);
PRODUCT_BUNDLE_IDENTIFIER = org.waku.example;
SDKROOT = iphoneos;
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
SUPPORTS_MACCATALYST = NO;
SUPPORTS_MAC_DESIGNED_FOR_IPHONE_IPAD = YES;
SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = YES;
SWIFT_OBJC_BRIDGING_HEADER = "WakuExample/WakuExample-Bridging-Header.h";
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Release;
};
9BA833A09EEDB4B3FCCD8F8E /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++14";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_ENABLE_OBJC_WEAK = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu11;
GCC_NO_COMMON_BLOCKS = YES;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
MTL_ENABLE_DEBUG_INFO = NO;
MTL_FAST_MATH = YES;
PRODUCT_NAME = "$(TARGET_NAME)";
SDKROOT = iphoneos;
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
SUPPORTS_MACCATALYST = NO;
SWIFT_COMPILATION_MODE = wholemodule;
SWIFT_OPTIMIZATION_LEVEL = "-O";
SWIFT_VERSION = 5.0;
};
name = Release;
};
A59ABFB792FED8974231E5AC /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++14";
CLANG_CXX_LIBRARY = "libc++";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_ENABLE_OBJC_WEAK = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
GCC_C_LANGUAGE_STANDARD = gnu11;
GCC_DYNAMIC_NO_PIC = NO;
GCC_NO_COMMON_BLOCKS = YES;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PREPROCESSOR_DEFINITIONS = (
"$(inherited)",
"DEBUG=1",
);
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
MTL_FAST_MATH = YES;
ONLY_ACTIVE_ARCH = YES;
PRODUCT_NAME = "$(TARGET_NAME)";
SDKROOT = iphoneos;
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
SUPPORTS_MACCATALYST = NO;
SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;
SWIFT_OPTIMIZATION_LEVEL = "-Onone";
SWIFT_VERSION = 5.0;
};
name = Debug;
};
AF5ADDAA865B1F6BD4E70A79 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon;
CODE_SIGN_IDENTITY = "iPhone Developer";
DEVELOPMENT_TEAM = 2Q52K2W84K;
HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/WakuExample";
INFOPLIST_FILE = WakuExample/Info.plist;
IPHONEOS_DEPLOYMENT_TARGET = 18.6;
LD_RUNPATH_SEARCH_PATHS = (
"$(inherited)",
"@executable_path/Frameworks",
);
"LIBRARY_SEARCH_PATHS[sdk=iphoneos*]" = "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64";
"LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]" = "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64";
MACOSX_DEPLOYMENT_TARGET = 15.6;
OTHER_LDFLAGS = (
"-lc++",
"-force_load",
"$(PROJECT_DIR)/../../build/ios/iphoneos-arm64/libwaku.a",
"-lsqlite3",
"-lz",
);
PRODUCT_BUNDLE_IDENTIFIER = org.waku.example;
SDKROOT = iphoneos;
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator";
SUPPORTS_MACCATALYST = NO;
SUPPORTS_MAC_DESIGNED_FOR_IPHONE_IPAD = YES;
SUPPORTS_XR_DESIGNED_FOR_IPHONE_IPAD = YES;
SWIFT_OBJC_BRIDGING_HEADER = "WakuExample/WakuExample-Bridging-Header.h";
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Debug;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
757FA0123629BD63CB254113 /* Build configuration list for PBXNativeTarget "WakuExample" */ = {
isa = XCConfigurationList;
buildConfigurations = (
AF5ADDAA865B1F6BD4E70A79 /* Debug */,
36939122077C66DD94082311 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Debug;
};
B3A4F48294254543E79767C4 /* Build configuration list for PBXProject "WakuExample" */ = {
isa = XCConfigurationList;
buildConfigurations = (
A59ABFB792FED8974231E5AC /* Debug */,
9BA833A09EEDB4B3FCCD8F8E /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Debug;
};
/* End XCConfigurationList section */
};
rootObject = 4FF82F0F4AF8E1E34728F150 /* Project object */;
}

View File

@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "self:">
</FileRef>
</Workspace>

View File

@ -0,0 +1,229 @@
//
// ContentView.swift
// WakuExample
//
// Minimal chat PoC using libwaku on iOS
//
import SwiftUI
struct ContentView: View {
@StateObject private var wakuNode = WakuNode()
@State private var messageText = ""
var body: some View {
ZStack {
// Main content
VStack(spacing: 0) {
// Header with status
HStack {
Circle()
.fill(statusColor)
.frame(width: 10, height: 10)
VStack(alignment: .leading, spacing: 2) {
Text(wakuNode.status.rawValue)
.font(.caption)
if wakuNode.status == .running {
HStack(spacing: 4) {
Text(wakuNode.isConnected ? "Connected" : "Discovering...")
Text("")
filterStatusView
}
.font(.caption2)
.foregroundColor(.secondary)
// Subscription maintenance status
if wakuNode.subscriptionMaintenanceActive {
HStack(spacing: 4) {
Image(systemName: "arrow.triangle.2.circlepath")
.foregroundColor(.blue)
Text("Maintenance active")
if wakuNode.failedSubscribeAttempts > 0 {
Text("(\(wakuNode.failedSubscribeAttempts) retries)")
.foregroundColor(.orange)
}
}
.font(.caption2)
.foregroundColor(.secondary)
}
}
}
Spacer()
if wakuNode.status == .stopped {
Button("Start") {
wakuNode.start()
}
.buttonStyle(.borderedProminent)
.controlSize(.small)
} else if wakuNode.status == .running {
if !wakuNode.filterSubscribed {
Button("Resub") {
wakuNode.resubscribe()
}
.buttonStyle(.bordered)
.controlSize(.small)
}
Button("Stop") {
wakuNode.stop()
}
.buttonStyle(.bordered)
.controlSize(.small)
}
}
.padding()
.background(Color.gray.opacity(0.1))
// Messages list
ScrollViewReader { proxy in
ScrollView {
LazyVStack(alignment: .leading, spacing: 8) {
ForEach(wakuNode.receivedMessages.reversed()) { message in
MessageBubble(message: message)
.id(message.id)
}
}
.padding()
}
.onChange(of: wakuNode.receivedMessages.count) { _, newCount in
if let lastMessage = wakuNode.receivedMessages.first {
withAnimation {
proxy.scrollTo(lastMessage.id, anchor: .bottom)
}
}
}
}
Divider()
// Message input
HStack(spacing: 12) {
TextField("Message", text: $messageText)
.textFieldStyle(.roundedBorder)
.disabled(wakuNode.status != .running)
Button(action: sendMessage) {
Image(systemName: "paperplane.fill")
.foregroundColor(.white)
.padding(10)
.background(canSend ? Color.blue : Color.gray)
.clipShape(Circle())
}
.disabled(!canSend)
}
.padding()
.background(Color.gray.opacity(0.1))
}
// Toast overlay for errors
VStack {
ForEach(wakuNode.errorQueue) { error in
ToastView(error: error) {
wakuNode.dismissError(error)
}
.transition(.asymmetric(
insertion: .move(edge: .top).combined(with: .opacity),
removal: .opacity
))
}
Spacer()
}
.padding(.top, 8)
.animation(.easeInOut(duration: 0.3), value: wakuNode.errorQueue)
}
}
private var statusColor: Color {
switch wakuNode.status {
case .stopped: return .gray
case .starting: return .yellow
case .running: return .green
case .error: return .red
}
}
@ViewBuilder
private var filterStatusView: some View {
if wakuNode.filterSubscribed {
Text("Filter OK")
.foregroundColor(.green)
} else if wakuNode.failedSubscribeAttempts > 0 {
Text("Filter retrying (\(wakuNode.failedSubscribeAttempts))")
.foregroundColor(.orange)
} else {
Text("Filter pending")
.foregroundColor(.orange)
}
}
private var canSend: Bool {
wakuNode.status == .running && wakuNode.isConnected && !messageText.trimmingCharacters(in: .whitespaces).isEmpty
}
private func sendMessage() {
let text = messageText.trimmingCharacters(in: .whitespaces)
guard !text.isEmpty else { return }
wakuNode.publish(message: text)
messageText = ""
}
}
// MARK: - Toast View
struct ToastView: View {
let error: TimestampedError
let onDismiss: () -> Void
var body: some View {
HStack(spacing: 12) {
Image(systemName: "exclamationmark.triangle.fill")
.foregroundColor(.white)
Text(error.message)
.font(.subheadline)
.foregroundColor(.white)
.lineLimit(2)
Spacer()
Button(action: onDismiss) {
Image(systemName: "xmark.circle.fill")
.foregroundColor(.white.opacity(0.8))
.font(.title3)
}
.buttonStyle(.plain)
}
.padding(.horizontal, 16)
.padding(.vertical, 12)
.background(
RoundedRectangle(cornerRadius: 12)
.fill(Color.red.opacity(0.9))
.shadow(color: .black.opacity(0.2), radius: 8, x: 0, y: 4)
)
.padding(.horizontal, 16)
.padding(.vertical, 4)
}
}
// MARK: - Message Bubble
struct MessageBubble: View {
let message: WakuMessage
var body: some View {
VStack(alignment: .leading, spacing: 4) {
Text(message.payload)
.padding(10)
.background(Color.blue.opacity(0.1))
.cornerRadius(12)
Text(message.timestamp, style: .time)
.font(.caption2)
.foregroundColor(.secondary)
}
}
}
#Preview {
ContentView()
}

View File

@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>$(DEVELOPMENT_LANGUAGE)</string>
<key>CFBundleDisplayName</key>
<string>Waku Example</string>
<key>CFBundleExecutable</key>
<string>$(EXECUTABLE_NAME)</string>
<key>CFBundleIdentifier</key>
<string>org.waku.example</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundleName</key>
<string>WakuExample</string>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleShortVersionString</key>
<string>1.0</string>
<key>CFBundleVersion</key>
<string>1</string>
<key>NSAppTransportSecurity</key>
<dict>
<key>NSAllowsArbitraryLoads</key>
<true/>
</dict>
<key>UILaunchScreen</key>
<dict/>
<key>UISupportedInterfaceOrientations</key>
<array>
<string>UIInterfaceOrientationPortrait</string>
</array>
</dict>
</plist>

View File

@ -0,0 +1,15 @@
//
// WakuExample-Bridging-Header.h
// WakuExample
//
// Bridging header to expose libwaku C functions to Swift
//
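// The Xcode project points SWIFT_OBJC_BRIDGING_HEADER at this file (see
// project.yml), so every declaration in libwaku.h (waku_new, waku_start,
// WakuCallBack, ...) becomes directly callable from Swift.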
#ifndef WakuExample_Bridging_Header_h
#define WakuExample_Bridging_Header_h
#import "libwaku.h"
#endif /* WakuExample_Bridging_Header_h */

View File

@ -0,0 +1,19 @@
//
// WakuExampleApp.swift
// WakuExample
//
// SwiftUI app entry point for Waku iOS example
//
import SwiftUI
@main
struct WakuExampleApp: App {
var body: some Scene {
WindowGroup {
ContentView()
}
}
}

View File

@ -0,0 +1,739 @@
//
// WakuNode.swift
// WakuExample
//
// Swift wrapper around libwaku C API for edge mode (lightpush + filter)
// Uses Swift actors for thread safety and UI responsiveness
//
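// A minimal usage sketch, assuming a MainActor caller such as a SwiftUI view
// (the `node` constant is illustrative, not part of this file):
//
//     let node = WakuNode()
//     node.start()                      // creates the libwaku context, connects, subscribes
//     node.publish(message: "hello")    // lightpush once a service peer is connected
//     node.stop()                       // cancels tasks and shuts the node down
//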
import Foundation
// MARK: - Data Types
/// Message received from Waku network
struct WakuMessage: Identifiable, Equatable, Sendable {
let id: String // messageHash from Waku - unique identifier for deduplication
let payload: String
let contentTopic: String
let timestamp: Date
}
/// Waku node status
enum WakuNodeStatus: String, Sendable {
case stopped = "Stopped"
case starting = "Starting..."
case running = "Running"
case error = "Error"
}
/// Status updates from WakuActor to WakuNode
enum WakuStatusUpdate: Sendable {
case statusChanged(WakuNodeStatus)
case connectionChanged(isConnected: Bool)
case filterSubscriptionChanged(subscribed: Bool, failedAttempts: Int)
case maintenanceChanged(active: Bool)
case error(String)
}
/// Error with timestamp for toast queue
struct TimestampedError: Identifiable, Equatable {
let id = UUID()
let message: String
let timestamp: Date
static func == (lhs: TimestampedError, rhs: TimestampedError) -> Bool {
lhs.id == rhs.id
}
}
// MARK: - Callback Context for C API
private final class CallbackContext: @unchecked Sendable {
private let lock = NSLock()
private var _continuation: CheckedContinuation<(success: Bool, result: String?), Never>?
private var _resumed = false
var success: Bool = false
var result: String?
var continuation: CheckedContinuation<(success: Bool, result: String?), Never>? {
get {
lock.lock()
defer { lock.unlock() }
return _continuation
}
set {
lock.lock()
defer { lock.unlock() }
_continuation = newValue
}
}
/// Thread-safe resume - ensures continuation is only resumed once
/// Returns true if this call actually resumed, false if already resumed
@discardableResult
func resumeOnce(returning value: (success: Bool, result: String?)) -> Bool {
lock.lock()
defer { lock.unlock() }
guard !_resumed, let cont = _continuation else {
return false
}
_resumed = true
_continuation = nil
cont.resume(returning: value)
return true
}
}
// MARK: - WakuActor
/// Actor that isolates all Waku operations from the main thread
/// All C API calls and mutable state are contained here
actor WakuActor {
// MARK: - State
private var ctx: UnsafeMutableRawPointer?
private var seenMessageHashes: Set<String> = []
private var isSubscribed: Bool = false
private var isSubscribing: Bool = false
private var hasPeers: Bool = false
private var maintenanceTask: Task<Void, Never>?
private var eventProcessingTask: Task<Void, Never>?
// Stream continuations for communicating with UI
private var messageContinuation: AsyncStream<WakuMessage>.Continuation?
private var statusContinuation: AsyncStream<WakuStatusUpdate>.Continuation?
// Event stream from C callbacks
private var eventContinuation: AsyncStream<String>.Continuation?
// Configuration
let defaultPubsubTopic = "/waku/2/rs/1/0"
let defaultContentTopic = "/waku-ios-example/1/chat/proto"
private let staticPeer = "/dns4/node-01.do-ams3.waku.sandbox.status.im/tcp/30303/p2p/16Uiu2HAmPLe7Mzm8TsYUubgCAW1aJoeFScxrLj8ppHFivPo97bUZ"
// Subscription maintenance settings
private let maxFailedSubscribes = 3
private let retryWaitSeconds: UInt64 = 2_000_000_000 // 2 seconds in nanoseconds
private let maintenanceIntervalSeconds: UInt64 = 30_000_000_000 // 30 seconds in nanoseconds
private let maxSeenHashes = 1000
// MARK: - Static callback storage (for C callbacks)
// We need a way for C callbacks to reach the actor
// Using a simple static reference (safe because we only have one instance)
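// Event flow, for reference: libwaku invokes eventCallback on its own thread,
// the event JSON is yielded into sharedEventContinuation, the AsyncStream is
// drained by eventProcessingTask, which re-enters the actor via handleEvent(_:).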
private static var sharedEventContinuation: AsyncStream<String>.Continuation?
private static let eventCallback: WakuCallBack = { ret, msg, len, userData in
guard ret == RET_OK, let msg = msg else { return }
let str = String(cString: msg)
WakuActor.sharedEventContinuation?.yield(str)
}
private static let syncCallback: WakuCallBack = { ret, msg, len, userData in
guard let userData = userData else { return }
let context = Unmanaged<CallbackContext>.fromOpaque(userData).takeUnretainedValue()
let success = (ret == RET_OK)
var resultStr: String? = nil
if let msg = msg {
resultStr = String(cString: msg)
}
context.resumeOnce(returning: (success, resultStr))
}
// MARK: - Stream Setup
func setMessageContinuation(_ continuation: AsyncStream<WakuMessage>.Continuation?) {
self.messageContinuation = continuation
}
func setStatusContinuation(_ continuation: AsyncStream<WakuStatusUpdate>.Continuation?) {
self.statusContinuation = continuation
}
// MARK: - Public API
var isRunning: Bool {
ctx != nil
}
var hasConnectedPeers: Bool {
hasPeers
}
func start() async {
guard ctx == nil else {
print("[WakuActor] Already started")
return
}
statusContinuation?.yield(.statusChanged(.starting))
// Create event stream for C callbacks
let eventStream = AsyncStream<String> { continuation in
self.eventContinuation = continuation
WakuActor.sharedEventContinuation = continuation
}
// Start event processing task
eventProcessingTask = Task { [weak self] in
for await eventJson in eventStream {
await self?.handleEvent(eventJson)
}
}
// Initialize the node
let success = await initializeNode()
if success {
statusContinuation?.yield(.statusChanged(.running))
// Connect to peer
let connected = await connectToPeer()
if connected {
hasPeers = true
statusContinuation?.yield(.connectionChanged(isConnected: true))
// Start maintenance loop
startMaintenanceLoop()
} else {
statusContinuation?.yield(.error("Failed to connect to service peer"))
}
}
}
func stop() async {
guard let context = ctx else { return }
// Stop maintenance loop
maintenanceTask?.cancel()
maintenanceTask = nil
// Stop event processing
eventProcessingTask?.cancel()
eventProcessingTask = nil
// Close event stream
eventContinuation?.finish()
eventContinuation = nil
WakuActor.sharedEventContinuation = nil
statusContinuation?.yield(.statusChanged(.stopped))
statusContinuation?.yield(.connectionChanged(isConnected: false))
statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0))
statusContinuation?.yield(.maintenanceChanged(active: false))
// Reset state
let ctxToStop = context
ctx = nil
isSubscribed = false
isSubscribing = false
hasPeers = false
seenMessageHashes.removeAll()
// Unsubscribe and stop in background (fire and forget)
Task.detached {
// Unsubscribe
_ = await self.callWakuSync { waku_filter_unsubscribe_all(ctxToStop, WakuActor.syncCallback, $0) }
print("[WakuActor] Unsubscribed from filter")
// Stop
_ = await self.callWakuSync { waku_stop(ctxToStop, WakuActor.syncCallback, $0) }
print("[WakuActor] Node stopped")
// Destroy
_ = await self.callWakuSync { waku_destroy(ctxToStop, WakuActor.syncCallback, $0) }
print("[WakuActor] Node destroyed")
}
}
func publish(message: String, contentTopic: String? = nil) async {
guard let context = ctx else {
print("[WakuActor] Node not started")
return
}
guard hasPeers else {
print("[WakuActor] No peers connected yet")
statusContinuation?.yield(.error("No peers connected yet. Please wait..."))
return
}
let topic = contentTopic ?? defaultContentTopic
guard let payloadData = message.data(using: .utf8) else { return }
let payloadBase64 = payloadData.base64EncodedString()
let timestamp = Int64(Date().timeIntervalSince1970 * 1_000_000_000)
let jsonMessage = """
{"payload":"\(payloadBase64)","contentTopic":"\(topic)","timestamp":\(timestamp)}
"""
let result = await callWakuSync { userData in
waku_lightpush_publish(
context,
self.defaultPubsubTopic,
jsonMessage,
WakuActor.syncCallback,
userData
)
}
if result.success {
print("[WakuActor] Published message")
} else {
print("[WakuActor] Publish error: \(result.result ?? "unknown")")
statusContinuation?.yield(.error("Failed to send message"))
}
}
func resubscribe() async {
print("[WakuActor] Force resubscribe requested")
isSubscribed = false
isSubscribing = false
statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0))
_ = await subscribe()
}
// MARK: - Private Methods
private func initializeNode() async -> Bool {
let config = """
{
"tcpPort": 60000,
"clusterId": 1,
"shards": [0],
"relay": false,
"lightpush": true,
"filter": true,
"logLevel": "DEBUG",
"discv5Discovery": true,
"discv5BootstrapNodes": [
"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw",
"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw"
],
"discv5UdpPort": 9999,
"dnsDiscovery": true,
"dnsDiscoveryUrl": "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im",
"dnsDiscoveryNameServers": ["8.8.8.8", "1.0.0.1"]
}
"""
// Create node - waku_new is special, it returns the context directly
let createResult = await withCheckedContinuation { (continuation: CheckedContinuation<(ctx: UnsafeMutableRawPointer?, success: Bool, result: String?), Never>) in
let callbackCtx = CallbackContext()
let userDataPtr = Unmanaged.passRetained(callbackCtx).toOpaque()
// Set up a simple callback for waku_new
let newCtx = waku_new(config, { ret, msg, len, userData in
guard let userData = userData else { return }
let context = Unmanaged<CallbackContext>.fromOpaque(userData).takeUnretainedValue()
context.success = (ret == RET_OK)
if let msg = msg {
context.result = String(cString: msg)
}
}, userDataPtr)
// Small delay to ensure callback completes
DispatchQueue.global().asyncAfter(deadline: .now() + 0.1) {
Unmanaged<CallbackContext>.fromOpaque(userDataPtr).release()
continuation.resume(returning: (newCtx, callbackCtx.success, callbackCtx.result))
}
}
guard createResult.ctx != nil else {
statusContinuation?.yield(.statusChanged(.error))
statusContinuation?.yield(.error("Failed to create node: \(createResult.result ?? "unknown")"))
return false
}
ctx = createResult.ctx
// Set event callback
waku_set_event_callback(ctx, WakuActor.eventCallback, nil)
// Start node
let startResult = await callWakuSync { userData in
waku_start(self.ctx, WakuActor.syncCallback, userData)
}
guard startResult.success else {
statusContinuation?.yield(.statusChanged(.error))
statusContinuation?.yield(.error("Failed to start node: \(startResult.result ?? "unknown")"))
ctx = nil
return false
}
print("[WakuActor] Node started")
return true
}
private func connectToPeer() async -> Bool {
guard let context = ctx else { return false }
print("[WakuActor] Connecting to static peer...")
let result = await callWakuSync { userData in
waku_connect(context, self.staticPeer, 10000, WakuActor.syncCallback, userData)
}
if result.success {
print("[WakuActor] Connected to peer successfully")
return true
} else {
print("[WakuActor] Failed to connect: \(result.result ?? "unknown")")
return false
}
}
private func subscribe(contentTopic: String? = nil) async -> Bool {
guard let context = ctx else { return false }
guard !isSubscribed && !isSubscribing else { return isSubscribed }
isSubscribing = true
let topic = contentTopic ?? defaultContentTopic
let result = await callWakuSync { userData in
waku_filter_subscribe(
context,
self.defaultPubsubTopic,
topic,
WakuActor.syncCallback,
userData
)
}
isSubscribing = false
if result.success {
print("[WakuActor] Subscribe request successful to \(topic)")
isSubscribed = true
statusContinuation?.yield(.filterSubscriptionChanged(subscribed: true, failedAttempts: 0))
return true
} else {
print("[WakuActor] Subscribe error: \(result.result ?? "unknown")")
isSubscribed = false
return false
}
}
private func pingFilterPeer() async -> Bool {
guard let context = ctx else { return false }
let result = await callWakuSync { userData in
waku_ping_peer(
context,
self.staticPeer,
10000,
WakuActor.syncCallback,
userData
)
}
return result.success
}
// MARK: - Subscription Maintenance
private func startMaintenanceLoop() {
guard maintenanceTask == nil else {
print("[WakuActor] Maintenance loop already running")
return
}
statusContinuation?.yield(.maintenanceChanged(active: true))
print("[WakuActor] Starting subscription maintenance loop")
maintenanceTask = Task { [weak self] in
guard let self = self else { return }
var failedSubscribes = 0
var isFirstPingOnConnection = true
while !Task.isCancelled {
guard await self.isRunning else { break }
print("[WakuActor] Maintaining subscription...")
let pingSuccess = await self.pingFilterPeer()
let currentlySubscribed = await self.isSubscribed
if pingSuccess && currentlySubscribed {
print("[WakuActor] Subscription is live, waiting 30s")
try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds)
continue
}
if !isFirstPingOnConnection && !pingSuccess {
print("[WakuActor] Ping failed - subscription may be lost")
await self.statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: failedSubscribes))
}
isFirstPingOnConnection = false
print("[WakuActor] No active subscription found. Sending subscribe request...")
await self.resetSubscriptionState()
let subscribeSuccess = await self.subscribe()
if subscribeSuccess {
print("[WakuActor] Subscribe request successful")
failedSubscribes = 0
try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds)
continue
}
failedSubscribes += 1
await self.statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: failedSubscribes))
print("[WakuActor] Subscribe request failed. Attempt \(failedSubscribes)/\(self.maxFailedSubscribes)")
if failedSubscribes < self.maxFailedSubscribes {
print("[WakuActor] Retrying in 2s...")
try? await Task.sleep(nanoseconds: self.retryWaitSeconds)
} else {
print("[WakuActor] Max subscribe failures reached")
await self.statusContinuation?.yield(.error("Filter subscription failed after \(self.maxFailedSubscribes) attempts"))
failedSubscribes = 0
try? await Task.sleep(nanoseconds: self.maintenanceIntervalSeconds)
}
}
print("[WakuActor] Subscription maintenance loop stopped")
await self.statusContinuation?.yield(.maintenanceChanged(active: false))
}
}
private func resetSubscriptionState() {
isSubscribed = false
isSubscribing = false
}
// MARK: - Event Handling
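// Events arrive as JSON strings; the shape below is inferred from the handlers
// that follow (field values are illustrative only):
//
//   {"eventType":"connection_change","peerEvent":"Joined"}
//   {"eventType":"message","messageHash":"0xabc...","wakuMessage":{"payload":"aGVsbG8=","contentTopic":"/waku-ios-example/1/chat/proto"}}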
private func handleEvent(_ eventJson: String) {
guard let data = eventJson.data(using: .utf8),
let json = try? JSONSerialization.jsonObject(with: data) as? [String: Any],
let eventType = json["eventType"] as? String else {
return
}
if eventType == "connection_change" {
handleConnectionChange(json)
} else if eventType == "message" {
handleMessage(json)
}
}
private func handleConnectionChange(_ json: [String: Any]) {
guard let peerEvent = json["peerEvent"] as? String else { return }
if peerEvent == "Joined" || peerEvent == "Identified" {
hasPeers = true
statusContinuation?.yield(.connectionChanged(isConnected: true))
} else if peerEvent == "Left" {
statusContinuation?.yield(.filterSubscriptionChanged(subscribed: false, failedAttempts: 0))
}
}
private func handleMessage(_ json: [String: Any]) {
guard let messageHash = json["messageHash"] as? String,
let wakuMessage = json["wakuMessage"] as? [String: Any],
let payloadBase64 = wakuMessage["payload"] as? String,
let contentTopic = wakuMessage["contentTopic"] as? String,
let payloadData = Data(base64Encoded: payloadBase64),
let payloadString = String(data: payloadData, encoding: .utf8) else {
return
}
// Deduplicate
guard !seenMessageHashes.contains(messageHash) else {
return
}
seenMessageHashes.insert(messageHash)
// Limit memory usage
if seenMessageHashes.count > maxSeenHashes {
seenMessageHashes.removeAll()
}
let message = WakuMessage(
id: messageHash,
payload: payloadString,
contentTopic: contentTopic,
timestamp: Date()
)
messageContinuation?.yield(message)
}
// MARK: - Helper for synchronous C calls
private func callWakuSync(_ work: @escaping (UnsafeMutableRawPointer) -> Void) async -> (success: Bool, result: String?) {
await withCheckedContinuation { continuation in
let context = CallbackContext()
context.continuation = continuation
let userDataPtr = Unmanaged.passRetained(context).toOpaque()
work(userDataPtr)
// Set a timeout to avoid hanging forever
DispatchQueue.global().asyncAfter(deadline: .now() + 15) {
// Try to resume with timeout - will be ignored if callback already resumed
let didTimeout = context.resumeOnce(returning: (false, "Timeout"))
if didTimeout {
print("[WakuActor] Call timed out")
}
Unmanaged<CallbackContext>.fromOpaque(userDataPtr).release()
}
}
}
}
// MARK: - WakuNode (MainActor UI Wrapper)
/// Main-thread UI wrapper that consumes updates from WakuActor via AsyncStreams
@MainActor
class WakuNode: ObservableObject {
// MARK: - Published Properties (UI State)
@Published var status: WakuNodeStatus = .stopped
@Published var receivedMessages: [WakuMessage] = []
@Published var errorQueue: [TimestampedError] = []
@Published var isConnected: Bool = false
@Published var filterSubscribed: Bool = false
@Published var subscriptionMaintenanceActive: Bool = false
@Published var failedSubscribeAttempts: Int = 0
// Topics (read-only access to actor's config)
var defaultPubsubTopic: String { "/waku/2/rs/1/0" }
var defaultContentTopic: String { "/waku-ios-example/1/chat/proto" }
// MARK: - Private Properties
private let actor = WakuActor()
private var messageTask: Task<Void, Never>?
private var statusTask: Task<Void, Never>?
// MARK: - Initialization
init() {}
deinit {
messageTask?.cancel()
statusTask?.cancel()
}
// MARK: - Public API
func start() {
guard status == .stopped || status == .error else {
print("[WakuNode] Already started or starting")
return
}
// Create message stream
let messageStream = AsyncStream<WakuMessage> { continuation in
Task {
await self.actor.setMessageContinuation(continuation)
}
}
// Create status stream
let statusStream = AsyncStream<WakuStatusUpdate> { continuation in
Task {
await self.actor.setStatusContinuation(continuation)
}
}
// Start consuming messages
messageTask = Task { @MainActor in
for await message in messageStream {
self.receivedMessages.insert(message, at: 0)
if self.receivedMessages.count > 100 {
self.receivedMessages.removeLast()
}
}
}
// Start consuming status updates
statusTask = Task { @MainActor in
for await update in statusStream {
self.handleStatusUpdate(update)
}
}
// Start the actor
Task {
await actor.start()
}
}
func stop() {
messageTask?.cancel()
messageTask = nil
statusTask?.cancel()
statusTask = nil
Task {
await actor.stop()
}
// Immediate UI update
status = .stopped
isConnected = false
filterSubscribed = false
subscriptionMaintenanceActive = false
failedSubscribeAttempts = 0
}
func publish(message: String, contentTopic: String? = nil) {
Task {
await actor.publish(message: message, contentTopic: contentTopic)
}
}
func resubscribe() {
Task {
await actor.resubscribe()
}
}
func dismissError(_ error: TimestampedError) {
errorQueue.removeAll { $0.id == error.id }
}
func dismissAllErrors() {
errorQueue.removeAll()
}
// MARK: - Private Methods
private func handleStatusUpdate(_ update: WakuStatusUpdate) {
switch update {
case .statusChanged(let newStatus):
status = newStatus
case .connectionChanged(let connected):
isConnected = connected
case .filterSubscriptionChanged(let subscribed, let attempts):
filterSubscribed = subscribed
failedSubscribeAttempts = attempts
case .maintenanceChanged(let active):
subscriptionMaintenanceActive = active
case .error(let message):
let error = TimestampedError(message: message, timestamp: Date())
errorQueue.append(error)
// Schedule auto-dismiss after 10 seconds
let errorId = error.id
Task { @MainActor in
try? await Task.sleep(nanoseconds: 10_000_000_000)
self.errorQueue.removeAll { $0.id == errorId }
}
}
}
}

View File

@ -0,0 +1,253 @@
// Generated manually and inspired by the one generated by the Nim Compiler.
// To see the header file generated by Nim, run `make libwaku` from the root
// repo folder; the header is then created at
// nimcache/release/libwaku/libwaku.h
#ifndef __libwaku__
#define __libwaku__
#include <stddef.h>
#include <stdint.h>
// The possible return values for the functions that return int
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2
#ifdef __cplusplus
extern "C" {
#endif
typedef void (*WakuCallBack) (int callerRet, const char* msg, size_t len, void* userData);
// Creates a new instance of the waku node.
// Sets up the waku node from the given configuration.
// Returns a pointer to the Context needed by the rest of the API functions.
void* waku_new(
const char* configJson,
WakuCallBack callback,
void* userData);
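// Rough call sequence, as a sketch only (`on_done` is an illustrative callback,
// and return-value/error handling is omitted):
//
//   void on_done(int ret, const char* msg, size_t len, void* userData) { /* inspect ret/msg */ }
//
//   void* ctx = waku_new("{\"relay\": false, \"clusterId\": 1}", on_done, NULL);
//   waku_start(ctx, on_done, NULL);
//   /* ... use the node ... */
//   waku_stop(ctx, on_done, NULL);
//   waku_destroy(ctx, on_done, NULL);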
int waku_start(void* ctx,
WakuCallBack callback,
void* userData);
int waku_stop(void* ctx,
WakuCallBack callback,
void* userData);
// Destroys an instance of a waku node created with waku_new
int waku_destroy(void* ctx,
WakuCallBack callback,
void* userData);
int waku_version(void* ctx,
WakuCallBack callback,
void* userData);
// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast and non-blocking, and it
// should be thread-safe, since it may be invoked from a different thread.
void waku_set_event_callback(void* ctx,
WakuCallBack callback,
void* userData);
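// For example, a callback that only hands the event JSON off to the application
// (a sketch; `enqueue_event` is a hypothetical, non-blocking application function):
//
//   void on_event(int ret, const char* msg, size_t len, void* userData) {
//       if (ret == RET_OK && msg != NULL) {
//           enqueue_event(msg, len);   /* copy or queue only; no heavy work here */
//       }
//   }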
int waku_content_topic(void* ctx,
const char* appName,
unsigned int appVersion,
const char* contentTopicName,
const char* encoding,
WakuCallBack callback,
void* userData);
int waku_pubsub_topic(void* ctx,
const char* topicName,
WakuCallBack callback,
void* userData);
int waku_default_pubsub_topic(void* ctx,
WakuCallBack callback,
void* userData);
int waku_relay_publish(void* ctx,
const char* pubSubTopic,
const char* jsonWakuMessage,
unsigned int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_lightpush_publish(void* ctx,
const char* pubSubTopic,
const char* jsonWakuMessage,
WakuCallBack callback,
void* userData);
int waku_relay_subscribe(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_add_protected_shard(void* ctx,
int clusterId,
int shardId,
char* publicKey,
WakuCallBack callback,
void* userData);
int waku_relay_unsubscribe(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_filter_subscribe(void* ctx,
const char* pubSubTopic,
const char* contentTopics,
WakuCallBack callback,
void* userData);
int waku_filter_unsubscribe(void* ctx,
const char* pubSubTopic,
const char* contentTopics,
WakuCallBack callback,
void* userData);
int waku_filter_unsubscribe_all(void* ctx,
WakuCallBack callback,
void* userData);
int waku_relay_get_num_connected_peers(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_connected_peers(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_num_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_store_query(void* ctx,
const char* jsonQuery,
const char* peerAddr,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_connect(void* ctx,
const char* peerMultiAddr,
unsigned int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_disconnect_peer_by_id(void* ctx,
const char* peerId,
WakuCallBack callback,
void* userData);
int waku_disconnect_all_peers(void* ctx,
WakuCallBack callback,
void* userData);
int waku_dial_peer(void* ctx,
const char* peerMultiAddr,
const char* protocol,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_dial_peer_by_id(void* ctx,
const char* peerId,
const char* protocol,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_get_peerids_from_peerstore(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_connected_peers_info(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_peerids_by_protocol(void* ctx,
const char* protocol,
WakuCallBack callback,
void* userData);
int waku_listen_addresses(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_connected_peers(void* ctx,
WakuCallBack callback,
void* userData);
// Returns a list of multiaddresses given a URL to a DNS-discoverable ENR tree
// Parameters
// char* enrTreeUrl: URL containing a discoverable ENR tree
// char* nameDnsServer: The nameserver used to resolve the ENR tree URL.
// int timeoutMs: Timeout value in milliseconds to execute the call.
int waku_dns_discovery(void* ctx,
const char* enrTreeUrl,
const char* nameDnsServer,
int timeoutMs,
WakuCallBack callback,
void* userData);
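// Example (sketch, reusing the illustrative `on_done` callback): resolve the
// ENR tree used by the iOS example's config via Google DNS, with a 5 s timeout.
//
//   waku_dns_discovery(ctx,
//       "enrtree://AOGYWMBYOUIMOENHXCHILPKY3ZRFEULMFI4DOM442QSZ73TT2A7VI@test.waku.nodes.status.im",
//       "8.8.8.8",
//       5000,
//       on_done,
//       NULL);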
// Updates the bootnode list used for discovering new peers via DiscoveryV5
// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
int waku_discv5_update_bootnodes(void* ctx,
char* bootnodes,
WakuCallBack callback,
void* userData);
int waku_start_discv5(void* ctx,
WakuCallBack callback,
void* userData);
int waku_stop_discv5(void* ctx,
WakuCallBack callback,
void* userData);
// Retrieves the ENR information
int waku_get_my_enr(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_my_peerid(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_metrics(void* ctx,
WakuCallBack callback,
void* userData);
int waku_peer_exchange_request(void* ctx,
int numPeers,
WakuCallBack callback,
void* userData);
int waku_ping_peer(void* ctx,
const char* peerAddr,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_is_online(void* ctx,
WakuCallBack callback,
void* userData);
#ifdef __cplusplus
}
#endif
#endif /* __libwaku__ */

examples/ios/project.yml Normal file
View File

@ -0,0 +1,47 @@
name: WakuExample
options:
bundleIdPrefix: org.waku
deploymentTarget:
iOS: "14.0"
xcodeVersion: "15.0"
settings:
SWIFT_VERSION: "5.0"
SUPPORTED_PLATFORMS: "iphoneos iphonesimulator"
SUPPORTS_MACCATALYST: "NO"
targets:
WakuExample:
type: application
platform: iOS
supportedDestinations: [iOS]
sources:
- WakuExample
settings:
INFOPLIST_FILE: WakuExample/Info.plist
PRODUCT_BUNDLE_IDENTIFIER: org.waku.example
SWIFT_OBJC_BRIDGING_HEADER: WakuExample/WakuExample-Bridging-Header.h
HEADER_SEARCH_PATHS:
- "$(PROJECT_DIR)/WakuExample"
"LIBRARY_SEARCH_PATHS[sdk=iphoneos*]":
- "$(PROJECT_DIR)/../../build/ios/iphoneos-arm64"
"LIBRARY_SEARCH_PATHS[sdk=iphonesimulator*]":
- "$(PROJECT_DIR)/../../build/ios/iphonesimulator-arm64"
OTHER_LDFLAGS:
- "-lc++"
- "-lwaku"
IPHONEOS_DEPLOYMENT_TARGET: "14.0"
info:
path: WakuExample/Info.plist
properties:
CFBundleName: WakuExample
CFBundleDisplayName: Waku Example
CFBundleIdentifier: org.waku.example
CFBundleVersion: "1"
CFBundleShortVersionString: "1.0"
UILaunchScreen: {}
UISupportedInterfaceOrientations:
- UIInterfaceOrientationPortrait
NSAppTransportSecurity:
NSAllowsArbitraryLoads: true

View File

@ -102,8 +102,8 @@ print("Waku Relay enabled: {}".format(args.relay))
# Set the event callback
callback = callback_type(handle_event) # This line is important so that the callback is not gc'ed
libwaku.waku_set_event_callback.argtypes = [callback_type, ctypes.c_void_p]
libwaku.waku_set_event_callback(callback, ctypes.c_void_p(0))
libwaku.set_event_callback.argtypes = [callback_type, ctypes.c_void_p]
libwaku.set_event_callback(callback, ctypes.c_void_p(0))
# Start the node
libwaku.waku_start.argtypes = [ctypes.c_void_p,
@ -117,32 +117,32 @@ libwaku.waku_start(ctx,
# Subscribe to the default pubsub topic
libwaku.waku_relay_subscribe.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
callback_type,
ctypes.c_void_p]
ctypes.c_void_p,
ctypes.c_char_p]
libwaku.waku_relay_subscribe(ctx,
default_pubsub_topic.encode('utf-8'),
callback_type(
#onErrCb
lambda ret, msg, len:
print("Error calling waku_relay_subscribe: %s" %
msg.decode('utf-8'))
),
ctypes.c_void_p(0))
ctypes.c_void_p(0),
default_pubsub_topic.encode('utf-8'))
libwaku.waku_connect.argtypes = [ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int,
callback_type,
ctypes.c_void_p]
ctypes.c_void_p,
ctypes.c_char_p,
ctypes.c_int]
libwaku.waku_connect(ctx,
args.peer.encode('utf-8'),
10000,
# onErrCb
callback_type(
lambda ret, msg, len:
print("Error calling waku_connect: %s" % msg.decode('utf-8'))),
ctypes.c_void_p(0))
ctypes.c_void_p(0),
args.peer.encode('utf-8'),
10000)
# app = Flask(__name__)
# @app.route("/")

View File

@ -27,7 +27,7 @@ public:
void initialize(const QString& jsonConfig, WakuCallBack event_handler, void* userData) {
ctx = waku_new(jsonConfig.toUtf8().constData(), WakuCallBack(event_handler), userData);
waku_set_event_callback(ctx, on_event_received, userData);
set_event_callback(ctx, on_event_received, userData);
qDebug() << "Waku context initialized, ready to start.";
}

View File

@ -3,22 +3,22 @@ use std::ffi::CString;
use std::os::raw::{c_char, c_int, c_void};
use std::{slice, thread, time};
pub type WakuCallback = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void);
pub type FFICallBack = unsafe extern "C" fn(c_int, *const c_char, usize, *const c_void);
extern "C" {
pub fn waku_new(
config_json: *const u8,
cb: WakuCallback,
cb: FFICallBack,
user_data: *const c_void,
) -> *mut c_void;
pub fn waku_version(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int;
pub fn waku_version(ctx: *const c_void, cb: FFICallBack, user_data: *const c_void) -> c_int;
pub fn waku_start(ctx: *const c_void, cb: WakuCallback, user_data: *const c_void) -> c_int;
pub fn waku_start(ctx: *const c_void, cb: FFICallBack, user_data: *const c_void) -> c_int;
pub fn waku_default_pubsub_topic(
ctx: *mut c_void,
cb: WakuCallback,
cb: FFICallBack,
user_data: *const c_void,
) -> *mut c_void;
}
@ -40,7 +40,7 @@ pub unsafe extern "C" fn trampoline<C>(
closure(return_val, &buffer_utf8);
}
pub fn get_trampoline<C>(_closure: &C) -> WakuCallback
pub fn get_trampoline<C>(_closure: &C) -> FFICallBack
where
C: FnMut(i32, &str),
{

View File

@ -1,42 +0,0 @@
## Can be shared safely between threads
type SharedSeq*[T] = tuple[data: ptr UncheckedArray[T], len: int]
proc alloc*(str: cstring): cstring =
# Byte allocation from the given address.
# There should be the corresponding manual deallocation with deallocShared !
if str.isNil():
var ret = cast[cstring](allocShared(1)) # Allocate memory for the null terminator
ret[0] = '\0' # Set the null terminator
return ret
let ret = cast[cstring](allocShared(len(str) + 1))
copyMem(ret, str, len(str) + 1)
return ret
proc alloc*(str: string): cstring =
## Byte allocation from the given address.
## There should be the corresponding manual deallocation with deallocShared !
var ret = cast[cstring](allocShared(str.len + 1))
let s = cast[seq[char]](str)
for i in 0 ..< str.len:
ret[i] = s[i]
ret[str.len] = '\0'
return ret
proc allocSharedSeq*[T](s: seq[T]): SharedSeq[T] =
let data = allocShared(sizeof(T) * s.len)
if s.len != 0:
copyMem(data, unsafeAddr s[0], s.len)
return (cast[ptr UncheckedArray[T]](data), s.len)
proc deallocSharedSeq*[T](s: var SharedSeq[T]) =
deallocShared(s.data)
s.len = 0
proc toSeq*[T](s: SharedSeq[T]): seq[T] =
## Creates a seq[T] from a SharedSeq[T]. No explicit dealloc is required
## as req[T] is a GC managed type.
var ret = newSeq[T]()
for i in 0 ..< s.len:
ret.add(s.data[i])
return ret

library/declare_lib.nim Normal file
View File

@ -0,0 +1,10 @@
import ffi
import waku/factory/waku
declareLibrary("waku")
proc set_event_callback(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.dynlib, exportc, cdecl.} =
ctx[].eventCallback = cast[pointer](callback)
ctx[].eventUserData = userData

View File

@ -1,9 +0,0 @@
import system, std/json, ./json_base_event
type JsonWakuNotRespondingEvent* = ref object of JsonEvent
proc new*(T: type JsonWakuNotRespondingEvent): T =
return JsonWakuNotRespondingEvent(eventType: "waku_not_responding")
method `$`*(event: JsonWakuNotRespondingEvent): string =
$(%*event)

View File

@ -1,30 +0,0 @@
################################################################################
### Exported types
type WakuCallBack* = proc(
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].}
const RET_OK*: cint = 0
const RET_ERR*: cint = 1
const RET_MISSING_CALLBACK*: cint = 2
### End of exported types
################################################################################
################################################################################
### FFI utils
template foreignThreadGc*(body: untyped) =
when declared(setupForeignThreadGc):
setupForeignThreadGc()
body
when declared(tearDownForeignThreadGc):
tearDownForeignThreadGc()
type onDone* = proc()
### End of FFI utils
################################################################################

View File

@ -0,0 +1,32 @@
/**
* iOS stubs for BearSSL tools functions not normally included in the library.
* These are typically from the BearSSL tools/ directory which is for CLI tools.
*/
#include <stddef.h>
/* x509_noanchor context - simplified stub */
typedef struct {
void *vtable;
void *inner;
} x509_noanchor_context;
/* Stub for x509_noanchor_init - used to skip anchor validation */
void x509_noanchor_init(x509_noanchor_context *xwc, const void **inner) {
if (xwc && inner) {
xwc->inner = (void*)*inner;
xwc->vtable = NULL;
}
}
/* TAs (Trust Anchors) - empty array stub */
/* This is typically defined by applications with their CA certificates */
typedef struct {
void *dn;
size_t dn_len;
unsigned flags;
void *pkey;
} br_x509_trust_anchor;
const br_x509_trust_anchor TAs[1] = {{0}};
const size_t TAs_NUM = 0;

View File

@ -0,0 +1,14 @@
/**
* iOS stub for getgateway.c functions.
* iOS doesn't have net/route.h, so we provide a stub that returns failure.
* NAT-PMP functionality won't work but the library will link.
*/
#include <stdint.h>
#include <netinet/in.h>
/* getdefaultgateway - returns -1 (failure) on iOS */
int getdefaultgateway(in_addr_t *addr) {
(void)addr; /* unused */
return -1; /* failure - not supported on iOS */
}

View File

@ -0,0 +1,49 @@
import std/json
import
chronicles,
chronos,
results,
eth/p2p/discoveryv5/enr,
strutils,
libp2p/peerid,
metrics,
ffi
import waku/factory/waku, waku/node/waku_node, waku/node/health_monitor, library/declare_lib
proc getMultiaddresses(node: WakuNode): seq[string] =
return node.info().listenAddresses
proc getMetrics(): string =
{.gcsafe.}:
return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module
proc waku_version(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok(WakuNodeVersionString)
proc waku_listen_addresses(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a comma-separated string of the listen addresses
return ok(ctx.myLib[].node.getMultiaddresses().join(","))
proc waku_get_my_enr(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok(ctx.myLib[].node.enr.toURI())
proc waku_get_my_peerid(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok($ctx.myLib[].node.peerId())
proc waku_get_metrics(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok(getMetrics())
proc waku_is_online(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
return ok($ctx.myLib[].healthMonitor.onlineMonitor.amIOnline())

View File

@ -0,0 +1,96 @@
import std/json
import chronos, chronicles, results, strutils, libp2p/multiaddress, ffi
import
waku/factory/waku,
waku/discovery/waku_dnsdisc,
waku/discovery/waku_discv5,
waku/waku_core/peers,
waku/node/waku_node,
waku/node/kernel_api,
library/declare_lib
proc retrieveBootstrapNodes(
enrTreeUrl: string, ipDnsServer: string
): Future[Result[seq[string], string]] {.async.} =
let dnsNameServers = @[parseIpAddress(ipDnsServer)]
let discoveredPeers: seq[RemotePeerInfo] = (
await retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers)
).valueOr:
return err("failed discovering peers from DNS: " & $error)
var multiAddresses = newSeq[string]()
for discPeer in discoveredPeers:
for address in discPeer.addrs:
multiAddresses.add($address & "/p2p/" & $discPeer)
return ok(multiAddresses)
proc updateDiscv5BootstrapNodes(nodes: string, waku: Waku): Result[void, string] =
waku.wakuDiscv5.updateBootstrapRecords(nodes).isOkOr:
return err("error in updateDiscv5BootstrapNodes: " & $error)
return ok()
proc performPeerExchangeRequestTo*(
numPeers: uint64, waku: Waku
): Future[Result[int, string]] {.async.} =
let numPeersRecv = (await waku.node.fetchPeerExchangePeers(numPeers)).valueOr:
return err($error)
return ok(numPeersRecv)
proc waku_discv5_update_bootnodes(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
bootnodes: cstring,
) {.ffi.} =
## Updates the bootnode list used for discovering new peers via DiscoveryV5
## bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
updateDiscv5BootstrapNodes($bootnodes, ctx.myLib[]).isOkOr:
error "UPDATE_DISCV5_BOOTSTRAP_NODES failed", error = error
return err($error)
return ok("discovery request processed correctly")
proc waku_dns_discovery(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
enrTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
) {.ffi.} =
let nodes = (await retrieveBootstrapNodes($enrTreeUrl, $nameDnsServer)).valueOr:
error "GET_BOOTSTRAP_NODES failed", error = error
return err($error)
## returns a comma-separated string of bootstrap nodes' multiaddresses
return ok(nodes.join(","))
proc waku_start_discv5(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
(await ctx.myLib[].wakuDiscv5.start()).isOkOr:
error "START_DISCV5 failed", error = error
return err("error starting discv5: " & $error)
return ok("discv5 started correctly")
proc waku_stop_discv5(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
await ctx.myLib[].wakuDiscv5.stop()
return ok("discv5 stopped correctly")
proc waku_peer_exchange_request(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
numPeers: uint64,
) {.ffi.} =
let numValidPeers = (await performPeerExchangeRequestTo(numPeers, ctx.myLib[])).valueOr:
error "waku_peer_exchange_request failed", error = error
return err("failed peer exchange: " & $error)
return ok($numValidPeers)

View File

@ -1,43 +1,14 @@
import std/[options, json, strutils, net]
import chronos, chronicles, results, confutils, confutils/std/net
import chronos, chronicles, results, confutils, confutils/std/net, ffi
import
waku/node/peer_manager/peer_manager,
tools/confutils/cli_args,
waku/factory/waku,
waku/factory/node_factory,
waku/factory/networks_config,
waku/factory/app_callbacks,
waku/rest_api/endpoint/builder
import
../../alloc
type NodeLifecycleMsgType* = enum
CREATE_NODE
START_NODE
STOP_NODE
type NodeLifecycleRequest* = object
operation: NodeLifecycleMsgType
configJson: cstring ## Only used in 'CREATE_NODE' operation
appCallbacks: AppCallbacks
proc createShared*(
T: type NodeLifecycleRequest,
op: NodeLifecycleMsgType,
configJson: cstring = "",
appCallbacks: AppCallbacks = nil,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].appCallbacks = appCallbacks
ret[].configJson = configJson.alloc()
return ret
proc destroyShared(self: ptr NodeLifecycleRequest) =
deallocShared(self[].configJson)
deallocShared(self)
waku/rest_api/endpoint/builder,
library/declare_lib
proc createWaku(
configJson: cstring, appCallbacks: AppCallbacks = nil
@ -87,26 +58,30 @@ proc createWaku(
return ok(wakuRes)
proc process*(
self: ptr NodeLifecycleRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of CREATE_NODE:
waku[] = (await createWaku(self.configJson, self.appCallbacks)).valueOr:
error "CREATE_NODE failed", error = error
registerReqFFI(CreateNodeRequest, ctx: ptr FFIContext[Waku]):
proc(
configJson: cstring, appCallbacks: AppCallbacks
): Future[Result[string, string]] {.async.} =
ctx.myLib[] = (await createWaku(configJson, cast[AppCallbacks](appCallbacks))).valueOr:
error "CreateNodeRequest failed", error = error
return err($error)
of START_NODE:
(await waku.startWaku()).isOkOr:
error "START_NODE failed", error = error
return err($error)
of STOP_NODE:
try:
await waku[].stop()
except Exception:
error "STOP_NODE failed", error = getCurrentExceptionMsg()
return err(getCurrentExceptionMsg())
return ok("")
proc waku_start(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
(await startWaku(ctx[].myLib)).isOkOr:
error "START_NODE failed", error = error
return err("failed to start: " & $error)
return ok("")
proc waku_stop(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
try:
await ctx.myLib[].stop()
except Exception as exc:
error "STOP_NODE failed", error = exc.msg
return err("failed to stop: " & exc.msg)
return ok("")

View File

@ -0,0 +1,123 @@
import std/[sequtils, strutils, tables]
import chronicles, chronos, results, options, json, ffi
import waku/factory/waku, waku/node/waku_node, waku/node/peer_manager, ../declare_lib
type PeerInfo = object
protocols: seq[string]
addresses: seq[string]
proc waku_get_peerids_from_peerstore(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a comma-separated string of peerIDs
let peerIDs =
ctx.myLib[].node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",")
return ok(peerIDs)
proc waku_connect(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerMultiAddr: cstring,
timeoutMs: cuint,
) {.ffi.} =
let peers = ($peerMultiAddr).split(",").mapIt(strip(it))
await ctx.myLib[].node.connectToNodes(peers, source = "static")
return ok("")
proc waku_disconnect_peer_by_id(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer, peerId: cstring
) {.ffi.} =
let pId = PeerId.init($peerId).valueOr:
error "DISCONNECT_PEER_BY_ID failed", error = $error
return err($error)
await ctx.myLib[].node.peerManager.disconnectNode(pId)
return ok("")
proc waku_disconnect_all_peers(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
await ctx.myLib[].node.peerManager.disconnectAllPeers()
return ok("")
proc waku_dial_peer(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerMultiAddr: cstring,
protocol: cstring,
timeoutMs: cuint,
) {.ffi.} =
let remotePeerInfo = parsePeerInfo($peerMultiAddr).valueOr:
error "DIAL_PEER failed", error = $error
return err($error)
let conn = await ctx.myLib[].node.peerManager.dialPeer(remotePeerInfo, $protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER failed", error = msg, peerId = $remotePeerInfo.peerId
return err(msg)
return ok("")
proc waku_dial_peer_by_id(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerId: cstring,
protocol: cstring,
timeoutMs: cuint,
) {.ffi.} =
let pId = PeerId.init($peerId).valueOr:
error "DIAL_PEER_BY_ID failed", error = $error
return err($error)
let conn = await ctx.myLib[].node.peerManager.dialPeer(pId, $protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER_BY_ID failed", error = msg, peerId = $peerId
return err(msg)
return ok("")
proc waku_get_connected_peers_info(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a JSON string mapping peerIDs to objects with protocols and addresses
var peersMap = initTable[string, PeerInfo]()
let peers = ctx.myLib[].node.peerManager.switch.peerStore.peers().filterIt(
it.connectedness == Connected
)
# Build a map of peer IDs to peer info objects
for peer in peers:
let peerIdStr = $peer.peerId
peersMap[peerIdStr] =
PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it))
# Convert the map to JSON string
let jsonObj = %*peersMap
let jsonStr = $jsonObj
return ok(jsonStr)
proc waku_get_connected_peers(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
## returns a comma-separated string of peerIDs
let
(inPeerIds, outPeerIds) = ctx.myLib[].node.peerManager.connectedPeers()
connectedPeerids = concat(inPeerIds, outPeerIds)
return ok(connectedPeerids.mapIt($it).join(","))
proc waku_get_peerids_by_protocol(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
protocol: cstring,
) {.ffi.} =
## returns a comma-separated string of peerIDs that mount the given protocol
let connectedPeers = ctx.myLib[].node.peerManager.switch.peerStore
.peers($protocol)
.filterIt(it.connectedness == Connected)
.mapIt($it.peerId)
.join(",")
return ok(connectedPeers)

View File

@ -0,0 +1,43 @@
import std/[json, strutils]
import chronos, results, ffi
import libp2p/[protocols/ping, switch, multiaddress, multicodec]
import waku/[factory/waku, waku_core/peers, node/waku_node], library/declare_lib
proc waku_ping_peer(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
peerAddr: cstring,
timeoutMs: cuint,
) {.ffi.} =
let peerInfo = peers.parsePeerInfo(($peerAddr).split(",")).valueOr:
return err("PingRequest failed to parse peer addr: " & $error)
let timeout = chronos.milliseconds(timeoutMs)
proc ping(): Future[Result[Duration, string]] {.async, gcsafe.} =
try:
let conn =
await ctx.myLib[].node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
defer:
await conn.close()
let pingRTT = await ctx.myLib[].node.libp2pPing.ping(conn)
if pingRTT == 0.nanos:
return err("could not ping peer: rtt-0")
return ok(pingRTT)
except CatchableError as exc:
return err("could not ping peer: " & exc.msg)
let pingFuture = ping()
let pingRTT: Duration =
if timeout == chronos.milliseconds(0): # No timeout expected
(await pingFuture).valueOr:
return err("ping failed, no timeout expected: " & error)
else:
let timedOut = not (await pingFuture.withTimeout(timeout))
if timedOut:
return err("ping timed out")
pingFuture.read().valueOr:
return err("failed to read ping future: " & error)
return ok($(pingRTT.nanos))

View File

@ -0,0 +1,109 @@
import options, std/[strutils, sequtils]
import chronicles, chronos, results, ffi
import
waku/waku_filter_v2/client,
waku/waku_core/message/message,
waku/factory/waku,
waku/waku_relay,
waku/waku_filter_v2/common,
waku/waku_core/subscription/push_handler,
waku/node/peer_manager/peer_manager,
waku/node/waku_node,
waku/node/kernel_api,
waku/waku_core/topics/pubsub_topic,
waku/waku_core/topics/content_topic,
library/events/json_message_event,
library/declare_lib
const FilterOpTimeout = 5.seconds
proc checkFilterClientMounted(waku: Waku): Result[string, string] =
if waku.node.wakuFilterClient.isNil():
let errorMsg = "wakuFilterClient is not mounted"
error "fail filter process", error = errorMsg
return err(errorMsg)
return ok("")
proc waku_filter_subscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
contentTopics: cstring,
) {.ffi.} =
proc onReceivedMessage(ctx: ptr FFIContext[Waku]): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
checkFilterClientMounted(ctx.myLib[]).isOkOr:
return err($error)
var filterPushEventCallback = FilterPushHandler(onReceivedMessage(ctx))
ctx.myLib[].node.wakuFilterClient.registerPushHandler(filterPushEventCallback)
let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg = "could not find peer with WakuFilterSubscribeCodec when subscribing"
error "fail filter subscribe", error = errorMsg
return err(errorMsg)
let subFut = ctx.myLib[].node.filterSubscribe(
some(PubsubTopic($pubsubTopic)),
($contentTopics).split(",").mapIt(ContentTopic(it)),
peer,
)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter subscription timed out"
error "fail filter unsubscribe", error = errorMsg
return err(errorMsg)
return ok("")
proc waku_filter_unsubscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
contentTopics: cstring,
) {.ffi.} =
checkFilterClientMounted(ctx.myLib[]).isOkOr:
return err($error)
let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing"
error "fail filter process", error = errorMsg
return err(errorMsg)
let subFut = ctx.myLib[].node.filterUnsubscribe(
some(PubsubTopic($pubsubTopic)),
($contentTopics).split(",").mapIt(ContentTopic(it)),
peer,
)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription timed out"
error "fail filter unsubscribe", error = errorMsg
return err(errorMsg)
return ok("")
proc waku_filter_unsubscribe_all(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
checkFilterClientMounted(ctx.myLib[]).isOkOr:
return err($error)
let peer = ctx.myLib[].node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing all"
error "fail filter unsubscribe all", error = errorMsg
return err(errorMsg)
let unsubFut = ctx.myLib[].node.filterUnsubscribeAll(peer)
if not await unsubFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription all timed out"
error "fail filter unsubscribe all", error = errorMsg
return err(errorMsg)
return ok("")

View File

@ -0,0 +1,51 @@
import options, std/[json, strformat]
import chronicles, chronos, results, ffi
import
waku/waku_core/message/message,
waku/waku_core/codecs,
waku/factory/waku,
waku/waku_core/message,
waku/waku_core/topics/pubsub_topic,
waku/waku_lightpush_legacy/client,
waku/node/peer_manager/peer_manager,
library/events/json_message_event,
library/declare_lib
proc waku_lightpush_publish(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
) {.ffi.} =
if ctx.myLib[].node.wakuLightpushClient.isNil():
let errorMsg = "LightpushRequest waku.node.wakuLightpushClient is nil"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError as exc:
return err(fmt"Error parsing json message: {exc.msg}")
let msg = json_message_event.toWakuMessage(jsonMessage).valueOr:
return err("Problem building the WakuMessage: " & $error)
let peerOpt = ctx.myLib[].node.peerManager.selectPeer(WakuLightPushCodec)
if peerOpt.isNone():
let errorMsg = "failed to lightpublish message, no suitable remote peers"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
let msgHashHex = (
await ctx.myLib[].node.wakuLegacyLightpushClient.publish(
$pubsubTopic, msg, peer = peerOpt.get()
)
).valueOr:
error "PUBLISH failed", error = error
return err($error)
return ok(msgHashHex)

View File

@ -0,0 +1,171 @@
import std/[net, sequtils, strutils, json], strformat
import chronicles, chronos, stew/byteutils, results, ffi
import
waku/waku_core/message/message,
waku/factory/[validator_signed, waku],
tools/confutils/cli_args,
waku/waku_core/message,
waku/waku_core/topics/pubsub_topic,
waku/waku_core/topics,
waku/node/kernel_api/relay,
waku/waku_relay/protocol,
waku/node/peer_manager,
library/events/json_message_event,
library/declare_lib
proc waku_relay_get_peers_in_mesh(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
let meshPeers = ctx.myLib[].node.wakuRelay.getPeersInMesh($pubsubTopic).valueOr:
error "LIST_MESH_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(meshPeers.mapIt($it).join(","))
proc waku_relay_get_num_peers_in_mesh(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
let numPeersInMesh = ctx.myLib[].node.wakuRelay.getNumPeersInMesh($pubsubTopic).valueOr:
error "NUM_MESH_PEERS failed", error = error
return err($error)
return ok($numPeersInMesh)
proc waku_relay_get_connected_peers(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
## Returns the list of all peers connected to a specific pubsub topic
let connPeers = ctx.myLib[].node.wakuRelay.getConnectedPeers($pubsubTopic).valueOr:
error "LIST_CONNECTED_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(connPeers.mapIt($it).join(","))
proc waku_relay_get_num_connected_peers(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
let numConnPeers = ctx.myLib[].node.wakuRelay.getNumConnectedPeers($pubsubTopic).valueOr:
error "NUM_CONNECTED_PEERS failed", error = error
return err($error)
return ok($numConnPeers)
proc waku_relay_add_protected_shard(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
clusterId: cint,
shardId: cint,
publicKey: cstring,
) {.ffi.} =
## Protects a shard with a public key
try:
let relayShard = RelayShard(clusterId: uint16(clusterId), shardId: uint16(shardId))
let protectedShard = ProtectedShard.parseCmdArg($relayShard & ":" & $publicKey)
ctx.myLib[].node.wakuRelay.addSignedShardsValidator(
@[protectedShard], uint16(clusterId)
)
except ValueError as exc:
return err("ERROR in waku_relay_add_protected_shard: " & exc.msg)
return ok("")
proc waku_relay_subscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
echo "Subscribing to topic: " & $pubSubTopic & " ..."
proc onReceivedMessage(ctx: ptr FFIContext[Waku]): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
var cb = onReceivedMessage(ctx)
ctx.myLib[].node.subscribe(
(kind: SubscriptionKind.PubsubSub, topic: $pubsubTopic),
handler = WakuRelayHandler(cb),
).isOkOr:
error "SUBSCRIBE failed", error = error
return err($error)
return ok("")
proc waku_relay_unsubscribe(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
) {.ffi.} =
ctx.myLib[].node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $pubsubTopic)).isOkOr:
error "UNSUBSCRIBE failed", error = error
return err($error)
return ok("")
proc waku_relay_publish(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
timeoutMs: cuint,
) {.ffi.} =
var
# https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms
jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError as exc:
return err(fmt"Error parsing json message: {exc.msg}")
let msg = json_message_event.toWakuMessage(jsonMessage).valueOr:
return err("Problem building the WakuMessage: " & $error)
(await ctx.myLib[].node.wakuRelay.publish($pubsubTopic, msg)).isOkOr:
error "PUBLISH failed", error = error
return err($error)
let msgHash = computeMessageHash($pubSubTopic, msg).to0xHex
return ok(msgHash)
proc waku_default_pubsub_topic(
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
) {.ffi.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic
return ok(DefaultPubsubTopic)
proc waku_content_topic(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
appName: cstring,
appVersion: cuint,
contentTopicName: cstring,
encoding: cstring,
) {.ffi.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding
return ok(fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}")
proc waku_pubsub_topic(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
topicName: cstring,
) {.ffi.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding
return ok(fmt"/waku/2/{$topicName}")
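
For orientation, here is a minimal consumer-side sketch of the relay calls defined above, written in C against the declarations that appear further down in this diff. The header name, the pubsub topic and the JSON message fields are assumptions for illustration, not something this change pins down.

// Sketch only: assumes the header is installed as libwaku.h and that a node
// context has already been created and started (see waku_new / waku_start below).
#include <stdio.h>
#include "libwaku.h"

static void on_response(int ret, const char *msg, size_t len, void *userData) {
  // msg is not necessarily NUL-terminated, so print it with an explicit length.
  printf("ret=%d msg=%.*s\n", ret, (int)len, msg ? msg : "");
}

static void relay_example(void *ctx) {
  const char *topic = "/waku/2/rs/1/0"; // illustrative pubsub topic

  // New argument order: ctx, callback, userData, then the call-specific params.
  waku_relay_subscribe(ctx, on_response, NULL, topic);

  // Field names follow the RFC 36 JSON message format referenced in the Nim
  // code above; treat them as illustrative rather than normative.
  const char *msg_json =
      "{\"payload\":\"SGVsbG8=\",\"contentTopic\":\"/app/1/chat/proto\"}";
  waku_relay_publish(ctx, on_response, NULL, topic, msg_json, 5000);

  waku_relay_get_num_peers_in_mesh(ctx, on_response, NULL, topic);
}

As with the other calls, the result string (here the message hash that the Nim proc above returns on success) is expected to arrive through the callback rather than the int return value.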

View File

@@ -1,28 +1,16 @@
import std/[json, sugar, strutils, options]
import chronos, chronicles, results, stew/byteutils
import chronos, chronicles, results, stew/byteutils, ffi
import
../../../../waku/factory/waku,
../../../alloc,
../../../utils,
../../../../waku/waku_core/peers,
../../../../waku/waku_core/time,
../../../../waku/waku_core/message/digest,
../../../../waku/waku_store/common,
../../../../waku/waku_store/client,
../../../../waku/common/paging
waku/factory/waku,
library/utils,
waku/waku_core/peers,
waku/waku_core/message/digest,
waku/waku_store/common,
waku/waku_store/client,
waku/common/paging,
library/declare_lib
type StoreReqType* = enum
REMOTE_QUERY ## to perform a query to another Store node
type StoreRequest* = object
operation: StoreReqType
jsonQuery: cstring
peerAddr: cstring
timeoutMs: cint
func fromJsonNode(
T: type StoreRequest, jsonContent: JsonNode
): Result[StoreQueryRequest, string] =
func fromJsonNode(jsonContent: JsonNode): Result[StoreQueryRequest, string] =
var contentTopics: seq[string]
if jsonContent.contains("contentTopics"):
contentTopics = collect(newSeq):
@@ -78,54 +66,29 @@ func fromJsonNode(
)
)
proc createShared*(
T: type StoreRequest,
op: StoreReqType,
proc waku_store_query(
ctx: ptr FFIContext[Waku],
callback: FFICallBack,
userData: pointer,
jsonQuery: cstring,
peerAddr: cstring,
timeoutMs: cint,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].timeoutMs = timeoutMs
ret[].jsonQuery = jsonQuery.alloc()
ret[].peerAddr = peerAddr.alloc()
return ret
proc destroyShared(self: ptr StoreRequest) =
deallocShared(self[].jsonQuery)
deallocShared(self[].peerAddr)
deallocShared(self)
proc process_remote_query(
self: ptr StoreRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
) {.ffi.} =
let jsonContentRes = catch:
parseJson($self[].jsonQuery)
parseJson($jsonQuery)
if jsonContentRes.isErr():
return err("StoreRequest failed parsing store request: " & jsonContentRes.error.msg)
let storeQueryRequest = ?StoreRequest.fromJsonNode(jsonContentRes.get())
let storeQueryRequest = ?fromJsonNode(jsonContentRes.get())
let peer = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr:
let peer = peers.parsePeerInfo(($peerAddr).split(",")).valueOr:
return err("StoreRequest failed to parse peer addr: " & $error)
let queryResponse = (await waku.node.wakuStoreClient.query(storeQueryRequest, peer)).valueOr:
let queryResponse = (
await ctx.myLib[].node.wakuStoreClient.query(storeQueryRequest, peer)
).valueOr:
return err("StoreRequest failed store query: " & $error)
let res = $(%*(queryResponse.toHex()))
return ok(res) ## returning the response in json format
proc process*(
self: ptr StoreRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
deallocShared(self)
case self.operation
of REMOTE_QUERY:
return await self.process_remote_query(waku)
error "store request not handled at all"
return err("store request not handled at all")
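
The reworked waku_store_query can likewise be driven straight from C. Only the contentTopics field is visibly parsed in this hunk, so the query JSON and the peer multiaddress below are placeholders.

// Sketch only: assumes libwaku.h and an already started node context.
#include <stdio.h>
#include "libwaku.h"

static void on_store_response(int ret, const char *msg, size_t len, void *userData) {
  // On success the response is returned as a JSON string, per the ok(res) above.
  printf("store ret=%d body=%.*s\n", ret, (int)len, msg ? msg : "");
}

static void store_example(void *ctx) {
  // Query fields other than contentTopics are assumptions and may differ.
  const char *query = "{\"contentTopics\":[\"/app/1/chat/proto\"]}";
  const char *peer = "/ip4/127.0.0.1/tcp/60000/p2p/16Uiu2...";  // placeholder
  waku_store_query(ctx, on_store_response, NULL, query, peer, 10000);
}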

View File

@@ -10,241 +10,242 @@
#include <stdint.h>
// The possible returned values for the functions that return int
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2
#define RET_OK 0
#define RET_ERR 1
#define RET_MISSING_CALLBACK 2
#ifdef __cplusplus
extern "C" {
extern "C"
{
#endif
typedef void (*WakuCallBack) (int callerRet, const char* msg, size_t len, void* userData);
typedef void (*FFICallBack)(int callerRet, const char *msg, size_t len, void *userData);
// Creates a new instance of the waku node.
// Sets up the waku node from the given configuration.
// Returns a pointer to the Context needed by the rest of the API functions.
void* waku_new(
const char* configJson,
WakuCallBack callback,
void* userData);
// Creates a new instance of the waku node.
// Sets up the waku node from the given configuration.
// Returns a pointer to the Context needed by the rest of the API functions.
void *waku_new(
const char *configJson,
FFICallBack callback,
void *userData);
int waku_start(void* ctx,
WakuCallBack callback,
void* userData);
int waku_start(void *ctx,
FFICallBack callback,
void *userData);
int waku_stop(void* ctx,
WakuCallBack callback,
void* userData);
int waku_stop(void *ctx,
FFICallBack callback,
void *userData);
// Destroys an instance of a waku node created with waku_new
int waku_destroy(void* ctx,
WakuCallBack callback,
void* userData);
// Destroys an instance of a waku node created with waku_new
int waku_destroy(void *ctx,
FFICallBack callback,
void *userData);
int waku_version(void* ctx,
WakuCallBack callback,
void* userData);
int waku_version(void *ctx,
FFICallBack callback,
void *userData);
// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
void waku_set_event_callback(void* ctx,
WakuCallBack callback,
void* userData);
// Sets a callback that will be invoked whenever an event occurs.
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
void set_event_callback(void *ctx,
FFICallBack callback,
void *userData);
int waku_content_topic(void* ctx,
const char* appName,
unsigned int appVersion,
const char* contentTopicName,
const char* encoding,
WakuCallBack callback,
void* userData);
int waku_content_topic(void *ctx,
FFICallBack callback,
void *userData,
const char *appName,
unsigned int appVersion,
const char *contentTopicName,
const char *encoding);
int waku_pubsub_topic(void* ctx,
const char* topicName,
WakuCallBack callback,
void* userData);
int waku_pubsub_topic(void *ctx,
FFICallBack callback,
void *userData,
const char *topicName);
int waku_default_pubsub_topic(void* ctx,
WakuCallBack callback,
void* userData);
int waku_default_pubsub_topic(void *ctx,
FFICallBack callback,
void *userData);
int waku_relay_publish(void* ctx,
const char* pubSubTopic,
const char* jsonWakuMessage,
unsigned int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_relay_publish(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *jsonWakuMessage,
unsigned int timeoutMs);
int waku_lightpush_publish(void* ctx,
const char* pubSubTopic,
const char* jsonWakuMessage,
WakuCallBack callback,
void* userData);
int waku_lightpush_publish(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *jsonWakuMessage);
int waku_relay_subscribe(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_subscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_add_protected_shard(void* ctx,
int clusterId,
int shardId,
char* publicKey,
WakuCallBack callback,
void* userData);
int waku_relay_add_protected_shard(void *ctx,
FFICallBack callback,
void *userData,
int clusterId,
int shardId,
char *publicKey);
int waku_relay_unsubscribe(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_unsubscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_filter_subscribe(void* ctx,
const char* pubSubTopic,
const char* contentTopics,
WakuCallBack callback,
void* userData);
int waku_filter_subscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *contentTopics);
int waku_filter_unsubscribe(void* ctx,
const char* pubSubTopic,
const char* contentTopics,
WakuCallBack callback,
void* userData);
int waku_filter_unsubscribe(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic,
const char *contentTopics);
int waku_filter_unsubscribe_all(void* ctx,
WakuCallBack callback,
void* userData);
int waku_filter_unsubscribe_all(void *ctx,
FFICallBack callback,
void *userData);
int waku_relay_get_num_connected_peers(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_num_connected_peers(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_get_connected_peers(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_connected_peers(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_get_num_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_num_peers_in_mesh(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_relay_get_peers_in_mesh(void* ctx,
const char* pubSubTopic,
WakuCallBack callback,
void* userData);
int waku_relay_get_peers_in_mesh(void *ctx,
FFICallBack callback,
void *userData,
const char *pubSubTopic);
int waku_store_query(void* ctx,
const char* jsonQuery,
const char* peerAddr,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_store_query(void *ctx,
FFICallBack callback,
void *userData,
const char *jsonQuery,
const char *peerAddr,
int timeoutMs);
int waku_connect(void* ctx,
const char* peerMultiAddr,
unsigned int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_connect(void *ctx,
FFICallBack callback,
void *userData,
const char *peerMultiAddr,
unsigned int timeoutMs);
int waku_disconnect_peer_by_id(void* ctx,
const char* peerId,
WakuCallBack callback,
void* userData);
int waku_disconnect_peer_by_id(void *ctx,
FFICallBack callback,
void *userData,
const char *peerId);
int waku_disconnect_all_peers(void* ctx,
WakuCallBack callback,
void* userData);
int waku_disconnect_all_peers(void *ctx,
FFICallBack callback,
void *userData);
int waku_dial_peer(void* ctx,
const char* peerMultiAddr,
const char* protocol,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_dial_peer(void *ctx,
FFICallBack callback,
void *userData,
const char *peerMultiAddr,
const char *protocol,
int timeoutMs);
int waku_dial_peer_by_id(void* ctx,
const char* peerId,
const char* protocol,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_dial_peer_by_id(void *ctx,
FFICallBack callback,
void *userData,
const char *peerId,
const char *protocol,
int timeoutMs);
int waku_get_peerids_from_peerstore(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_peerids_from_peerstore(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_connected_peers_info(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_connected_peers_info(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_peerids_by_protocol(void* ctx,
const char* protocol,
WakuCallBack callback,
void* userData);
int waku_get_peerids_by_protocol(void *ctx,
FFICallBack callback,
void *userData,
const char *protocol);
int waku_listen_addresses(void* ctx,
WakuCallBack callback,
void* userData);
int waku_listen_addresses(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_connected_peers(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_connected_peers(void *ctx,
FFICallBack callback,
void *userData);
// Returns a list of multiaddress given a url to a DNS discoverable ENR tree
// Parameters
// char* entTreeUrl: URL containing a discoverable ENR tree
// char* nameDnsServer: The nameserver to resolve the ENR tree url.
// int timeoutMs: Timeout value in milliseconds to execute the call.
int waku_dns_discovery(void* ctx,
const char* entTreeUrl,
const char* nameDnsServer,
int timeoutMs,
WakuCallBack callback,
void* userData);
// Returns a list of multiaddresses given a URL to a DNS discoverable ENR tree
// Parameters
// char* entTreeUrl: URL containing a discoverable ENR tree
// char* nameDnsServer: The nameserver to resolve the ENR tree url.
// int timeoutMs: Timeout value in milliseconds to execute the call.
int waku_dns_discovery(void *ctx,
FFICallBack callback,
void *userData,
const char *entTreeUrl,
const char *nameDnsServer,
int timeoutMs);
// Updates the bootnode list used for discovering new peers via DiscoveryV5
// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
int waku_discv5_update_bootnodes(void* ctx,
char* bootnodes,
WakuCallBack callback,
void* userData);
// Updates the bootnode list used for discovering new peers via DiscoveryV5
// bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
int waku_discv5_update_bootnodes(void *ctx,
FFICallBack callback,
void *userData,
char *bootnodes);
int waku_start_discv5(void* ctx,
WakuCallBack callback,
void* userData);
int waku_start_discv5(void *ctx,
FFICallBack callback,
void *userData);
int waku_stop_discv5(void* ctx,
WakuCallBack callback,
void* userData);
int waku_stop_discv5(void *ctx,
FFICallBack callback,
void *userData);
// Retrieves the ENR information
int waku_get_my_enr(void* ctx,
WakuCallBack callback,
void* userData);
// Retrieves the ENR information
int waku_get_my_enr(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_my_peerid(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_my_peerid(void *ctx,
FFICallBack callback,
void *userData);
int waku_get_metrics(void* ctx,
WakuCallBack callback,
void* userData);
int waku_get_metrics(void *ctx,
FFICallBack callback,
void *userData);
int waku_peer_exchange_request(void* ctx,
int numPeers,
WakuCallBack callback,
void* userData);
int waku_peer_exchange_request(void *ctx,
FFICallBack callback,
void *userData,
int numPeers);
int waku_ping_peer(void* ctx,
const char* peerAddr,
int timeoutMs,
WakuCallBack callback,
void* userData);
int waku_ping_peer(void *ctx,
FFICallBack callback,
void *userData,
const char *peerAddr,
int timeoutMs);
int waku_is_online(void* ctx,
WakuCallBack callback,
void* userData);
int waku_is_online(void *ctx,
FFICallBack callback,
void *userData);
#ifdef __cplusplus
}
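
Taken together, the reordered header reduces to a small lifecycle. Below is a minimal consumer sketch; the header name and the empty configuration string are assumptions, since the accepted configuration keys are not part of this diff.

#include <stdio.h>
#include "libwaku.h"  // assumed install name for the header above

static void on_response(int ret, const char *msg, size_t len, void *userData) {
  printf("ret=%d msg=%.*s\n", ret, (int)len, msg ? msg : "");
}

int main(void) {
  // The accepted configuration keys are defined elsewhere; "{}" is a placeholder.
  void *ctx = waku_new("{}", on_response, NULL);
  if (ctx == NULL) return 1;

  // Relay, topic-health and connection-change events are delivered through
  // the callback registered here.
  set_event_callback(ctx, on_response, NULL);

  if (waku_start(ctx, on_response, NULL) != RET_OK) return 1;
  waku_version(ctx, on_response, NULL);

  waku_stop(ctx, on_response, NULL);
  return waku_destroy(ctx, on_response, NULL) == RET_OK ? 0 : 1;
}

The integer return value reports RET_OK, RET_ERR or RET_MISSING_CALLBACK, while any payload arrives via the callback.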

View File

@@ -1,107 +1,35 @@
{.pragma: exported, exportc, cdecl, raises: [].}
{.pragma: callback, cdecl, raises: [], gcsafe.}
{.passc: "-fPIC".}
when defined(linux):
{.passl: "-Wl,-soname,libwaku.so".}
import std/[json, atomics, strformat, options, atomics]
import chronicles, chronos, chronos/threadsync
import std/[atomics, options, macros]
import chronicles, chronos, chronos/threadsync, ffi
import
waku/common/base64,
waku/waku_core/message/message,
waku/node/waku_node,
waku/node/peer_manager,
waku/waku_core/topics/pubsub_topic,
waku/waku_core/subscription/push_handler,
waku/waku_relay,
./events/json_message_event,
./waku_context,
./waku_thread_requests/requests/node_lifecycle_request,
./waku_thread_requests/requests/peer_manager_request,
./waku_thread_requests/requests/protocols/relay_request,
./waku_thread_requests/requests/protocols/store_request,
./waku_thread_requests/requests/protocols/lightpush_request,
./waku_thread_requests/requests/protocols/filter_request,
./waku_thread_requests/requests/debug_node_request,
./waku_thread_requests/requests/discovery_request,
./waku_thread_requests/requests/ping_request,
./waku_thread_requests/waku_thread_request,
./alloc,
./ffi_types,
../waku/factory/app_callbacks
./events/json_topic_health_change_event,
./events/json_connection_change_event,
../waku/factory/app_callbacks,
waku/factory/waku,
waku/node/waku_node,
./declare_lib
################################################################################
### Wrapper around the waku node
################################################################################
################################################################################
### Not-exported components
template checkLibwakuParams*(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
) =
if not isNil(ctx):
ctx[].userData = userData
if isNil(callback):
return RET_MISSING_CALLBACK
proc handleRequest(
ctx: ptr WakuContext,
requestType: RequestType,
content: pointer,
callback: WakuCallBack,
userData: pointer,
): cint =
waku_context.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
return RET_OK
### End of not-exported components
################################################################################
################################################################################
### Library setup
# Every Nim library must have this function called - the name is derived from
# the `--nimMainPrefix` command line option
proc libwakuNimMain() {.importc.}
# To control when the library has been initialized
var initialized: Atomic[bool]
if defined(android):
# Redirect chronicles to Android System logs
when compiles(defaultChroniclesStream.outputs[0].writer):
defaultChroniclesStream.outputs[0].writer = proc(
logLevel: LogLevel, msg: LogOutputStr
) {.raises: [].} =
echo logLevel, msg
proc initializeLibrary() {.exported.} =
if not initialized.exchange(true):
## Every Nim library needs to call `<yourprefix>NimMain` once exactly, to initialize the Nim runtime.
## Being `<yourprefix>` the value given in the optional compilation flag --nimMainPrefix:yourprefix
libwakuNimMain()
when declared(setupForeignThreadGc):
setupForeignThreadGc()
when declared(nimGC_setStackBottom):
var locals {.volatile, noinit.}: pointer
locals = addr(locals)
nimGC_setStackBottom(locals)
### End of library setup
################################################################################
## Include different APIs, i.e. all procs with {.ffi.} pragma
include
./kernel_api/peer_manager_api,
./kernel_api/discovery_api,
./kernel_api/node_lifecycle_api,
./kernel_api/debug_node_api,
./kernel_api/ping_api,
./kernel_api/protocols/relay_api,
./kernel_api/protocols/store_api,
./kernel_api/protocols/lightpush_api,
./kernel_api/protocols/filter_api
################################################################################
### Exported procs
proc waku_new(
configJson: cstring, callback: WakuCallback, userData: pointer
configJson: cstring, callback: FFICallback, userData: pointer
): pointer {.dynlib, exportc, cdecl.} =
initializeLibrary()
@@ -111,41 +39,50 @@ proc waku_new(
return nil
## Create the Waku thread that will keep waiting for req from the main thread.
var ctx = waku_context.createWakuContext().valueOr:
let msg = "Error in createWakuContext: " & $error
var ctx = ffi.createFFIContext[Waku]().valueOr:
let msg = "Error in createFFIContext: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil
ctx.userData = userData
proc onReceivedMessage(ctx: ptr FFIContext): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
proc onTopicHealthChange(ctx: ptr FFIContext): TopicHealthChangeHandler =
return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
callEventCallback(ctx, "onTopicHealthChange"):
$JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)
proc onConnectionChange(ctx: ptr FFIContext): ConnectionChangeHandler =
return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
callEventCallback(ctx, "onConnectionChange"):
$JsonConnectionChangeEvent.new($peerId, peerEvent)
let appCallbacks = AppCallbacks(
relayHandler: onReceivedMessage(ctx),
topicHealthChangeHandler: onTopicHealthChange(ctx),
connectionChangeHandler: onConnectionChange(ctx),
)
let retCode = handleRequest(
ctx,
RequestType.LIFECYCLE,
NodeLifecycleRequest.createShared(
NodeLifecycleMsgType.CREATE_NODE, configJson, appCallbacks
),
callback,
userData,
)
if retCode == RET_ERR:
ffi.sendRequestToFFIThread(
ctx, CreateNodeRequest.ffiNewReq(callback, userData, configJson, appCallbacks)
).isOkOr:
let msg = "error in sendRequestToFFIThread: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return nil
return ctx
proc waku_destroy(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
ctx: ptr FFIContext[Waku], callback: FFICallBack, userData: pointer
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
checkParams(ctx, callback, userData)
waku_context.destroyWakuContext(ctx).isOkOr:
ffi.destroyFFIContext(ctx).isOkOr:
let msg = "libwaku error: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
@@ -155,699 +92,5 @@ proc waku_destroy(
return RET_OK
proc waku_version(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
callback(
RET_OK,
cast[ptr cchar](WakuNodeVersionString),
cast[csize_t](len(WakuNodeVersionString)),
userData,
)
return RET_OK
proc waku_set_event_callback(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
) {.dynlib, exportc.} =
initializeLibrary()
ctx[].eventCallback = cast[pointer](callback)
ctx[].eventUserData = userData
proc waku_content_topic(
ctx: ptr WakuContext,
appName: cstring,
appVersion: cuint,
contentTopicName: cstring,
encoding: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_content_topicchar-applicationname-unsigned-int-applicationversion-char-contenttopicname-char-encoding
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let contentTopic = fmt"/{$appName}/{$appVersion}/{$contentTopicName}/{$encoding}"
callback(
RET_OK, unsafeAddr contentTopic[0], cast[csize_t](len(contentTopic)), userData
)
return RET_OK
proc waku_pubsub_topic(
ctx: ptr WakuContext, topicName: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc, cdecl.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_pubsub_topicchar-name-char-encoding
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
let outPubsubTopic = fmt"/waku/2/{$topicName}"
callback(
RET_OK, unsafeAddr outPubsubTopic[0], cast[csize_t](len(outPubsubTopic)), userData
)
return RET_OK
proc waku_default_pubsub_topic(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_default_pubsub_topic
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
callback(
RET_OK,
cast[ptr cchar](DefaultPubsubTopic),
cast[csize_t](len(DefaultPubsubTopic)),
userData,
)
return RET_OK
proc waku_relay_publish(
ctx: ptr WakuContext,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc, cdecl.} =
# https://rfc.vac.dev/spec/36/#extern-char-waku_relay_publishchar-messagejson-char-pubsubtopic-int-timeoutms
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}"
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
let wakuMessage = jsonMessage.toWakuMessage().valueOr:
let msg = "Problem building the WakuMessage: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.PUBLISH, pubSubTopic, nil, wakuMessage),
callback,
userData,
)
proc waku_start(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.LIFECYCLE,
NodeLifecycleRequest.createShared(NodeLifecycleMsgType.START_NODE),
callback,
userData,
)
proc waku_stop(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.LIFECYCLE,
NodeLifecycleRequest.createShared(NodeLifecycleMsgType.STOP_NODE),
callback,
userData,
)
proc waku_relay_subscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
var cb = onReceivedMessage(ctx)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.SUBSCRIBE, pubSubTopic, WakuRelayHandler(cb)),
callback,
userData,
)
proc waku_relay_add_protected_shard(
ctx: ptr WakuContext,
clusterId: cint,
shardId: cint,
publicKey: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(
RelayMsgType.ADD_PROTECTED_SHARD,
clusterId = clusterId,
shardId = shardId,
publicKey = publicKey,
),
callback,
userData,
)
proc waku_relay_unsubscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(
RelayMsgType.UNSUBSCRIBE, pubSubTopic, WakuRelayHandler(onReceivedMessage(ctx))
),
callback,
userData,
)
proc waku_relay_get_num_connected_peers(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.NUM_CONNECTED_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_connected_peers(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.LIST_CONNECTED_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_num_peers_in_mesh(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.NUM_MESH_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_relay_get_peers_in_mesh(
ctx: ptr WakuContext,
pubSubTopic: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.RELAY,
RelayRequest.createShared(RelayMsgType.LIST_MESH_PEERS, pubSubTopic),
callback,
userData,
)
proc waku_filter_subscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
contentTopics: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.FILTER,
FilterRequest.createShared(
FilterMsgType.SUBSCRIBE,
pubSubTopic,
contentTopics,
FilterPushHandler(onReceivedMessage(ctx)),
),
callback,
userData,
)
proc waku_filter_unsubscribe(
ctx: ptr WakuContext,
pubSubTopic: cstring,
contentTopics: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.FILTER,
FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE, pubSubTopic, contentTopics),
callback,
userData,
)
proc waku_filter_unsubscribe_all(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.FILTER,
FilterRequest.createShared(FilterMsgType.UNSUBSCRIBE_ALL),
callback,
userData,
)
proc waku_lightpush_publish(
ctx: ptr WakuContext,
pubSubTopic: cstring,
jsonWakuMessage: cstring,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc, cdecl.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
var jsonMessage: JsonMessage
try:
let jsonContent = parseJson($jsonWakuMessage)
jsonMessage = JsonMessage.fromJsonNode(jsonContent).valueOr:
raise newException(JsonParsingError, $error)
except JsonParsingError:
let msg = fmt"Error parsing json message: {getCurrentExceptionMsg()}"
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
let wakuMessage = jsonMessage.toWakuMessage().valueOr:
let msg = "Problem building the WakuMessage: " & $error
callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
return RET_ERR
handleRequest(
ctx,
RequestType.LIGHTPUSH,
LightpushRequest.createShared(LightpushMsgType.PUBLISH, pubSubTopic, wakuMessage),
callback,
userData,
)
proc waku_connect(
ctx: ptr WakuContext,
peerMultiAddr: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
PeerManagementMsgType.CONNECT_TO, $peerMultiAddr, chronos.milliseconds(timeoutMs)
),
callback,
userData,
)
proc waku_disconnect_peer_by_id(
ctx: ptr WakuContext, peerId: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.DISCONNECT_PEER_BY_ID, peerId = $peerId
),
callback,
userData,
)
proc waku_disconnect_all_peers(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(op = PeerManagementMsgType.DISCONNECT_ALL_PEERS),
callback,
userData,
)
proc waku_dial_peer(
ctx: ptr WakuContext,
peerMultiAddr: cstring,
protocol: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.DIAL_PEER,
peerMultiAddr = $peerMultiAddr,
protocol = $protocol,
),
callback,
userData,
)
proc waku_dial_peer_by_id(
ctx: ptr WakuContext,
peerId: cstring,
protocol: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.DIAL_PEER_BY_ID, peerId = $peerId, protocol = $protocol
),
callback,
userData,
)
proc waku_get_peerids_from_peerstore(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(PeerManagementMsgType.GET_ALL_PEER_IDS),
callback,
userData,
)
proc waku_get_connected_peers_info(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS_INFO),
callback,
userData,
)
proc waku_get_connected_peers(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(PeerManagementMsgType.GET_CONNECTED_PEERS),
callback,
userData,
)
proc waku_get_peerids_by_protocol(
ctx: ptr WakuContext, protocol: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PEER_MANAGER,
PeerManagementRequest.createShared(
op = PeerManagementMsgType.GET_PEER_IDS_BY_PROTOCOL, protocol = $protocol
),
callback,
userData,
)
proc waku_store_query(
ctx: ptr WakuContext,
jsonQuery: cstring,
peerAddr: cstring,
timeoutMs: cint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.STORE,
StoreRequest.createShared(StoreReqType.REMOTE_QUERY, jsonQuery, peerAddr, timeoutMs),
callback,
userData,
)
proc waku_listen_addresses(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_LISTENING_ADDRESSES),
callback,
userData,
)
proc waku_dns_discovery(
ctx: ptr WakuContext,
entTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createRetrieveBootstrapNodesRequest(
DiscoveryMsgType.GET_BOOTSTRAP_NODES, entTreeUrl, nameDnsServer, timeoutMs
),
callback,
userData,
)
proc waku_discv5_update_bootnodes(
ctx: ptr WakuContext, bootnodes: cstring, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
## Updates the bootnode list used for discovering new peers via DiscoveryV5
## bootnodes - JSON array containing the bootnode ENRs i.e. `["enr:...", "enr:..."]`
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createUpdateBootstrapNodesRequest(
DiscoveryMsgType.UPDATE_DISCV5_BOOTSTRAP_NODES, bootnodes
),
callback,
userData,
)
proc waku_get_my_enr(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_ENR),
callback,
userData,
)
proc waku_get_my_peerid(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_MY_PEER_ID),
callback,
userData,
)
proc waku_get_metrics(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_METRICS),
callback,
userData,
)
proc waku_start_discv5(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createDiscV5StartRequest(),
callback,
userData,
)
proc waku_stop_discv5(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createDiscV5StopRequest(),
callback,
userData,
)
proc waku_peer_exchange_request(
ctx: ptr WakuContext, numPeers: uint64, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DISCOVERY,
DiscoveryRequest.createPeerExchangeRequest(numPeers),
callback,
userData,
)
proc waku_ping_peer(
ctx: ptr WakuContext,
peerAddr: cstring,
timeoutMs: cuint,
callback: WakuCallBack,
userData: pointer,
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.PING,
PingRequest.createShared(peerAddr, chronos.milliseconds(timeoutMs)),
callback,
userData,
)
proc waku_is_online(
ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
initializeLibrary()
checkLibwakuParams(ctx, callback, userData)
handleRequest(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_ONLINE_STATE),
callback,
userData,
)
### End of exported procs
################################################################################
# ### End of exported procs
# ################################################################################
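
Since the rewritten waku_new above routes relay messages, topic-health changes and connection changes through the single event callback, a consumer normally keeps that callback minimal. A sketch follows; the event JSON schemas themselves are not shown in this diff, so the payload is treated as an opaque string.

#include <stdio.h>
#include "libwaku.h"

// Events arrive as JSON strings built by JsonMessageEvent,
// JsonTopicHealthChangeEvent and JsonConnectionChangeEvent in the Nim code
// above; their exact fields are outside this diff, so they are only logged here.
static void on_event(int ret, const char *msg, size_t len, void *userData) {
  // Keep this fast and non-blocking, as the header comment requires:
  // copy or enqueue the payload and return immediately.
  fprintf(stderr, "event ret=%d payload=%.*s\n", ret, (int)len, msg ? msg : "");
}

static void register_events(void *ctx) {
  set_event_callback(ctx, on_event, NULL);
}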

View File

@@ -1,223 +0,0 @@
{.pragma: exported, exportc, cdecl, raises: [].}
{.pragma: callback, cdecl, raises: [], gcsafe.}
{.passc: "-fPIC".}
import std/[options, atomics, os, net, locks]
import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results
import
waku/common/logging,
waku/factory/waku,
waku/node/peer_manager,
waku/waku_relay/[protocol, topic_health],
waku/waku_core/[topics/pubsub_topic, message],
./waku_thread_requests/[waku_thread_request, requests/debug_node_request],
./ffi_types,
./events/[
json_message_event, json_topic_health_change_event, json_connection_change_event,
json_waku_not_responding_event,
]
type WakuContext* = object
wakuThread: Thread[(ptr WakuContext)]
watchdogThread: Thread[(ptr WakuContext)]
# monitors the Waku thread and notifies the Waku SDK consumer if it hangs
lock: Lock
reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest]
reqSignal: ThreadSignalPtr
# to inform The Waku Thread (a.k.a TWT) that a new request is sent
reqReceivedSignal: ThreadSignalPtr
# to inform the main thread that the request is rx by TWT
userData*: pointer
eventCallback*: pointer
eventUserdata*: pointer
running: Atomic[bool] # To control when the threads are running
const git_version* {.strdefine.} = "n/a"
const versionString = "version / git commit hash: " & waku.git_version
template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) =
if isNil(ctx[].eventCallback):
error eventName & " - eventCallback is nil"
return
foreignThreadGc:
try:
let event = body
cast[WakuCallBack](ctx[].eventCallback)(
RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
)
except Exception, CatchableError:
let msg =
"Exception " & eventName & " when calling 'eventCallBack': " &
getCurrentExceptionMsg()
cast[WakuCallBack](ctx[].eventCallback)(
RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
)
proc onConnectionChange*(ctx: ptr WakuContext): ConnectionChangeHandler =
return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
callEventCallback(ctx, "onConnectionChange"):
$JsonConnectionChangeEvent.new($peerId, peerEvent)
proc onReceivedMessage*(ctx: ptr WakuContext): WakuRelayHandler =
return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
callEventCallback(ctx, "onReceivedMessage"):
$JsonMessageEvent.new(pubsubTopic, msg)
proc onTopicHealthChange*(ctx: ptr WakuContext): TopicHealthChangeHandler =
return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
callEventCallback(ctx, "onTopicHealthChange"):
$JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)
proc onWakuNotResponding*(ctx: ptr WakuContext) =
callEventCallback(ctx, "onWakuNotResponsive"):
$JsonWakuNotRespondingEvent.new()
proc sendRequestToWakuThread*(
ctx: ptr WakuContext,
reqType: RequestType,
reqContent: pointer,
callback: WakuCallBack,
userData: pointer,
timeout = InfiniteDuration,
): Result[void, string] =
ctx.lock.acquire()
# This lock is only necessary while we use a SP Channel and while the signalling
# between threads assumes that there aren't concurrent requests.
# Rearchitecting the signaling + migrating to a MP Channel will allow us to receive
# requests concurrently and spare us the need of locks
defer:
ctx.lock.release()
let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData)
## Sending the request
let sentOk = ctx.reqChannel.trySend(req)
if not sentOk:
deallocShared(req)
return err("Couldn't send a request to the waku thread: " & $req[])
let fireSync = ctx.reqSignal.fireSync().valueOr:
deallocShared(req)
return err("failed fireSync: " & $error)
if not fireSync:
deallocShared(req)
return err("Couldn't fireSync in time")
## wait until the Waku Thread properly received the request
ctx.reqReceivedSignal.waitSync(timeout).isOkOr:
deallocShared(req)
return err("Couldn't receive reqReceivedSignal signal")
## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the
## process proc. See the 'waku_thread_request.nim' module for more details.
ok()
proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} =
## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs.
let watchdogRun = proc(ctx: ptr WakuContext) {.async.} =
const WatchdogStartDelay = 10.seconds
const WatchdogTimeinterval = 1.seconds
const WakuNotRespondingTimeout = 3.seconds
# Give time for the node to be created and up before sending watchdog requests
await sleepAsync(WatchdogStartDelay)
while true:
await sleepAsync(WatchdogTimeinterval)
if ctx.running.load == false:
info "Watchdog thread exiting because WakuContext is not running"
break
let wakuCallback = proc(
callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
) {.cdecl, gcsafe, raises: [].} =
discard ## Don't do anything. Just respecting the callback signature.
const nilUserData = nil
trace "Sending watchdog request to Waku thread"
sendRequestToWakuThread(
ctx,
RequestType.DEBUG,
DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED),
wakuCallback,
nilUserData,
WakuNotRespondingTimeout,
).isOkOr:
error "Failed to send watchdog request to Waku thread", error = $error
onWakuNotResponding(ctx)
waitFor watchdogRun(ctx)
proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} =
## Waku thread that attends library user requests (stop, connect_to, etc.)
logging.setupLog(logging.LogLevel.DEBUG, logging.LogFormat.TEXT)
let wakuRun = proc(ctx: ptr WakuContext) {.async.} =
var waku: Waku
while true:
await ctx.reqSignal.wait()
if ctx.running.load == false:
break
## Trying to get a request from the libwaku requestor thread
var request: ptr WakuThreadRequest
let recvOk = ctx.reqChannel.tryRecv(request)
if not recvOk:
error "waku thread could not receive a request"
continue
## Handle the request
asyncSpawn WakuThreadRequest.process(request, addr waku)
ctx.reqReceivedSignal.fireSync().isOkOr:
error "could not fireSync back to requester thread", error = error
waitFor wakuRun(ctx)
proc createWakuContext*(): Result[ptr WakuContext, string] =
## This proc is called from the main thread and it creates
## the Waku working thread.
var ctx = createShared(WakuContext, 1)
ctx.reqSignal = ThreadSignalPtr.new().valueOr:
return err("couldn't create reqSignal ThreadSignalPtr")
ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr:
return err("couldn't create reqReceivedSignal ThreadSignalPtr")
ctx.lock.initLock()
ctx.running.store(true)
try:
createThread(ctx.wakuThread, wakuThreadBody, ctx)
except ValueError, ResourceExhaustedError:
freeShared(ctx)
return err("failed to create the Waku thread: " & getCurrentExceptionMsg())
try:
createThread(ctx.watchdogThread, watchdogThreadBody, ctx)
except ValueError, ResourceExhaustedError:
freeShared(ctx)
return err("failed to create the watchdog thread: " & getCurrentExceptionMsg())
return ok(ctx)
proc destroyWakuContext*(ctx: ptr WakuContext): Result[void, string] =
ctx.running.store(false)
let signaledOnTime = ctx.reqSignal.fireSync().valueOr:
return err("error in destroyWakuContext: " & $error)
if not signaledOnTime:
return err("failed to signal reqSignal on time in destroyWakuContext")
joinThread(ctx.wakuThread)
joinThread(ctx.watchdogThread)
ctx.lock.deinitLock()
?ctx.reqSignal.close()
?ctx.reqReceivedSignal.close()
freeShared(ctx)
return ok()

View File

@@ -1,63 +0,0 @@
import std/json
import
chronicles,
chronos,
results,
eth/p2p/discoveryv5/enr,
strutils,
libp2p/peerid,
metrics
import
../../../waku/factory/waku,
../../../waku/node/waku_node,
../../../waku/node/health_monitor
type DebugNodeMsgType* = enum
RETRIEVE_LISTENING_ADDRESSES
RETRIEVE_MY_ENR
RETRIEVE_MY_PEER_ID
RETRIEVE_METRICS
RETRIEVE_ONLINE_STATE
CHECK_WAKU_NOT_BLOCKED
type DebugNodeRequest* = object
operation: DebugNodeMsgType
proc createShared*(T: type DebugNodeRequest, op: DebugNodeMsgType): ptr type T =
var ret = createShared(T)
ret[].operation = op
return ret
proc destroyShared(self: ptr DebugNodeRequest) =
deallocShared(self)
proc getMultiaddresses(node: WakuNode): seq[string] =
return node.info().listenAddresses
proc getMetrics(): string =
{.gcsafe.}:
return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module
proc process*(
self: ptr DebugNodeRequest, waku: Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of RETRIEVE_LISTENING_ADDRESSES:
## returns a comma-separated string of the listen addresses
return ok(waku.node.getMultiaddresses().join(","))
of RETRIEVE_MY_ENR:
return ok(waku.node.enr.toURI())
of RETRIEVE_MY_PEER_ID:
return ok($waku.node.peerId())
of RETRIEVE_METRICS:
return ok(getMetrics())
of RETRIEVE_ONLINE_STATE:
return ok($waku.healthMonitor.onlineMonitor.amIOnline())
of CHECK_WAKU_NOT_BLOCKED:
return ok("waku thread is not blocked")
error "unsupported operation in DebugNodeRequest"
return err("unsupported operation in DebugNodeRequest")
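
The removed DebugNodeRequest operations largely map onto the debug getters declared in the header earlier in this diff (CHECK_WAKU_NOT_BLOCKED was internal to the watchdog). A consumer-side sketch, with the header name assumed:

#include <stdio.h>
#include "libwaku.h"

static void print_result(int ret, const char *msg, size_t len, void *userData) {
  // userData is used here only to label which getter produced the output.
  printf("%s: ret=%d %.*s\n", (const char *)userData, ret, (int)len, msg ? msg : "");
}

static void debug_example(void *ctx) {
  waku_listen_addresses(ctx, print_result, (void *)"listen_addresses");
  waku_get_my_enr(ctx, print_result, (void *)"my_enr");
  waku_get_my_peerid(ctx, print_result, (void *)"my_peerid");
  waku_get_metrics(ctx, print_result, (void *)"metrics");
  waku_is_online(ctx, print_result, (void *)"is_online");
}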

View File

@@ -1,151 +0,0 @@
import std/json
import chronos, chronicles, results, strutils, libp2p/multiaddress
import
../../../waku/factory/waku,
../../../waku/discovery/waku_dnsdisc,
../../../waku/discovery/waku_discv5,
../../../waku/waku_core/peers,
../../../waku/node/waku_node,
../../../waku/node/kernel_api,
../../alloc
type DiscoveryMsgType* = enum
GET_BOOTSTRAP_NODES
UPDATE_DISCV5_BOOTSTRAP_NODES
START_DISCV5
STOP_DISCV5
PEER_EXCHANGE
type DiscoveryRequest* = object
operation: DiscoveryMsgType
## used in GET_BOOTSTRAP_NODES
enrTreeUrl: cstring
nameDnsServer: cstring
timeoutMs: cint
## used in UPDATE_DISCV5_BOOTSTRAP_NODES
nodes: cstring
## used in PEER_EXCHANGE
numPeers: uint64
proc createShared(
T: type DiscoveryRequest,
op: DiscoveryMsgType,
enrTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
nodes: cstring,
numPeers: uint64,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].enrTreeUrl = enrTreeUrl.alloc()
ret[].nameDnsServer = nameDnsServer.alloc()
ret[].timeoutMs = timeoutMs
ret[].nodes = nodes.alloc()
ret[].numPeers = numPeers
return ret
proc createRetrieveBootstrapNodesRequest*(
T: type DiscoveryRequest,
op: DiscoveryMsgType,
enrTreeUrl: cstring,
nameDnsServer: cstring,
timeoutMs: cint,
): ptr type T =
return T.createShared(op, enrTreeUrl, nameDnsServer, timeoutMs, "", 0)
proc createUpdateBootstrapNodesRequest*(
T: type DiscoveryRequest, op: DiscoveryMsgType, nodes: cstring
): ptr type T =
return T.createShared(op, "", "", 0, nodes, 0)
proc createDiscV5StartRequest*(T: type DiscoveryRequest): ptr type T =
return T.createShared(START_DISCV5, "", "", 0, "", 0)
proc createDiscV5StopRequest*(T: type DiscoveryRequest): ptr type T =
return T.createShared(STOP_DISCV5, "", "", 0, "", 0)
proc createPeerExchangeRequest*(
T: type DiscoveryRequest, numPeers: uint64
): ptr type T =
return T.createShared(PEER_EXCHANGE, "", "", 0, "", numPeers)
proc destroyShared(self: ptr DiscoveryRequest) =
deallocShared(self[].enrTreeUrl)
deallocShared(self[].nameDnsServer)
deallocShared(self[].nodes)
deallocShared(self)
proc retrieveBootstrapNodes(
enrTreeUrl: string, ipDnsServer: string
): Future[Result[seq[string], string]] {.async.} =
let dnsNameServers = @[parseIpAddress(ipDnsServer)]
let discoveredPeers: seq[RemotePeerInfo] = (
await retrieveDynamicBootstrapNodes(enrTreeUrl, dnsNameServers)
).valueOr:
return err("failed discovering peers from DNS: " & $error)
var multiAddresses = newSeq[string]()
for discPeer in discoveredPeers:
for address in discPeer.addrs:
multiAddresses.add($address & "/p2p/" & $discPeer)
return ok(multiAddresses)
proc updateDiscv5BootstrapNodes(nodes: string, waku: ptr Waku): Result[void, string] =
waku.wakuDiscv5.updateBootstrapRecords(nodes).isOkOr:
return err("error in updateDiscv5BootstrapNodes: " & $error)
return ok()
proc performPeerExchangeRequestTo(
numPeers: uint64, waku: ptr Waku
): Future[Result[int, string]] {.async.} =
let numPeersRecv = (await waku.node.fetchPeerExchangePeers(numPeers)).valueOr:
return err($error)
return ok(numPeersRecv)
proc process*(
self: ptr DiscoveryRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of START_DISCV5:
let res = await waku.wakuDiscv5.start()
res.isOkOr:
error "START_DISCV5 failed", error = error
return err($error)
return ok("discv5 started correctly")
of STOP_DISCV5:
await waku.wakuDiscv5.stop()
return ok("discv5 stopped correctly")
of GET_BOOTSTRAP_NODES:
let nodes = (
await retrieveBootstrapNodes($self[].enrTreeUrl, $self[].nameDnsServer)
).valueOr:
error "GET_BOOTSTRAP_NODES failed", error = error
return err($error)
## returns a comma-separated string of bootstrap nodes' multiaddresses
return ok(nodes.join(","))
of UPDATE_DISCV5_BOOTSTRAP_NODES:
updateDiscv5BootstrapNodes($self[].nodes, waku).isOkOr:
error "UPDATE_DISCV5_BOOTSTRAP_NODES failed", error = error
return err($error)
return ok("discovery request processed correctly")
of PEER_EXCHANGE:
let numValidPeers = (await performPeerExchangeRequestTo(self[].numPeers, waku)).valueOr:
error "PEER_EXCHANGE failed", error = error
return err($error)
return ok($numValidPeers)
error "discovery request not handled"
return err("discovery request not handled")
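
The discovery operations this removed request object used to carry remain reachable through the declarations earlier in the diff. A sketch with placeholder ENR tree URL, nameserver and bootnode values:

#include <stdio.h>
#include "libwaku.h"

static void on_discovery(int ret, const char *msg, size_t len, void *userData) {
  // For waku_dns_discovery a successful payload is a comma-separated list of
  // multiaddresses, as in the removed GET_BOOTSTRAP_NODES handler above.
  printf("discovery ret=%d %.*s\n", ret, (int)len, msg ? msg : "");
}

static void discovery_example(void *ctx) {
  // Placeholder values; real ones depend on the target network.
  waku_dns_discovery(ctx, on_discovery, NULL,
                     "enrtree://EXAMPLE@nodes.example.org", "8.8.8.8", 5000);
  waku_discv5_update_bootnodes(ctx, on_discovery, NULL,
                               (char *)"[\"enr:-EXAMPLE\"]");
  waku_start_discv5(ctx, on_discovery, NULL);
  waku_peer_exchange_request(ctx, on_discovery, NULL, 5);
}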

View File

@@ -1,135 +0,0 @@
import std/[sequtils, strutils, tables]
import chronicles, chronos, results, options, json
import
../../../waku/factory/waku,
../../../waku/node/waku_node,
../../alloc,
../../../waku/node/peer_manager
type PeerManagementMsgType* {.pure.} = enum
CONNECT_TO
GET_ALL_PEER_IDS
GET_CONNECTED_PEERS_INFO
GET_PEER_IDS_BY_PROTOCOL
DISCONNECT_PEER_BY_ID
DISCONNECT_ALL_PEERS
DIAL_PEER
DIAL_PEER_BY_ID
GET_CONNECTED_PEERS
type PeerManagementRequest* = object
operation: PeerManagementMsgType
peerMultiAddr: cstring
dialTimeout: Duration
protocol: cstring
peerId: cstring
type PeerInfo = object
protocols: seq[string]
addresses: seq[string]
proc createShared*(
T: type PeerManagementRequest,
op: PeerManagementMsgType,
peerMultiAddr = "",
dialTimeout = chronos.milliseconds(0), ## arbitrary Duration as not all ops need dialTimeout
peerId = "",
protocol = "",
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].peerMultiAddr = peerMultiAddr.alloc()
ret[].peerId = peerId.alloc()
ret[].protocol = protocol.alloc()
ret[].dialTimeout = dialTimeout
return ret
proc destroyShared(self: ptr PeerManagementRequest) =
if not isNil(self[].peerMultiAddr):
deallocShared(self[].peerMultiAddr)
if not isNil(self[].peerId):
deallocShared(self[].peerId)
if not isNil(self[].protocol):
deallocShared(self[].protocol)
deallocShared(self)
proc process*(
self: ptr PeerManagementRequest, waku: Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of CONNECT_TO:
let peers = ($self[].peerMultiAddr).split(",").mapIt(strip(it))
await waku.node.connectToNodes(peers, source = "static")
return ok("")
of GET_ALL_PEER_IDS:
## returns a comma-separated string of peerIDs
let peerIDs =
waku.node.peerManager.switch.peerStore.peers().mapIt($it.peerId).join(",")
return ok(peerIDs)
of GET_CONNECTED_PEERS_INFO:
## returns a JSON string mapping peerIDs to objects with protocols and addresses
var peersMap = initTable[string, PeerInfo]()
let peers = waku.node.peerManager.switch.peerStore.peers().filterIt(
it.connectedness == Connected
)
# Build a map of peer IDs to peer info objects
for peer in peers:
let peerIdStr = $peer.peerId
peersMap[peerIdStr] =
PeerInfo(protocols: peer.protocols, addresses: peer.addrs.mapIt($it))
# Convert the map to JSON string
let jsonObj = %*peersMap
let jsonStr = $jsonObj
return ok(jsonStr)
of GET_PEER_IDS_BY_PROTOCOL:
## returns a comma-separated string of peerIDs that mount the given protocol
let connectedPeers = waku.node.peerManager.switch.peerStore
.peers($self[].protocol)
.filterIt(it.connectedness == Connected)
.mapIt($it.peerId)
.join(",")
return ok(connectedPeers)
of DISCONNECT_PEER_BY_ID:
let peerId = PeerId.init($self[].peerId).valueOr:
error "DISCONNECT_PEER_BY_ID failed", error = $error
return err($error)
await waku.node.peerManager.disconnectNode(peerId)
return ok("")
of DISCONNECT_ALL_PEERS:
await waku.node.peerManager.disconnectAllPeers()
return ok("")
of DIAL_PEER:
let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr:
error "DIAL_PEER failed", error = $error
return err($error)
let conn = await waku.node.peerManager.dialPeer(remotePeerInfo, $self[].protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER failed", error = msg, peerId = $remotePeerInfo.peerId
return err(msg)
of DIAL_PEER_BY_ID:
let peerId = PeerId.init($self[].peerId).valueOr:
error "DIAL_PEER_BY_ID failed", error = $error
return err($error)
let conn = await waku.node.peerManager.dialPeer(peerId, $self[].protocol)
if conn.isNone():
let msg = "failed dialing peer"
error "DIAL_PEER_BY_ID failed", error = msg, peerId = $peerId
return err(msg)
of GET_CONNECTED_PEERS:
## returns a comma-separated string of peerIDs
let
(inPeerIds, outPeerIds) = waku.node.peerManager.connectedPeers()
connectedPeerids = concat(inPeerIds, outPeerIds)
return ok(connectedPeerids.mapIt($it).join(","))
return ok("")
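
The peer-management operations removed here correspond to the waku_connect, waku_dial_* and waku_get_* declarations earlier in the diff. A brief consumer sketch with a placeholder multiaddress:

#include <stdio.h>
#include "libwaku.h"

static void on_peer_result(int ret, const char *msg, size_t len, void *userData) {
  // The GET_* calls report comma-separated peer IDs or a JSON map, as in the
  // removed handlers above.
  printf("peers ret=%d %.*s\n", ret, (int)len, msg ? msg : "");
}

static void peer_example(void *ctx) {
  const char *peer = "/ip4/192.0.2.1/tcp/60000/p2p/16Uiu2...";  // placeholder
  waku_connect(ctx, on_peer_result, NULL, peer, 10000);
  waku_get_connected_peers(ctx, on_peer_result, NULL);
  waku_get_connected_peers_info(ctx, on_peer_result, NULL);
  waku_disconnect_all_peers(ctx, on_peer_result, NULL);
}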

View File

@@ -1,54 +0,0 @@
import std/[json, strutils]
import chronos, results
import libp2p/[protocols/ping, switch, multiaddress, multicodec]
import ../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../alloc
type PingRequest* = object
peerAddr: cstring
timeout: Duration
proc createShared*(
T: type PingRequest, peerAddr: cstring, timeout: Duration
): ptr type T =
var ret = createShared(T)
ret[].peerAddr = peerAddr.alloc()
ret[].timeout = timeout
return ret
proc destroyShared(self: ptr PingRequest) =
deallocShared(self[].peerAddr)
deallocShared(self)
proc process*(
self: ptr PingRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
let peerInfo = peers.parsePeerInfo(($self[].peerAddr).split(",")).valueOr:
return err("PingRequest failed to parse peer addr: " & $error)
proc ping(): Future[Result[Duration, string]] {.async, gcsafe.} =
try:
let conn = await waku.node.switch.dial(peerInfo.peerId, peerInfo.addrs, PingCodec)
defer:
await conn.close()
let pingRTT = await waku.node.libp2pPing.ping(conn)
if pingRTT == 0.nanos:
return err("could not ping peer: rtt-0")
return ok(pingRTT)
except CatchableError:
return err("could not ping peer: " & getCurrentExceptionMsg())
let pingFuture = ping()
let pingRTT: Duration =
if self[].timeout == chronos.milliseconds(0): # No timeout expected
?(await pingFuture)
else:
let timedOut = not (await pingFuture.withTimeout(self[].timeout))
if timedOut:
return err("ping timed out")
?(pingFuture.read())
ok($(pingRTT.nanos))
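
The removed ping request maps onto waku_ping_peer in the header; the deleted handler returned the round-trip time in nanoseconds as a string. A sketch with a placeholder multiaddress:

#include <stdio.h>
#include "libwaku.h"

static void on_ping(int ret, const char *msg, size_t len, void *userData) {
  printf("ping ret=%d rtt=%.*s\n", ret, (int)len, msg ? msg : "");
}

static void ping_example(void *ctx) {
  const char *peer = "/ip4/192.0.2.1/tcp/60000/p2p/16Uiu2...";  // placeholder
  waku_ping_peer(ctx, on_ping, NULL, peer, 3000);
}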

View File

@@ -1,106 +0,0 @@
import options, std/[strutils, sequtils]
import chronicles, chronos, results
import
../../../../waku/waku_filter_v2/client,
../../../../waku/waku_core/message/message,
../../../../waku/factory/waku,
../../../../waku/waku_filter_v2/common,
../../../../waku/waku_core/subscription/push_handler,
../../../../waku/node/peer_manager/peer_manager,
../../../../waku/node/waku_node,
../../../../waku/node/kernel_api,
../../../../waku/waku_core/topics/pubsub_topic,
../../../../waku/waku_core/topics/content_topic,
../../../alloc
type FilterMsgType* = enum
SUBSCRIBE
UNSUBSCRIBE
UNSUBSCRIBE_ALL
type FilterRequest* = object
operation: FilterMsgType
pubsubTopic: cstring
contentTopics: cstring ## comma-separated list of content-topics
filterPushEventCallback: FilterPushHandler ## handles incoming filter pushed msgs
proc createShared*(
T: type FilterRequest,
op: FilterMsgType,
pubsubTopic: cstring = "",
contentTopics: cstring = "",
filterPushEventCallback: FilterPushHandler = nil,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].pubsubTopic = pubsubTopic.alloc()
ret[].contentTopics = contentTopics.alloc()
ret[].filterPushEventCallback = filterPushEventCallback
return ret
proc destroyShared(self: ptr FilterRequest) =
deallocShared(self[].pubsubTopic)
deallocShared(self[].contentTopics)
deallocShared(self)
proc process*(
self: ptr FilterRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
const FilterOpTimeout = 5.seconds
if waku.node.wakuFilterClient.isNil():
let errorMsg = "FilterRequest waku.node.wakuFilterClient is nil"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
case self.operation
of SUBSCRIBE:
waku.node.wakuFilterClient.registerPushHandler(self.filterPushEventCallback)
let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when subscribing"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
let pubsubTopic = some(PubsubTopic($self[].pubsubTopic))
let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it))
let subFut = waku.node.filterSubscribe(pubsubTopic, contentTopics, peer)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter subscription timed out"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
of UNSUBSCRIBE:
let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
let pubsubTopic = some(PubsubTopic($self[].pubsubTopic))
let contentTopics = ($(self[].contentTopics)).split(",").mapIt(ContentTopic(it))
let subFut = waku.node.filterUnsubscribe(pubsubTopic, contentTopics, peer)
if not await subFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription timed out"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
of UNSUBSCRIBE_ALL:
let peer = waku.node.peerManager.selectPeer(WakuFilterSubscribeCodec).valueOr:
let errorMsg =
"could not find peer with WakuFilterSubscribeCodec when unsubscribing all"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
let unsubFut = waku.node.filterUnsubscribeAll(peer)
if not await unsubFut.withTimeout(FilterOpTimeout):
let errorMsg = "filter un-subscription all timed out"
error "fail filter process", error = errorMsg, op = $(self.operation)
return err(errorMsg)
return ok("")

View File

@ -1,109 +0,0 @@
import options
import chronicles, chronos, results
import
../../../../waku/waku_core/message/message,
../../../../waku/waku_core/codecs,
../../../../waku/factory/waku,
../../../../waku/waku_core/message,
../../../../waku/waku_core/time, # Timestamp
../../../../waku/waku_core/topics/pubsub_topic,
../../../../waku/waku_lightpush_legacy/client,
../../../../waku/waku_lightpush_legacy/common,
../../../../waku/node/peer_manager/peer_manager,
../../../alloc
type LightpushMsgType* = enum
PUBLISH
type ThreadSafeWakuMessage* = object
payload: SharedSeq[byte]
contentTopic: cstring
meta: SharedSeq[byte]
version: uint32
timestamp: Timestamp
ephemeral: bool
when defined(rln):
proof: SharedSeq[byte]
type LightpushRequest* = object
operation: LightpushMsgType
pubsubTopic: cstring
message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests
proc createShared*(
T: type LightpushRequest,
op: LightpushMsgType,
pubsubTopic: cstring,
m = WakuMessage(),
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].pubsubTopic = pubsubTopic.alloc()
ret[].message = ThreadSafeWakuMessage(
payload: allocSharedSeq(m.payload),
contentTopic: m.contentTopic.alloc(),
meta: allocSharedSeq(m.meta),
version: m.version,
timestamp: m.timestamp,
ephemeral: m.ephemeral,
)
when defined(rln):
ret[].message.proof = allocSharedSeq(m.proof)
return ret
proc destroyShared(self: ptr LightpushRequest) =
deallocSharedSeq(self[].message.payload)
deallocShared(self[].message.contentTopic)
deallocSharedSeq(self[].message.meta)
when defined(rln):
deallocSharedSeq(self[].message.proof)
deallocShared(self)
proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage =
var wakuMessage = WakuMessage()
wakuMessage.payload = m.payload.toSeq()
wakuMessage.contentTopic = $m.contentTopic
wakuMessage.meta = m.meta.toSeq()
wakuMessage.version = m.version
wakuMessage.timestamp = m.timestamp
wakuMessage.ephemeral = m.ephemeral
when defined(rln):
wakuMessage.proof = m.proof
return wakuMessage
proc process*(
self: ptr LightpushRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
case self.operation
of PUBLISH:
let msg = self.message.toWakuMessage()
let pubsubTopic = $self.pubsubTopic
if waku.node.wakuLightpushClient.isNil():
let errorMsg = "LightpushRequest waku.node.wakuLightpushClient is nil"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
let peerOpt = waku.node.peerManager.selectPeer(WakuLightPushCodec)
if peerOpt.isNone():
let errorMsg = "failed to lightpublish message, no suitable remote peers"
error "PUBLISH failed", error = errorMsg
return err(errorMsg)
let msgHashHex = (
await waku.node.wakuLegacyLightpushClient.publish(
pubsubTopic, msg, peer = peerOpt.get()
)
).valueOr:
error "PUBLISH failed", error = error
return err($error)
return ok(msgHashHex)

View File

@ -1,168 +0,0 @@
import std/[net, sequtils, strutils]
import chronicles, chronos, stew/byteutils, results
import
waku/waku_core/message/message,
waku/factory/[validator_signed, waku],
tools/confutils/cli_args,
waku/waku_node,
waku/waku_core/message,
waku/waku_core/time, # Timestamp
waku/waku_core/topics/pubsub_topic,
waku/waku_core/topics,
waku/waku_relay/protocol,
waku/node/peer_manager
import
../../../alloc
type RelayMsgType* = enum
SUBSCRIBE
UNSUBSCRIBE
PUBLISH
NUM_CONNECTED_PEERS
LIST_CONNECTED_PEERS
## to return the list of all peers connected to a specific pubsub topic
NUM_MESH_PEERS
LIST_MESH_PEERS
## to return the list of only the peers that form the mesh for a particular pubsub topic
ADD_PROTECTED_SHARD ## Protects a shard with a public key
type ThreadSafeWakuMessage* = object
payload: SharedSeq[byte]
contentTopic: cstring
meta: SharedSeq[byte]
version: uint32
timestamp: Timestamp
ephemeral: bool
when defined(rln):
proof: SharedSeq[byte]
type RelayRequest* = object
operation: RelayMsgType
pubsubTopic: cstring
relayEventCallback: WakuRelayHandler # not used in 'PUBLISH' requests
message: ThreadSafeWakuMessage # only used in 'PUBLISH' requests
clusterId: cint # only used in 'ADD_PROTECTED_SHARD' requests
shardId: cint # only used in 'ADD_PROTECTED_SHARD' requests
publicKey: cstring # only used in 'ADD_PROTECTED_SHARD' requests
proc createShared*(
T: type RelayRequest,
op: RelayMsgType,
pubsubTopic: cstring = nil,
relayEventCallback: WakuRelayHandler = nil,
m = WakuMessage(),
clusterId: cint = 0,
shardId: cint = 0,
publicKey: cstring = nil,
): ptr type T =
var ret = createShared(T)
ret[].operation = op
ret[].pubsubTopic = pubsubTopic.alloc()
ret[].clusterId = clusterId
ret[].shardId = shardId
ret[].publicKey = publicKey.alloc()
ret[].relayEventCallback = relayEventCallback
ret[].message = ThreadSafeWakuMessage(
payload: allocSharedSeq(m.payload),
contentTopic: m.contentTopic.alloc(),
meta: allocSharedSeq(m.meta),
version: m.version,
timestamp: m.timestamp,
ephemeral: m.ephemeral,
)
when defined(rln):
ret[].message.proof = allocSharedSeq(m.proof)
return ret
proc destroyShared(self: ptr RelayRequest) =
deallocSharedSeq(self[].message.payload)
deallocShared(self[].message.contentTopic)
deallocSharedSeq(self[].message.meta)
when defined(rln):
deallocSharedSeq(self[].message.proof)
deallocShared(self[].pubsubTopic)
deallocShared(self[].publicKey)
deallocShared(self)
proc toWakuMessage(m: ThreadSafeWakuMessage): WakuMessage =
var wakuMessage = WakuMessage()
wakuMessage.payload = m.payload.toSeq()
wakuMessage.contentTopic = $m.contentTopic
wakuMessage.meta = m.meta.toSeq()
wakuMessage.version = m.version
wakuMessage.timestamp = m.timestamp
wakuMessage.ephemeral = m.ephemeral
when defined(rln):
wakuMessage.proof = m.proof
return wakuMessage
proc process*(
self: ptr RelayRequest, waku: ptr Waku
): Future[Result[string, string]] {.async.} =
defer:
destroyShared(self)
if waku.node.wakuRelay.isNil():
return err("Operation not supported without Waku Relay enabled.")
case self.operation
of SUBSCRIBE:
waku.node.subscribe(
(kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic),
handler = self.relayEventCallback,
).isOkOr:
error "SUBSCRIBE failed", error
return err($error)
of UNSUBSCRIBE:
waku.node.unsubscribe((kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic)).isOkOr:
error "UNSUBSCRIBE failed", error
return err($error)
of PUBLISH:
let msg = self.message.toWakuMessage()
let pubsubTopic = $self.pubsubTopic
(await waku.node.wakuRelay.publish(pubsubTopic, msg)).isOkOr:
error "PUBLISH failed", error
return err($error)
let msgHash = computeMessageHash(pubSubTopic, msg).to0xHex
return ok(msgHash)
of NUM_CONNECTED_PEERS:
let numConnPeers = waku.node.wakuRelay.getNumConnectedPeers($self.pubsubTopic).valueOr:
error "NUM_CONNECTED_PEERS failed", error
return err($error)
return ok($numConnPeers)
of LIST_CONNECTED_PEERS:
let connPeers = waku.node.wakuRelay.getConnectedPeers($self.pubsubTopic).valueOr:
error "LIST_CONNECTED_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(connPeers.mapIt($it).join(","))
of NUM_MESH_PEERS:
let numPeersInMesh = waku.node.wakuRelay.getNumPeersInMesh($self.pubsubTopic).valueOr:
error "NUM_MESH_PEERS failed", error = error
return err($error)
return ok($numPeersInMesh)
of LIST_MESH_PEERS:
let meshPeers = waku.node.wakuRelay.getPeersInMesh($self.pubsubTopic).valueOr:
error "LIST_MESH_PEERS failed", error = error
return err($error)
## returns a comma-separated string of peerIDs
return ok(meshPeers.mapIt($it).join(","))
of ADD_PROTECTED_SHARD:
try:
let relayShard =
RelayShard(clusterId: uint16(self.clusterId), shardId: uint16(self.shardId))
let protectedShard =
ProtectedShard.parseCmdArg($relayShard & ":" & $self.publicKey)
waku.node.wakuRelay.addSignedShardsValidator(
@[protectedShard], uint16(self.clusterId)
)
except ValueError:
return err(getCurrentExceptionMsg())
return ok("")

View File

@ -1,104 +0,0 @@
## This file contains the base message request type that will be handled.
## The requests are created by the main thread and processed by
## the Waku Thread.
import std/json, results
import chronos, chronos/threadsync
import
../../waku/factory/waku,
../ffi_types,
./requests/node_lifecycle_request,
./requests/peer_manager_request,
./requests/protocols/relay_request,
./requests/protocols/store_request,
./requests/protocols/lightpush_request,
./requests/protocols/filter_request,
./requests/debug_node_request,
./requests/discovery_request,
./requests/ping_request
type RequestType* {.pure.} = enum
LIFECYCLE
PEER_MANAGER
PING
RELAY
STORE
DEBUG
DISCOVERY
LIGHTPUSH
FILTER
type WakuThreadRequest* = object
reqType: RequestType
reqContent: pointer
callback: WakuCallBack
userData: pointer
proc createShared*(
T: type WakuThreadRequest,
reqType: RequestType,
reqContent: pointer,
callback: WakuCallBack,
userData: pointer,
): ptr type T =
var ret = createShared(T)
ret[].reqType = reqType
ret[].reqContent = reqContent
ret[].callback = callback
ret[].userData = userData
return ret
proc handleRes[T: string | void](
res: Result[T, string], request: ptr WakuThreadRequest
) =
## Handles the Result responses, which can either be Result[string, string] or
## Result[void, string].
defer:
deallocShared(request)
if res.isErr():
foreignThreadGc:
let msg = "libwaku error: handleRes fireSyncRes error: " & $res.error
request[].callback(
RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
)
return
foreignThreadGc:
var msg: cstring = ""
when T is string:
msg = res.get().cstring()
request[].callback(
RET_OK, unsafeAddr msg[0], cast[csize_t](len(msg)), request[].userData
)
return
proc process*(
T: type WakuThreadRequest, request: ptr WakuThreadRequest, waku: ptr Waku
) {.async.} =
let retFut =
case request[].reqType
of LIFECYCLE:
cast[ptr NodeLifecycleRequest](request[].reqContent).process(waku)
of PEER_MANAGER:
cast[ptr PeerManagementRequest](request[].reqContent).process(waku[])
of PING:
cast[ptr PingRequest](request[].reqContent).process(waku)
of RELAY:
cast[ptr RelayRequest](request[].reqContent).process(waku)
of STORE:
cast[ptr StoreRequest](request[].reqContent).process(waku)
of DEBUG:
cast[ptr DebugNodeRequest](request[].reqContent).process(waku[])
of DISCOVERY:
cast[ptr DiscoveryRequest](request[].reqContent).process(waku)
of LIGHTPUSH:
cast[ptr LightpushRequest](request[].reqContent).process(waku)
of FILTER:
cast[ptr FilterRequest](request[].reqContent).process(waku)
handleRes(await retFut, request)
proc `$`*(self: WakuThreadRequest): string =
return $self.reqType
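`handleRes` above delivers every response through the stored `WakuCallBack` as a status code plus a byte buffer and its length; the exact callback type is defined in `ffi_types`. The sketch below only mirrors that calling convention — the names are illustrative, and the buffer is copied because it is not guaranteed to outlive the call:

```nim
# Illustrative callback: ret is RET_OK/RET_ERR, msg points at len bytes of
# response data valid only for the duration of the call, userData is opaque.
proc exampleCallback(ret: cint, msg: pointer, len: csize_t, userData: pointer) {.cdecl.} =
  var copied = newString(int(len))
  if len > 0:
    copyMem(addr copied[0], msg, int(len))
  echo "callback ret=", ret, ", msg=", copied
```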

View File

@ -2,14 +2,51 @@
# Install Anvil
if ! command -v anvil &> /dev/null; then
REQUIRED_FOUNDRY_VERSION="$1"
if command -v anvil &> /dev/null; then
# Foundry is already installed; check the current version.
CURRENT_FOUNDRY_VERSION=$(anvil --version 2>/dev/null | awk '{print $2}')
if [ -n "$CURRENT_FOUNDRY_VERSION" ]; then
# Compare CURRENT_FOUNDRY_VERSION < REQUIRED_FOUNDRY_VERSION using sort -V
lower_version=$(printf '%s\n%s\n' "$CURRENT_FOUNDRY_VERSION" "$REQUIRED_FOUNDRY_VERSION" | sort -V | head -n1)
if [ "$lower_version" != "$REQUIRED_FOUNDRY_VERSION" ]; then
echo "Anvil is already installed with version $CURRENT_FOUNDRY_VERSION, which is older than the required $REQUIRED_FOUNDRY_VERSION. Please update Foundry manually if needed."
fi
fi
else
BASE_DIR="${XDG_CONFIG_HOME:-$HOME}"
FOUNDRY_DIR="${FOUNDRY_DIR:-"$BASE_DIR/.foundry"}"
FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin"
echo "Installing Foundry..."
curl -L https://foundry.paradigm.xyz | bash
# foundryup is installed into FOUNDRY_BIN_DIR
echo "foundryup_path: $FOUNDRY_BIN_DIR"
# run foundryup
$FOUNDRY_BIN_DIR/foundryup
# Add Foundry to PATH for this script session
export PATH="$FOUNDRY_BIN_DIR:$PATH"
# Verify foundryup is available
if ! command -v foundryup >/dev/null 2>&1; then
echo "Error: foundryup installation failed or not found in $FOUNDRY_BIN_DIR"
exit 1
fi
# Run foundryup to install the required version
if [ -n "$REQUIRED_FOUNDRY_VERSION" ]; then
echo "Installing Foundry tools version $REQUIRED_FOUNDRY_VERSION..."
foundryup --install "$REQUIRED_FOUNDRY_VERSION"
else
echo "Installing latest Foundry tools..."
foundryup
fi
# Verify anvil was installed
if ! command -v anvil >/dev/null 2>&1; then
echo "Error: anvil installation failed"
exit 1
fi
echo "Anvil successfully installed: $(anvil --version)"
fi
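The script's version check relies on `sort -V | head -n1` to find the lower of two dotted version strings. The same comparison, sketched in Nim (the repository's main language) purely for illustration; pre-release suffixes are not handled:

```nim
import std/[strutils, sequtils]

# Illustrative component-wise comparison of dotted version strings,
# mirroring the `sort -V`-based check in the script above.
proc versionLess(a, b: string): bool =
  let pa = a.split('.').mapIt(parseInt(it))
  let pb = b.split('.').mapIt(parseInt(it))
  for i in 0 ..< max(pa.len, pb.len):
    let x = if i < pa.len: pa[i] else: 0
    let y = if i < pb.len: pb[i] else: 0
    if x != y:
      return x < y
  return false

assert versionLess("0.2.10", "1.0.0")
assert not versionLess("1.0.0", "0.9.9")
```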

View File

@ -1,8 +1,37 @@
#!/usr/bin/env bash
# Install pnpm
if ! command -v pnpm &> /dev/null; then
echo "pnpm is not installed, installing it now..."
npm i pnpm --global
REQUIRED_PNPM_VERSION="$1"
if command -v pnpm &> /dev/null; then
# pnpm is already installed; check the current version.
CURRENT_PNPM_VERSION=$(pnpm --version 2>/dev/null)
if [ -n "$CURRENT_PNPM_VERSION" ]; then
# Compare CURRENT_PNPM_VERSION < REQUIRED_PNPM_VERSION using sort -V
lower_version=$(printf '%s\n%s\n' "$CURRENT_PNPM_VERSION" "$REQUIRED_PNPM_VERSION" | sort -V | head -n1)
if [ "$lower_version" != "$REQUIRED_PNPM_VERSION" ]; then
echo "pnpm is already installed with version $CURRENT_PNPM_VERSION, which is older than the required $REQUIRED_PNPM_VERSION. Please update pnpm manually if needed."
fi
fi
else
# Install pnpm using npm
if [ -n "$REQUIRED_PNPM_VERSION" ]; then
echo "Installing pnpm version $REQUIRED_PNPM_VERSION..."
npm install -g pnpm@$REQUIRED_PNPM_VERSION
else
echo "Installing latest pnpm..."
npm install -g pnpm
fi
# Verify pnpm was installed
if ! command -v pnpm >/dev/null 2>&1; then
echo "Error: pnpm installation failed"
exit 1
fi
echo "pnpm successfully installed: $(pnpm --version)"
fi

View File

@ -1,7 +1,9 @@
#!/usr/bin/env bash
# Install Anvil
./scripts/install_anvil.sh
FOUNDRY_VERSION="$1"
./scripts/install_anvil.sh "$FOUNDRY_VERSION"
#Install pnpm
./scripts/install_pnpm.sh
# Install pnpm
PNPM_VERSION="$2"
./scripts/install_pnpm.sh "$PNPM_VERSION"

View File

@ -6,6 +6,10 @@ import std/strutils
import waku/common/broker/request_broker
## ---------------------------------------------------------------------------
## Async-mode brokers + tests
## ---------------------------------------------------------------------------
RequestBroker:
type SimpleResponse = object
value*: string
@ -31,11 +35,14 @@ RequestBroker:
suffix: string
): Future[Result[DualResponse, string]] {.async.}
RequestBroker:
RequestBroker(async):
type ImplicitResponse = ref object
note*: string
suite "RequestBroker macro":
static:
doAssert typeof(SimpleResponse.request()) is Future[Result[SimpleResponse, string]]
suite "RequestBroker macro (async mode)":
test "serves zero-argument providers":
check SimpleResponse
.setProvider(
@ -52,7 +59,7 @@ suite "RequestBroker macro":
test "zero-argument request errors when unset":
let res = waitFor SimpleResponse.request()
check res.isErr
check res.isErr()
check res.error.contains("no zero-arg provider")
test "serves input-based providers":
@ -78,7 +85,6 @@ suite "RequestBroker macro":
.setProvider(
proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} =
raise newException(ValueError, "simulated failure")
ok(KeyedResponse(key: key, payload: ""))
)
.isOk()
@ -90,7 +96,7 @@ suite "RequestBroker macro":
test "input request errors when unset":
let res = waitFor KeyedResponse.request("foo", 2)
check res.isErr
check res.isErr()
check res.error.contains("input signature")
test "supports both provider types simultaneously":
@ -109,11 +115,11 @@ suite "RequestBroker macro":
.isOk()
let noInput = waitFor DualResponse.request()
check noInput.isOk
check noInput.isOk()
check noInput.value.note == "base"
let withInput = waitFor DualResponse.request("-extra")
check withInput.isOk
check withInput.isOk()
check withInput.value.note == "base-extra"
check withInput.value.count == 6
@ -129,7 +135,7 @@ suite "RequestBroker macro":
DualResponse.clearProvider()
let res = waitFor DualResponse.request()
check res.isErr
check res.isErr()
test "implicit zero-argument provider works by default":
check ImplicitResponse
@ -140,14 +146,14 @@ suite "RequestBroker macro":
.isOk()
let res = waitFor ImplicitResponse.request()
check res.isOk
check res.isOk()
ImplicitResponse.clearProvider()
check res.value.note == "auto"
test "implicit zero-argument request errors when unset":
let res = waitFor ImplicitResponse.request()
check res.isErr
check res.isErr()
check res.error.contains("no zero-arg provider")
test "no provider override":
@ -171,7 +177,7 @@ suite "RequestBroker macro":
check DualResponse.setProvider(overrideProc).isErr()
let noInput = waitFor DualResponse.request()
check noInput.isOk
check noInput.isOk()
check noInput.value.note == "base"
let stillResponse = waitFor DualResponse.request(" still works")
@ -191,8 +197,306 @@ suite "RequestBroker macro":
check DualResponse.setProvider(overrideProc).isOk()
let nowSuccWithOverride = waitFor DualResponse.request()
check nowSuccWithOverride.isOk
check nowSuccWithOverride.isOk()
check nowSuccWithOverride.value.note == "something else"
check nowSuccWithOverride.value.count == 1
DualResponse.clearProvider()
## ---------------------------------------------------------------------------
## Sync-mode brokers + tests
## ---------------------------------------------------------------------------
RequestBroker(sync):
type SimpleResponseSync = object
value*: string
proc signatureFetch*(): Result[SimpleResponseSync, string]
RequestBroker(sync):
type KeyedResponseSync = object
key*: string
payload*: string
proc signatureFetchWithKey*(
key: string, subKey: int
): Result[KeyedResponseSync, string]
RequestBroker(sync):
type DualResponseSync = object
note*: string
count*: int
proc signatureNoInput*(): Result[DualResponseSync, string]
proc signatureWithInput*(suffix: string): Result[DualResponseSync, string]
RequestBroker(sync):
type ImplicitResponseSync = ref object
note*: string
static:
doAssert typeof(SimpleResponseSync.request()) is Result[SimpleResponseSync, string]
doAssert not (
typeof(SimpleResponseSync.request()) is Future[Result[SimpleResponseSync, string]]
)
doAssert typeof(KeyedResponseSync.request("topic", 1)) is
Result[KeyedResponseSync, string]
suite "RequestBroker macro (sync mode)":
test "serves zero-argument providers (sync)":
check SimpleResponseSync
.setProvider(
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "hi"))
)
.isOk()
let res = SimpleResponseSync.request()
check res.isOk()
check res.value.value == "hi"
SimpleResponseSync.clearProvider()
test "zero-argument request errors when unset (sync)":
let res = SimpleResponseSync.request()
check res.isErr()
check res.error.contains("no zero-arg provider")
test "serves input-based providers (sync)":
var seen: seq[string] = @[]
check KeyedResponseSync
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
seen.add(key)
ok(KeyedResponseSync(key: key, payload: key & "-payload+" & $subKey))
)
.isOk()
let res = KeyedResponseSync.request("topic", 1)
check res.isOk()
check res.value.key == "topic"
check res.value.payload == "topic-payload+1"
check seen == @["topic"]
KeyedResponseSync.clearProvider()
test "catches provider exception (sync)":
check KeyedResponseSync
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
let res = KeyedResponseSync.request("neglected", 11)
check res.isErr()
check res.error.contains("simulated failure")
KeyedResponseSync.clearProvider()
test "input request errors when unset (sync)":
let res = KeyedResponseSync.request("foo", 2)
check res.isErr()
check res.error.contains("input signature")
test "supports both provider types simultaneously (sync)":
check DualResponseSync
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base", count: 1))
)
.isOk()
check DualResponseSync
.setProvider(
proc(suffix: string): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base" & suffix, count: suffix.len))
)
.isOk()
let noInput = DualResponseSync.request()
check noInput.isOk()
check noInput.value.note == "base"
let withInput = DualResponseSync.request("-extra")
check withInput.isOk()
check withInput.value.note == "base-extra"
check withInput.value.count == 6
DualResponseSync.clearProvider()
test "clearProvider resets both entries (sync)":
check DualResponseSync
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "temp", count: 0))
)
.isOk()
DualResponseSync.clearProvider()
let res = DualResponseSync.request()
check res.isErr()
test "implicit zero-argument provider works by default (sync)":
check ImplicitResponseSync
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
ok(ImplicitResponseSync(note: "auto"))
)
.isOk()
let res = ImplicitResponseSync.request()
check res.isOk()
ImplicitResponseSync.clearProvider()
check res.value.note == "auto"
test "implicit zero-argument request errors when unset (sync)":
let res = ImplicitResponseSync.request()
check res.isErr()
check res.error.contains("no zero-arg provider")
test "implicit zero-argument provider raises error (sync)":
check ImplicitResponseSync
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
let res = ImplicitResponseSync.request()
check res.isErr()
check res.error.contains("simulated failure")
ImplicitResponseSync.clearProvider()
## ---------------------------------------------------------------------------
## POD / external type brokers + tests (distinct/alias behavior)
## ---------------------------------------------------------------------------
type ExternalDefinedTypeAsync = object
label*: string
type ExternalDefinedTypeSync = object
label*: string
type ExternalDefinedTypeShared = object
label*: string
RequestBroker:
type PodResponse = int
proc signatureFetch*(): Future[Result[PodResponse, string]] {.async.}
RequestBroker:
type ExternalAliasedResponse = ExternalDefinedTypeAsync
proc signatureFetch*(): Future[Result[ExternalAliasedResponse, string]] {.async.}
RequestBroker(sync):
type ExternalAliasedResponseSync = ExternalDefinedTypeSync
proc signatureFetch*(): Result[ExternalAliasedResponseSync, string]
RequestBroker(sync):
type DistinctStringResponseA = distinct string
RequestBroker(sync):
type DistinctStringResponseB = distinct string
RequestBroker(sync):
type ExternalDistinctResponseA = distinct ExternalDefinedTypeShared
RequestBroker(sync):
type ExternalDistinctResponseB = distinct ExternalDefinedTypeShared
suite "RequestBroker macro (POD/external types)":
test "supports non-object response types (async)":
check PodResponse
.setProvider(
proc(): Future[Result[PodResponse, string]] {.async.} =
ok(PodResponse(123))
)
.isOk()
let res = waitFor PodResponse.request()
check res.isOk()
check int(res.value) == 123
PodResponse.clearProvider()
test "supports aliased external types (async)":
check ExternalAliasedResponse
.setProvider(
proc(): Future[Result[ExternalAliasedResponse, string]] {.async.} =
ok(ExternalAliasedResponse(ExternalDefinedTypeAsync(label: "ext")))
)
.isOk()
let res = waitFor ExternalAliasedResponse.request()
check res.isOk()
check ExternalDefinedTypeAsync(res.value).label == "ext"
ExternalAliasedResponse.clearProvider()
test "supports aliased external types (sync)":
check ExternalAliasedResponseSync
.setProvider(
proc(): Result[ExternalAliasedResponseSync, string] =
ok(ExternalAliasedResponseSync(ExternalDefinedTypeSync(label: "ext")))
)
.isOk()
let res = ExternalAliasedResponseSync.request()
check res.isOk()
check ExternalDefinedTypeSync(res.value).label == "ext"
ExternalAliasedResponseSync.clearProvider()
test "distinct response types avoid overload ambiguity (sync)":
check DistinctStringResponseA
.setProvider(
proc(): Result[DistinctStringResponseA, string] =
ok(DistinctStringResponseA("a"))
)
.isOk()
check DistinctStringResponseB
.setProvider(
proc(): Result[DistinctStringResponseB, string] =
ok(DistinctStringResponseB("b"))
)
.isOk()
check ExternalDistinctResponseA
.setProvider(
proc(): Result[ExternalDistinctResponseA, string] =
ok(ExternalDistinctResponseA(ExternalDefinedTypeShared(label: "ea")))
)
.isOk()
check ExternalDistinctResponseB
.setProvider(
proc(): Result[ExternalDistinctResponseB, string] =
ok(ExternalDistinctResponseB(ExternalDefinedTypeShared(label: "eb")))
)
.isOk()
let resA = DistinctStringResponseA.request()
let resB = DistinctStringResponseB.request()
check resA.isOk()
check resB.isOk()
check string(resA.value) == "a"
check string(resB.value) == "b"
let resEA = ExternalDistinctResponseA.request()
let resEB = ExternalDistinctResponseB.request()
check resEA.isOk()
check resEB.isOk()
check ExternalDefinedTypeShared(resEA.value).label == "ea"
check ExternalDefinedTypeShared(resEB.value).label == "eb"
DistinctStringResponseA.clearProvider()
DistinctStringResponseB.clearProvider()
ExternalDistinctResponseA.clearProvider()
ExternalDistinctResponseB.clearProvider()

View File

@ -70,53 +70,6 @@ suite "Waku rln relay":
info "the generated identity credential: ", idCredential
test "hash Nim Wrappers":
# create an RLN instance
let rlnInstance = createRLNInstanceWrapper()
require:
rlnInstance.isOk()
# prepare the input
let
msg = "Hello".toBytes()
hashInput = encodeLengthPrefix(msg)
hashInputBuffer = toBuffer(hashInput)
# prepare other inputs to the hash function
let outputBuffer = default(Buffer)
let hashSuccess = sha256(unsafeAddr hashInputBuffer, unsafeAddr outputBuffer, true)
require:
hashSuccess
let outputArr = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
check:
"1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" ==
outputArr.inHex()
let
hashOutput = cast[ptr array[32, byte]](outputBuffer.`ptr`)[]
hashOutputHex = hashOutput.toHex()
info "hash output", hashOutputHex
test "sha256 hash utils":
# create an RLN instance
let rlnInstance = createRLNInstanceWrapper()
require:
rlnInstance.isOk()
let rln = rlnInstance.get()
# prepare the input
let msg = "Hello".toBytes()
let hashRes = sha256(msg)
check:
hashRes.isOk()
"1e32b3ab545c07c8b4a7ab1ca4f46bc31e4fdc29ac3b240ef1d54b4017a26e4c" ==
hashRes.get().inHex()
test "poseidon hash utils":
# create an RLN instance
let rlnInstance = createRLNInstanceWrapper()

1
vendor/nim-ffi vendored Submodule

@ -0,0 +1 @@
Subproject commit d7a5492121aad190cf549436836e2fa42e34ff9b

View File

@ -30,7 +30,8 @@ requires "nim >= 2.2.4",
"regex",
"results",
"db_connector",
"minilru"
"minilru",
"ffi"
### Helper functions
proc buildModule(filePath, params = "", lang = "c"): bool =
@ -61,27 +62,21 @@ proc buildBinary(name: string, srcDir = "./", params = "", lang = "c") =
exec "nim " & lang & " --out:build/" & name & " --mm:refc " & extra_params & " " &
srcDir & name & ".nim"
proc buildLibrary(name: string, srcDir = "./", params = "", `type` = "static") =
proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static") =
if not dirExists "build":
mkDir "build"
# allow something like "nim nimbus --verbosity:0 --hints:off nimbus.nims"
var extra_params = params
for i in 2 ..< paramCount():
for i in 2 ..< (paramCount() - 1):
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & name &
".a --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
else:
let lib_name = (when defined(windows): toDll(name) else: name & ".so")
when defined(windows):
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
else:
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & name & ".nim"
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:libwaku --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & "libwaku.nim"
proc buildMobileAndroid(srcDir = ".", params = "") =
let cpu = getEnv("CPU")
@ -206,15 +201,194 @@ let chroniclesParams =
"--warning:UnusedImport:on " & "-d:chronicles_log_level=TRACE"
task libwakuStatic, "Build the cbindings waku node library":
let name = "libwaku"
buildLibrary name, "library/", chroniclesParams, "static"
let lib_name = paramStr(paramCount())
buildLibrary lib_name, "library/", chroniclesParams, "static"
task libwakuDynamic, "Build the cbindings waku node library":
let name = "libwaku"
buildLibrary name, "library/", chroniclesParams, "dynamic"
let lib_name = paramStr(paramCount())
buildLibrary lib_name, "library/", chroniclesParams, "dynamic"
### Mobile Android
task libWakuAndroid, "Build the mobile bindings for Android":
let srcDir = "./library"
let extraParams = "-d:chronicles_log_level=ERROR"
buildMobileAndroid srcDir, extraParams
### Mobile iOS
import std/sequtils
proc buildMobileIOS(srcDir = ".", params = "") =
echo "Building iOS libwaku library"
let iosArch = getEnv("IOS_ARCH")
let iosSdk = getEnv("IOS_SDK")
let sdkPath = getEnv("IOS_SDK_PATH")
if sdkPath.len == 0:
quit "Error: IOS_SDK_PATH not set. Set it to the path of the iOS SDK"
# Use SDK name in path to differentiate device vs simulator
let outDir = "build/ios/" & iosSdk & "-" & iosArch
if not dirExists outDir:
mkDir outDir
var extra_params = params
for i in 2 ..< paramCount():
extra_params &= " " & paramStr(i)
let cpu = if iosArch == "arm64": "arm64" else: "amd64"
# The output static library
let nimcacheDir = outDir & "/nimcache"
let objDir = outDir & "/obj"
let vendorObjDir = outDir & "/vendor_obj"
let aFile = outDir & "/libwaku.a"
if not dirExists objDir:
mkDir objDir
if not dirExists vendorObjDir:
mkDir vendorObjDir
let clangBase = "clang -arch " & iosArch & " -isysroot " & sdkPath &
" -mios-version-min=18.0 -fembed-bitcode -fPIC -O2"
# Generate C sources from Nim (no linking)
exec "nim c" &
" --nimcache:" & nimcacheDir &
" --os:ios --cpu:" & cpu &
" --compileOnly:on" &
" --noMain --mm:refc" &
" --threads:on --opt:size --header" &
" -d:metrics -d:discv5_protocol_id=d5waku" &
" --nimMainPrefix:libwaku --skipParentCfg:on" &
" --cc:clang" &
" " & extra_params &
" " & srcDir & "/libwaku.nim"
# Compile vendor C libraries for iOS
# --- BearSSL ---
echo "Compiling BearSSL for iOS..."
let bearSslSrcDir = "./vendor/nim-bearssl/bearssl/csources/src"
let bearSslIncDir = "./vendor/nim-bearssl/bearssl/csources/inc"
for path in walkDirRec(bearSslSrcDir):
if path.endsWith(".c"):
let relPath = path.replace(bearSslSrcDir & "/", "").replace("/", "_")
let baseName = relPath.changeFileExt("o")
let oFile = vendorObjDir / ("bearssl_" & baseName)
if not fileExists(oFile):
exec clangBase & " -I" & bearSslIncDir & " -I" & bearSslSrcDir & " -c " & path & " -o " & oFile
# --- secp256k1 ---
echo "Compiling secp256k1 for iOS..."
let secp256k1Dir = "./vendor/nim-secp256k1/vendor/secp256k1"
let secp256k1Flags = " -I" & secp256k1Dir & "/include" &
" -I" & secp256k1Dir & "/src" &
" -I" & secp256k1Dir &
" -DENABLE_MODULE_RECOVERY=1" &
" -DENABLE_MODULE_ECDH=1" &
" -DECMULT_WINDOW_SIZE=15" &
" -DECMULT_GEN_PREC_BITS=4"
# Main secp256k1 source
let secp256k1Obj = vendorObjDir / "secp256k1.o"
if not fileExists(secp256k1Obj):
exec clangBase & secp256k1Flags & " -c " & secp256k1Dir & "/src/secp256k1.c -o " & secp256k1Obj
# Precomputed tables (required for ecmult operations)
let secp256k1PreEcmultObj = vendorObjDir / "secp256k1_precomputed_ecmult.o"
if not fileExists(secp256k1PreEcmultObj):
exec clangBase & secp256k1Flags & " -c " & secp256k1Dir & "/src/precomputed_ecmult.c -o " & secp256k1PreEcmultObj
let secp256k1PreEcmultGenObj = vendorObjDir / "secp256k1_precomputed_ecmult_gen.o"
if not fileExists(secp256k1PreEcmultGenObj):
exec clangBase & secp256k1Flags & " -c " & secp256k1Dir & "/src/precomputed_ecmult_gen.c -o " & secp256k1PreEcmultGenObj
# --- miniupnpc ---
echo "Compiling miniupnpc for iOS..."
let miniupnpcSrcDir = "./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/src"
let miniupnpcIncDir = "./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/include"
let miniupnpcBuildDir = "./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/build"
let miniupnpcFiles = @[
"addr_is_reserved.c", "connecthostport.c", "igd_desc_parse.c",
"minisoap.c", "minissdpc.c", "miniupnpc.c", "miniwget.c",
"minixml.c", "portlistingparse.c", "receivedata.c", "upnpcommands.c",
"upnpdev.c", "upnperrors.c", "upnpreplyparse.c"
]
for fileName in miniupnpcFiles:
let srcPath = miniupnpcSrcDir / fileName
let oFile = vendorObjDir / ("miniupnpc_" & fileName.changeFileExt("o"))
if fileExists(srcPath) and not fileExists(oFile):
exec clangBase &
" -I" & miniupnpcIncDir &
" -I" & miniupnpcSrcDir &
" -I" & miniupnpcBuildDir &
" -DMINIUPNPC_SET_SOCKET_TIMEOUT" &
" -D_BSD_SOURCE -D_DEFAULT_SOURCE" &
" -c " & srcPath & " -o " & oFile
# --- libnatpmp ---
echo "Compiling libnatpmp for iOS..."
let natpmpSrcDir = "./vendor/nim-nat-traversal/vendor/libnatpmp-upstream"
# Only compile natpmp.c - getgateway.c uses net/route.h which is not available on iOS
let natpmpObj = vendorObjDir / "natpmp_natpmp.o"
if not fileExists(natpmpObj):
exec clangBase &
" -I" & natpmpSrcDir &
" -DENABLE_STRNATPMPERR" &
" -c " & natpmpSrcDir & "/natpmp.c -o " & natpmpObj
# Use iOS-specific stub for getgateway
let getgatewayStubSrc = "./library/ios_natpmp_stubs.c"
let getgatewayStubObj = vendorObjDir / "natpmp_getgateway_stub.o"
if fileExists(getgatewayStubSrc) and not fileExists(getgatewayStubObj):
exec clangBase & " -c " & getgatewayStubSrc & " -o " & getgatewayStubObj
# --- BearSSL stubs (for tools functions not in main library) ---
echo "Compiling BearSSL stubs for iOS..."
let bearSslStubsSrc = "./library/ios_bearssl_stubs.c"
let bearSslStubsObj = vendorObjDir / "bearssl_stubs.o"
if fileExists(bearSslStubsSrc) and not fileExists(bearSslStubsObj):
exec clangBase & " -c " & bearSslStubsSrc & " -o " & bearSslStubsObj
# Compile all Nim-generated C files to object files
echo "Compiling Nim-generated C files for iOS..."
var cFiles: seq[string] = @[]
for kind, path in walkDir(nimcacheDir):
if kind == pcFile and path.endsWith(".c"):
cFiles.add(path)
for cFile in cFiles:
let baseName = extractFilename(cFile).changeFileExt("o")
let oFile = objDir / baseName
exec clangBase &
" -DENABLE_STRNATPMPERR" &
" -I./vendor/nimbus-build-system/vendor/Nim/lib/" &
" -I./vendor/nim-bearssl/bearssl/csources/inc/" &
" -I./vendor/nim-bearssl/bearssl/csources/tools/" &
" -I./vendor/nim-bearssl/bearssl/abi/" &
" -I./vendor/nim-secp256k1/vendor/secp256k1/include/" &
" -I./vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc/include/" &
" -I./vendor/nim-nat-traversal/vendor/libnatpmp-upstream/" &
" -I" & nimcacheDir &
" -c " & cFile &
" -o " & oFile
# Create static library from all object files
echo "Creating static library..."
var objFiles: seq[string] = @[]
for kind, path in walkDir(objDir):
if kind == pcFile and path.endsWith(".o"):
objFiles.add(path)
for kind, path in walkDir(vendorObjDir):
if kind == pcFile and path.endsWith(".o"):
objFiles.add(path)
exec "libtool -static -o " & aFile & " " & objFiles.join(" ")
echo "✔ iOS library created: " & aFile
task libWakuIOS, "Build the mobile bindings for iOS":
let srcDir = "./library"
let extraParams = "-d:chronicles_log_level=ERROR"
buildMobileIOS srcDir, extraParams

View File

@ -6,8 +6,15 @@
## Worth considering in a single-provider, many-requester scenario.
##
## Provides a declarative way to define an immutable value type together with a
## thread-local broker that can register an asynchronous provider, dispatch typed
## requests and clear provider.
## thread-local broker that can register an asynchronous or synchronous provider,
## dispatch typed requests and clear provider.
##
## Consider using the `sync` mode RequestBroker when you need to provide simple value(s)
## and no long-running async operation is involved.
## Typically it acts as an accessor for local state or a generic setting.
##
## The `async` mode is better suited when the requested data may involve a long-running
## IO operation or other slow action.
##
## Usage:
## Declare your desired request type inside a `RequestBroker` macro, add any number of fields.
@ -24,6 +31,56 @@
## proc signature*(arg1: ArgType, arg2: AnotherArgType): Future[Result[TypeName, string]]
##
## ```
##
## Sync mode (no `async` / `Future`) can be generated with:
##
## ```nim
## RequestBroker(sync):
## type TypeName = object
## field1*: FieldType
##
## proc signature*(): Result[TypeName, string]
## proc signature*(arg1: ArgType): Result[TypeName, string]
## ```
##
## Note: When the request type is declared as a native type / alias / externally-defined
## type (i.e. not an inline `object` / `ref object` definition), RequestBroker
## will wrap it in `distinct` automatically unless you already used `distinct`.
## This avoids overload ambiguity when multiple brokers share the same
## underlying base type (Nim overload resolution does not consider return type).
##
## This means that for non-object request types you typically:
## - construct values with an explicit cast/constructor, e.g. `MyType("x")`
## - unwrap with a cast when needed, e.g. `string(myVal)` or `BaseType(myVal)`
##
## Example (native response type):
## ```nim
## RequestBroker(sync):
## type MyCount = int # exported as: `distinct int`
##
## MyCount.setProvider(proc(): Result[MyCount, string] = ok(MyCount(42)))
## let res = MyCount.request()
## if res.isOk():
## let raw = int(res.get())
## ```
##
## Example (externally-defined type):
## ```nim
## type External = object
## label*: string
##
## RequestBroker:
## type MyExternal = External # exported as: `distinct External`
##
## MyExternal.setProvider(
## proc(): Future[Result[MyExternal, string]] {.async.} =
## ok(MyExternal(External(label: "hi")))
## )
## let res = await MyExternal.request()
## if res.isOk():
## let base = External(res.get())
## echo base.label
## ```
## The 'TypeName' object defines the requestable data (but it can also be seen as a request for an action with a return value).
## The 'signature' proc defines the provider(s) signature, which is enforced at compile time.
## One signature can be with no arguments, another with any number of arguments - where the input arguments are
@ -31,12 +88,12 @@
##
## After this, you can register a provider anywhere in your code with
## `TypeName.setProvider(...)`, which returns an error if a provider is already registered.
## Providers are async procs or lambdas that take no arguments and return a Future[Result[TypeName, string]].
## Providers are async procs/lambdas in default mode and sync procs in sync mode.
## Only one provider can be registered at a time per signature type (zero arg and/or multi arg).
##
## Requests can be made from anywhere with no direct dependency on the provider by
## calling `TypeName.request()` - with arguments respecting the signature(s).
## This will asynchronously call the registered provider and return a Future[Result[TypeName, string]].
## In async mode, this returns a Future[Result[TypeName, string]]. In sync mode, it returns Result[TypeName, string].
##
## Whenever you no longer want to process requests (or the object instance that provides the request goes out of scope),
## you can remove it from the broker with `TypeName.clearProvider()`.
@ -49,10 +106,10 @@
## text*: string
##
## ## Define the request and provider signature, that is enforced at compile time.
## proc signature*(): Future[Result[Greeting, string]]
## proc signature*(): Future[Result[Greeting, string]] {.async.}
##
## ## Also possible to define signature with arbitrary input arguments.
## proc signature*(lang: string): Future[Result[Greeting, string]]
## proc signature*(lang: string): Future[Result[Greeting, string]] {.async.}
##
## ...
## Greeting.setProvider(
@ -60,6 +117,23 @@
## ok(Greeting(text: "hello"))
## )
## let res = await Greeting.request()
##
##
## ...
## # using a native type as the response for a synchronous request.
## RequestBroker(sync):
## type NeedThatInfo = string
##
## ...
## NeedThatInfo.setProvider(
## proc(): Result[NeedThatInfo, string] =
## ok("this is the info you wanted")
## )
## let res = NeedThatInfo.request().valueOr:
## echo "not ok due to: " & error
## NeedThatInfo(":-(")
##
## echo string(res)
## ```
## If no `signature` proc is declared, a zero-argument form is generated
## automatically, so the caller only needs to provide the type definition.
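A short hedged illustration of that implicit zero-argument form, using a made-up `CachedPeerCount` type; the import path matches the one used by the test file in this diff:

```nim
import results
import waku/common/broker/request_broker

RequestBroker(sync):
  type CachedPeerCount = int # exported as `distinct int`

# No `signature` proc was declared, so the zero-argument form is generated.
discard CachedPeerCount.setProvider(
  proc(): Result[CachedPeerCount, string] =
    ok(CachedPeerCount(7))
)
let res = CachedPeerCount.request()
if res.isOk():
  echo int(res.get()) # -> 7
```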
@ -77,7 +151,11 @@ proc errorFuture[T](message: string): Future[Result[T, string]] {.inline.} =
fut.complete(err(Result[T, string], message))
fut
proc isReturnTypeValid(returnType, typeIdent: NimNode): bool =
type RequestBrokerMode = enum
rbAsync
rbSync
proc isAsyncReturnTypeValid(returnType, typeIdent: NimNode): bool =
## Accept Future[Result[TypeIdent, string]] as the contract.
if returnType.kind != nnkBracketExpr or returnType.len != 2:
return false
@ -92,6 +170,23 @@ proc isReturnTypeValid(returnType, typeIdent: NimNode): bool =
return false
inner[2].kind == nnkIdent and inner[2].eqIdent("string")
proc isSyncReturnTypeValid(returnType, typeIdent: NimNode): bool =
## Accept Result[TypeIdent, string] as the contract.
if returnType.kind != nnkBracketExpr or returnType.len != 3:
return false
if returnType[0].kind != nnkIdent or not returnType[0].eqIdent("Result"):
return false
if returnType[1].kind != nnkIdent or not returnType[1].eqIdent($typeIdent):
return false
returnType[2].kind == nnkIdent and returnType[2].eqIdent("string")
proc isReturnTypeValid(returnType, typeIdent: NimNode, mode: RequestBrokerMode): bool =
case mode
of rbAsync:
isAsyncReturnTypeValid(returnType, typeIdent)
of rbSync:
isSyncReturnTypeValid(returnType, typeIdent)
proc cloneParams(params: seq[NimNode]): seq[NimNode] =
## Deep copy parameter definitions so they can be inserted in multiple places.
result = @[]
@ -109,73 +204,122 @@ proc collectParamNames(params: seq[NimNode]): seq[NimNode] =
continue
result.add(ident($nameNode))
proc makeProcType(returnType: NimNode, params: seq[NimNode]): NimNode =
proc makeProcType(
returnType: NimNode, params: seq[NimNode], mode: RequestBrokerMode
): NimNode =
var formal = newTree(nnkFormalParams)
formal.add(returnType)
for param in params:
formal.add(param)
let pragmas = newTree(nnkPragma, ident("async"))
newTree(nnkProcTy, formal, pragmas)
case mode
of rbAsync:
let pragmas = newTree(nnkPragma, ident("async"))
newTree(nnkProcTy, formal, pragmas)
of rbSync:
let raisesPragma = newTree(
nnkExprColonExpr, ident("raises"), newTree(nnkBracket, ident("CatchableError"))
)
let pragmas = newTree(nnkPragma, raisesPragma, ident("gcsafe"))
newTree(nnkProcTy, formal, pragmas)
macro RequestBroker*(body: untyped): untyped =
proc parseMode(modeNode: NimNode): RequestBrokerMode =
## Parses the mode selector for the 2-argument macro overload.
## Supported spellings: `sync` / `async` (case-insensitive).
let raw = ($modeNode).strip().toLowerAscii()
case raw
of "sync":
rbSync
of "async":
rbAsync
else:
error("RequestBroker mode must be `sync` or `async` (default is async)", modeNode)
proc ensureDistinctType(rhs: NimNode): NimNode =
## For PODs / aliases / externally-defined types, wrap in `distinct` unless
## it's already distinct.
if rhs.kind == nnkDistinctTy:
return copyNimTree(rhs)
newTree(nnkDistinctTy, copyNimTree(rhs))
proc generateRequestBroker(body: NimNode, mode: RequestBrokerMode): NimNode =
when defined(requestBrokerDebug):
echo body.treeRepr
echo "RequestBroker mode: ", $mode
var typeIdent: NimNode = nil
var objectDef: NimNode = nil
var isRefObject = false
for stmt in body:
if stmt.kind == nnkTypeSection:
for def in stmt:
if def.kind != nnkTypeDef:
continue
if not typeIdent.isNil():
error("Only one type may be declared inside RequestBroker", def)
typeIdent = baseTypeIdent(def[0])
let rhs = def[2]
var objectType: NimNode
## Support inline object types (fields are auto-exported)
## AND non-object types / aliases (e.g. `string`, `int`, `OtherType`).
case rhs.kind
of nnkObjectTy:
objectType = rhs
let recList = rhs[2]
if recList.kind != nnkRecList:
error("RequestBroker object must declare a standard field list", rhs)
var exportedRecList = newTree(nnkRecList)
for field in recList:
case field.kind
of nnkIdentDefs:
ensureFieldDef(field)
var cloned = copyNimTree(field)
for i in 0 ..< cloned.len - 2:
cloned[i] = exportIdentNode(cloned[i])
exportedRecList.add(cloned)
of nnkEmpty:
discard
else:
error(
"RequestBroker object definition only supports simple field declarations",
field,
)
objectDef = newTree(
nnkObjectTy, copyNimTree(rhs[0]), copyNimTree(rhs[1]), exportedRecList
)
of nnkRefTy:
isRefObject = true
if rhs.len != 1 or rhs[0].kind != nnkObjectTy:
error(
"RequestBroker ref object must wrap a concrete object definition", rhs
if rhs.len != 1:
error("RequestBroker ref type must have a single base", rhs)
if rhs[0].kind == nnkObjectTy:
let obj = rhs[0]
let recList = obj[2]
if recList.kind != nnkRecList:
error("RequestBroker object must declare a standard field list", obj)
var exportedRecList = newTree(nnkRecList)
for field in recList:
case field.kind
of nnkIdentDefs:
ensureFieldDef(field)
var cloned = copyNimTree(field)
for i in 0 ..< cloned.len - 2:
cloned[i] = exportIdentNode(cloned[i])
exportedRecList.add(cloned)
of nnkEmpty:
discard
else:
error(
"RequestBroker object definition only supports simple field declarations",
field,
)
let exportedObjectType = newTree(
nnkObjectTy, copyNimTree(obj[0]), copyNimTree(obj[1]), exportedRecList
)
objectType = rhs[0]
else:
continue
if not typeIdent.isNil():
error("Only one object type may be declared inside RequestBroker", def)
typeIdent = baseTypeIdent(def[0])
let recList = objectType[2]
if recList.kind != nnkRecList:
error("RequestBroker object must declare a standard field list", objectType)
var exportedRecList = newTree(nnkRecList)
for field in recList:
case field.kind
of nnkIdentDefs:
ensureFieldDef(field)
var cloned = copyNimTree(field)
for i in 0 ..< cloned.len - 2:
cloned[i] = exportIdentNode(cloned[i])
exportedRecList.add(cloned)
of nnkEmpty:
discard
objectDef = newTree(nnkRefTy, exportedObjectType)
else:
error(
"RequestBroker object definition only supports simple field declarations",
field,
)
let exportedObjectType = newTree(
nnkObjectTy,
copyNimTree(objectType[0]),
copyNimTree(objectType[1]),
exportedRecList,
)
if isRefObject:
objectDef = newTree(nnkRefTy, exportedObjectType)
## `ref SomeType` (SomeType can be defined elsewhere)
objectDef = ensureDistinctType(rhs)
else:
objectDef = exportedObjectType
## Non-object type / alias (e.g. `string`, `int`, `SomeExternalType`).
objectDef = ensureDistinctType(rhs)
if typeIdent.isNil():
error("RequestBroker body must declare exactly one object type", body)
error("RequestBroker body must declare exactly one type", body)
when defined(requestBrokerDebug):
echo "RequestBroker generating type: ", $typeIdent
@ -183,7 +327,6 @@ macro RequestBroker*(body: untyped): untyped =
let exportedTypeIdent = postfix(copyNimTree(typeIdent), "*")
let typeDisplayName = sanitizeIdentName(typeIdent)
let typeNameLit = newLit(typeDisplayName)
let isRefObjectLit = newLit(isRefObject)
var zeroArgSig: NimNode = nil
var zeroArgProviderName: NimNode = nil
var zeroArgFieldName: NimNode = nil
@ -211,10 +354,14 @@ macro RequestBroker*(body: untyped): untyped =
if params.len == 0:
error("Signature must declare a return type", stmt)
let returnType = params[0]
if not isReturnTypeValid(returnType, typeIdent):
error(
"Signature must return Future[Result[`" & $typeIdent & "`, string]]", stmt
)
if not isReturnTypeValid(returnType, typeIdent, mode):
case mode
of rbAsync:
error(
"Signature must return Future[Result[`" & $typeIdent & "`, string]]", stmt
)
of rbSync:
error("Signature must return Result[`" & $typeIdent & "`, string]", stmt)
let paramCount = params.len - 1
if paramCount == 0:
if zeroArgSig != nil:
@ -258,14 +405,20 @@ macro RequestBroker*(body: untyped): untyped =
var typeSection = newTree(nnkTypeSection)
typeSection.add(newTree(nnkTypeDef, exportedTypeIdent, newEmptyNode(), objectDef))
let returnType = quote:
Future[Result[`typeIdent`, string]]
let returnType =
case mode
of rbAsync:
quote:
Future[Result[`typeIdent`, string]]
of rbSync:
quote:
Result[`typeIdent`, string]
if not zeroArgSig.isNil():
let procType = makeProcType(returnType, @[])
let procType = makeProcType(returnType, @[], mode)
typeSection.add(newTree(nnkTypeDef, zeroArgProviderName, newEmptyNode(), procType))
if not argSig.isNil():
let procType = makeProcType(returnType, cloneParams(argParams))
let procType = makeProcType(returnType, cloneParams(argParams), mode)
typeSection.add(newTree(nnkTypeDef, argProviderName, newEmptyNode(), procType))
var brokerRecList = newTree(nnkRecList)
@ -316,33 +469,69 @@ macro RequestBroker*(body: untyped): untyped =
quote do:
`accessProcIdent`().`zeroArgFieldName` = nil
)
result.add(
quote do:
proc request*(
_: typedesc[`typeIdent`]
): Future[Result[`typeIdent`, string]] {.async: (raises: []).} =
let provider = `accessProcIdent`().`zeroArgFieldName`
if provider.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered"
)
let catchedRes = catch:
await provider()
case mode
of rbAsync:
result.add(
quote do:
proc request*(
_: typedesc[`typeIdent`]
): Future[Result[`typeIdent`, string]] {.async: (raises: []).} =
let provider = `accessProcIdent`().`zeroArgFieldName`
if provider.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered"
)
let catchedRes = catch:
await provider()
if catchedRes.isErr():
return err("Request failed:" & catchedRes.error.msg)
if catchedRes.isErr():
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " &
catchedRes.error.msg
)
let providerRes = catchedRes.get()
when `isRefObjectLit`:
let providerRes = catchedRes.get()
if providerRes.isOk():
let resultValue = providerRes.get()
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
)
of rbSync:
result.add(
quote do:
proc request*(
_: typedesc[`typeIdent`]
): Result[`typeIdent`, string] {.gcsafe, raises: [].} =
let provider = `accessProcIdent`().`zeroArgFieldName`
if provider.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): no zero-arg provider registered"
)
var providerRes: Result[`typeIdent`, string]
try:
providerRes = provider()
except CatchableError as e:
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " &
e.msg
)
if providerRes.isOk():
let resultValue = providerRes.get()
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
if not argSig.isNil():
result.add(
quote do:
@ -363,10 +552,7 @@ macro RequestBroker*(body: untyped): untyped =
let argNameIdents = collectParamNames(requestParamDefs)
let providerSym = genSym(nskLet, "provider")
var formalParams = newTree(nnkFormalParams)
formalParams.add(
quote do:
Future[Result[`typeIdent`, string]]
)
formalParams.add(copyNimTree(returnType))
formalParams.add(
newTree(
nnkIdentDefs,
@ -378,8 +564,14 @@ macro RequestBroker*(body: untyped): untyped =
for paramDef in requestParamDefs:
formalParams.add(paramDef)
let requestPragmas = quote:
{.async: (raises: []), gcsafe.}
let requestPragmas =
case mode
of rbAsync:
quote:
{.async: (raises: []).}
of rbSync:
quote:
{.gcsafe, raises: [].}
var providerCall = newCall(providerSym)
for argName in argNameIdents:
providerCall.add(argName)
@ -396,23 +588,49 @@ macro RequestBroker*(body: untyped): untyped =
"): no provider registered for input signature"
)
)
requestBody.add(
quote do:
let catchedRes = catch:
await `providerCall`
if catchedRes.isErr():
return err("Request failed:" & catchedRes.error.msg)
let providerRes = catchedRes.get()
when `isRefObjectLit`:
case mode
of rbAsync:
requestBody.add(
quote do:
let catchedRes = catch:
await `providerCall`
if catchedRes.isErr():
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " &
catchedRes.error.msg
)
let providerRes = catchedRes.get()
if providerRes.isOk():
let resultValue = providerRes.get()
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
of rbSync:
requestBody.add(
quote do:
var providerRes: Result[`typeIdent`, string]
try:
providerRes = `providerCall`
except CatchableError as e:
return err(
"RequestBroker(" & `typeNameLit` & "): provider threw exception: " & e.msg
)
if providerRes.isOk():
let resultValue = providerRes.get()
when compiles(resultValue.isNil()):
if resultValue.isNil():
return err(
"RequestBroker(" & `typeNameLit` & "): provider returned nil result"
)
return providerRes
)
# requestBody.add(providerCall)
result.add(
newTree(
@ -436,3 +654,17 @@ macro RequestBroker*(body: untyped): untyped =
when defined(requestBrokerDebug):
echo result.repr
return result
macro RequestBroker*(body: untyped): untyped =
## Default (async) mode.
generateRequestBroker(body, rbAsync)
macro RequestBroker*(mode: untyped, body: untyped): untyped =
## Explicit mode selector.
## Example:
## RequestBroker(sync):
## type Foo = object
## proc signature*(): Result[Foo, string]
generateRequestBroker(body, parseMode(mode))

View File

@ -13,7 +13,6 @@ import
libp2p/services/autorelayservice,
libp2p/services/hpservice,
libp2p/peerid,
libp2p/discovery/discoverymngr,
libp2p/discovery/rendezvousinterface,
eth/keys,
eth/p2p/discoveryv5/enr,
@ -63,7 +62,6 @@ type Waku* = ref object
dynamicBootstrapNodes*: seq[RemotePeerInfo]
dnsRetryLoopHandle: Future[void]
networkConnLoopHandle: Future[void]
discoveryMngr: DiscoveryManager
node*: WakuNode

View File

@ -379,7 +379,7 @@ method generateProof*(
let x = keccak.keccak256.digest(data)
let extNullifier = poseidon(@[@(epoch), @(rlnIdentifier)]).valueOr:
let extNullifier = generateExternalNullifier(epoch, rlnIdentifier).valueOr:
return err("Failed to compute external nullifier: " & error)
let witness = RLNWitnessInput(
@ -457,10 +457,9 @@ method verifyProof*(
var normalizedProof = proof
normalizedProof.externalNullifier = poseidon(
@[@(proof.epoch), @(proof.rlnIdentifier)]
).valueOr:
let externalNullifier = generateExternalNullifier(proof.epoch, proof.rlnIdentifier).valueOr:
return err("Failed to compute external nullifier: " & error)
normalizedProof.externalNullifier = externalNullifier
let proofBytes = serialize(normalizedProof, input)
let proofBuffer = proofBytes.toBuffer()

View File

@ -6,7 +6,8 @@ import
stew/[arrayops, byteutils, endians2],
stint,
results,
std/[sequtils, strutils, tables]
std/[sequtils, strutils, tables],
nimcrypto/keccak as keccak
import ./rln_interface, ../conversion_utils, ../protocol_types, ../protocol_metrics
import ../../waku_core, ../../waku_keystore
@ -119,24 +120,6 @@ proc createRLNInstance*(): RLNResult =
res = createRLNInstanceLocal()
return res
proc sha256*(data: openArray[byte]): RlnRelayResult[MerkleNode] =
## a thin layer on top of the Nim wrapper of the sha256 hasher
var lenPrefData = encodeLengthPrefix(data)
var
hashInputBuffer = lenPrefData.toBuffer()
outputBuffer: Buffer # will holds the hash output
trace "sha256 hash input buffer length", bufflen = hashInputBuffer.len
let hashSuccess = sha256(addr hashInputBuffer, addr outputBuffer, true)
# check whether the hash call is done successfully
if not hashSuccess:
return err("error in sha256 hash")
let output = cast[ptr MerkleNode](outputBuffer.`ptr`)[]
return ok(output)
proc poseidon*(data: seq[seq[byte]]): RlnRelayResult[array[32, byte]] =
## a thin layer on top of the Nim wrapper of the poseidon hasher
var inputBytes = serialize(data)
@ -180,9 +163,18 @@ proc toLeaves*(rateCommitments: seq[RateCommitment]): RlnRelayResult[seq[seq[byt
leaves.add(leaf)
return ok(leaves)
proc generateExternalNullifier*(
epoch: Epoch, rlnIdentifier: RlnIdentifier
): RlnRelayResult[ExternalNullifier] =
let epochHash = keccak.keccak256.digest(@(epoch))
let rlnIdentifierHash = keccak.keccak256.digest(@(rlnIdentifier))
let externalNullifier = poseidon(@[@(epochHash), @(rlnIdentifierHash)]).valueOr:
return err("Failed to compute external nullifier: " & error)
return ok(externalNullifier)
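For illustration, the pre-hashing step this new helper introduces (keccak256 over each 32-byte input before the poseidon combination) can be exercised on dummy values as sketched below; the constants are made up and only the keccak part is shown:

```nim
import nimcrypto/keccak

# Dummy 32-byte epoch and RLN identifier, keccak256-hashed the same way
# generateExternalNullifier does before handing both digests to poseidon.
var epoch, rlnIdentifier: array[32, byte]
epoch[31] = 1
rlnIdentifier[31] = 2
let epochHash = keccak256.digest(epoch).data         # array[32, byte]
let idHash = keccak256.digest(rlnIdentifier).data
doAssert epochHash != idHash
# generateExternalNullifier then computes: poseidon(@[@(epochHash), @(idHash)])
```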
proc extractMetadata*(proof: RateLimitProof): RlnRelayResult[ProofMetadata] =
let externalNullifier = poseidon(@[@(proof.epoch), @(proof.rlnIdentifier)]).valueOr:
return err("could not construct the external nullifier")
let externalNullifier = generateExternalNullifier(proof.epoch, proof.rlnIdentifier).valueOr:
return err("Failed to compute external nullifier: " & error)
return ok(
ProofMetadata(
nullifier: proof.nullifier,