update mix simulation to use logos-core and lez

This commit is contained in:
Arseniy Klempner 2026-03-02 11:51:46 -06:00
parent 85a7bf3322
commit c921bef2b6
No known key found for this signature in database
GPG Key ID: 51653F18863BD24B
33 changed files with 1502 additions and 37 deletions

3
.gitmodules vendored
View File

@ -195,3 +195,6 @@
url = https://github.com/logos-messaging/nim-ffi/
ignore = untracked
branch = master
[submodule "vendor/mix-rln-spam-protection-plugin"]
path = vendor/mix-rln-spam-protection-plugin
url = https://github.com/adklempner/mix-rln-spam-protection-plugin.git

View File

@ -451,7 +451,9 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
error "failed to generate mix key pair", error = error
return
(await node.mountMix(conf.clusterId, mixPrivKey, conf.mixnodes)).isOkOr:
(await node.mountMix(conf.clusterId, mixPrivKey, conf.mixnodes,
enableSpamProtection = conf.enableSpamProtection,
userMessageLimit = conf.userMessageLimit)).isOkOr:
error "failed to mount waku mix protocol: ", error = $error
quit(QuitFailure)

View File

@ -1,4 +1,4 @@
import chronicles, chronos, std/strutils, regex
import chronicles, chronos, std/[strutils, options], regex
import
eth/keys,
@ -229,6 +229,17 @@ type
name: "websocket-secure-support"
.}: bool
enableSpamProtection* {.
desc: "Enable RLN-based spam protection for mix messages.",
defaultValue: false,
name: "enable-spam-protection"
.}: bool
userMessageLimit* {.
desc: "User message limit for RLN spam protection.",
name: "user-message-limit"
.}: Option[int]
## Kademlia Discovery config
kadBootstrapNodes* {.
desc:
@ -289,6 +300,12 @@ proc parseCmdArg*(T: type Option[uint], p: string): T =
except CatchableError:
raise newException(ValueError, "Invalid unsigned integer")
proc parseCmdArg*(T: type Option[int], p: string): T =
  ## Parses a command-line argument string into an ``Option[int]``.
  ## Raises ``ValueError`` when `p` is not a valid integer, matching the
  ## sibling ``Option[uint]`` parser above.
  try:
    result = some(parseInt(p))
  except CatchableError:
    raise newException(ValueError, "Invalid integer")
proc completeCmdArg*(T: type EthRpcUrl, val: string): seq[string] =
return @[]

View File

@ -1,6 +1,8 @@
import ffi
import std/locks
import chronicles
import waku/factory/waku
import waku/waku_mix/logos_core_client
declareLibrary("logosdelivery")
@ -31,3 +33,40 @@ proc logosdelivery_set_event_callback(
ctx[].eventCallback = cast[pointer](callback)
ctx[].eventUserData = userData
proc logosdelivery_init(): cint {.dynlib, exportc, cdecl.} =
  ## One-time library initialization for the C ABI.
  ## Returns RET_OK; lowers the log level to WARN when the logging shim
  ## is available in this build.
  initializeLibrary()
  when declared(setLogLevel):
    setLogLevel(LogLevel.WARN)
  return RET_OK

proc logosdelivery_set_rln_fetcher(
    ctx: ptr FFIContext[Waku], fetcher: RlnFetcherFunc, fetcherData: pointer
) {.dynlib, exportc, cdecl.} =
  ## Registers the C-side RLN fetcher callback used to pull roots/proofs.
  ## `fetcherData` is an opaque pointer handed back on every fetch call.
  ## A nil `fetcher` is rejected and leaves any previous registration intact.
  if fetcher.isNil:
    # Use the library's structured logger instead of echo so the message
    # reaches the configured log sink (chronicles is imported file-wide).
    error "nil fetcher in logosdelivery_set_rln_fetcher"
    return
  setRlnFetcher(fetcher, fetcherData)

proc logosdelivery_set_rln_config(
    ctx: ptr FFIContext[Waku], configAccountId: cstring, leafIndex: cint
): cint {.dynlib, exportc, cdecl.} =
  ## Stores the RLN membership configuration (config account id + merkle
  ## leaf index) for this node. Returns RET_ERR on a nil account id,
  ## RET_OK otherwise.
  if configAccountId.isNil:
    return RET_ERR
  setRlnConfig($configAccountId, leafIndex.int)
  return RET_OK

proc logosdelivery_push_roots(
    ctx: ptr FFIContext[Waku], rootsJson: cstring
) {.dynlib, exportc, cdecl.} =
  ## Forwards a JSON payload of valid merkle roots to the RLN client.
  ## A nil payload is silently ignored (best-effort push path).
  if rootsJson.isNil:
    return
  pushRoots($rootsJson)

proc logosdelivery_push_proof(
    ctx: ptr FFIContext[Waku], proofJson: cstring
) {.dynlib, exportc, cdecl.} =
  ## Forwards a JSON-encoded merkle proof to the RLN client.
  ## A nil payload is silently ignored (best-effort push path).
  if proofJson.isNil:
    return
  pushProof($proofJson)

View File

@ -77,22 +77,35 @@ extern "C"
FFICallBack callback,
void *userData);
// Retrieves the list of available node info IDs.
int logosdelivery_get_available_node_info_ids(void *ctx,
FFICallBack callback,
void *userData);
// Given a node info ID, retrieves the corresponding info.
int logosdelivery_get_node_info(void *ctx,
FFICallBack callback,
void *userData,
const char *nodeInfoId);
// Retrieves the list of available configurations.
int logosdelivery_get_available_configs(void *ctx,
FFICallBack callback,
void *userData);
int logosdelivery_init(void);
// RLN fetcher: C++ implements this, Nim calls it to get roots/proofs from RLN module.
// method: "get_valid_roots" or "get_merkle_proofs"
// params: JSON string with method-specific parameters
// callback + callbackData: Nim's callback to receive the result
// fetcherData: opaque pointer passed during registration (typically the C++ plugin instance)
typedef int (*RlnFetcherFunc)(const char *method, const char *params,
FFICallBack callback, void *callbackData, void *fetcherData);
// Register the RLN fetcher callback (called by C++ delivery module at startup).
void logosdelivery_set_rln_fetcher(void *ctx, RlnFetcherFunc fetcher, void *fetcherData);
// Set RLN configuration: config account ID and leaf index for this node.
int logosdelivery_set_rln_config(void *ctx, const char *configAccountId, int leafIndex);
#ifdef __cplusplus
}
#endif

View File

@ -11,6 +11,8 @@
"../vendor/nim-ffi"
--path:
"../"
--path:
"../vendor/mix-rln-spam-protection-plugin/src"
# Optimization and debugging
--opt:

4
simulations/mixnet/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
rln_keystore_*.json
credentials_manifest.json
config_account.txt
*.log

126
simulations/mixnet/build_setup.sh Executable file
View File

@ -0,0 +1,126 @@
#!/bin/bash
# Build and run RLN credential setup for the mix simulation.
#
# Prerequisites:
# - Sequencer running (bash dev.sh)
# - Programs deployed (source dev/env.sh && cargo run --bin run_setup)
# - Environment set up (source dev/env.sh)
#
# This script:
# 1. Builds register_member binary
# 2. Registers 7 members on-chain (one per node)
# 3. Generates keystores via setup_keystores.nim
set -euo pipefail

cd "$(dirname "$0")"
MIXNET_DIR=$(pwd)
cd ../..
ROOT_DIR=$(pwd)
# Default: two directories above this repo; overridable via environment.
RLN_PROJECT_DIR="${RLN_PROJECT_DIR:-$(cd "$ROOT_DIR/../.." && pwd)}"

echo "=== RLN Mix Simulation Setup ==="
echo " Mixnet dir: $MIXNET_DIR"
echo " RLN project: $RLN_PROJECT_DIR"
echo ""

# Peer IDs derived from nodekeys in config files
PEER_IDS=(
  "16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" # config.toml (service node)
  "16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF" # config1.toml (mix node 1)
  "16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA" # config2.toml (mix node 2)
  "16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f" # config3.toml (mix node 3)
  "16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu" # config4.toml (mix node 4)
  "16Uiu2HAm1QxSjNvNbsT2xtLjRGAsBLVztsJiTHr9a3EK96717hpj" # chat2mix client 1
  "16Uiu2HAmC9h26U1C83FJ5xpE32ghqya8CaZHX1Y7qpfHNnRABscN" # chat2mix client 2
)
# One per-epoch message-rate limit per peer above.
RATE_LIMITS=(100 100 100 100 100 100 100)

# Step 1: Build register_member
echo "[1/3] Building register_member..."
(cd "$RLN_PROJECT_DIR" && cargo build --release --bin register_member 2>&1 | tail -5)
REGISTER_BIN="$RLN_PROJECT_DIR/target/release/register_member"
if [ ! -f "$REGISTER_BIN" ]; then
  echo "FATAL: register_member binary not found at $REGISTER_BIN"
  exit 1
fi

# Step 2: Register members and build manifest
echo "[2/3] Registering ${#PEER_IDS[@]} members on-chain..."
MANIFEST_FILE="$MIXNET_DIR/credentials_manifest.json"
echo "[" > "$MANIFEST_FILE"
# All registrations share a single config account; the loop keeps the most
# recent value, which is persisted for logoscore after the loop.
CONFIG_ACCOUNT=""
for i in "${!PEER_IDS[@]}"; do
  PEER_ID="${PEER_IDS[$i]}"
  RATE_LIMIT="${RATE_LIMITS[$i]}"
  echo " Registering node $((i+1))/${#PEER_IDS[@]} ($PEER_ID)..."
  OUTPUT=$("$REGISTER_BIN" 2>&1)
  # `|| true` keeps a no-match grep from aborting the script under
  # `set -euo pipefail` before we can print the diagnostic below.
  CONFIG_ACCOUNT=$(echo "$OUTPUT" | grep "^CONFIG_ACCOUNT=" | cut -d= -f2 || true)
  LEAF_INDEX=$(echo "$OUTPUT" | grep "^LEAF_INDEX=" | cut -d= -f2 || true)
  IDENTITY_SECRET_HASH=$(echo "$OUTPUT" | grep "^IDENTITY_SECRET_HASH=" | cut -d= -f2 || true)
  if [ -z "$CONFIG_ACCOUNT" ] || [ -z "$LEAF_INDEX" ] || [ -z "$IDENTITY_SECRET_HASH" ]; then
    echo "FATAL: Failed to parse register_member output for node $((i+1))"
    echo "Output was: $OUTPUT"
    exit 1
  fi
  # Add comma separator for all but first entry
  if [ "$i" -gt 0 ]; then
    echo "," >> "$MANIFEST_FILE"
  fi
  cat >> "$MANIFEST_FILE" <<EOF
{
  "peerId": "$PEER_ID",
  "leafIndex": $LEAF_INDEX,
  "identitySecretHash": "$IDENTITY_SECRET_HASH",
  "rateLimit": $RATE_LIMIT,
  "configAccount": "$CONFIG_ACCOUNT"
}
EOF
  echo " Registered at leaf index $LEAF_INDEX"
done
echo "]" >> "$MANIFEST_FILE"

# Save CONFIG_ACCOUNT for logoscore (all nodes share the same config account).
# Fix: previously this re-parsed $OUTPUT — i.e. the LAST registration's
# output — through a no-op `head -1`; the loop variable is the same value
# and makes the intent explicit.
echo "$CONFIG_ACCOUNT" > "$MIXNET_DIR/config_account.txt"
echo ""
echo " Config account: $CONFIG_ACCOUNT"
echo " Manifest: $MANIFEST_FILE"

# Step 3: Generate keystores
echo "[3/3] Generating keystores..."
cd "$MIXNET_DIR"
source "$ROOT_DIR/env.sh"
SETUP_KS_BIN="/tmp/setup_keystores_$$"
nim c -d:release --mm:refc \
  --passL:"-L$ROOT_DIR/vendor/zerokit/target/release -lrln" \
  -o:"$SETUP_KS_BIN" \
  "$MIXNET_DIR/setup_keystores.nim" 2>&1 | tail -10
if [ ! -x "$SETUP_KS_BIN" ]; then
  echo "FATAL: Keystore generation failed"
  exit 1
fi
# Fix: the original ran the binary and then read $? — under `set -e` a
# failing run aborts before the check (and leaks the temp binary). Guard
# the invocation with `if !` so we always report and clean up.
if ! "$SETUP_KS_BIN" "$MANIFEST_FILE"; then
  rm -f "$SETUP_KS_BIN"
  echo "FATAL: Keystore generation failed"
  exit 1
fi
rm -f "$SETUP_KS_BIN"

# `|| true`: with zero keystores the unmatched glob makes ls fail, which
# pipefail would otherwise turn into a script abort.
KEYSTORE_COUNT=$(ls -1 rln_keystore_*.json 2>/dev/null | wc -l | tr -d ' ' || true)
echo ""
echo "=== Setup Complete ==="
echo " Registered: ${#PEER_IDS[@]} members"
echo " Keystores: $KEYSTORE_COUNT files"
echo " Config account: $(cat config_account.txt)"
echo ""
echo "Next steps:"
echo " 1. Start logoscore with RLN module + HTTP service"
echo " 2. Run mix nodes and chat clients"

View File

@ -1,6 +1,7 @@
log-level = "TRACE"
relay = true
mix = true
enable-spam-protection = true
filter = true
store = true
lightpush = true

View File

@ -1,6 +1,7 @@
log-level = "TRACE"
relay = true
mix = true
enable-spam-protection = true
filter = true
store = false
lightpush = true

View File

@ -1,6 +1,7 @@
log-level = "TRACE"
relay = true
mix = true
enable-spam-protection = true
filter = true
store = false
lightpush = true

View File

@ -1,6 +1,7 @@
log-level = "TRACE"
relay = true
mix = true
enable-spam-protection = true
filter = true
store = false
lightpush = true

View File

@ -1,6 +1,7 @@
log-level = "TRACE"
relay = true
mix = true
enable-spam-protection = true
filter = true
store = false
lightpush = true

View File

@ -1,2 +1,31 @@
../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE --kad-bootstrap-node="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"
#--mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" --mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" --mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" --mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f"
#!/usr/bin/env bash
# Chat client 1 — connects via lightpush to the mix network.
# Run this AFTER run_simulation.sh has all 5 nodes up.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DELIVERY_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
CHAT2MIX="$DELIVERY_DIR/build/chat2mix"
if [ ! -f "$CHAT2MIX" ]; then
echo "chat2mix not found. Build with: cd $DELIVERY_DIR && make chat2mix"
exit 1
fi
# Node 0 (the service node) doubles as the lightpush service node and the
# Kademlia bootstrap entry point.
NODE0_ADDR="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"
# --ports-shift keeps this client's listen ports clear of the 5 sim nodes.
# Each --mixnode is "<multiaddr>:<mix pubkey hex>"; the set must match the
# keys baked into run_simulation.sh.
"$CHAT2MIX" \
--ports-shift=200 \
--cluster-id=2 \
--num-shards-in-network=1 \
--shard=0 \
--servicenode="$NODE0_ADDR" \
--log-level=TRACE \
--nodekey="cb6fe589db0e5d5b48f7e82d33093e4d9d35456f4aaffc2322c473a173b2ac49" \
--kad-bootstrap-node="$NODE0_ADDR" \
--mixnode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o:9d09ce624f76e8f606265edb9cca2b7de9b41772a6d784bddaf92ffa8fba7d2c" \
--mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" \
--mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" \
--mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" \
--mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f" \
--fleet="none"

View File

@ -1,2 +1,31 @@
../../build/chat2mix --cluster-id=2 --num-shards-in-network=1 --shard=0 --servicenode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o" --log-level=TRACE
#--mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" --mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" --mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" --mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f"
#!/usr/bin/env bash
# Chat client 2 — connects via lightpush to the mix network.
# Run this AFTER run_simulation.sh has all 5 nodes up.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
DELIVERY_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
CHAT2MIX="$DELIVERY_DIR/build/chat2mix"
if [ ! -f "$CHAT2MIX" ]; then
echo "chat2mix not found. Build with: cd $DELIVERY_DIR && make chat2mix"
exit 1
fi
# Node 0 (the service node) doubles as the lightpush service node and the
# Kademlia bootstrap entry point.
NODE0_ADDR="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"
# --ports-shift=201 keeps this client clear of both the sim nodes and
# client 1 (shift 200). Each --mixnode is "<multiaddr>:<mix pubkey hex>".
"$CHAT2MIX" \
--ports-shift=201 \
--cluster-id=2 \
--num-shards-in-network=1 \
--shard=0 \
--servicenode="$NODE0_ADDR" \
--log-level=TRACE \
--nodekey="35eace7ccb246f20c487e05015ca77273d8ecaed0ed683de3d39bf4f69336feb" \
--kad-bootstrap-node="$NODE0_ADDR" \
--mixnode="/ip4/127.0.0.1/tcp/60001/p2p/16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o:9d09ce624f76e8f606265edb9cca2b7de9b41772a6d784bddaf92ffa8fba7d2c" \
--mixnode="/ip4/127.0.0.1/tcp/60002/p2p/16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF:9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a" \
--mixnode="/ip4/127.0.0.1/tcp/60003/p2p/16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA:275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c" \
--mixnode="/ip4/127.0.0.1/tcp/60004/p2p/16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f:e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18" \
--mixnode="/ip4/127.0.0.1/tcp/60005/p2p/16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu:8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f" \
--fleet="none"

View File

@ -0,0 +1,177 @@
#!/usr/bin/env bash
# E2E test: starts the 5-node mix simulation, launches two chat clients,
# sends messages in both directions, and verifies delivery.
# Exit code 0 = all tests passed, 1 = failure.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Configurable timeouts (seconds)
SIM_TIMEOUT=${E2E_SIM_TIMEOUT:-900}
DISCOVERY_WAIT=${E2E_DISCOVERY_WAIT:-60}
MSG_TIMEOUT=${E2E_MSG_TIMEOUT:-120}
# Test counters and process/log bookkeeping read by cleanup() and the report.
PASSED=0
FAILED=0
SIM_PID=""
CHAT1_PID=""
CHAT2_PID=""
SIM_LOG=$(mktemp)
CHAT1_LOG=$(mktemp)
CHAT2_LOG=$(mktemp)
# Named FIFOs feeding stdin of the two chat clients ($$ = this shell's PID).
PIPE1="/tmp/e2e_chat1_in_$$"
PIPE2="/tmp/e2e_chat2_in_$$"
# Best-effort teardown: close FIFO write ends first, then kill clients,
# simulation, and any stray logos_host processes. Runs on every exit path.
cleanup() {
echo ""
echo "=== Cleanup ==="
exec 3>&- 2>/dev/null || true
exec 4>&- 2>/dev/null || true
[ -n "$CHAT1_PID" ] && kill "$CHAT1_PID" 2>/dev/null || true
[ -n "$CHAT2_PID" ] && kill "$CHAT2_PID" 2>/dev/null || true
if [ -n "$SIM_PID" ]; then
kill "$SIM_PID" 2>/dev/null || true
wait "$SIM_PID" 2>/dev/null || true
fi
pkill -f logos_host 2>/dev/null || true
rm -f "$PIPE1" "$PIPE2" 2>/dev/null || true
echo "Done."
}
trap cleanup EXIT
echo "=== RLN Mix Network E2E Test ==="
echo ""
# ── Phase 1: Start simulation ──────────────────────────────────────
echo "[1/5] Starting simulation (this takes a few minutes)..."
bash "$SCRIPT_DIR/run_simulation.sh" > "$SIM_LOG" 2>&1 &
SIM_PID=$!
# Disable errexit — polling loops use grep/kill that may return non-zero
set +e
LAST_STEP=""
ELAPSED=0
# Poll the simulation log until the final "[7/7]" ready marker appears,
# echoing intermediate step lines and bailing if the sim process dies.
while [ $ELAPSED -lt $SIM_TIMEOUT ]; do
if grep -q '\[7/7\] Simulation running!' "$SIM_LOG" 2>/dev/null; then
echo " Simulation ready."
break
fi
# Show simulation progress as new [N/7] lines appear
STEP_LINE=$(grep -F '/7]' "$SIM_LOG" 2>/dev/null | tail -1)
if [ -n "$STEP_LINE" ] && [ "$STEP_LINE" != "$LAST_STEP" ]; then
echo " $STEP_LINE"
LAST_STEP="$STEP_LINE"
fi
if ! kill -0 "$SIM_PID" 2>/dev/null; then
echo "FATAL: Simulation exited prematurely."
tail -30 "$SIM_LOG"
exit 1
fi
sleep 2
ELAPSED=$((ELAPSED + 2))
done
if ! grep -q '\[7/7\] Simulation running!' "$SIM_LOG" 2>/dev/null; then
echo "FATAL: Simulation not ready after ${SIM_TIMEOUT}s."
tail -30 "$SIM_LOG"
exit 1
fi
# ── Phase 2: Start chat clients ────────────────────────────────────
echo "[2/5] Starting chat clients..."
rm -f "$PIPE1" "$PIPE2"
mkfifo "$PIPE1" "$PIPE2"
# Open FIFOs read-write (<>) so open(2) never blocks and the write end
# stays alive as long as the FD is open — even if readers come and go.
exec 3<>"$PIPE1"
exec 4<>"$PIPE2"
# Start chat clients reading from the FIFOs.
bash "$SCRIPT_DIR/run_chat_mix.sh" < "$PIPE1" > "$CHAT1_LOG" 2>&1 &
CHAT1_PID=$!
bash "$SCRIPT_DIR/run_chat_mix1.sh" < "$PIPE2" > "$CHAT2_LOG" 2>&1 &
CHAT2_PID=$!
# First stdin line is consumed by each client as its nickname.
echo "TestAlice" >&3
echo "TestBob" >&4
echo " Clients started, nicknames sent."
# ── Phase 3: Wait for mix node discovery ───────────────────────────
echo "[3/5] Waiting for mix node discovery (up to ${DISCOVERY_WAIT}s)..."
DISC_ELAPSED=0
C1_READY=0
C2_READY=0
while [ $DISC_ELAPSED -lt $DISCOVERY_WAIT ]; do
[ $C1_READY -eq 0 ] && grep -q "ready to publish messages now" "$CHAT1_LOG" 2>/dev/null && { echo " Alice: ready"; C1_READY=1; }
[ $C2_READY -eq 0 ] && grep -q "ready to publish messages now" "$CHAT2_LOG" 2>/dev/null && { echo " Bob: ready"; C2_READY=1; }
[ $C1_READY -eq 1 ] && [ $C2_READY -eq 1 ] && break
sleep 2
DISC_ELAPSED=$((DISC_ELAPSED + 2))
done
if [ $C1_READY -eq 0 ] || [ $C2_READY -eq 0 ]; then
echo "FATAL: Chat clients not ready within ${DISCOVERY_WAIT}s."
[ $C1_READY -eq 0 ] && { echo " Alice log tail:"; tail -10 "$CHAT1_LOG"; }
[ $C2_READY -eq 0 ] && { echo " Bob log tail:"; tail -10 "$CHAT2_LOG"; }
exit 1
fi
echo " Settling filter subscriptions (20s)..."
sleep 20
# ── Phase 4: Send messages and verify delivery ─────────────────────
echo "[4/5] Sending messages and verifying delivery..."
# Timestamped payloads make each run's messages unique in the logs.
MSG_A="e2e_alice_$(date +%s)"
MSG_B="e2e_bob_$(date +%s)"
# check_delivery LOG PATTERN LABEL — poll LOG for PATTERN up to
# MSG_TIMEOUT, bumping PASSED/FAILED. Always returns 0 so the second
# direction is still checked after a first-direction failure.
check_delivery() {
local log_file=$1
local pattern=$2
local label=$3
local elapsed=0
while [ $elapsed -lt $MSG_TIMEOUT ]; do
if grep -q "$pattern" "$log_file" 2>/dev/null; then
echo " PASS: $label"
PASSED=$((PASSED + 1))
return 0
fi
sleep 3
elapsed=$((elapsed + 3))
done
echo " FAIL: $label (not received after ${MSG_TIMEOUT}s)"
FAILED=$((FAILED + 1))
return 0
}
echo " Sending: Alice -> '$MSG_A'"
echo "$MSG_A" >&3
echo " Sending: Bob -> '$MSG_B'"
echo "$MSG_B" >&4
echo " Waiting for delivery (up to ${MSG_TIMEOUT}s per message)..."
check_delivery "$CHAT2_LOG" "$MSG_A" "Alice -> Bob"
check_delivery "$CHAT1_LOG" "$MSG_B" "Bob -> Alice"
# ── Phase 5: Report ────────────────────────────────────────────────
echo ""
echo "[5/5] Results: $PASSED passed, $FAILED failed"
echo ""
if [ $FAILED -eq 0 ] && [ $PASSED -ge 2 ]; then
echo "E2E TEST PASSED"
exit 0
else
echo "E2E TEST FAILED"
echo " Simulation log: $SIM_LOG"
echo " Alice log: $CHAT1_LOG"
echo " Bob log: $CHAT2_LOG"
exit 1
fi

View File

@ -1 +0,0 @@
../../build/wakunode2 --config-file="config.toml" 2>&1 | tee mix_node.log

View File

@ -1 +0,0 @@
../../build/wakunode2 --config-file="config1.toml" 2>&1 | tee mix_node1.log

View File

@ -1 +0,0 @@
../../build/wakunode2 --config-file="config2.toml" 2>&1 | tee mix_node2.log

View File

@ -1 +0,0 @@
../../build/wakunode2 --config-file="config3.toml" 2>&1 | tee mix_node3.log

View File

@ -1 +0,0 @@
../../build/wakunode2 --config-file="config4.toml" 2>&1 | tee mix_node4.log

View File

@ -0,0 +1,466 @@
#!/usr/bin/env bash
# 5-node mix simulation using logoscore instances with embedded delivery + RLN modules.
# Each node is its own logoscore process — no standalone wakunode2 or HTTP polling.
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
DELIVERY_DIR="$(cd ../.. && pwd)"
RLN_PROJECT_DIR="$(cd "$DELIVERY_DIR/.." && pwd)"
export RISC0_DEV_MODE=1
export TMPDIR=/tmp
# --- Node identity constants (from config*.toml) ---
NODEKEYS=(
"f98e3fba96c32e8d1967d460f1b79457380e1a895f7971cecc8528abe733781a"
"09e9d134331953357bd38bbfce8edb377f4b6308b4f3bfbe85c610497053d684"
"ed54db994682e857d77cd6fb81be697382dc43aa5cd78e16b0ec8098549f860e"
"42f96f29f2d6670938b0864aced65a332dcf5774103b4c44ec4d0ea4ef3c47d6"
"3ce887b3c34b7a92dd2868af33941ed1dbec4893b054572cd5078da09dd923d4"
)
MIXKEYS=(
"a87db88246ec0eedda347b9b643864bee3d6933eb15ba41e6d58cb678d813258"
"c86029e02c05a7e25182974b519d0d52fcbafeca6fe191fbb64857fb05be1a53"
"b858ac16bbb551c4b2973313b1c8c8f7ea469fca03f1608d200bbf58d388ec7f"
"d8bd379bb394b0f22dd236d63af9f1a9bc45266beffc3fbbe19e8b6575f2535b"
"780fff09e51e98df574e266bf3266ec6a3a1ddfcf7da826a349a29c137009d49"
)
PEER_IDS=(
"16Uiu2HAmPiEs2ozjjJF2iN2Pe2FYeMC9w4caRHKYdLdAfjgbWM6o"
"16Uiu2HAmLtKaFaSWDohToWhWUZFLtqzYZGPFuXwKrojFVF6az5UF"
"16Uiu2HAmTEDHwAziWUSz6ZE23h5vxG2o4Nn7GazhMor4bVuMXTrA"
"16Uiu2HAmPwRKZajXtfb1Qsv45VVfRZgK3ENdfmnqzSrVm3BczF6f"
"16Uiu2HAmRhxmCHBYdXt1RibXrjAUNJbduAhzaTHwFCZT4qWnqZAu"
)
MIX_PUBKEYS=(
"9d09ce624f76e8f606265edb9cca2b7de9b41772a6d784bddaf92ffa8fba7d2c"
"9231e86da6432502900a84f867004ce78632ab52cd8e30b1ec322cd795710c2a"
"275cd6889e1f29ca48e5b9edb800d1a94f49f13d393a0ecf1a07af753506de6c"
"e0ed594a8d506681be075e8e23723478388fb182477f7a469309a25e7076fc18"
"8fd7a1a7c19b403d231452a9b1ea40eb1cc76f455d918ef8980e7685f9eeeb1f"
)
BASE_TCP_PORT=60001
BASE_DISC_PORT=9001
NUM_NODES=5
CONTENT_TOPIC="/toy-chat/2/baixa-chiado/proto"
case "$(uname -s)-$(uname -m)" in
Darwin-arm64) PLATFORM="darwin-arm64-dev"; EXT="dylib";;
Linux-x86_64) PLATFORM="linux-x86_64-dev"; EXT="so";;
Linux-aarch64) PLATFORM="linux-aarch64-dev"; EXT="so";;
*) echo "Unsupported platform"; exit 1;;
esac
# --- Cleanup ---
SEQUENCER_PID=""
INSTANCE_PIDS=()
MODULES_DIRS=()
WORK_DIR=""
cleanup() {
echo ""
echo "=== Shutting down ==="
for pid in "${INSTANCE_PIDS[@]+"${INSTANCE_PIDS[@]}"}"; do
if [ -n "$pid" ]; then
local children
children=$(pgrep -P "$pid" 2>/dev/null || true)
kill "$pid" $children 2>/dev/null || true
wait "$pid" 2>/dev/null || true
fi
done
pkill -f 'logos_host' 2>/dev/null || true
if [ -n "$SEQUENCER_PID" ]; then
kill "$SEQUENCER_PID" 2>/dev/null || true
wait "$SEQUENCER_PID" 2>/dev/null || true
fi
for mdir in "${MODULES_DIRS[@]}"; do
[ -n "$mdir" ] && rm -rf "$mdir"
done
if [ -n "$WORK_DIR" ]; then
echo " Logs: $WORK_DIR"
fi
echo "Done."
}
trap cleanup EXIT
echo "=== Mix Simulation (5 LogosCore Instances) ==="
echo " RLN project: $RLN_PROJECT_DIR"
echo " Delivery: $DELIVERY_DIR"
echo ""
pkill -f 'logos_host' 2>/dev/null || true
sleep 1
rm -f /tmp/logos_* 2>/dev/null || true
# ---------- Phase 1: Sequencer ----------
echo "[1/7] Starting sequencer..."
(cd "$RLN_PROJECT_DIR" && git submodule update --init lssa)
if nc -z 127.0.0.1 3040 2>/dev/null; then
OLD_PID=$(lsof -ti tcp:3040 2>/dev/null || true)
if [ -n "$OLD_PID" ]; then
echo " Port 3040 in use by PID $OLD_PID. Killing..."
kill "$OLD_PID" 2>/dev/null || true
sleep 1
fi
fi
rm -rf "$RLN_PROJECT_DIR/lssa/rocksdb"
echo " Building sequencer (first run may take several minutes)..."
(cd "$RLN_PROJECT_DIR/lssa" && cargo build --features standalone -p sequencer_runner 2>&1 | tail -3) || {
echo " FATAL: sequencer build failed"
exit 1
}
SEQUENCER_BIN="$RLN_PROJECT_DIR/lssa/target/debug/sequencer_runner"
(cd "$RLN_PROJECT_DIR/lssa" && env RUST_LOG=info "$SEQUENCER_BIN" sequencer_runner/configs/debug) >/dev/null 2>&1 &
SEQUENCER_PID=$!
echo " PID: $SEQUENCER_PID"
echo " Waiting for port 3040..."
for i in $(seq 1 300); do
if nc -z 127.0.0.1 3040 2>/dev/null; then
echo " Sequencer ready."
break
fi
if ! kill -0 "$SEQUENCER_PID" 2>/dev/null; then
echo " ERROR: Sequencer exited unexpectedly."
exit 1
fi
sleep 1
done
if ! nc -z 127.0.0.1 3040 2>/dev/null; then
echo " ERROR: Sequencer did not start within 300s."
exit 1
fi
# ---------- Phase 2: Deploy programs ----------
echo "[2/7] Deploying programs..."
# Build guest binaries if missing (required by run_setup and register_member)
LEZ_RLN_DIR="$RLN_PROJECT_DIR/lez-rln"
GUEST_BIN="$LEZ_RLN_DIR/methods/guest/target/riscv32im-risc0-zkvm-elf/docker/rln_registration.bin"
if [ ! -f "$GUEST_BIN" ]; then
if ! command -v cargo-risczero &>/dev/null; then
echo " FATAL: zkVM guest binaries not found and cargo-risczero not installed."
echo " Install with: cargo install cargo-risczero && cargo risczero install"
echo " Requires Docker running for cross-compilation."
exit 1
fi
echo " Building zkVM guest programs (first run may take several minutes)..."
(cd "$LEZ_RLN_DIR" && cargo risczero build --manifest-path methods/guest/Cargo.toml 2>&1 | tail -10) || {
echo " FATAL: guest program build failed. Is Docker running?"
exit 1
}
fi
export NSSA_WALLET_HOME_DIR="$RLN_PROJECT_DIR/dev"
export WALLET_CONFIG="$NSSA_WALLET_HOME_DIR/wallet_config.json"
export WALLET_STORAGE="$NSSA_WALLET_HOME_DIR/storage.json"
rm -f "$WALLET_CONFIG" "$WALLET_STORAGE"
SETUP_OUTPUT=$(cd "$LEZ_RLN_DIR" && cargo run --bin run_setup 2>&1) || {
echo " FATAL: run_setup failed:"
echo "$SETUP_OUTPUT"
exit 1
}
echo "$SETUP_OUTPUT" | tail -5
TREE_MAIN_ACCOUNT=$(echo "$SETUP_OUTPUT" | grep "Tree main account:" | awk '{print $NF}')
if [ -z "$TREE_MAIN_ACCOUNT" ]; then
echo " FATAL: Could not parse tree main account from run_setup output"
exit 1
fi
echo " Programs deployed."
echo " Tree main account: $TREE_MAIN_ACCOUNT"
# ---------- Phase 3: Register 5 members & generate keystores ----------
echo "[3/7] Registering $NUM_NODES members and generating keystores..."
WORK_DIR=$(mktemp -d)
REGISTER_BIN="$LEZ_RLN_DIR/target/release/register_member"
(cd "$LEZ_RLN_DIR" && cargo build --release --bin register_member 2>&1 | tail -3)
if [ ! -f "$REGISTER_BIN" ]; then
echo " FATAL: register_member not found at $REGISTER_BIN"
exit 1
fi
MANIFEST_FILE="$WORK_DIR/manifest.json"
echo "[" > "$MANIFEST_FILE"
LEAF_INDICES=()
IDENTITY_SECRETS=()
CONFIG_ACCOUNT=""
for i in $(seq 0 $((NUM_NODES - 1))); do
echo " Registering node $((i+1))/$NUM_NODES..."
OUTPUT=$(cd "$LEZ_RLN_DIR" && "$REGISTER_BIN" 2>&1) || {
echo " FATAL: register_member failed:"
echo "$OUTPUT"
exit 1
}
CONFIG_ACCOUNT=$(echo "$OUTPUT" | grep "^CONFIG_ACCOUNT=" | cut -d= -f2)
LEAF_INDEX=$(echo "$OUTPUT" | grep "^LEAF_INDEX=" | cut -d= -f2)
IDENTITY_SECRET=$(echo "$OUTPUT" | grep "^IDENTITY_SECRET_HASH=" | cut -d= -f2)
if [ -z "$CONFIG_ACCOUNT" ] || [ -z "$LEAF_INDEX" ] || [ -z "$IDENTITY_SECRET" ]; then
echo " FATAL: Failed to parse register_member output:"
echo "$OUTPUT"
exit 1
fi
LEAF_INDICES+=("$LEAF_INDEX")
IDENTITY_SECRETS+=("$IDENTITY_SECRET")
[ "$i" -gt 0 ] && echo "," >> "$MANIFEST_FILE"
cat >> "$MANIFEST_FILE" <<EOF
{
"peerId": "${PEER_IDS[$i]}",
"leafIndex": $LEAF_INDEX,
"identitySecretHash": "$IDENTITY_SECRET",
"rateLimit": 100,
"configAccount": "$CONFIG_ACCOUNT"
}
EOF
echo " leaf=$LEAF_INDEX"
done
echo "]" >> "$MANIFEST_FILE"
echo " Config account: $CONFIG_ACCOUNT"
# Generate keystores
echo " Generating keystores..."
LIBRLN_FILE="$DELIVERY_DIR/librln_v0.9.0.a"
if [ ! -f "$LIBRLN_FILE" ]; then
echo " Building librln..."
(cd "$DELIVERY_DIR" && make librln 2>&1 | tail -5)
fi
if [ ! -f "$LIBRLN_FILE" ]; then
echo " FATAL: librln not found at $LIBRLN_FILE"
exit 1
fi
if [ ! -f "$DELIVERY_DIR/nimbus-build-system.paths" ]; then
echo " Generating nim paths..."
(cd "$DELIVERY_DIR" && make nimbus-build-system-paths 2>&1 | tail -3)
fi
NIM_PATH_ARGS=()
while IFS= read -r line; do
line="${line//\"/}"
[[ -n "$line" ]] && NIM_PATH_ARGS+=("$line")
done < "$DELIVERY_DIR/nimbus-build-system.paths"
SETUP_KS_BIN="$WORK_DIR/setup_keystores"
nim c -d:release --mm:refc \
"${NIM_PATH_ARGS[@]}" \
--passL:"$LIBRLN_FILE" --passL:"-lm" \
-o:"$SETUP_KS_BIN" \
"$SCRIPT_DIR/setup_keystores.nim" 2>&1 | tail -10
if [ ! -f "$SETUP_KS_BIN" ]; then
echo " FATAL: Failed to compile setup_keystores.nim"
exit 1
fi
(cd "$WORK_DIR" && "$SETUP_KS_BIN" "$MANIFEST_FILE") || {
echo " FATAL: setup_keystores failed"
exit 1
}
KEYSTORE_COUNT=$(ls -1 "$WORK_DIR"/rln_keystore_*.json 2>/dev/null | wc -l | tr -d ' ')
echo " Keystores: $KEYSTORE_COUNT"
# ---------- Phase 4: Build / check modules ----------
echo "[4/7] Building modules (if needed)..."
LOGOSCORE="${LOGOSCORE:-$(nix build github:logos-co/logos-liblogos/7df6195 --override-input logos-cpp-sdk github:logos-co/logos-cpp-sdk/a4bd66c --no-link --print-out-paths)/bin/logoscore}"
WALLET_MODULE_RESULT="$RLN_PROJECT_DIR/logos-rln-module/result-wallet"
NEED_BUILD=0
[ -f "$RLN_PROJECT_DIR/logos-rln-module/result-rln/lib/liblogos_rln_module.$EXT" ] || NEED_BUILD=1
[ -f "$WALLET_MODULE_RESULT/lib/liblogos_execution_zone_wallet_module.$EXT" ] || NEED_BUILD=1
[ -f "$RLN_PROJECT_DIR/logos-delivery-module/result/lib/delivery_module_plugin.$EXT" ] || NEED_BUILD=1
if [ "$NEED_BUILD" -eq 1 ]; then
echo " Some modules missing — running build_modules.sh..."
bash "$RLN_PROJECT_DIR/build_modules.sh" || {
echo " FATAL: Module build failed."
exit 1
}
fi
[ -f "$RLN_PROJECT_DIR/logos-rln-module/result-rln/lib/liblogos_rln_module.$EXT" ] || { echo " FATAL: RLN module not found after build."; exit 1; }
[ -f "$WALLET_MODULE_RESULT/lib/liblogos_execution_zone_wallet_module.$EXT" ] || { echo " FATAL: Wallet module not found after build."; exit 1; }
[ -f "$RLN_PROJECT_DIR/logos-delivery-module/result/lib/delivery_module_plugin.$EXT" ] || { echo " FATAL: Delivery module not found after build."; exit 1; }
echo " All modules present."
# Build chat2mix if not present
CHAT2MIX="$DELIVERY_DIR/build/chat2mix"
if [ ! -f "$CHAT2MIX" ] || [ "${REBUILD_NIM:-0}" = "1" ]; then
echo " Building chat2mix..."
(cd "$DELIVERY_DIR" && make chat2mix 2>&1 | tail -5) || {
echo " WARNING: chat2mix build failed — chat clients won't be available"
}
else
echo " chat2mix already built."
fi
# ---------- Phase 5: Stage modules ----------
echo "[5/7] Staging modules for $NUM_NODES instances..."
# Stage one throwaway module directory (wallet + RLN + delivery modules, each
# with a manifest.json) and print its path on stdout so callers can capture it
# with $( ). The path echo is the function's only stdout output.
# NOTE(review): the mktemp directories are not removed here — confirm a
# cleanup trap exists elsewhere in this script.
stage_modules() {
  local mdir
  mdir=$(mktemp -d)
  # Wallet module, plus its optional FFI companion library if built.
  mkdir -p "$mdir/liblogos_execution_zone_wallet_module"
  cp -L "$WALLET_MODULE_RESULT/lib/liblogos_execution_zone_wallet_module.$EXT" "$mdir/liblogos_execution_zone_wallet_module/"
  [ -f "$WALLET_MODULE_RESULT/lib/libwallet_ffi.$EXT" ] && \
    cp -L "$WALLET_MODULE_RESULT/lib/libwallet_ffi.$EXT" "$mdir/liblogos_execution_zone_wallet_module/"
  echo "{\"name\":\"liblogos_execution_zone_wallet_module\",\"version\":\"1.0.0\",\"type\":\"core\",\"main\":{\"$PLATFORM\":\"liblogos_execution_zone_wallet_module.$EXT\"},\"dependencies\":[],\"capabilities\":[]}" > "$mdir/liblogos_execution_zone_wallet_module/manifest.json"
  # RLN module; lez FFI library is optional (|| true tolerates its absence).
  mkdir -p "$mdir/liblogos_rln_module"
  cp -L "$RLN_PROJECT_DIR/logos-rln-module/result-rln/lib/liblogos_rln_module.$EXT" "$mdir/liblogos_rln_module/"
  cp -L "$RLN_PROJECT_DIR/logos-rln-module/result-rln/lib/liblez_rln_ffi.$EXT" "$mdir/liblogos_rln_module/" 2>/dev/null || true
  # RLN declares a dependency on the wallet module so load order is enforced.
  echo "{\"name\":\"liblogos_rln_module\",\"version\":\"1.0.0\",\"type\":\"core\",\"main\":{\"$PLATFORM\":\"liblogos_rln_module.$EXT\"},\"dependencies\":[\"liblogos_execution_zone_wallet_module\"],\"capabilities\":[]}" > "$mdir/liblogos_rln_module/manifest.json"
  # Delivery module plugin, optional core library, and any libpq* helpers.
  mkdir -p "$mdir/delivery_module"
  cp -L "$RLN_PROJECT_DIR/logos-delivery-module/result/lib/delivery_module_plugin.$EXT" "$mdir/delivery_module/"
  [ -f "$RLN_PROJECT_DIR/logos-delivery-module/result/lib/liblogosdelivery.$EXT" ] && \
    cp -L "$RLN_PROJECT_DIR/logos-delivery-module/result/lib/liblogosdelivery.$EXT" "$mdir/delivery_module/"
  for pq in "$RLN_PROJECT_DIR"/logos-delivery-module/result/lib/libpq*; do
    [ -f "$pq" ] && cp -L "$pq" "$mdir/delivery_module/"
  done
  echo "{\"name\":\"delivery_module\",\"version\":\"1.0.0\",\"type\":\"core\",\"main\":{\"$PLATFORM\":\"delivery_module_plugin.$EXT\"},\"dependencies\":[],\"capabilities\":[]}" > "$mdir/delivery_module/manifest.json"
  # Return value: the staged directory path.
  echo "$mdir"
}
# Stage a private copy of the module set for every node instance so the
# instances never share on-disk module state.
for node_idx in $(seq 0 $((NUM_NODES - 1))); do
  staged_dir=$(stage_modules)
  MODULES_DIRS+=("$staged_dir")
  echo " Node $node_idx modules: $staged_dir"
done
# Module load order and the wallet bootstrap call, shared by every instance.
LOAD_ORDER="liblogos_execution_zone_wallet_module,liblogos_rln_module,delivery_module"
WALLET_CALL="liblogos_execution_zone_wallet_module.open($WALLET_CONFIG,$WALLET_STORAGE)"
# ---------- Phase 6: Start 5 logoscore instances ----------
echo "[6/7] Starting $NUM_NODES logoscore instances..."
# Write node configs and start instances
for i in $(seq 0 $((NUM_NODES - 1))); do
  # Per-node ports/paths derived from the node index.
  TCP_PORT=$((BASE_TCP_PORT + i))
  DISC_PORT=$((BASE_DISC_PORT + i))
  LEAF_INDEX="${LEAF_INDICES[$i]}"
  NODE_CONFIG="$WORK_DIR/node${i}_config.json"
  LOG_FILE="$WORK_DIR/node${i}.log"
  # Bootstrap: node 0 has no entry nodes, others bootstrap to node 0
  if [ "$i" -eq 0 ]; then
    ENTRY_NODES="[]"
  else
    ENTRY_NODES="[\"/ip4/127.0.0.1/tcp/$BASE_TCP_PORT/p2p/${PEER_IDS[0]}\"]"
  fi
  # Build mixNodes array: all OTHER nodes' multiaddr:mixPubKey
  MIX_NODES_JSON=""
  for j in $(seq 0 $((NUM_NODES - 1))); do
    [ "$j" -eq "$i" ] && continue
    J_PORT=$((BASE_TCP_PORT + j))
    # Comma-separate after the first entry.
    [ -n "$MIX_NODES_JSON" ] && MIX_NODES_JSON="$MIX_NODES_JSON, "
    MIX_NODES_JSON="$MIX_NODES_JSON\"/ip4/127.0.0.1/tcp/$J_PORT/p2p/${PEER_IDS[$j]}:${MIX_PUBKEYS[$j]}\""
  done
  # Node config consumed by delivery_module.createNode(@file) below.
  # (Heredoc body intentionally unindented; EOF must sit at column 0.)
  cat > "$NODE_CONFIG" <<EOF
{
  "mode": "Core",
  "clusterId": 2,
  "numShardsInNetwork": 1,
  "entryNodes": $ENTRY_NODES,
  "maxMessageSize": "150 KiB",
  "listenAddress": "127.0.0.1",
  "tcpPort": $TCP_PORT,
  "discv5UdpPort": $DISC_PORT,
  "nodekey": "${NODEKEYS[$i]}",
  "mixkey": "${MIXKEYS[$i]}",
  "mixnodes": [$MIX_NODES_JSON],
  "mix": true,
  "enableSpamProtection": true,
  "logLevel": "TRACE"
}
EOF
  echo " Starting node $i (port $TCP_PORT, leaf $LEAF_INDEX)..."
  # Launch detached in a subshell: stdin closed, all output to the log file.
  # TMPDIR=/tmp avoids long socket paths from deep WORK_DIRs.
  (cd "$WORK_DIR" && TMPDIR=/tmp "$LOGOSCORE" -m "${MODULES_DIRS[$i]}" -l "$LOAD_ORDER" \
    -c "$WALLET_CALL" \
    -c "delivery_module.createNode(@$NODE_CONFIG)" \
    -c "delivery_module.start()" \
    -c "delivery_module.subscribe($CONTENT_TOPIC)" \
    -c "delivery_module.setRlnConfig($CONFIG_ACCOUNT,$LEAF_INDEX)" \
    -c "liblogos_rln_module.start_root_broadcast($CONFIG_ACCOUNT)" \
    -c "liblogos_rln_module.start_merkle_proof_broadcast($CONFIG_ACCOUNT,$LEAF_INDEX)" \
    </dev/null >"$LOG_FILE" 2>&1) &
  # $! is the backgrounded subshell's PID; used below for liveness checks.
  INSTANCE_PIDS+=($!)
  echo " Node $i PID: ${INSTANCE_PIDS[$i]}"
  # Wait for all 7 -c calls to succeed
  echo " Waiting for node $i to initialize..."
  # Each successful -c call logs one "Method call successful" line; poll the
  # log (up to 90s) until all 7 appear. grep may run before the log exists,
  # hence the 2>/dev/null || true and the ${N:-0} default.
  for j in $(seq 1 90); do
    N=$(grep -c '^Method call successful' "$LOG_FILE" 2>/dev/null || true); N=${N:-0}
    [ "$N" -ge 7 ] && break
    # If the process died, dump diagnostics and abort the whole run.
    if ! kill -0 "${INSTANCE_PIDS[$i]}" 2>/dev/null; then
      N=$(grep -c '^Method call successful' "$LOG_FILE" 2>/dev/null || true); N=${N:-0}
      echo " ERROR: Node $i exited after $N/7 calls. Method call lines:"
      grep 'Method call' "$LOG_FILE"
      echo " --- Last 15 log lines ---"
      tail -15 "$LOG_FILE"
      exit 1
    fi
    sleep 1
  done
  # N carries over from the poll loop (seq 1 90 always runs at least once).
  if [ "$N" -lt 7 ]; then
    echo " ERROR: Node $i did not initialize ($N/7 calls). Log:"
    grep 'Method call\|Error' "$LOG_FILE" | tail -10
    exit 1
  fi
  echo " Node $i ready ($N/7 calls)."
  # Pause between nodes to avoid resource contention
  sleep 3
done
# Wait for peer discovery across all nodes
# Fixed grace period so discv5/mix peers can connect before clients attach.
echo " Waiting for peer discovery (15s)..."
sleep 15
# ---------- Phase 7: Ready ----------
# Print a summary of the running topology and how to inspect/use it.
echo ""
echo "[7/7] Simulation running!"
echo ""
echo " Sequencer: PID $SEQUENCER_PID (port 3040)"
echo " Config: $CONFIG_ACCOUNT"
echo " Logs: $WORK_DIR/node*.log"
echo ""
for i in $(seq 0 $((NUM_NODES - 1))); do
  TCP_PORT=$((BASE_TCP_PORT + i))
  echo " Node $i: PID ${INSTANCE_PIDS[$i]}, port $TCP_PORT, leaf ${LEAF_INDICES[$i]}"
done
echo ""
echo " To inspect logs:"
for i in $(seq 0 $((NUM_NODES - 1))); do
  echo " grep 'Method call' $WORK_DIR/node${i}.log"
done
echo ""
echo " Now start chat clients in separate terminals:"
echo " cd $(pwd)"
echo " bash run_chat_mix.sh"
echo " bash run_chat_mix1.sh"
echo ""
echo " Press Ctrl+C to stop everything."
# Block until all background children exit. NOTE(review): Ctrl+C teardown
# presumably relies on a trap installed earlier in this script — confirm.
wait

View File

@ -0,0 +1,95 @@
## Generate RLN keystores from a manifest produced by register_member.rs.
##
## Reads a JSON manifest file with identity data for each node, reconstructs
## IdentityCredential from the secret hash, and saves per-node keystores.
##
## Manifest format (one JSON object per line on stdin, or a JSON array file):
## [{"peerId": "16Uiu2...", "leafIndex": 0, "identitySecretHash": "aabb...", "rateLimit": 100}, ...]
##
## Usage: nim c -r setup_keystores.nim <manifest.json>
import std/[os, options, json, strutils]
import results
import chronicles
import
mix_rln_spam_protection/credentials,
mix_rln_spam_protection/types,
mix_rln_spam_protection/rln_interface
const KeystorePassword = "mix-rln-password"
proc hexToBytes32(hex: string): Result[array[32, byte], string] =
  ## Decode a 64-character hex string, with an optional "0x"/"0X" prefix,
  ## into a fixed 32-byte array. Returns err on wrong length or bad digits.
  var cleaned = hex
  if cleaned.startsWith("0x") or cleaned.startsWith("0X"):
    cleaned = cleaned[2 .. ^1]
  if cleaned.len != 64:
    return err("Expected 64 hex chars, got " & $cleaned.len)
  var parsed: array[32, byte]
  for byteIdx in 0 ..< 32:
    # Two hex characters per output byte.
    let chunk = cleaned[byteIdx * 2 .. byteIdx * 2 + 1]
    try:
      parsed[byteIdx] = byte(parseHexInt(chunk))
    except ValueError:
      return err("Invalid hex at position " & $byteIdx)
  ok(parsed)
proc main() =
  ## Read the manifest given as the first CLI argument, rebuild each node's
  ## identity credential from its secret hash, and save one password-protected
  ## keystore file per node. Exits with code 1 on any failure.
  let args = commandLineParams()
  if args.len < 1:
    echo "Usage: setup_keystores <manifest.json>"
    quit(1)
  let manifestPath = args[0]
  if not fileExists(manifestPath):
    echo "Manifest file not found: ", manifestPath
    quit(1)
  let manifestData = readFile(manifestPath)
  # Parse the whole file as JSON; quit() in the except branch is noreturn,
  # which keeps the let-expression well-typed.
  let manifest =
    try:
      parseJson(manifestData)
    except CatchableError as e:
      echo "Failed to parse manifest: ", e.msg
      quit(1)
  echo "=== RLN Keystore Setup ==="
  echo "Processing ", manifest.len, " entries from ", manifestPath
  echo ""
  # NOTE(review): iteration assumes the manifest is a JSON array of objects
  # (per the module header) — confirm callers never pass a single object.
  for entry in manifest:
    let peerId = entry["peerId"].getStr()
    let leafIndex = MembershipIndex(entry["leafIndex"].getBiggestInt())
    let secretHex = entry["identitySecretHash"].getStr()
    let rateLimit = uint64(entry["rateLimit"].getBiggestInt())
    # Reconstruct the credential: the commitment is poseidon(secret).
    let idSecretBytes = hexToBytes32(secretHex).valueOr:
      echo "Invalid identity secret hex for ", peerId, ": ", error
      quit(1)
    let idCommitmentArr = poseidonHash(@[@idSecretBytes]).valueOr:
      echo "Failed to compute idCommitment for ", peerId, ": ", error
      quit(1)
    var cred: IdentityCredential
    cred.idSecretHash = IDSecretHash(idSecretBytes)
    cred.idCommitment = IDCommitment(idCommitmentArr)
    # One keystore per peer, named by peer id and protected with the shared
    # module password.
    let keystorePath = "rln_keystore_" & peerId & ".json"
    let saveResult = saveKeystore(
      cred,
      KeystorePassword,
      keystorePath,
      some(leafIndex),
      some(rateLimit),
    )
    if saveResult.isErr:
      echo "Failed to save keystore for ", peerId, ": ", saveResult.error
      quit(1)
    echo " Saved: ", keystorePath, " (index: ", leafIndex, ", limit: ", rateLimit, ")"
  echo ""
  echo "=== Setup Complete ==="

when isMainModule:
  main()

View File

@ -643,6 +643,17 @@ with the drawback of consuming some more bandwidth.""",
name: "mixnode"
.}: seq[MixNodePubInfo]
enableSpamProtection* {.
desc: "Enable RLN-based spam protection for mix messages.",
defaultValue: false,
name: "enable-spam-protection"
.}: bool
userMessageLimit* {.
desc: "User message limit for RLN spam protection.",
name: "user-message-limit"
.}: Option[int]
# Kademlia Discovery config
enableKadDiscovery* {.
desc:
@ -1070,6 +1081,9 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.mixConf.withEnabled(n.mix)
b.mixConf.withMixNodes(n.mixnodes)
b.mixConf.withEnableSpamProtection(n.enableSpamProtection)
if n.userMessageLimit.isSome():
b.mixConf.withUserMessageLimit(n.userMessageLimit.get())
b.withMix(n.mix)
if n.mixkey.isSome():
b.mixConf.withMixKey(n.mixkey.get())

@ -0,0 +1 @@
Subproject commit 38f8ac8d6fcf06afb1dd1f4490887a4ddd8a331a

2
vendor/nph vendored

@ -1 +1 @@
Subproject commit 2cacf6cc28116e4046e0b67a13545af5c4e756bd
Subproject commit c6e03162dc2820d3088660f644818d7040e95791

View File

@ -1,7 +1,12 @@
import std/[net, options]
import std/[net, options, strutils]
import results
import json_serialization, json_serialization/std/options as json_options
import libp2p/crypto/crypto
import libp2p/crypto/secp
import libp2p/crypto/curve25519
import libp2p/protocols/mix/curve25519
import nimcrypto/utils as ncrutils
import
waku/common/utils/parse_size_units,
@ -9,6 +14,7 @@ import
waku/factory/waku_conf,
waku/factory/conf_builder/conf_builder,
waku/factory/networks_config,
waku/waku_mix,
tools/confutils/entry_nodes
export json_serialization, json_options
@ -80,6 +86,13 @@ const TheWakuNetworkPreset* = ProtocolsConfig(
),
)
type MixProtocolConfig* = object
nodekey*: Option[string]
mixkey*: Option[string]
enableSpamProtection*: bool
userMessageLimit*: Option[int]
mixNodes*: seq[string]
type WakuMode* {.pure.} = enum
Edge
Core
@ -92,6 +105,7 @@ type NodeConfig* {.
networkingConfig: NetworkingConfig
ethRpcEndpoints: seq[string]
p2pReliability: bool
mixProtocolConfig: Option[MixProtocolConfig]
logLevel: LogLevel
logFormat: LogFormat
@ -102,6 +116,7 @@ proc init*(
networkingConfig: NetworkingConfig = DefaultNetworkingConfig,
ethRpcEndpoints: seq[string] = @[],
p2pReliability: bool = false,
mixProtocolConfig: Option[MixProtocolConfig] = none(MixProtocolConfig),
logLevel: LogLevel = LogLevel.INFO,
logFormat: LogFormat = LogFormat.TEXT,
): T =
@ -111,6 +126,7 @@ proc init*(
networkingConfig: networkingConfig,
ethRpcEndpoints: ethRpcEndpoints,
p2pReliability: p2pReliability,
mixProtocolConfig: mixProtocolConfig,
logLevel: logLevel,
logFormat: logFormat,
)
@ -152,6 +168,9 @@ proc p2pReliability*(c: NodeConfig): bool =
proc logLevel*(c: NodeConfig): LogLevel =
c.logLevel
proc mixProtocolConfig*(c: NodeConfig): Option[MixProtocolConfig] =
c.mixProtocolConfig
proc logFormat*(c: NodeConfig): LogFormat =
c.logFormat
@ -260,6 +279,34 @@ proc toWakuConf*(
# TODO: we should get rid of those two
b.rlnRelayconf.withUserMessageLimit(100)
# Set mix protocol config if provided
if nodeConfig.mixProtocolConfig.isSome():
let mixCfg = nodeConfig.mixProtocolConfig.get()
b.withMix(true)
b.mixConf.withEnabled(true)
if mixCfg.nodekey.isSome():
let key = SkPrivateKey.init(ncrutils.fromHex(mixCfg.nodekey.get())).valueOr:
return err("Invalid nodekey hex: " & $error)
b.withNodeKey(crypto.PrivateKey(scheme: Secp256k1, skkey: key))
if mixCfg.mixkey.isSome():
b.mixConf.withMixKey(mixCfg.mixkey.get())
b.mixConf.withEnableSpamProtection(mixCfg.enableSpamProtection)
if mixCfg.userMessageLimit.isSome():
b.mixConf.withUserMessageLimit(mixCfg.userMessageLimit.get())
if mixCfg.mixNodes.len > 0:
var parsedMixNodes: seq[MixNodePubInfo]
for entry in mixCfg.mixNodes:
let parts = entry.split(":")
if parts.len < 2:
return err("Invalid mixNode format (expected multiaddr:pubKeyHex): " & entry)
let pubKeyHex = parts[^1]
let multiAddr = entry[0 ..< entry.len - pubKeyHex.len - 1]
parsedMixNodes.add(MixNodePubInfo(
multiAddr: multiAddr,
pubKey: intoCurve25519Key(ncrutils.fromHex(pubKeyHex)),
))
b.mixConf.withMixNodes(parsedMixNodes)
## Various configurations
b.withNatStrategy("any")
b.withP2PReliability(nodeConfig.p2pReliability)
@ -459,6 +506,35 @@ proc readValue*(
messageValidation = messageValidation.get(DefaultMessageValidation),
)
# ---------- MixProtocolConfig ----------
proc writeValue*(w: var JsonWriter, val: MixProtocolConfig) {.raises: [IOError].} =
w.beginRecord()
if val.nodekey.isSome(): w.writeField("nodekey", val.nodekey.get())
if val.mixkey.isSome(): w.writeField("mixkey", val.mixkey.get())
w.writeField("enableSpamProtection", val.enableSpamProtection)
if val.userMessageLimit.isSome(): w.writeField("userMessageLimit", val.userMessageLimit.get())
if val.mixNodes.len > 0: w.writeField("mixNodes", val.mixNodes)
w.endRecord()
proc readValue*(
r: var JsonReader, val: var MixProtocolConfig
) {.raises: [SerializationError, IOError].} =
for fieldName in readObjectFields(r):
case fieldName
of "nodekey":
val.nodekey = some(r.readValue(string))
of "mixkey":
val.mixkey = some(r.readValue(string))
of "enableSpamProtection":
val.enableSpamProtection = r.readValue(bool)
of "userMessageLimit":
val.userMessageLimit = some(r.readValue(int))
of "mixNodes":
val.mixNodes = r.readValue(seq[string])
else:
r.raiseUnexpectedField(fieldName, "MixProtocolConfig")
# ---------- NodeConfig ----------
proc writeValue*(w: var JsonWriter, val: NodeConfig) {.raises: [IOError].} =
@ -468,6 +544,7 @@ proc writeValue*(w: var JsonWriter, val: NodeConfig) {.raises: [IOError].} =
w.writeField("networkingConfig", val.networkingConfig)
w.writeField("ethRpcEndpoints", val.ethRpcEndpoints)
w.writeField("p2pReliability", val.p2pReliability)
if val.mixProtocolConfig.isSome(): w.writeField("mixProtocolConfig", val.mixProtocolConfig.get())
w.writeField("logLevel", val.logLevel)
w.writeField("logFormat", val.logFormat)
w.endRecord()
@ -481,6 +558,7 @@ proc readValue*(
networkingConfig: Option[NetworkingConfig]
ethRpcEndpoints: Option[seq[string]]
p2pReliability: Option[bool]
mixProtocolConfig: Option[MixProtocolConfig]
logLevel: Option[LogLevel]
logFormat: Option[LogFormat]
@ -496,6 +574,8 @@ proc readValue*(
ethRpcEndpoints = some(r.readValue(seq[string]))
of "p2pReliability":
p2pReliability = some(r.readValue(bool))
of "mixProtocolConfig":
mixProtocolConfig = some(r.readValue(MixProtocolConfig))
of "logLevel":
logLevel = some(r.readValue(LogLevel))
of "logFormat":
@ -509,6 +589,7 @@ proc readValue*(
networkingConfig = networkingConfig.get(DefaultNetworkingConfig),
ethRpcEndpoints = ethRpcEndpoints.get(@[]),
p2pReliability = p2pReliability.get(false),
mixProtocolConfig = mixProtocolConfig,
logLevel = logLevel.get(LogLevel.INFO),
logFormat = logFormat.get(LogFormat.TEXT),
)

View File

@ -12,6 +12,8 @@ type MixConfBuilder* = object
enabled: Option[bool]
mixKey: Option[string]
mixNodes: seq[MixNodePubInfo]
enableSpamProtection: bool
userMessageLimit: Option[int]
proc init*(T: type MixConfBuilder): MixConfBuilder =
MixConfBuilder()
@ -25,6 +27,12 @@ proc withMixKey*(b: var MixConfBuilder, mixKey: string) =
proc withMixNodes*(b: var MixConfBuilder, mixNodes: seq[MixNodePubInfo]) =
b.mixNodes = mixNodes
proc withEnableSpamProtection*(b: var MixConfBuilder, enable: bool) =
b.enableSpamProtection = enable
proc withUserMessageLimit*(b: var MixConfBuilder, limit: int) =
b.userMessageLimit = some(limit)
proc build*(b: MixConfBuilder): Result[Option[MixConf], string] =
if not b.enabled.get(false):
return ok(none[MixConf]())
@ -33,11 +41,15 @@ proc build*(b: MixConfBuilder): Result[Option[MixConf], string] =
let mixPrivKey = intoCurve25519Key(ncrutils.fromHex(b.mixKey.get()))
let mixPubKey = public(mixPrivKey)
return ok(
some(MixConf(mixKey: mixPrivKey, mixPubKey: mixPubKey, mixNodes: b.mixNodes))
some(MixConf(mixKey: mixPrivKey, mixPubKey: mixPubKey, mixnodes: b.mixNodes,
enableSpamProtection: b.enableSpamProtection,
userMessageLimit: b.userMessageLimit))
)
else:
let (mixPrivKey, mixPubKey) = generateKeyPair().valueOr:
return err("Generate key pair error: " & $error)
return ok(
some(MixConf(mixKey: mixPrivKey, mixPubKey: mixPubKey, mixNodes: b.mixNodes))
some(MixConf(mixKey: mixPrivKey, mixPubKey: mixPubKey, mixnodes: b.mixNodes,
enableSpamProtection: b.enableSpamProtection,
userMessageLimit: b.userMessageLimit))
)

View File

@ -167,7 +167,9 @@ proc setupProtocols(
#mount mix
if conf.mixConf.isSome():
let mixConf = conf.mixConf.get()
(await node.mountMix(conf.clusterId, mixConf.mixKey, mixConf.mixnodes)).isOkOr:
(await node.mountMix(conf.clusterId, mixConf.mixKey, mixConf.mixnodes,
enableSpamProtection = mixConf.enableSpamProtection,
userMessageLimit = mixConf.userMessageLimit)).isOkOr:
return err("failed to mount waku mix protocol: " & $error)
# Setup extended kademlia discovery

View File

@ -51,6 +51,8 @@ type MixConf* = ref object
mixKey*: Curve25519Key
mixPubKey*: Curve25519Key
mixnodes*: seq[MixNodePubInfo]
enableSpamProtection*: bool
userMessageLimit*: Option[int]
type KademliaDiscoveryConf* = object
bootstrapNodes*: seq[(PeerId, seq[MultiAddress])]

View File

@ -316,8 +316,10 @@ proc mountMix*(
clusterId: uint16,
mixPrivKey: Curve25519Key,
mixnodes: seq[MixNodePubInfo],
userMessageLimit: Option[int] = none(int),
enableSpamProtection: bool = false,
): Future[Result[void, string]] {.async.} =
info "mounting mix protocol", nodeId = node.info #TODO log the config used
info "mounting mix protocol", nodeId = node.info
if node.announcedAddresses.len == 0:
return err("Trying to mount mix without having announced addresses")
@ -326,8 +328,23 @@ proc mountMix*(
return err("Failed to convert multiaddress to string.")
info "local addr", localaddr = localaddrStr
let publishMessage: PublishMessage = proc(
message: WakuMessage
): Future[Result[void, string]] {.async.} =
if node.wakuRelay.isNil():
return err("WakuRelay not mounted")
let pubsubTopic =
if node.wakuAutoSharding.isNone():
return err("Auto sharding not configured")
else:
node.wakuAutoSharding.get().getShard(message.contentTopic).valueOr:
return err("Autosharding error: " & error)
discard await node.wakuRelay.publish(pubsubTopic, message)
return ok()
node.wakuMix = WakuMix.new(
localaddrStr, node.peerManager, clusterId, mixPrivKey, mixnodes
localaddrStr, node.peerManager, clusterId, mixPrivKey, mixnodes,
publishMessage, userMessageLimit, enableSpamProtection,
).valueOr:
error "Waku Mix protocol initialization failed", err = error
return
@ -634,7 +651,7 @@ proc start*(node: WakuNode) {.async.} =
await node.startRelay()
if not node.wakuMix.isNil():
node.wakuMix.start()
await node.wakuMix.start()
if not node.wakuMetadata.isNil():
node.wakuMetadata.start()

View File

@ -0,0 +1,215 @@
{.push raises: [].}
## RLN client: request-response delivery from C++ RLN module via registered fetcher.
##
## The C++ delivery module registers an RLN fetcher function pointer at startup.
## Nim calls the fetcher to get roots/proofs from the RLN module on demand.
## Callback factories are used by protocol.nim when wiring up spam protection.
import std/[json, strutils, locks]
import chronos
import results
import chronicles
import mix_rln_spam_protection/types
logScope:
topics = "waku mix rln-client"
type
  # C-convention completion callback: callerRet == 0 signals success; msg/len
  # carry either the JSON payload (success) or an error message (failure).
  RlnFetchCallback* = proc(callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].}
  # Fetcher registered by the C++ delivery module: invokes `callback` with the
  # response for (methodName, params); returns a non-zero code on failure.
  RlnFetcherFunc* = proc(
    methodName: cstring, params: cstring,
    callback: RlnFetchCallback,
    callbackData: pointer, fetcherData: pointer
  ): cint {.cdecl, gcsafe, raises: [].}

# Module-level state shared between the host (C++) and the async side.
# All fields below are guarded by rlnFetcherLock.
var
  rlnFetcherLock: Lock
  rlnFetcher: RlnFetcherFunc # registered fetcher; nil until setRlnFetcher
  rlnFetcherData: pointer # opaque handle passed back to the fetcher
  rlnConfigAccountId: string # RLN config account id (set via setRlnConfig)
  rlnLeafIndex: int = -1 # membership leaf index; -1 means unset
  cachedRootsJson: string # latest roots snapshot pushed via pushRoots
  cachedProofJson: string # latest proof snapshot pushed via pushProof

rlnFetcherLock.initLock()
proc setRlnFetcher*(fetcher: RlnFetcherFunc, fetcherData: pointer) {.gcsafe.} =
  ## Register the C-side fetcher function pointer together with its opaque
  ## user-data handle. Safe to call from any thread.
  {.gcsafe.}:
    withLock rlnFetcherLock:
      rlnFetcher = fetcher
      rlnFetcherData = fetcherData
proc setRlnConfig*(configAccountId: string, leafIndex: int) {.gcsafe.} =
  ## Store the RLN config account id and this node's membership leaf index
  ## for later fetcher calls. Safe to call from any thread.
  {.gcsafe.}:
    withLock rlnFetcherLock:
      rlnConfigAccountId = configAccountId
      rlnLeafIndex = leafIndex
proc getRlnConfig*(): (string, int) {.gcsafe.} =
  ## Snapshot the configured (configAccountId, leafIndex) pair under the lock.
  {.gcsafe.}:
    withLock rlnFetcherLock:
      result = (rlnConfigAccountId, rlnLeafIndex)
proc pushRoots*(rootsJson: string) {.gcsafe.} =
  ## Cache a roots snapshot pushed from the event side; consumed first by the
  ## callback built in makeFetchLatestRoots.
  {.gcsafe.}:
    withLock rlnFetcherLock:
      cachedRootsJson = rootsJson
    trace "Received roots via event push", len = rootsJson.len
proc pushProof*(proofJson: string) {.gcsafe.} =
  ## Cache a merkle-proof snapshot pushed from the event side; consumed first
  ## by the callback built in makeFetchMerkleProof.
  {.gcsafe.}:
    withLock rlnFetcherLock:
      cachedProofJson = proofJson
    trace "Received proof via event push", len = proofJson.len
proc getCachedRoots(): string {.gcsafe.} =
  ## Return the most recently pushed roots JSON ("" when none yet).
  {.gcsafe.}:
    withLock rlnFetcherLock:
      result = cachedRootsJson
proc getCachedProof(): string {.gcsafe.} =
  ## Return the most recently pushed proof JSON ("" when none yet).
  {.gcsafe.}:
    withLock rlnFetcherLock:
      result = cachedProofJson
# Scratch record the fetcher callback fills in before callRlnFetcher returns.
type FetchResult = object
  json: string # response payload when the call succeeded
  errMsg: string # error text reported by the fetcher on failure
  success: bool # true when the callback saw callerRet == 0
proc callRlnFetcher(methodName: string, params: string): Result[string, string] {.gcsafe.} =
  ## Invoke the registered C fetcher and return its JSON payload.
  ## NOTE(review): the result is collected into a stack-allocated FetchResult
  ## via `addr fetchResult`, so this assumes the fetcher invokes the callback
  ## synchronously before returning — confirm that contract on the C side.
  {.gcsafe.}:
    # Snapshot the fetcher pointer under the lock so a concurrent
    # setRlnFetcher cannot race the call below.
    rlnFetcherLock.acquire()
    let fetcher = rlnFetcher
    let data = rlnFetcherData
    rlnFetcherLock.release()
    if fetcher.isNil:
      return err("RLN fetcher not registered")
    var fetchResult: FetchResult
    # cdecl callback: copies the response (or error text) out of the C-owned
    # buffer into the FetchResult pointed to by userData.
    let cb: RlnFetchCallback = proc(callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer) {.cdecl, gcsafe, raises: [].} =
      let res = cast[ptr FetchResult](userData)
      if callerRet == 0 and not msg.isNil and len > 0:
        # Success with a payload: copy the JSON body.
        res[].json = newString(len.int)
        copyMem(addr res[].json[0], msg, len.int)
        res[].success = true
      elif not msg.isNil and len > 0:
        # Failure with a message: keep it for the error result.
        res[].errMsg = newString(len.int)
        copyMem(addr res[].errMsg[0], msg, len.int)
        res[].success = false
      else:
        # No payload at all: the return code alone decides success.
        res[].success = (callerRet == 0)
    let ret = fetcher(methodName.cstring, params.cstring, cb, addr fetchResult, data)
    if ret != 0 or not fetchResult.success:
      # Prefer the fetcher-supplied error text when present.
      if fetchResult.errMsg.len > 0:
        return err(fetchResult.errMsg)
      return err("RLN fetcher returned error code: " & $ret)
    if fetchResult.json.len == 0:
      return err("RLN fetcher returned empty response")
    return ok(fetchResult.json)
proc hexToBytes32(hex: string): Result[array[32, byte], string] =
  ## Parse a 64-character hex string (optional "0x"/"0X" prefix) into a
  ## 32-byte array; err on wrong length or a non-hex digit.
  let stripped =
    if hex.startsWith("0x") or hex.startsWith("0X"): hex[2 .. ^1]
    else: hex
  if stripped.len != 64:
    return err("Expected 64 hex chars, got " & $stripped.len)
  var decoded: array[32, byte]
  for pos in 0 ..< 32:
    try:
      # Consume two hex digits per output byte.
      decoded[pos] = byte(parseHexInt(stripped[pos * 2 .. pos * 2 + 1]))
    except ValueError:
      return err("Invalid hex at position " & $pos)
  ok(decoded)
proc parseRootsJson*(snapshot: string): RlnResult[seq[MerkleNode]] =
  ## Parse a JSON array of hex-encoded merkle roots into MerkleNodes.
  if snapshot.len == 0:
    return err("No roots data")
  try:
    var decoded: seq[MerkleNode] = @[]
    for node in parseJson(snapshot):
      let rootBytes = hexToBytes32(node.getStr()).valueOr:
        return err("Invalid root hex: " & error)
      decoded.add(MerkleNode(rootBytes))
    return ok(decoded)
  except CatchableError as e:
    return err("Failed to parse roots: " & e.msg)
proc parseExternalProof(snapshot: string): Result[ExternalMerkleProof, string] =
  ## Decode a JSON merkle-proof snapshot — {root, path_elements, path_indices}
  ## — into an ExternalMerkleProof (path elements flattened to raw bytes).
  if snapshot.len == 0:
    return err("No merkle proof data")
  try:
    let node = parseJson(snapshot)
    let rootBytes = hexToBytes32(node["root"].getStr()).valueOr:
      return err("Invalid root hex: " & error)
    # Flatten every 32-byte path element into one contiguous byte sequence.
    var elements: seq[byte] = @[]
    for pathElem in node["path_elements"]:
      let elemBytes = hexToBytes32(pathElem.getStr()).valueOr:
        return err("Invalid pathElement hex: " & error)
      elements.add(elemBytes)
    # One direction byte (0/1) per tree level.
    var indexBits: seq[byte] = @[]
    for bit in node["path_indices"]:
      indexBits.add(byte(bit.getInt()))
    return ok(ExternalMerkleProof(
      pathElements: elements,
      identityPathIndex: indexBits,
      root: MerkleNode(rootBytes),
    ))
  except CatchableError as e:
    return err("Failed to parse proof: " & e.msg)
proc makeFetchLatestRoots*(): FetchLatestRootsCallback =
  ## Build the roots-fetching callback handed to spam protection.
  ## Prefers the snapshot cached by pushRoots; falls back to a synchronous
  ## "get_valid_roots" call through the registered RLN fetcher.
  return proc(): Future[RlnResult[seq[MerkleNode]]] {.async, gcsafe, raises: [].} =
    # Fast path: roots pushed via events, if present and parseable.
    let cached = getCachedRoots()
    if cached.len > 0:
      let res = parseRootsJson(cached)
      if res.isOk:
        trace "Using cached roots from event push", count = res.get().len
        return res
    # Slow path: ask the C RLN module directly; needs a configured account.
    let (configAccount, _) = getRlnConfig()
    if configAccount.len == 0:
      return err("RLN config not set")
    let rootsJson = callRlnFetcher("get_valid_roots", configAccount)
    if rootsJson.isErr:
      return err(rootsJson.error)
    let res = parseRootsJson(rootsJson.get())
    if res.isOk:
      trace "Fetched roots from RLN module via fetcher", count = res.get().len
    # On parse failure the err from parseRootsJson is propagated as-is.
    return res
proc makeFetchMerkleProof*(): FetchMerkleProofCallback =
  ## Build the merkle-proof-fetching callback handed to spam protection.
  ## Prefers the snapshot cached by pushProof; falls back to a synchronous
  ## "get_merkle_proofs" call through the registered RLN fetcher.
  ## NOTE(review): the `index` argument is only used for logging — the fetch
  ## always uses the leaf index stored via setRlnConfig. Confirm callers never
  ## expect a proof for a different index.
  return proc(
      index: MembershipIndex
  ): Future[RlnResult[ExternalMerkleProof]] {.async, gcsafe, raises: [].} =
    # Fast path: proof pushed via events, if present and parseable.
    let cached = getCachedProof()
    if cached.len > 0:
      let res = parseExternalProof(cached)
      if res.isOk:
        trace "Using cached proof from event push", index = index
        return res
    # Slow path: ask the C RLN module, passing "account,leafIndex".
    let (configAccount, leafIndex) = getRlnConfig()
    if configAccount.len == 0:
      return err("RLN config not set")
    let params = configAccount & "," & $leafIndex
    let proofJson = callRlnFetcher("get_merkle_proofs", params)
    if proofJson.isErr:
      return err(proofJson.error)
    let res = parseExternalProof(proofJson.get())
    if res.isOk:
      trace "Fetched merkle proof from RLN module via fetcher", index = index
    return res
{.pop.}

View File

@ -10,14 +10,19 @@ import
libp2p/protocols/mix/mix_protocol,
libp2p/protocols/mix/mix_metrics,
libp2p/protocols/mix/delay_strategy,
libp2p/[multiaddress, peerid],
libp2p/protocols/mix/spam_protection,
libp2p/[multiaddress, multicodec, peerid, peerinfo],
eth/common/keys
import
waku/node/peer_manager,
waku/waku_core,
waku/waku_enr,
waku/node/peer_manager/waku_peer_store
waku/node/peer_manager/waku_peer_store,
mix_rln_spam_protection,
waku/waku_relay,
waku/common/nimchronos,
./logos_core_client
logScope:
topics = "waku mix"
@ -25,10 +30,16 @@ logScope:
const minMixPoolSize = 4
type
PublishMessage* = proc(message: WakuMessage): Future[Result[void, string]] {.
async, gcsafe, raises: []
.}
WakuMix* = ref object of MixProtocol
peerManager*: PeerManager
clusterId: uint16
pubKey*: Curve25519Key
mixRlnSpamProtection*: MixRlnSpamProtection
publishMessage*: PublishMessage
WakuMixResult*[T] = Result[T, string]
@ -41,11 +52,9 @@ proc processBootNodes(
) =
var count = 0
for node in bootnodes:
let pInfo = parsePeerInfo(node.multiAddr).valueOr:
error "Failed to get peer id from multiaddress: ",
error = error, multiAddr = $node.multiAddr
let (peerId, networkAddr) = parseFullAddress(node.multiAddr).valueOr:
error "Failed to parse multiaddress", multiAddr = node.multiAddr, error = error
continue
let peerId = pInfo.peerId
var peerPubKey: crypto.PublicKey
if not peerId.extractPublicKey(peerPubKey):
warn "Failed to extract public key from peerId, skipping node", peerId = peerId
@ -65,10 +74,10 @@ proc processBootNodes(
count.inc()
peermgr.addPeer(
RemotePeerInfo.init(peerId, @[multiAddr], mixPubKey = some(node.pubKey))
RemotePeerInfo.init(peerId, @[networkAddr], mixPubKey = some(node.pubKey))
)
mix_pool_size.set(count)
info "using mix bootstrap nodes ", count = count
debug "using mix bootstrap nodes ", count = count
proc new*(
T: typedesc[WakuMix],
@ -77,9 +86,12 @@ proc new*(
clusterId: uint16,
mixPrivKey: Curve25519Key,
bootnodes: seq[MixNodePubInfo],
publishMessage: PublishMessage = nil,
userMessageLimit: Option[int] = none(int),
enableSpamProtection: bool = false,
): WakuMixResult[T] =
let mixPubKey = public(mixPrivKey)
info "mixPubKey", mixPubKey = mixPubKey
trace "mixPubKey", mixPubKey = mixPubKey
let nodeMultiAddr = MultiAddress.init(nodeAddr).valueOr:
return err("failed to parse mix node address: " & $nodeAddr & ", error: " & error)
let localMixNodeInfo = initMixNodeInfo(
@ -87,27 +99,134 @@ proc new*(
peermgr.switch.peerInfo.publicKey.skkey, peermgr.switch.peerInfo.privateKey.skkey,
)
var m = WakuMix(peerManager: peermgr, clusterId: clusterId, pubKey: mixPubKey)
var spamProtection: MixRlnSpamProtection = nil
var spamProtectionOpt = default(Opt[SpamProtection])
if enableSpamProtection:
let peerId = peermgr.switch.peerInfo.peerId
var spamProtectionConfig = defaultConfig()
spamProtectionConfig.keystorePath = "rln_keystore_" & $peerId & ".json"
spamProtectionConfig.keystorePassword = "mix-rln-password"
if userMessageLimit.isSome():
spamProtectionConfig.userMessageLimit = userMessageLimit.get()
spamProtection = newMixRlnSpamProtection(spamProtectionConfig).valueOr:
return err("failed to create spam protection: " & error)
spamProtection.setMerkleProofCallbacks(
makeFetchMerkleProof(),
makeFetchLatestRoots(),
)
spamProtectionOpt = Opt.some(SpamProtection(spamProtection))
var m = WakuMix(
peerManager: peermgr,
clusterId: clusterId,
pubKey: mixPubKey,
mixRlnSpamProtection: spamProtection,
publishMessage: publishMessage,
)
procCall MixProtocol(m).init(
localMixNodeInfo,
peermgr.switch,
spamProtection = spamProtectionOpt,
delayStrategy =
ExponentialDelayStrategy.new(meanDelayMs = 50, rng = crypto.newRng()),
ExponentialDelayStrategy.new(meanDelayMs = 100, rng = crypto.newRng()),
)
processBootNodes(bootnodes, peermgr, m)
if m.nodePool.len < minMixPoolSize:
warn "publishing with mix won't work until atleast 3 mix nodes in node pool"
return ok(m)
proc poolSize*(mix: WakuMix): int =
mix.nodePool.len
method start*(mix: WakuMix) =
proc setupSpamProtectionCallbacks(mix: WakuMix) =
if mix.publishMessage.isNil():
warn "PublishMessage callback not available, spam protection coordination disabled"
return
let publishCallback: PublishCallback = proc(
contentTopic: string, data: seq[byte]
) {.async.} =
let msg = WakuMessage(
payload: data,
contentTopic: contentTopic,
ephemeral: true,
timestamp: getNowInNanosecondTime(),
)
let res = await mix.publishMessage(msg)
if res.isErr():
warn "Failed to publish spam protection coordination message",
contentTopic = contentTopic, error = res.error
return
trace "Published spam protection coordination message", contentTopic = contentTopic
mix.mixRlnSpamProtection.setPublishCallback(publishCallback)
trace "Spam protection publish callback configured"
proc handleMessage*(
mix: WakuMix, pubsubTopic: PubsubTopic, message: WakuMessage
) {.async, gcsafe.} =
if mix.mixRlnSpamProtection.isNil():
return
let contentTopic = message.contentTopic
if contentTopic == mix.mixRlnSpamProtection.getProofMetadataContentTopic():
let res = mix.mixRlnSpamProtection.handleProofMetadata(message.payload)
if res.isErr:
warn "Failed to handle proof metadata", error = res.error
else:
trace "Handled proof metadata"
proc getSpamProtectionContentTopics*(mix: WakuMix): seq[string] =
if mix.mixRlnSpamProtection.isNil():
return @[]
return mix.mixRlnSpamProtection.getContentTopics()
method start*(mix: WakuMix) {.async.} =
info "starting waku mix protocol"
method stop*(mix: WakuMix) {.async.} =
discard
if not mix.mixRlnSpamProtection.isNil():
let initRes = await mix.mixRlnSpamProtection.init()
if initRes.isErr:
error "Failed to initialize spam protection", error = initRes.error
else:
mix.setupSpamProtectionCallbacks()
let startRes = await mix.mixRlnSpamProtection.start()
if startRes.isErr:
error "Failed to start spam protection", error = startRes.error
else:
let registerRes = await mix.mixRlnSpamProtection.registerSelf()
if registerRes.isErr:
error "Failed to register spam protection credentials",
error = registerRes.error
else:
debug "Registered spam protection credentials", index = registerRes.get()
if mix.nodePool.len > 0:
info "warming up mix connections", poolSize = mix.nodePool.len
var connected = 0
for peerId in mix.nodePool.peerIds():
let pubInfo = mix.nodePool.get(peerId)
if pubInfo.isSome:
let (pid, multiAddr, _, _) = pubInfo.get().get()
try:
discard await mix.switch.dial(pid, @[multiAddr], @[MixProtocolID])
connected.inc()
debug "mix connection established", peerId = pid
except CatchableError as e:
warn "failed to dial mix peer at startup", peerId = pid, error = e.msg
info "mix warmup complete", connections = connected, total = mix.nodePool.len
method stop*(mix: WakuMix) {.async.} =
if not mix.mixRlnSpamProtection.isNil():
await mix.mixRlnSpamProtection.stop()
debug "Spam protection stopped"
# Mix Protocol