mirror of https://github.com/logos-messaging/logos-messaging-nim.git
synced 2026-01-26 09:43:15 +00:00

Merge branch 'master' into feat/service-incentivization-poc

commit 921e651ba2
13  .github/ISSUE_TEMPLATE/bump_dependencies.md  (vendored)
@ -12,7 +12,6 @@ assignees: ''

Update `nwaku` "vendor" dependencies.

### Items to bump

- [ ] negentropy
- [ ] dnsclient.nim ( update to the latest tag version )
- [ ] nim-bearssl
- [ ] nimbus-build-system

@ -38,12 +37,12 @@ Update `nwaku` "vendor" dependencies.

- [ ] nim-sqlite3-abi ( update to the latest tag version )
- [ ] nim-stew
- [ ] nim-stint
- [ ] nim-taskpools
- [ ] nim-testutils
- [ ] nim-taskpools ( update to the latest tag version )
- [ ] nim-testutils ( update to the latest tag version )
- [ ] nim-toml-serialization
- [ ] nim-unicodedb
- [ ] nim-unittest2
- [ ] nim-web3
- [ ] nim-websock
- [ ] nim-unittest2 ( update to the latest tag version )
- [ ] nim-web3 ( update to the latest tag version )
- [ ] nim-websock ( update to the latest tag version )
- [ ] nim-zlib
- [ ] zerokit ( this should be kept in version `v0.5.1` )
- [ ] zerokit ( this should be kept in version `v0.7.0` )
8  .github/workflows/ci.yml  (vendored)
@ -119,12 +119,13 @@ jobs:
|
||||
sudo docker run --rm -d -e POSTGRES_PASSWORD=test123 -p 5432:5432 postgres:15.4-alpine3.18
|
||||
postgres_enabled=1
|
||||
fi
|
||||
|
||||
|
||||
export MAKEFLAGS="-j1"
|
||||
export NIMFLAGS="--colors:off -d:chronicles_colors:none"
|
||||
export USE_LIBBACKTRACE=0
|
||||
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test testwakunode2
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled test
|
||||
make V=1 LOG_LEVEL=DEBUG QUICK_AND_DIRTY_COMPILER=1 POSTGRES=$postgres_enabled testwakunode2
|
||||
|
||||
build-docker-image:
|
||||
needs: changes
|
||||
@ -140,14 +141,12 @@ jobs:
|
||||
|
||||
secrets: inherit
|
||||
|
||||
|
||||
js-waku-node:
|
||||
needs: build-docker-image
|
||||
uses: waku-org/js-waku/.github/workflows/test-node.yml@master
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node
|
||||
debug: waku*
|
||||
|
||||
js-waku-node-optional:
|
||||
needs: build-docker-image
|
||||
@ -155,7 +154,6 @@ jobs:
|
||||
with:
|
||||
nim_wakunode_image: ${{ needs.build-docker-image.outputs.image }}
|
||||
test_type: node-optional
|
||||
debug: waku*
|
||||
|
||||
lint:
|
||||
name: "Lint"
|
||||
|
||||
37  .github/workflows/windows-build.yml  (vendored)
@ -68,28 +68,6 @@ jobs:
|
||||
./build_all.bat
|
||||
cd ../../../..
|
||||
|
||||
- name: Building libunwind
|
||||
run: |
|
||||
cd vendor/nim-libbacktrace
|
||||
mkdir -p vendor/libunwind/build
|
||||
pushd vendor/libunwind
|
||||
|
||||
cmake -S runtimes \
|
||||
-DLLVM_ENABLE_RUNTIMES="libunwind" \
|
||||
-DLIBUNWIND_ENABLE_SHARED=OFF -DLIBUNWIND_ENABLE_STATIC=ON \
|
||||
-DLIBUNWIND_INCLUDE_DOCS=OFF -DLIBUNWIND_INSTALL_HEADERS=ON \
|
||||
-DCMAKE_INSTALL_PREFIX="$(pwd)/../install/usr" \
|
||||
-G "MinGW Makefiles" -B build
|
||||
|
||||
cd build
|
||||
mingw32-make VERBOSE=1 clean
|
||||
mingw32-make VERBOSE=1 unwind_static
|
||||
mingw32-make VERBOSE=1 install-unwind
|
||||
|
||||
popd
|
||||
mkdir -p install/usr/lib
|
||||
cp -r vendor/libunwind/build/lib/libunwind.a install/usr/lib/
|
||||
|
||||
- name: Building miniupnpc
|
||||
run: |
|
||||
cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc
|
||||
@ -103,12 +81,13 @@ jobs:
|
||||
make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1
|
||||
cd ../../../../
|
||||
|
||||
- name: Building wakunode2
|
||||
- name: Building wakunode2.exe
|
||||
run: |
|
||||
cd vendor/nim-libbacktrace
|
||||
cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib
|
||||
cd ../..
|
||||
make wakunode2 LOG_LEVEL=DEBUG V=3 -j8
|
||||
|
||||
- name: Building libwaku.dll
|
||||
run: |
|
||||
make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j
|
||||
|
||||
- name: Check Executable
|
||||
run: |
|
||||
@ -118,3 +97,9 @@ jobs:
|
||||
echo "Build failed: wakunode2.exe not found"
|
||||
exit 1
|
||||
fi
|
||||
if [ -f "./build/libwaku.dll" ]; then
|
||||
echo "libwaku.dll build successful"
|
||||
else
|
||||
echo "Build failed: libwaku.dll not found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
@ -7,6 +7,10 @@

**Info:** before upgrading to this version, make sure you delete the previous rln_tree folder, i.e.,
the one that is passed through this CLI flag: `--rln-relay-tree-path`.

### Features

* lightpush v3 ([#3279](https://github.com/waku-org/nwaku/pull/3279)) ([e0b563ff](https://github.com/waku-org/nwaku/commit/e0b563ffe5af20bd26d37cd9b4eb9ed9eb82ff80))
  Upgrade of the Waku Lightpush protocol with enhanced error handling. Read the specification [here](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md)

This release supports the following [libp2p protocols](https://docs.libp2p.io/concepts/protocols/):

| Protocol | Spec status | Protocol id |
| ---: | :---: | :--- |

@ -14,6 +18,7 @@ This release supports the following [libp2p protocols](https://docs.libp2p.io/co

| [`12/WAKU2-FILTER`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/12/filter.md) | `draft` | `/vac/waku/filter/2.0.0-beta1` <br />`/vac/waku/filter-subscribe/2.0.0-beta1` <br />`/vac/waku/filter-push/2.0.0-beta1` |
| [`13/WAKU2-STORE`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/13/store.md) | `draft` | `/vac/waku/store/2.0.0-beta4` |
| [`19/WAKU2-LIGHTPUSH`](https://github.com/vacp2p/rfc-index/blob/main/waku/standards/core/19/lightpush.md) | `draft` | `/vac/waku/lightpush/2.0.0-beta1` |
| [`WAKU2-LIGHTPUSH v3`](https://github.com/waku-org/specs/blob/master/standards/core/lightpush.md) | `draft` | `/vac/waku/lightpush/3.0.0` |
| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` |
| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/feat--waku-sync/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` |
@ -78,7 +78,7 @@ RUN make -j$(nproc)
|
||||
|
||||
|
||||
# Debug image
|
||||
FROM prod AS debug
|
||||
FROM prod AS debug-with-heaptrack
|
||||
|
||||
RUN apk add --no-cache gdb libunwind
|
||||
|
||||
|
||||
56  Makefile
@ -53,7 +53,19 @@ endif

# default target, because it's the first one that doesn't start with '.'
all: | wakunode2 example2 chat2 chat2bridge libwaku

test: | testcommon testwaku
TEST_FILE := $(word 2,$(MAKECMDGOALS))
TEST_NAME := $(wordlist 3,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS))

test:
ifeq ($(strip $(TEST_FILE)),)
	$(MAKE) testcommon
	$(MAKE) testwaku
else
	$(MAKE) compile-test $(TEST_FILE) $(TEST_NAME)
endif
# this prevents make from erroring on unknown targets like "Index"
%:
	@true

waku.nims:
	ln -s waku.nimble $@
@ -82,15 +94,18 @@ HEAPTRACKER ?= 0
|
||||
HEAPTRACKER_INJECT ?= 0
|
||||
ifeq ($(HEAPTRACKER), 1)
|
||||
# Needed to make nimbus-build-system use the Nim's 'heaptrack_support' branch
|
||||
DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support
|
||||
TARGET := heaptrack-build
|
||||
DOCKER_NIM_COMMIT := NIM_COMMIT=heaptrack_support_v2.0.12
|
||||
TARGET := debug-with-heaptrack
|
||||
NIM_COMMIT := heaptrack_support_v2.0.12
|
||||
|
||||
ifeq ($(HEAPTRACKER_INJECT), 1)
|
||||
# the Nim compiler will load 'libheaptrack_inject.so'
|
||||
HEAPTRACK_PARAMS := -d:heaptracker -d:heaptracker_inject
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker -d:heaptracker_inject
|
||||
else
|
||||
# the Nim compiler will load 'libheaptrack_preload.so'
|
||||
HEAPTRACK_PARAMS := -d:heaptracker
|
||||
NIM_PARAMS := $(NIM_PARAMS) -d:heaptracker
|
||||
endif
|
||||
|
||||
endif
|
||||
@ -109,11 +124,8 @@ ifeq (, $(shell which cargo))
|
||||
curl https://sh.rustup.rs -sSf | sh -s -- -y --default-toolchain stable
|
||||
endif
|
||||
|
||||
anvil: rustup
|
||||
ifeq (, $(shell which anvil 2> /dev/null))
|
||||
# Install Anvil if it's not installed
|
||||
./scripts/install_anvil.sh
|
||||
endif
|
||||
rln-deps: rustup
|
||||
./scripts/install_rln_tests_dependencies.sh
|
||||
|
||||
deps: | deps-common nat-libs waku.nims
|
||||
|
||||
@ -202,13 +214,14 @@ testcommon: | build deps
|
||||
##########
|
||||
.PHONY: testwaku wakunode2 testwakunode2 example2 chat2 chat2bridge liteprotocoltester
|
||||
|
||||
# install anvil only for the testwaku target
|
||||
testwaku: | build deps anvil librln
|
||||
# install rln-deps only for the testwaku target
|
||||
testwaku: | build deps rln-deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
$(ENV_SCRIPT) nim test -d:os=$(shell uname) $(NIM_PARAMS) waku.nims
|
||||
|
||||
wakunode2: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$@" && \
|
||||
\
|
||||
$(ENV_SCRIPT) nim wakunode2 $(NIM_PARAMS) waku.nims
|
||||
|
||||
benchmarks: | build deps librln
|
||||
@ -243,9 +256,10 @@ build/%: | build deps librln
|
||||
echo -e $(BUILD_MSG) "build/$*" && \
|
||||
$(ENV_SCRIPT) nim buildone $(NIM_PARAMS) waku.nims $*
|
||||
|
||||
test/%: | build deps librln
|
||||
echo -e $(BUILD_MSG) "test/$*" && \
|
||||
$(ENV_SCRIPT) nim testone $(NIM_PARAMS) waku.nims $*
|
||||
compile-test: | build deps librln
|
||||
echo -e $(BUILD_MSG) "$(TEST_FILE)" && \
|
||||
$(ENV_SCRIPT) nim buildTest $(NIM_PARAMS) waku.nims $(TEST_FILE) && \
|
||||
$(ENV_SCRIPT) nim execTest $(NIM_PARAMS) waku.nims $(TEST_FILE) "$(TEST_NAME)"
|
||||
|
||||
################
|
||||
## Waku tools ##
|
||||
@ -343,12 +357,12 @@ docker-image:
|
||||
docker-quick-image: MAKE_TARGET ?= wakunode2
|
||||
docker-quick-image: DOCKER_IMAGE_TAG ?= $(MAKE_TARGET)-$(GIT_VERSION)
|
||||
docker-quick-image: DOCKER_IMAGE_NAME ?= wakuorg/nwaku:$(DOCKER_IMAGE_TAG)
|
||||
docker-quick-image: NIM_PARAMS := $(NIM_PARAMS) -d:chronicles_colors:none -d:insecure -d:postgres --passL:$(LIBRLN_FILE) --passL:-lm
|
||||
docker-quick-image: | build deps librln wakunode2
|
||||
docker build \
|
||||
--build-arg="MAKE_TARGET=$(MAKE_TARGET)" \
|
||||
--tag $(DOCKER_IMAGE_NAME) \
|
||||
--file docker/binaries/Dockerfile.bn.amd64 \
|
||||
--target $(TARGET) \
|
||||
--file docker/binaries/Dockerfile.bn.local \
|
||||
.
|
||||
|
||||
docker-push:
|
||||
@ -397,14 +411,16 @@ docker-liteprotocoltester-push:
|
||||
|
||||
STATIC ?= 0
|
||||
|
||||
|
||||
libwaku: | build deps librln
|
||||
rm -f build/libwaku*
|
||||
rm -f build/libwaku*
|
||||
|
||||
ifeq ($(STATIC), 1)
|
||||
echo -e $(BUILD_MSG) "build/$@.a" && \
|
||||
$(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
|
||||
echo -e $(BUILD_MSG) "build/$@.a" && $(ENV_SCRIPT) nim libwakuStatic $(NIM_PARAMS) waku.nims
|
||||
else ifeq ($(detected_OS),Windows)
|
||||
echo -e $(BUILD_MSG) "build/$@.dll" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
|
||||
else
|
||||
echo -e $(BUILD_MSG) "build/$@.so" && \
|
||||
$(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
|
||||
echo -e $(BUILD_MSG) "build/$@.so" && $(ENV_SCRIPT) nim libwakuDynamic $(NIM_PARAMS) waku.nims
|
||||
endif
|
||||
|
||||
#####################
|
||||
|
||||
@ -87,7 +87,7 @@ pacman -S --noconfirm --needed mingw-w64-x86_64-python
|
||||
#### 3. Build Wakunode
|
||||
- Open Git Bash as administrator
|
||||
- clone nwaku and cd nwaku
|
||||
- Execute: `./scripts/build_wakunode_windows.sh`
|
||||
- Execute: `./scripts/build_windows.sh`
|
||||
|
||||
#### 4. Troubleshooting
|
||||
If `wakunode2.exe` isn't generated:
|
||||
|
||||
@ -11,7 +11,6 @@ import
|
||||
confutils,
|
||||
chronicles,
|
||||
chronos,
|
||||
stew/shims/net as stewNet,
|
||||
eth/keys,
|
||||
bearssl,
|
||||
stew/[byteutils, results],
|
||||
@ -381,7 +380,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
if conf.relay:
|
||||
let shards =
|
||||
conf.shards.mapIt(RelayShard(clusterId: conf.clusterId, shardId: uint16(it)))
|
||||
(await node.mountRelay(shards)).isOkOr:
|
||||
(await node.mountRelay()).isOkOr:
|
||||
echo "failed to mount relay: " & error
|
||||
return
|
||||
|
||||
@ -536,7 +535,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
chat.printReceivedMessage(msg)
|
||||
|
||||
node.subscribe(
|
||||
(kind: PubsubSub, topic: DefaultPubsubTopic), some(WakuRelayHandler(handler))
|
||||
(kind: PubsubSub, topic: DefaultPubsubTopic), WakuRelayHandler(handler)
|
||||
).isOkOr:
|
||||
error "failed to subscribe to pubsub topic",
|
||||
topic = DefaultPubsubTopic, error = error
|
||||
@ -559,7 +558,7 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
let rlnConf = WakuRlnConfig(
|
||||
dynamic: conf.rlnRelayDynamic,
|
||||
credIndex: conf.rlnRelayCredIndex,
|
||||
chainId: conf.rlnRelayChainId,
|
||||
chainId: UInt256.fromBytesBE(conf.rlnRelayChainId.toBytesBE()),
|
||||
ethClientUrls: conf.ethClientUrls.mapIt(string(it)),
|
||||
creds: some(
|
||||
RlnRelayCreds(
|
||||
@ -591,9 +590,6 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
|
||||
|
||||
await chat.readWriteLoop()
|
||||
|
||||
if conf.keepAlive:
|
||||
node.startKeepalive()
|
||||
|
||||
runForever()
|
||||
|
||||
proc main(rng: ref HmacDrbgContext) {.async.} =
|
||||
|
||||
@ -23,6 +23,7 @@ import
|
||||
waku_store,
|
||||
factory/builder,
|
||||
common/utils/matterbridge_client,
|
||||
common/rate_limit/setting,
|
||||
],
|
||||
# Chat 2 imports
|
||||
../chat2/chat2,
|
||||
@ -232,7 +233,7 @@ proc start*(cmb: Chat2MatterBridge) {.async.} =
|
||||
except:
|
||||
error "exception in relayHandler: " & getCurrentExceptionMsg()
|
||||
|
||||
cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), some(relayHandler)).isOkOr:
|
||||
cmb.nodev2.subscribe((kind: PubsubSub, topic: DefaultPubsubTopic), relayHandler).isOkOr:
|
||||
error "failed to subscribe to relay", topic = DefaultPubsubTopic, error = error
|
||||
return
|
||||
|
||||
|
||||
24  apps/liteprotocoltester/legacy_publisher.nim  (Normal file)

@ -0,0 +1,24 @@
import chronos, results, options
import waku/[waku_node, waku_core]
import publisher_base

type LegacyPublisher* = ref object of PublisherBase

proc new*(T: type LegacyPublisher, wakuNode: WakuNode): T =
  if isNil(wakuNode.wakuLegacyLightpushClient):
    wakuNode.mountLegacyLightPushClient()

  return LegacyPublisher(wakuNode: wakuNode)

method send*(
    self: LegacyPublisher,
    topic: PubsubTopic,
    message: WakuMessage,
    servicePeer: RemotePeerInfo,
): Future[Result[void, string]] {.async.} =
  # when error it must return original error desc due the text is used for distinction between error types in metrics.
  discard (
    await self.wakuNode.legacyLightpushPublish(some(topic), message, servicePeer)
  ).valueOr:
    return err(error)
  return ok()
@ -14,13 +14,11 @@ import
|
||||
waku/[
|
||||
common/enr,
|
||||
common/logging,
|
||||
factory/waku,
|
||||
factory/waku as waku_factory,
|
||||
factory/external_config,
|
||||
waku_node,
|
||||
node/health_monitor,
|
||||
node/waku_metrics,
|
||||
node/peer_manager,
|
||||
waku_api/rest/builder as rest_server_builder,
|
||||
waku_lightpush/common,
|
||||
waku_filter_v2,
|
||||
waku_peer_exchange/protocol,
|
||||
@ -28,8 +26,8 @@ import
|
||||
waku_core/multiaddrstr,
|
||||
],
|
||||
./tester_config,
|
||||
./lightpush_publisher,
|
||||
./filter_subscriber,
|
||||
./publisher,
|
||||
./receiver,
|
||||
./diagnose_connections,
|
||||
./service_peer_management
|
||||
|
||||
@ -49,7 +47,7 @@ when isMainModule:
|
||||
## 5. Start monitoring tools and external interfaces
|
||||
## 6. Setup graceful shutdown hooks
|
||||
|
||||
const versionString = "version / git commit hash: " & waku.git_version
|
||||
const versionString = "version / git commit hash: " & waku_factory.git_version
|
||||
|
||||
let confRes = LiteProtocolTesterConf.load(version = versionString)
|
||||
if confRes.isErr():
|
||||
@ -61,7 +59,7 @@ when isMainModule:
|
||||
## Logging setup
|
||||
logging.setupLog(conf.logLevel, conf.logFormat)
|
||||
|
||||
info "Running Lite Protocol Tester node", version = waku.git_version
|
||||
info "Running Lite Protocol Tester node", version = waku_factory.git_version
|
||||
logConfig(conf)
|
||||
|
||||
##Prepare Waku configuration
|
||||
@ -69,13 +67,13 @@ when isMainModule:
|
||||
## - override according to tester functionality
|
||||
##
|
||||
|
||||
var wakuConf: WakuNodeConf
|
||||
var wakuNodeConf: WakuNodeConf
|
||||
|
||||
if conf.configFile.isSome():
|
||||
try:
|
||||
var configFile {.threadvar.}: InputFile
|
||||
configFile = conf.configFile.get()
|
||||
wakuConf = WakuNodeConf.load(
|
||||
wakuNodeConf = WakuNodeConf.load(
|
||||
version = versionString,
|
||||
printUsage = false,
|
||||
secondarySources = proc(
|
||||
@ -88,82 +86,54 @@ when isMainModule:
|
||||
error "Loading Waku configuration failed", error = getCurrentExceptionMsg()
|
||||
quit(QuitFailure)
|
||||
|
||||
wakuConf.logLevel = conf.logLevel
|
||||
wakuConf.logFormat = conf.logFormat
|
||||
wakuConf.nat = conf.nat
|
||||
wakuConf.maxConnections = 500
|
||||
wakuConf.restAddress = conf.restAddress
|
||||
wakuConf.restPort = conf.restPort
|
||||
wakuConf.restAllowOrigin = conf.restAllowOrigin
|
||||
wakuNodeConf.logLevel = conf.logLevel
|
||||
wakuNodeConf.logFormat = conf.logFormat
|
||||
wakuNodeConf.nat = conf.nat
|
||||
wakuNodeConf.maxConnections = 500
|
||||
wakuNodeConf.restAddress = conf.restAddress
|
||||
wakuNodeConf.restPort = conf.restPort
|
||||
wakuNodeConf.restAllowOrigin = conf.restAllowOrigin
|
||||
|
||||
wakuConf.dnsAddrs = true
|
||||
wakuConf.dnsAddrsNameServers = @[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
|
||||
wakuNodeConf.dnsAddrsNameServers =
|
||||
@[parseIpAddress("8.8.8.8"), parseIpAddress("1.1.1.1")]
|
||||
|
||||
wakuConf.shards = @[conf.shard]
|
||||
wakuConf.contentTopics = conf.contentTopics
|
||||
wakuConf.clusterId = conf.clusterId
|
||||
wakuNodeConf.shards = @[conf.shard]
|
||||
wakuNodeConf.contentTopics = conf.contentTopics
|
||||
wakuNodeConf.clusterId = conf.clusterId
|
||||
## TODO: Depending on the tester needs we might extend here with shards, clusterId, etc...
|
||||
|
||||
wakuConf.metricsServer = true
|
||||
wakuConf.metricsServerAddress = parseIpAddress("0.0.0.0")
|
||||
wakuConf.metricsServerPort = conf.metricsPort
|
||||
wakuNodeConf.metricsServer = true
|
||||
wakuNodeConf.metricsServerAddress = parseIpAddress("0.0.0.0")
|
||||
wakuNodeConf.metricsServerPort = conf.metricsPort
|
||||
|
||||
# If bootstrap option is chosen we expect our clients will not mounted
|
||||
# so we will mount PeerExchange manually to gather possible service peers,
|
||||
# if got some we will mount the client protocols afterward.
|
||||
wakuConf.peerExchange = false
|
||||
wakuConf.relay = false
|
||||
wakuConf.filter = false
|
||||
wakuConf.lightpush = false
|
||||
wakuConf.store = false
|
||||
wakuNodeConf.peerExchange = false
|
||||
wakuNodeConf.relay = false
|
||||
wakuNodeConf.filter = false
|
||||
wakuNodeConf.lightpush = false
|
||||
wakuNodeConf.store = false
|
||||
|
||||
wakuConf.rest = false
|
||||
wakuConf.relayServiceRatio = "40:60"
|
||||
wakuNodeConf.rest = false
|
||||
wakuNodeConf.relayServiceRatio = "40:60"
|
||||
|
||||
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
|
||||
# It will always be called from main thread anyway.
|
||||
# Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety
|
||||
var nodeHealthMonitor {.threadvar.}: WakuNodeHealthMonitor
|
||||
nodeHealthMonitor = WakuNodeHealthMonitor()
|
||||
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
|
||||
|
||||
let restServer = rest_server_builder.startRestServerEssentials(
|
||||
nodeHealthMonitor, wakuConf
|
||||
).valueOr:
|
||||
error "Starting esential REST server failed.", error = $error
|
||||
let wakuConf = wakuNodeConf.toWakuConf().valueOr:
|
||||
error "Issue converting toWakuConf", error = $error
|
||||
quit(QuitFailure)
|
||||
|
||||
var wakuApp = Waku.new(wakuConf).valueOr:
|
||||
var waku = Waku.new(wakuConf).valueOr:
|
||||
error "Waku initialization failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
wakuApp.restServer = restServer
|
||||
|
||||
nodeHealthMonitor.setNode(wakuApp.node)
|
||||
|
||||
(waitFor startWaku(addr wakuApp)).isOkOr:
|
||||
(waitFor startWaku(addr waku)).isOkOr:
|
||||
error "Starting waku failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
rest_server_builder.startRestServerProtocolSupport(
|
||||
restServer, wakuApp.node, wakuApp.wakuDiscv5, wakuConf
|
||||
).isOkOr:
|
||||
error "Starting protocols support REST server failed.", error = $error
|
||||
quit(QuitFailure)
|
||||
|
||||
wakuApp.metricsServer = waku_metrics.startMetricsServerAndLogging(wakuConf).valueOr:
|
||||
error "Starting monitoring and external interfaces failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
nodeHealthMonitor.setOverallHealth(HealthStatus.READY)
|
||||
|
||||
debug "Setting up shutdown hooks"
|
||||
## Setup shutdown hooks for this process.
|
||||
## Stop node gracefully on shutdown.
|
||||
|
||||
proc asyncStopper(wakuApp: Waku) {.async: (raises: [Exception]).} =
|
||||
nodeHealthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
|
||||
await wakuApp.stop()
|
||||
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
|
||||
await waku.stop()
|
||||
quit(QuitSuccess)
|
||||
|
||||
# Handle Ctrl-C SIGINT
|
||||
@ -172,7 +142,7 @@ when isMainModule:
|
||||
# workaround for https://github.com/nim-lang/Nim/issues/4057
|
||||
setupForeignThreadGc()
|
||||
notice "Shutting down after receiving SIGINT"
|
||||
asyncSpawn asyncStopper(wakuApp)
|
||||
asyncSpawn asyncStopper(waku)
|
||||
|
||||
setControlCHook(handleCtrlC)
|
||||
|
||||
@ -180,7 +150,7 @@ when isMainModule:
|
||||
when defined(posix):
|
||||
proc handleSigterm(signal: cint) {.noconv.} =
|
||||
notice "Shutting down after receiving SIGTERM"
|
||||
asyncSpawn asyncStopper(wakuApp)
|
||||
asyncSpawn asyncStopper(waku)
|
||||
|
||||
c_signal(ansi_c.SIGTERM, handleSigterm)
|
||||
|
||||
@ -193,7 +163,7 @@ when isMainModule:
|
||||
# Not available in -d:release mode
|
||||
writeStackTrace()
|
||||
|
||||
waitFor wakuApp.stop()
|
||||
waitFor waku.stop()
|
||||
quit(QuitFailure)
|
||||
|
||||
c_signal(ansi_c.SIGSEGV, handleSigsegv)
|
||||
@ -212,7 +182,7 @@ when isMainModule:
|
||||
if conf.serviceNode.len == 0:
|
||||
if conf.bootstrapNode.len > 0:
|
||||
info "Bootstrapping with PeerExchange to gather random service node"
|
||||
let futForServiceNode = pxLookupServiceNode(wakuApp.node, conf)
|
||||
let futForServiceNode = pxLookupServiceNode(waku.node, conf)
|
||||
if not (waitFor futForServiceNode.withTimeout(20.minutes)):
|
||||
error "Service node not found in time via PX"
|
||||
quit(QuitFailure)
|
||||
@ -222,7 +192,7 @@ when isMainModule:
|
||||
quit(QuitFailure)
|
||||
|
||||
serviceNodePeerInfo = selectRandomServicePeer(
|
||||
wakuApp.node.peerManager, none(RemotePeerInfo), codec
|
||||
waku.node.peerManager, none(RemotePeerInfo), codec
|
||||
).valueOr:
|
||||
error "Service node selection failed"
|
||||
quit(QuitFailure)
|
||||
@ -237,11 +207,11 @@ when isMainModule:
|
||||
|
||||
info "Service node to be used", serviceNode = $serviceNodePeerInfo
|
||||
|
||||
logSelfPeers(wakuApp.node.peerManager)
|
||||
logSelfPeers(waku.node.peerManager)
|
||||
|
||||
if conf.testFunc == TesterFunctionality.SENDER:
|
||||
setupAndPublish(wakuApp.node, conf, serviceNodePeerInfo)
|
||||
setupAndPublish(waku.node, conf, serviceNodePeerInfo)
|
||||
else:
|
||||
setupAndSubscribe(wakuApp.node, conf, serviceNodePeerInfo)
|
||||
setupAndListen(waku.node, conf, serviceNodePeerInfo)
|
||||
|
||||
runForever()
|
||||
|
||||
@ -24,8 +24,8 @@ def run_tester_node(predefined_test_env):
|
||||
return os.system(script_cmd)
|
||||
|
||||
if __name__ == "__main__":
|
||||
if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER"]:
|
||||
print("Error: First argument must be either 'RECEIVER' or 'SENDER'")
|
||||
if len(sys.argv) < 2 or sys.argv[1] not in ["RECEIVER", "SENDER", "SENDERV3"]:
|
||||
print("Error: First argument must be either 'RECEIVER' or 'SENDER' or 'SENDERV3'")
|
||||
sys.exit(1)
|
||||
|
||||
predefined_test_env_file = '/usr/bin/infra.env'
|
||||
|
||||
@ -21,14 +21,17 @@ import
|
||||
./tester_message,
|
||||
./lpt_metrics,
|
||||
./diagnose_connections,
|
||||
./service_peer_management
|
||||
./service_peer_management,
|
||||
./publisher_base,
|
||||
./legacy_publisher,
|
||||
./v3_publisher
|
||||
|
||||
randomize()
|
||||
|
||||
type SizeRange* = tuple[min: uint64, max: uint64]
|
||||
|
||||
var RANDOM_PALYLOAD {.threadvar.}: seq[byte]
|
||||
RANDOM_PALYLOAD = urandom(1024 * 1024)
|
||||
var RANDOM_PAYLOAD {.threadvar.}: seq[byte]
|
||||
RANDOM_PAYLOAD = urandom(1024 * 1024)
|
||||
# 1MiB of random payload to be used to extend message
|
||||
|
||||
proc prepareMessage(
|
||||
@ -59,9 +62,8 @@ proc prepareMessage(
|
||||
if renderSize < len(contentPayload).uint64:
|
||||
renderSize = len(contentPayload).uint64
|
||||
|
||||
let finalPayload = concat(
|
||||
contentPayload, RANDOM_PALYLOAD[0 .. renderSize - len(contentPayload).uint64]
|
||||
)
|
||||
let finalPayload =
|
||||
concat(contentPayload, RANDOM_PAYLOAD[0 .. renderSize - len(contentPayload).uint64])
|
||||
let message = WakuMessage(
|
||||
payload: finalPayload, # content of the message
|
||||
contentTopic: contentTopic, # content topic to publish to
|
||||
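prepareMessage extends each test message with a slice of a pre-generated 1 MiB random buffer (the `RANDOM_PAYLOAD` seen in the hunk above) so the payload reaches a requested size. A minimal, self-contained sketch of that padding step, with illustrative names; the real code draws the target size from a configured range and uses `urandom` for the buffer:

```nim
import std/sequtils

# Stand-in for `urandom(1024 * 1024)`: one shared buffer used to pad every message.
let randomPad = newSeqWith(1024 * 1024, byte(0))

proc padToSize(content: seq[byte], targetSize: int): seq[byte] =
  # Never shrink below the actual content, then top up from the shared buffer.
  let target = max(targetSize, content.len)
  concat(content, randomPad[0 ..< target - content.len])

assert padToSize(@[byte 1, 2, 3], 10).len == 10
assert padToSize(@[byte 1, 2, 3], 2).len == 3
```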
@ -108,6 +110,7 @@ proc reportSentMessages() =
|
||||
|
||||
proc publishMessages(
|
||||
wakuNode: WakuNode,
|
||||
publisher: PublisherBase,
|
||||
servicePeer: RemotePeerInfo,
|
||||
lightpushPubsubTopic: PubsubTopic,
|
||||
lightpushContentTopic: ContentTopic,
|
||||
@ -148,9 +151,7 @@ proc publishMessages(
|
||||
|
||||
let publishStartTime = Moment.now()
|
||||
|
||||
let wlpRes = await wakuNode.legacyLightpushPublish(
|
||||
some(lightpushPubsubTopic), message, actualServicePeer
|
||||
)
|
||||
let wlpRes = await publisher.send(lightpushPubsubTopic, message, actualServicePeer)
|
||||
|
||||
let publishDuration = Moment.now() - publishStartTime
|
||||
|
||||
@ -213,10 +214,13 @@ proc publishMessages(
|
||||
proc setupAndPublish*(
|
||||
wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
|
||||
) =
|
||||
if isNil(wakuNode.wakuLightpushClient):
|
||||
# if we have not yet initialized lightpush client, then do it as the only way we can get here is
|
||||
# by having a service peer discovered.
|
||||
wakuNode.mountLegacyLightPushClient()
|
||||
var publisher: PublisherBase
|
||||
if conf.lightpushVersion == LightpushVersion.LEGACY:
|
||||
info "Using legacy lightpush protocol for publishing messages"
|
||||
publisher = LegacyPublisher.new(wakuNode)
|
||||
else:
|
||||
info "Using lightpush v3 protocol for publishing messages"
|
||||
publisher = V3Publisher.new(wakuNode)
|
||||
|
||||
# give some time to receiver side to set up
|
||||
let waitTillStartTesting = conf.startPublishingAfter.seconds
|
||||
@ -257,6 +261,7 @@ proc setupAndPublish*(
|
||||
# Start maintaining subscription
|
||||
asyncSpawn publishMessages(
|
||||
wakuNode,
|
||||
publisher,
|
||||
servicePeer,
|
||||
conf.getPubsubTopic(),
|
||||
conf.contentTopics[0],
|
||||
14  apps/liteprotocoltester/publisher_base.nim  (Normal file)

@ -0,0 +1,14 @@
import chronos, results
import waku/[waku_node, waku_core]

type PublisherBase* = ref object of RootObj
  wakuNode*: WakuNode

method send*(
    self: PublisherBase,
    topic: PubsubTopic,
    message: WakuMessage,
    servicePeer: RemotePeerInfo,
): Future[Result[void, string]] {.base, async.} =
  discard
  # when error it must return original error desc due the text is used for distinction between error types in metrics.
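publisher_base.nim defines a `{.base.}` method that legacy_publisher.nim and v3_publisher.nim override, so the lite protocol tester can publish through either lightpush version behind one interface. A minimal, self-contained sketch of that dispatch pattern; the `Demo*` names are illustrative, and the real methods are async and call into a WakuNode:

```nim
import results

type
  DemoPublisherBase = ref object of RootObj
  DemoLegacyPublisher = ref object of DemoPublisherBase
  DemoV3Publisher = ref object of DemoPublisherBase

method send(self: DemoPublisherBase, payload: string): Result[void, string] {.base.} =
  # Subclasses override this; the base is never used directly.
  return err("send not implemented")

method send(self: DemoLegacyPublisher, payload: string): Result[void, string] =
  # Stand-in for legacyLightpushPublish.
  return ok()

method send(self: DemoV3Publisher, payload: string): Result[void, string] =
  # Stand-in for lightpushPublish; error strings feed the tester's metrics.
  if payload.len == 0:
    return err("dial_failure")
  return ok()

when isMainModule:
  var publishers: seq[DemoPublisherBase]
  publishers.add DemoLegacyPublisher()
  publishers.add DemoV3Publisher()
  for p in publishers:
    echo p.send("hello").isOk   # true, true
```

Callers such as publishMessages only hold a PublisherBase reference, which is why switching between legacy and v3 needs no change at the call site.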
@ -116,7 +116,7 @@ proc maintainSubscription(
|
||||
|
||||
await sleepAsync(30.seconds) # Subscription maintenance interval
|
||||
|
||||
proc setupAndSubscribe*(
|
||||
proc setupAndListen*(
|
||||
wakuNode: WakuNode, conf: LiteProtocolTesterConf, servicePeer: RemotePeerInfo
|
||||
) =
|
||||
if isNil(wakuNode.wakuFilterClient):
|
||||
@ -25,7 +25,12 @@ fi
|
||||
|
||||
FUNCTION=$2
|
||||
if [ "${FUNCTION}" = "SENDER" ]; then
|
||||
FUNCTION=--test-func=SENDER
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
|
||||
SERVICENAME=lightpush-service
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "SENDERV3" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=V3"
|
||||
SERVICENAME=lightpush-service
|
||||
fi
|
||||
|
||||
|
||||
@ -26,7 +26,15 @@ fi
|
||||
|
||||
FUNCTION=$2
|
||||
if [ "${FUNCTION}" = "SENDER" ]; then
|
||||
FUNCTION=--test-func=SENDER
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "SENDERV3" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=V3"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
|
||||
@ -26,7 +26,15 @@ fi
|
||||
|
||||
FUNCTION=$2
|
||||
if [ "${FUNCTION}" = "SENDER" ]; then
|
||||
FUNCTION=--test-func=SENDER
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=LEGACY"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
METRICS_PORT=--metrics-port="${PUBLISHER_METRICS_PORT:-8003}"
|
||||
fi
|
||||
|
||||
if [ "${FUNCTION}" = "SENDERV3" ]; then
|
||||
FUNCTION="--test-func=SENDER --lightpush-version=V3"
|
||||
SERIVCE_NODE_ADDR=${LIGHTPUSH_SERVICE_PEER:-${LIGHTPUSH_BOOTSTRAP:-}}
|
||||
NODE_ARG=${LIGHTPUSH_SERVICE_PEER:+--service-node="${LIGHTPUSH_SERVICE_PEER}"}
|
||||
NODE_ARG=${NODE_ARG:---bootstrap-node="${LIGHTPUSH_BOOTSTRAP}"}
|
||||
|
||||
@ -158,9 +158,7 @@ proc tryCallAllPxPeers*(
|
||||
proc pxLookupServiceNode*(
|
||||
node: WakuNode, conf: LiteProtocolTesterConf
|
||||
): Future[Result[bool, void]] {.async.} =
|
||||
var codec: string = WakuLightPushCodec
|
||||
if conf.testFunc == TesterFunctionality.RECEIVER:
|
||||
codec = WakuFilterSubscribeCodec
|
||||
let codec: string = conf.getCodec()
|
||||
|
||||
if node.wakuPeerExchange.isNil():
|
||||
let peerExchangeNode = translateToRemotePeerInfo(conf.bootstrapNode).valueOr:
|
||||
|
||||
@ -33,6 +33,10 @@ type TesterFunctionality* = enum
|
||||
SENDER # pumps messages to the network
|
||||
RECEIVER # gather and analyze messages from the network
|
||||
|
||||
type LightpushVersion* = enum
|
||||
LEGACY # legacy lightpush protocol
|
||||
V3 # lightpush v3 protocol
|
||||
|
||||
type LiteProtocolTesterConf* = object
|
||||
configFile* {.
|
||||
desc:
|
||||
@ -80,6 +84,12 @@ type LiteProtocolTesterConf* = object
|
||||
name: "test-func"
|
||||
.}: TesterFunctionality
|
||||
|
||||
lightpushVersion* {.
|
||||
desc: "Version of the sender to use. Supported values: legacy, v3.",
|
||||
defaultValue: LightpushVersion.LEGACY,
|
||||
name: "lightpush-version"
|
||||
.}: LightpushVersion
|
||||
|
||||
numMessages* {.
|
||||
desc: "Number of messages to send.", defaultValue: 120, name: "num-messages"
|
||||
.}: uint32
|
||||
@ -190,4 +200,14 @@ proc load*(T: type LiteProtocolTesterConf, version = ""): ConfResult[T] =

proc getPubsubTopic*(conf: LiteProtocolTesterConf): PubsubTopic =
  return $RelayShard(clusterId: conf.clusterId, shardId: conf.shard)

proc getCodec*(conf: LiteProtocolTesterConf): string =
  return
    if conf.testFunc == TesterFunctionality.RECEIVER:
      WakuFilterSubscribeCodec
    else:
      if conf.lightpushVersion == LightpushVersion.LEGACY:
        WakuLegacyLightPushCodec
      else:
        WakuLightPushCodec

{.pop.}
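getCodec maps the tester role and the configured lightpush version onto a libp2p protocol id. A self-contained sketch of the same selection, using the codec strings from the protocol table earlier on this page; the enum and proc names are illustrative:

```nim
type
  DemoRole = enum SENDER, RECEIVER
  DemoLpVersion = enum LEGACY, V3

func pickCodec(role: DemoRole, version: DemoLpVersion): string =
  # RECEIVER subscribes via filter; SENDER publishes via one of the two
  # lightpush versions.
  if role == RECEIVER:
    return "/vac/waku/filter-subscribe/2.0.0-beta1"
  if version == LEGACY:
    return "/vac/waku/lightpush/2.0.0-beta1"
  return "/vac/waku/lightpush/3.0.0"

assert pickCodec(SENDER, V3) == "/vac/waku/lightpush/3.0.0"
assert pickCodec(RECEIVER, LEGACY) == "/vac/waku/filter-subscribe/2.0.0-beta1"
```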
29  apps/liteprotocoltester/v3_publisher.nim  (Normal file)

@ -0,0 +1,29 @@
import results, options, chronos
import waku/[waku_node, waku_core, waku_lightpush]
import publisher_base

type V3Publisher* = ref object of PublisherBase

proc new*(T: type V3Publisher, wakuNode: WakuNode): T =
  if isNil(wakuNode.wakuLightpushClient):
    wakuNode.mountLightPushClient()

  return V3Publisher(wakuNode: wakuNode)

method send*(
    self: V3Publisher,
    topic: PubsubTopic,
    message: WakuMessage,
    servicePeer: RemotePeerInfo,
): Future[Result[void, string]] {.async.} =
  # when error it must return original error desc due the text is used for distinction between error types in metrics.
  discard (
    await self.wakuNode.lightpushPublish(some(topic), message, some(servicePeer))
  ).valueOr:
    if error.code == NO_PEERS_TO_RELAY and
        error.desc != some("No peers for topic, skipping publish"):
      # TODO: We need better separation of errors happening on the client side or the server side.
      return err("dial_failure")
    else:
      return err($error.code)
  return ok()
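Both publishers lean on the `valueOr` idiom from nim-results: unwrap a Result, and on failure run a block in which `error` is injected so the failure can be rewrapped for the caller. A small self-contained sketch of the idiom with illustrative names:

```nim
import results

proc lookupPeer(name: string): Result[string, string] =
  # Stand-in for a real operation that can fail (e.g. a lightpush publish).
  if name.len == 0:
    return err("no peers to relay")
  return ok("peer-" & name)

proc publish(name: string): Result[void, string] =
  # `error` is injected inside the valueOr block, mirroring how send() rewraps a
  # lightpush failure into its own error string for the tester's metrics.
  let peer = lookupPeer(name).valueOr:
    return err("publish failed: " & error)
  doAssert peer.len > 0   # pretend to publish via `peer`
  return ok()

assert publish("alice").isOk
assert publish("").error == "publish failed: no peers to relay"
```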
@ -554,7 +554,7 @@ proc subscribeAndHandleMessages(
|
||||
else:
|
||||
msgPerContentTopic[msg.contentTopic] = 1
|
||||
|
||||
node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
|
||||
node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr:
|
||||
error "failed to subscribe to pubsub topic", pubsubTopic, error
|
||||
quit(1)
|
||||
|
||||
@ -570,17 +570,18 @@ when isMainModule:
|
||||
info "cli flags", conf = conf
|
||||
|
||||
if conf.clusterId == 1:
|
||||
let twnClusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
|
||||
|
||||
conf.bootstrapNodes = twnClusterConf.discv5BootstrapNodes
|
||||
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
|
||||
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
|
||||
conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
|
||||
conf.numShardsInNetwork = twnClusterConf.numShardsInNetwork
|
||||
conf.bootstrapNodes = twnNetworkConf.discv5BootstrapNodes
|
||||
conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
|
||||
conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
|
||||
conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
|
||||
conf.numShardsInNetwork = twnNetworkConf.shardingConf.numShardsInCluster
|
||||
|
||||
if conf.shards.len == 0:
|
||||
conf.shards = toSeq(uint16(0) .. uint16(twnClusterConf.numShardsInNetwork - 1))
|
||||
conf.shards =
|
||||
toSeq(uint16(0) .. uint16(twnNetworkConf.shardingConf.numShardsInCluster - 1))
|
||||
|
||||
if conf.logLevel != LogLevel.NONE:
|
||||
setLogLevel(conf.logLevel)
|
||||
|
||||
@ -5,7 +5,6 @@ import
|
||||
chronos,
|
||||
std/strutils,
|
||||
results,
|
||||
stew/shims/net,
|
||||
regex
|
||||
|
||||
type EthRpcUrl* = distinct string
|
||||
|
||||
@ -3,7 +3,6 @@
|
||||
import
|
||||
std/json,
|
||||
results,
|
||||
stew/shims/net,
|
||||
chronicles,
|
||||
chronicles/topics_registry,
|
||||
chronos,
|
||||
|
||||
@ -9,7 +9,7 @@ x-logging: &logging
|
||||
x-rln-relay-eth-client-address: &rln_relay_eth_client_address ${RLN_RELAY_ETH_CLIENT_ADDRESS:-} # Add your RLN_RELAY_ETH_CLIENT_ADDRESS after the "-"
|
||||
|
||||
x-rln-environment: &rln_env
|
||||
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8}
|
||||
RLN_RELAY_CONTRACT_ADDRESS: ${RLN_RELAY_CONTRACT_ADDRESS:-0xB9cd878C90E49F797B4431fBF4fb333108CB90e6}
|
||||
RLN_RELAY_CRED_PATH: ${RLN_RELAY_CRED_PATH:-} # Optional: Add your RLN_RELAY_CRED_PATH after the "-"
|
||||
RLN_RELAY_CRED_PASSWORD: ${RLN_RELAY_CRED_PASSWORD:-} # Optional: Add your RLN_RELAY_CRED_PASSWORD after the "-"
|
||||
|
||||
|
||||
@ -24,7 +24,7 @@ fi
|
||||
docker run -v $(pwd)/keystore:/keystore/:Z harbor.status.im/wakuorg/nwaku:v0.30.1 generateRlnKeystore \
|
||||
--rln-relay-eth-client-address=${RLN_RELAY_ETH_CLIENT_ADDRESS} \
|
||||
--rln-relay-eth-private-key=${ETH_TESTNET_KEY} \
|
||||
--rln-relay-eth-contract-address=0xfe7a9eabcE779a090FD702346Fd0bFAc02ce6Ac8 \
|
||||
--rln-relay-eth-contract-address=0xB9cd878C90E49F797B4431fBF4fb333108CB90e6 \
|
||||
--rln-relay-cred-path=/keystore/keystore.json \
|
||||
--rln-relay-cred-password="${RLN_RELAY_CRED_PASSWORD}" \
|
||||
--rln-relay-user-message-limit=20 \
|
||||
|
||||
@ -2,7 +2,6 @@ import
|
||||
std/[strutils, sequtils, tables, strformat],
|
||||
confutils,
|
||||
chronos,
|
||||
stew/shims/net,
|
||||
chronicles/topics_registry,
|
||||
os
|
||||
import
|
||||
|
||||
@ -16,7 +16,6 @@ import
|
||||
factory/external_config,
|
||||
factory/waku,
|
||||
node/health_monitor,
|
||||
node/waku_metrics,
|
||||
waku_api/rest/builder as rest_server_builder,
|
||||
]
|
||||
|
||||
@ -53,69 +52,21 @@ when isMainModule:
|
||||
let conf = wakuNodeConf.toInspectRlnDbConf()
|
||||
doInspectRlnDb(conf)
|
||||
of noCommand:
|
||||
# NOTE: {.threadvar.} is used to make the global variable GC safe for the closure uses it
|
||||
# It will always be called from main thread anyway.
|
||||
# Ref: https://nim-lang.org/docs/manual.html#threads-gc-safety
|
||||
var nodeHealthMonitor {.threadvar.}: WakuNodeHealthMonitor
|
||||
nodeHealthMonitor = WakuNodeHealthMonitor()
|
||||
nodeHealthMonitor.setOverallHealth(HealthStatus.INITIALIZING)
|
||||
|
||||
let conf = wakuNodeConf.toWakuConf().valueOr:
|
||||
error "Waku configuration failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
var restServer: WakuRestServerRef = nil
|
||||
|
||||
if conf.restServerConf.isSome():
|
||||
restServer = rest_server_builder.startRestServerEssentials(
|
||||
nodeHealthMonitor, conf.restServerConf.get(), conf.portsShift
|
||||
).valueOr:
|
||||
error "Starting essential REST server failed.", error = $error
|
||||
quit(QuitFailure)
|
||||
|
||||
var waku = Waku.new(conf).valueOr:
|
||||
error "Waku initialization failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
waku.restServer = restServer
|
||||
|
||||
nodeHealthMonitor.setNode(waku.node)
|
||||
|
||||
(waitFor startWaku(addr waku)).isOkOr:
|
||||
error "Starting waku failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
if conf.restServerConf.isSome():
|
||||
rest_server_builder.startRestServerProtocolSupport(
|
||||
restServer,
|
||||
waku.node,
|
||||
waku.wakuDiscv5,
|
||||
conf.restServerConf.get(),
|
||||
conf.relay,
|
||||
conf.lightPush,
|
||||
conf.clusterId,
|
||||
conf.shards,
|
||||
conf.contentTopics,
|
||||
).isOkOr:
|
||||
error "Starting protocols support REST server failed.", error = $error
|
||||
quit(QuitFailure)
|
||||
|
||||
if conf.metricsServerConf.isSome():
|
||||
waku.metricsServer = waku_metrics.startMetricsServerAndLogging(
|
||||
conf.metricsServerConf.get(), conf.portsShift
|
||||
).valueOr:
|
||||
error "Starting monitoring and external interfaces failed", error = error
|
||||
quit(QuitFailure)
|
||||
|
||||
nodeHealthMonitor.setOverallHealth(HealthStatus.READY)
|
||||
|
||||
debug "Setting up shutdown hooks"
|
||||
## Setup shutdown hooks for this process.
|
||||
## Stop node gracefully on shutdown.
|
||||
|
||||
proc asyncStopper(node: Waku) {.async: (raises: [Exception]).} =
|
||||
nodeHealthMonitor.setOverallHealth(HealthStatus.SHUTTING_DOWN)
|
||||
await node.stop()
|
||||
proc asyncStopper(waku: Waku) {.async: (raises: [Exception]).} =
|
||||
await waku.stop()
|
||||
quit(QuitSuccess)
|
||||
|
||||
# Handle Ctrl-C SIGINT
|
||||
|
||||
@ -69,17 +69,33 @@ pipeline {
|
||||
stages {
|
||||
stage('Build') {
|
||||
steps { script {
|
||||
image = docker.build(
|
||||
"${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}",
|
||||
"--label=build='${env.BUILD_URL}' " +
|
||||
"--label=commit='${git.commit()}' " +
|
||||
"--label=version='${git.describe('--tags')}' " +
|
||||
"--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " +
|
||||
"--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " +
|
||||
"--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
|
||||
"--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
|
||||
"--target=${params.HEAPTRACK ? "heaptrack-build" : "prod"} ."
|
||||
)
|
||||
if (params.HEAPTRACK) {
|
||||
echo 'Building with heaptrack support'
|
||||
image = docker.build(
|
||||
"${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}",
|
||||
"--label=build='${env.BUILD_URL}' " +
|
||||
"--label=commit='${git.commit()}' " +
|
||||
"--label=version='${git.describe('--tags')}' " +
|
||||
"--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " +
|
||||
"--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres -d:heaptracker ' " +
|
||||
"--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
|
||||
"--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
|
||||
"--build-arg=NIM_COMMIT='NIM_COMMIT=heaptrack_support_v2.0.12' " +
|
||||
"--target='debug-with-heaptrack' ."
|
||||
)
|
||||
} else {
|
||||
image = docker.build(
|
||||
"${params.IMAGE_NAME}:${params.IMAGE_TAG ?: env.GIT_COMMIT.take(8)}",
|
||||
"--label=build='${env.BUILD_URL}' " +
|
||||
"--label=commit='${git.commit()}' " +
|
||||
"--label=version='${git.describe('--tags')}' " +
|
||||
"--build-arg=MAKE_TARGET='${params.MAKE_TARGET}' " +
|
||||
"--build-arg=NIMFLAGS='${params.NIMFLAGS} -d:postgres ' " +
|
||||
"--build-arg=LOG_LEVEL='${params.LOWEST_LOG_LEVEL_ALLOWED}' " +
|
||||
"--build-arg=DEBUG='${params.DEBUG ? "1" : "0"} ' " +
|
||||
"--target='prod' ."
|
||||
)
|
||||
}
|
||||
} }
|
||||
}
|
||||
|
||||
|
||||
@ -13,7 +13,7 @@ EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apt-get update &&\
|
||||
apt-get install -y libpcre3 libpq-dev curl iproute2 wget &&\
|
||||
apt-get install -y libpcre3 libpq-dev curl iproute2 wget dnsutils &&\
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
|
||||
63  docker/binaries/Dockerfile.bn.local  (Normal file)
@ -0,0 +1,63 @@
|
||||
# Dockerfile to build a distributable container image from pre-existing binaries
|
||||
# FROM debian:stable-slim AS prod
|
||||
FROM ubuntu:24.04 AS prod
|
||||
|
||||
ARG MAKE_TARGET=wakunode2
|
||||
|
||||
LABEL maintainer="vaclav@status.im"
|
||||
LABEL source="https://github.com/waku-org/nwaku"
|
||||
LABEL description="Wakunode: Waku client"
|
||||
LABEL commit="unknown"
|
||||
|
||||
# DevP2P, LibP2P, and JSON RPC ports
|
||||
EXPOSE 30303 60000 8545
|
||||
|
||||
# Referenced in the binary
|
||||
RUN apt-get update &&\
|
||||
apt-get install -y libpcre3 libpq-dev curl iproute2 wget jq dnsutils &&\
|
||||
apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Fix for 'Error loading shared library libpcre.so.3: No such file or directory'
|
||||
RUN ln -s /usr/lib/libpcre.so /usr/lib/libpcre.so.3
|
||||
|
||||
# Copy to separate location to accomodate different MAKE_TARGET values
|
||||
ADD ./build/$MAKE_TARGET /usr/local/bin/
|
||||
|
||||
# Copy migration scripts for DB upgrades
|
||||
ADD ./migrations/ /app/migrations/
|
||||
|
||||
# Symlink the correct wakunode binary
|
||||
RUN ln -sv /usr/local/bin/$MAKE_TARGET /usr/bin/wakunode
|
||||
|
||||
ENTRYPOINT ["/usr/bin/wakunode"]
|
||||
|
||||
# By default just show help if called without arguments
|
||||
CMD ["--help"]
|
||||
|
||||
# Build debug tools: heaptrack
|
||||
FROM ubuntu:24.04 AS heaptrack-build
|
||||
|
||||
RUN apt update
|
||||
RUN apt install -y gdb git g++ make cmake zlib1g-dev libboost-all-dev libunwind-dev
|
||||
RUN git clone https://github.com/KDE/heaptrack.git /heaptrack
|
||||
|
||||
WORKDIR /heaptrack/build
|
||||
# going to a commit that builds properly. We will revisit this for new releases
|
||||
RUN git reset --hard f9cc35ebbdde92a292fe3870fe011ad2874da0ca
|
||||
RUN cmake -DCMAKE_BUILD_TYPE=Release ..
|
||||
RUN make -j$(nproc)
|
||||
|
||||
|
||||
# Debug image
|
||||
FROM prod AS debug-with-heaptrack
|
||||
|
||||
RUN apt update
|
||||
RUN apt install -y gdb libunwind8
|
||||
|
||||
# Add heaptrack
|
||||
COPY --from=heaptrack-build /heaptrack/build/ /heaptrack/build/
|
||||
|
||||
ENV LD_LIBRARY_PATH=/heaptrack/build/lib/heaptrack/
|
||||
RUN ln -s /heaptrack/build/bin/heaptrack /usr/local/bin/heaptrack
|
||||
|
||||
ENTRYPOINT ["/heaptrack/build/bin/heaptrack", "/usr/bin/wakunode"]
|
||||
@ -305,10 +305,10 @@ int main(int argc, char** argv) {
|
||||
\"storeMessageDbUrl\": \"%s\", \
|
||||
\"storeMessageRetentionPolicy\": \"%s\", \
|
||||
\"storeMaxNumDbConnections\": %d , \
|
||||
\"logLevel\": \"FATAL\", \
|
||||
\"logLevel\": \"DEBUG\", \
|
||||
\"discv5Discovery\": true, \
|
||||
\"discv5BootstrapNodes\": \
|
||||
[\"enr:-QESuEB4Dchgjn7gfAvwB00CxTA-nGiyk-aALI-H4dYSZD3rUk7bZHmP8d2U6xDiQ2vZffpo45Jp7zKNdnwDUx6g4o6XAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOvD3S3jUNICsrOILlmhENiWAMmMVlAl6-Q8wRB7hidY4N0Y3CCdl-DdWRwgiMohXdha3UyDw\", \"enr:-QEkuEBIkb8q8_mrorHndoXH9t5N6ZfD-jehQCrYeoJDPHqT0l0wyaONa2-piRQsi3oVKAzDShDVeoQhy0uwN1xbZfPZAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQKnGt-GSgqPSf3IAPM7bFgTlpczpMZZLF3geeoNNsxzSoN0Y3CCdl-DdWRwgiMohXdha3UyDw\"], \
|
||||
[\"enr:-QEKuED9AJm2HGgrRpVaJY2nj68ao_QiPeUT43sK-aRM7sMJ6R4G11OSDOwnvVacgN1sTw-K7soC5dzHDFZgZkHU0u-XAYJpZIJ2NIJpcISnYxMvim11bHRpYWRkcnO4WgAqNiVib290LTAxLmRvLWFtczMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfACw2JWJvb3QtMDEuZG8tYW1zMy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEC3rRtFQSgc24uWewzXaxTY8hDAHB8sgnxr9k8Rjb5GeSDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", \"enr:-QEcuED7ww5vo2rKc1pyBp7fubBUH-8STHEZHo7InjVjLblEVyDGkjdTI9VdqmYQOn95vuQH-Htku17WSTzEufx-Wg4mAYJpZIJ2NIJpcIQihw1Xim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmdjLXVzLWNlbnRyYWwxLWEuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuZ2MtdXMtY2VudHJhbDEtYS5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaECxjqgDQ0WyRSOilYU32DA5k_XNlDis3m1VdXkK9xM6kODdGNwgnZfg3VkcIIjKIV3YWt1Mg0\", \"enr:-QEcuEAoShWGyN66wwusE3Ri8hXBaIkoHZHybUB8cCPv5v3ypEf9OCg4cfslJxZFANl90s-jmMOugLUyBx4EfOBNJ6_VAYJpZIJ2NIJpcIQI2hdMim11bHRpYWRkcnO4bAAzNi5ib290LTAxLmFjLWNuLWhvbmdrb25nLWMuc3RhdHVzLnByb2Quc3RhdHVzLmltBnZfADU2LmJvb3QtMDEuYWMtY24taG9uZ2tvbmctYy5zdGF0dXMucHJvZC5zdGF0dXMuaW0GAbveA4Jyc40AEAUAAQAgAEAAgAEAiXNlY3AyNTZrMaEDP7CbRk-YKJwOFFM4Z9ney0GPc7WPJaCwGkpNRyla7mCDdGNwgnZfg3VkcIIjKIV3YWt1Mg0\"], \
|
||||
\"discv5UdpPort\": 9999, \
|
||||
\"dnsDiscoveryUrl\": \"enrtree://AMOJVZX4V6EXP7NTJPMAYJYST2QP6AJXYW76IU6VGJS7UVSNDYZG4@boot.prod.status.nodes.status.im\", \
|
||||
\"dnsDiscoveryNameServers\": [\"8.8.8.8\", \"1.0.0.1\"] \
|
||||
@ -353,6 +353,11 @@ int main(int argc, char** argv) {
|
||||
show_main_menu();
|
||||
while(1) {
|
||||
handle_user_input();
|
||||
|
||||
// Uncomment the following if need to test the metrics retrieval
|
||||
// WAKU_CALL( waku_get_metrics(ctx,
|
||||
// event_handler,
|
||||
// userData) );
|
||||
}
|
||||
|
||||
pthread_mutex_destroy(&mutex);
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import
|
||||
std/[tables, sequtils],
|
||||
stew/byteutils,
|
||||
stew/shims/net,
|
||||
chronicles,
|
||||
chronos,
|
||||
confutils,
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import
|
||||
std/[tables, times, sequtils],
|
||||
stew/byteutils,
|
||||
stew/shims/net,
|
||||
chronicles,
|
||||
results,
|
||||
chronos,
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import
|
||||
std/[tables, times, sequtils],
|
||||
stew/byteutils,
|
||||
stew/shims/net,
|
||||
chronicles,
|
||||
chronos,
|
||||
confutils,
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import
|
||||
std/[tables, sequtils],
|
||||
stew/byteutils,
|
||||
stew/shims/net,
|
||||
chronicles,
|
||||
chronos,
|
||||
confutils,
|
||||
@ -120,7 +119,7 @@ proc setupAndSubscribe(rng: ref HmacDrbgContext) {.async.} =
|
||||
contentTopic = msg.contentTopic,
|
||||
timestamp = msg.timestamp
|
||||
|
||||
node.subscribe((kind: PubsubSub, topic: pubsubTopic), some(WakuRelayHandler(handler))).isOkOr:
|
||||
node.subscribe((kind: PubsubSub, topic: pubsubTopic), WakuRelayHandler(handler)).isOkOr:
|
||||
error "failed to subscribe to pubsub topic", pubsubTopic, error
|
||||
quit(1)
|
||||
|
||||
|
||||
@ -3,7 +3,6 @@
|
||||
import waku/[common/logging, factory/[waku, networks_config, external_config]]
|
||||
import
|
||||
std/[options, strutils, os, sequtils],
|
||||
stew/shims/net as stewNet,
|
||||
chronicles,
|
||||
chronos,
|
||||
metrics,
|
||||
@ -25,26 +24,26 @@ proc setup*(): Waku =
|
||||
|
||||
var conf = confRes.get()
|
||||
|
||||
let twnClusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let twnNetworkConf = NetworkConf.TheWakuNetworkConf()
|
||||
if len(conf.shards) != 0:
|
||||
conf.pubsubTopics = conf.shards.mapIt(twnClusterConf.pubsubTopics[it.uint16])
|
||||
conf.pubsubTopics = conf.shards.mapIt(twnNetworkConf.pubsubTopics[it.uint16])
|
||||
else:
|
||||
conf.pubsubTopics = twnClusterConf.pubsubTopics
|
||||
conf.pubsubTopics = twnNetworkConf.pubsubTopics
|
||||
|
||||
# Override configuration
|
||||
conf.maxMessageSize = twnClusterConf.maxMessageSize
|
||||
conf.clusterId = twnClusterConf.clusterId
|
||||
conf.rlnRelayEthContractAddress = twnClusterConf.rlnRelayEthContractAddress
|
||||
conf.rlnRelayDynamic = twnClusterConf.rlnRelayDynamic
|
||||
conf.discv5Discovery = twnClusterConf.discv5Discovery
|
||||
conf.maxMessageSize = twnNetworkConf.maxMessageSize
|
||||
conf.clusterId = twnNetworkConf.clusterId
|
||||
conf.rlnRelayEthContractAddress = twnNetworkConf.rlnRelayEthContractAddress
|
||||
conf.rlnRelayDynamic = twnNetworkConf.rlnRelayDynamic
|
||||
conf.discv5Discovery = twnNetworkConf.discv5Discovery
|
||||
conf.discv5BootstrapNodes =
|
||||
conf.discv5BootstrapNodes & twnClusterConf.discv5BootstrapNodes
|
||||
conf.rlnEpochSizeSec = twnClusterConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnClusterConf.rlnRelayUserMessageLimit
|
||||
conf.discv5BootstrapNodes & twnNetworkConf.discv5BootstrapNodes
|
||||
conf.rlnEpochSizeSec = twnNetworkConf.rlnEpochSizeSec
|
||||
conf.rlnRelayUserMessageLimit = twnNetworkConf.rlnRelayUserMessageLimit
|
||||
|
||||
# Only set rlnRelay to true if relay is configured
|
||||
if conf.relay:
|
||||
conf.rlnRelay = twnClusterConf.rlnRelay
|
||||
conf.rlnRelay = twnNetworkConf.rlnRelay
|
||||
|
||||
debug "Starting node"
|
||||
var waku = Waku.new(conf).valueOr:
|
||||
|
||||
9  library/events/json_waku_not_responding_event.nim  (Normal file)

@ -0,0 +1,9 @@
import system, std/json, ./json_base_event

type JsonWakuNotRespondingEvent* = ref object of JsonEvent

proc new*(T: type JsonWakuNotRespondingEvent): T =
  return JsonWakuNotRespondingEvent(eventType: "waku_not_responding")

method `$`*(event: JsonWakuNotRespondingEvent): string =
  $(%*event)
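Every libwaku event follows the same shape: a ref object derived from JsonEvent carrying an `eventType` tag, rendered to a JSON string by a `$` method before being handed to the host application's callback. A self-contained sketch of that shape; the `Demo*` names are illustrative, and the JSON is built explicitly here rather than via `%*event` so the sketch does not depend on json_base_event:

```nim
import std/json

type
  DemoJsonEvent = ref object of RootObj
    eventType: string
  DemoNotRespondingEvent = ref object of DemoJsonEvent

proc newDemoNotRespondingEvent(): DemoNotRespondingEvent =
  DemoNotRespondingEvent(eventType: "waku_not_responding")

method `$`(event: DemoJsonEvent): string {.base.} =
  # Events with extra fields would override this and add them to the object.
  $(%*{"eventType": event.eventType})

when isMainModule:
  echo $newDemoNotRespondingEvent()   # {"eventType":"waku_not_responding"}
```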
@ -45,6 +45,8 @@ int waku_version(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
|
||||
// Sets a callback that will be invoked whenever an event occurs.
|
||||
// It is crucial that the passed callback is fast, non-blocking and potentially thread-safe.
|
||||
void waku_set_event_callback(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
@ -150,6 +152,10 @@ int waku_disconnect_peer_by_id(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
|
||||
int waku_disconnect_all_peers(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
|
||||
int waku_dial_peer(void* ctx,
|
||||
const char* peerMultiAddr,
|
||||
const char* protocol,
|
||||
@ -221,6 +227,10 @@ int waku_get_my_peerid(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
|
||||
int waku_get_metrics(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
|
||||
int waku_peer_exchange_request(void* ctx,
|
||||
int numPeers,
|
||||
WakuCallBack callback,
|
||||
@ -232,6 +242,10 @@ int waku_ping_peer(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
|
||||
int waku_is_online(void* ctx,
|
||||
WakuCallBack callback,
|
||||
void* userData);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
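The new C entry points above (waku_disconnect_all_peers, waku_get_metrics, waku_is_online) all share the libwaku calling convention: the payload is delivered through the user-supplied WakuCallBack, and the int return only signals whether the request was accepted. A minimal self-contained Nim sketch of that convention, with demo names; the real functions instead forward the request to the Waku thread:

```nim
type
  DemoCallback = proc(
    retCode: cint, msg: pointer, len: csize_t, userData: pointer
  ) {.cdecl, gcsafe, raises: [].}

proc demo_is_online(callback: DemoCallback, userData: pointer): cint {.exportc, dynlib.} =
  # Mirrors the missing-callback guard performed by checkLibwakuParams.
  if callback.isNil:
    return 1
  let msg = "true"
  # Deliver the answer through the callback; 0 plays the role of RET_OK.
  callback(0, unsafeAddr msg[0], csize_t(msg.len), userData)
  return 0
```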
@ -15,19 +15,18 @@ import
|
||||
waku/waku_core/topics/pubsub_topic,
|
||||
waku/waku_core/subscription/push_handler,
|
||||
waku/waku_relay,
|
||||
./events/
|
||||
[json_message_event, json_topic_health_change_event, json_connection_change_event],
|
||||
./waku_thread/waku_thread,
|
||||
./waku_thread/inter_thread_communication/requests/node_lifecycle_request,
|
||||
./waku_thread/inter_thread_communication/requests/peer_manager_request,
|
||||
./waku_thread/inter_thread_communication/requests/protocols/relay_request,
|
||||
./waku_thread/inter_thread_communication/requests/protocols/store_request,
|
||||
./waku_thread/inter_thread_communication/requests/protocols/lightpush_request,
|
||||
./waku_thread/inter_thread_communication/requests/protocols/filter_request,
|
||||
./waku_thread/inter_thread_communication/requests/debug_node_request,
|
||||
./waku_thread/inter_thread_communication/requests/discovery_request,
|
||||
./waku_thread/inter_thread_communication/requests/ping_request,
|
||||
./waku_thread/inter_thread_communication/waku_thread_request,
|
||||
./events/json_message_event,
|
||||
./waku_context,
|
||||
./waku_thread_requests/requests/node_lifecycle_request,
|
||||
./waku_thread_requests/requests/peer_manager_request,
|
||||
./waku_thread_requests/requests/protocols/relay_request,
|
||||
./waku_thread_requests/requests/protocols/store_request,
|
||||
./waku_thread_requests/requests/protocols/lightpush_request,
|
||||
./waku_thread_requests/requests/protocols/filter_request,
|
||||
./waku_thread_requests/requests/debug_node_request,
|
||||
./waku_thread_requests/requests/discovery_request,
|
||||
./waku_thread_requests/requests/ping_request,
|
||||
./waku_thread_requests/waku_thread_request,
|
||||
./alloc,
|
||||
./ffi_types,
|
||||
../waku/factory/app_callbacks
|
||||
@ -48,25 +47,6 @@ template checkLibwakuParams*(
  if isNil(callback):
    return RET_MISSING_CALLBACK

template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) =
  if isNil(ctx[].eventCallback):
    error eventName & " - eventCallback is nil"
    return

  foreignThreadGc:
    try:
      let event = body
      cast[WakuCallBack](ctx[].eventCallback)(
        RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
      )
    except Exception, CatchableError:
      let msg =
        "Exception " & eventName & " when calling 'eventCallBack': " &
        getCurrentExceptionMsg()
      cast[WakuCallBack](ctx[].eventCallback)(
        RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
      )

proc handleRequest(
    ctx: ptr WakuContext,
    requestType: RequestType,
@ -74,28 +54,13 @@ proc handleRequest(
    callback: WakuCallBack,
    userData: pointer,
): cint =
  waku_thread.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
  waku_context.sendRequestToWakuThread(ctx, requestType, content, callback, userData).isOkOr:
    let msg = "libwaku error: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return RET_ERR

  return RET_OK

proc onConnectionChange(ctx: ptr WakuContext): ConnectionChangeHandler =
  return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
    callEventCallback(ctx, "onConnectionChange"):
      $JsonConnectionChangeEvent.new($peerId, peerEvent)

proc onReceivedMessage(ctx: ptr WakuContext): WakuRelayHandler =
  return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
    callEventCallback(ctx, "onReceivedMessage"):
      $JsonMessageEvent.new(pubsubTopic, msg)

proc onTopicHealthChange(ctx: ptr WakuContext): TopicHealthChangeHandler =
  return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
    callEventCallback(ctx, "onTopicHealthChange"):
      $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)

### End of not-exported components
################################################################################
@ -146,8 +111,8 @@ proc waku_new(
    return nil

  ## Create the Waku thread that will keep waiting for req from the main thread.
  var ctx = waku_thread.createWakuThread().valueOr:
    let msg = "Error in createWakuThread: " & $error
  var ctx = waku_context.createWakuContext().valueOr:
    let msg = "Error in createWakuContext: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return nil

@ -180,7 +145,7 @@ proc waku_destroy(
  initializeLibrary()
  checkLibwakuParams(ctx, callback, userData)

  waku_thread.destroyWakuThread(ctx).isOkOr:
  waku_context.destroyWakuContext(ctx).isOkOr:
    let msg = "libwaku error: " & $error
    callback(RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), userData)
    return RET_ERR
@ -580,6 +545,20 @@ proc waku_disconnect_peer_by_id(
    userData,
  )

proc waku_disconnect_all_peers(
    ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibwakuParams(ctx, callback, userData)

  handleRequest(
    ctx,
    RequestType.PEER_MANAGER,
    PeerManagementRequest.createShared(op = PeerManagementMsgType.DISCONNECT_ALL_PEERS),
    callback,
    userData,
  )

proc waku_dial_peer(
    ctx: ptr WakuContext,
    peerMultiAddr: cstring,
@ -782,6 +761,20 @@ proc waku_get_my_peerid(
    userData,
  )

proc waku_get_metrics(
    ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibwakuParams(ctx, callback, userData)

  handleRequest(
    ctx,
    RequestType.DEBUG,
    DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_METRICS),
    callback,
    userData,
  )

proc waku_start_discv5(
    ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
@ -842,5 +835,19 @@ proc waku_ping_peer(
    userData,
  )

proc waku_is_online(
    ctx: ptr WakuContext, callback: WakuCallBack, userData: pointer
): cint {.dynlib, exportc.} =
  initializeLibrary()
  checkLibwakuParams(ctx, callback, userData)

  handleRequest(
    ctx,
    RequestType.DEBUG,
    DebugNodeRequest.createShared(DebugNodeMsgType.RETRIEVE_ONLINE_STATE),
    callback,
    userData,
  )

### End of exported procs
################################################################################

219
library/waku_context.nim
Normal file
@ -0,0 +1,219 @@
{.pragma: exported, exportc, cdecl, raises: [].}
{.pragma: callback, cdecl, raises: [], gcsafe.}
{.passc: "-fPIC".}

import std/[options, atomics, os, net, locks]
import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results
import
  waku/factory/waku,
  waku/node/peer_manager,
  waku/waku_relay/[protocol, topic_health],
  waku/waku_core/[topics/pubsub_topic, message],
  ./waku_thread_requests/[waku_thread_request, requests/debug_node_request],
  ./ffi_types,
  ./events/[
    json_message_event, json_topic_health_change_event, json_connection_change_event,
    json_waku_not_responding_event,
  ]

type WakuContext* = object
  wakuThread: Thread[(ptr WakuContext)]
  watchdogThread: Thread[(ptr WakuContext)]
    # monitors the Waku thread and notifies the Waku SDK consumer if it hangs
  lock: Lock
  reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest]
  reqSignal: ThreadSignalPtr
    # to inform The Waku Thread (a.k.a TWT) that a new request is sent
  reqReceivedSignal: ThreadSignalPtr
    # to inform the main thread that the request is rx by TWT
  userData*: pointer
  eventCallback*: pointer
  eventUserdata*: pointer
  running: Atomic[bool] # To control when the threads are running

const git_version* {.strdefine.} = "n/a"
const versionString = "version / git commit hash: " & waku.git_version

template callEventCallback(ctx: ptr WakuContext, eventName: string, body: untyped) =
  if isNil(ctx[].eventCallback):
    error eventName & " - eventCallback is nil"
    return

  foreignThreadGc:
    try:
      let event = body
      cast[WakuCallBack](ctx[].eventCallback)(
        RET_OK, unsafeAddr event[0], cast[csize_t](len(event)), ctx[].eventUserData
      )
    except Exception, CatchableError:
      let msg =
        "Exception " & eventName & " when calling 'eventCallBack': " &
        getCurrentExceptionMsg()
      cast[WakuCallBack](ctx[].eventCallback)(
        RET_ERR, unsafeAddr msg[0], cast[csize_t](len(msg)), ctx[].eventUserData
      )

proc onConnectionChange*(ctx: ptr WakuContext): ConnectionChangeHandler =
  return proc(peerId: PeerId, peerEvent: PeerEventKind) {.async.} =
    callEventCallback(ctx, "onConnectionChange"):
      $JsonConnectionChangeEvent.new($peerId, peerEvent)

proc onReceivedMessage*(ctx: ptr WakuContext): WakuRelayHandler =
  return proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async.} =
    callEventCallback(ctx, "onReceivedMessage"):
      $JsonMessageEvent.new(pubsubTopic, msg)

proc onTopicHealthChange*(ctx: ptr WakuContext): TopicHealthChangeHandler =
  return proc(pubsubTopic: PubsubTopic, topicHealth: TopicHealth) {.async.} =
    callEventCallback(ctx, "onTopicHealthChange"):
      $JsonTopicHealthChangeEvent.new(pubsubTopic, topicHealth)

proc onWakuNotResponding*(ctx: ptr WakuContext) =
  callEventCallback(ctx, "onWakuNotResponsive"):
    $JsonWakuNotRespondingEvent.new()

proc sendRequestToWakuThread*(
    ctx: ptr WakuContext,
    reqType: RequestType,
    reqContent: pointer,
    callback: WakuCallBack,
    userData: pointer,
    timeout = InfiniteDuration,
): Result[void, string] =
  ctx.lock.acquire()
  # This lock is only necessary while we use a SP Channel and while the signalling
  # between threads assumes that there aren't concurrent requests.
  # Rearchitecting the signaling + migrating to a MP Channel will allow us to receive
  # requests concurrently and spare us the need of locks
  defer:
    ctx.lock.release()

  let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData)
  ## Sending the request
  let sentOk = ctx.reqChannel.trySend(req)
  if not sentOk:
    deallocShared(req)
    return err("Couldn't send a request to the waku thread: " & $req[])

  let fireSyncRes = ctx.reqSignal.fireSync()
  if fireSyncRes.isErr():
    deallocShared(req)
    return err("failed fireSync: " & $fireSyncRes.error)

  if fireSyncRes.get() == false:
    deallocShared(req)
    return err("Couldn't fireSync in time")

  ## wait until the Waku Thread properly received the request
  let res = ctx.reqReceivedSignal.waitSync(timeout)
  if res.isErr():
    deallocShared(req)
    return err("Couldn't receive reqReceivedSignal signal")

  ## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the
  ## process proc. See the 'waku_thread_request.nim' module for more details.
  ok()

proc watchdogThreadBody(ctx: ptr WakuContext) {.thread.} =
  ## Watchdog thread that monitors the Waku thread and notifies the library user if it hangs.

  let watchdogRun = proc(ctx: ptr WakuContext) {.async.} =
    const WatchdogTimeinterval = 1.seconds
    const WakuNotRespondingTimeout = 3.seconds
    while true:
      await sleepAsync(WatchdogTimeinterval)

      if ctx.running.load == false:
        debug "Watchdog thread exiting because WakuContext is not running"
        break

      let wakuCallback = proc(
          callerRet: cint, msg: ptr cchar, len: csize_t, userData: pointer
      ) {.cdecl, gcsafe, raises: [].} =
        discard ## Don't do anything. Just respecting the callback signature.
      const nilUserData = nil

      trace "Sending watchdog request to Waku thread"

      sendRequestToWakuThread(
        ctx,
        RequestType.DEBUG,
        DebugNodeRequest.createShared(DebugNodeMsgType.CHECK_WAKU_NOT_BLOCKED),
        wakuCallback,
        nilUserData,
        WakuNotRespondingTimeout,
      ).isOkOr:
        error "Failed to send watchdog request to Waku thread", error = $error
        onWakuNotResponding(ctx)

  waitFor watchdogRun(ctx)

proc wakuThreadBody(ctx: ptr WakuContext) {.thread.} =
  ## Waku thread that attends library user requests (stop, connect_to, etc.)

  let wakuRun = proc(ctx: ptr WakuContext) {.async.} =
    var waku: Waku
    while true:
      await ctx.reqSignal.wait()

      if ctx.running.load == false:
        break

      ## Trying to get a request from the libwaku requestor thread
      var request: ptr WakuThreadRequest
      let recvOk = ctx.reqChannel.tryRecv(request)
      if not recvOk:
        error "waku thread could not receive a request"
        continue

      let fireRes = ctx.reqReceivedSignal.fireSync()
      if fireRes.isErr():
        error "could not fireSync back to requester thread", error = fireRes.error

      ## Handle the request
      asyncSpawn WakuThreadRequest.process(request, addr waku)

  waitFor wakuRun(ctx)

proc createWakuContext*(): Result[ptr WakuContext, string] =
  ## This proc is called from the main thread and it creates
  ## the Waku working thread.
  var ctx = createShared(WakuContext, 1)
  ctx.reqSignal = ThreadSignalPtr.new().valueOr:
    return err("couldn't create reqSignal ThreadSignalPtr")
  ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr:
    return err("couldn't create reqReceivedSignal ThreadSignalPtr")
  ctx.lock.initLock()

  ctx.running.store(true)

  try:
    createThread(ctx.wakuThread, wakuThreadBody, ctx)
  except ValueError, ResourceExhaustedError:
    freeShared(ctx)
    return err("failed to create the Waku thread: " & getCurrentExceptionMsg())

  try:
    createThread(ctx.watchdogThread, watchdogThreadBody, ctx)
  except ValueError, ResourceExhaustedError:
    freeShared(ctx)
    return err("failed to create the watchdog thread: " & getCurrentExceptionMsg())

  return ok(ctx)

proc destroyWakuContext*(ctx: ptr WakuContext): Result[void, string] =
  ctx.running.store(false)

  let signaledOnTime = ctx.reqSignal.fireSync().valueOr:
    return err("error in destroyWakuContext: " & $error)
  if not signaledOnTime:
    return err("failed to signal reqSignal on time in destroyWakuContext")

  joinThread(ctx.wakuThread)
  joinThread(ctx.watchdogThread)
  ctx.lock.deinitLock()
  ?ctx.reqSignal.close()
  ?ctx.reqReceivedSignal.close()
  freeShared(ctx)

  return ok()
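The watchdog thread above reports a hung Waku thread to the SDK consumer through the registered event callback, as a JSON event built from JsonWakuNotRespondingEvent. A minimal consumer-side sketch in C, assuming a pre-existing waku_set_event_callback registration entry point and a recognizable "waku_not_responding" marker in the emitted JSON (both are assumptions; only the watchdog mechanism itself appears in this diff):

#include <stdio.h>
#include <string.h>

/* Assumed registration entry point; the callback shape mirrors WakuCallBack. */
extern void waku_set_event_callback(void* ctx,
                                    void (*cb)(int, const char*, size_t, void*),
                                    void* userData);

static void on_event(int ret, const char* msg, size_t len, void* userData) {
  (void)ret;
  (void)userData;
  /* Copy into a NUL-terminated buffer before searching: the event payload
     is delivered as pointer + length. */
  char buf[512];
  size_t n = len < sizeof(buf) - 1 ? len : sizeof(buf) - 1;
  memcpy(buf, msg, n);
  buf[n] = '\0';
  if (strstr(buf, "waku_not_responding") != NULL)
    fprintf(stderr, "waku thread appears blocked: %s\n", buf);
}

void install_health_listener(void* ctx) {
  waku_set_event_callback(ctx, on_event, NULL);
}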
@ -1,132 +0,0 @@
|
||||
{.pragma: exported, exportc, cdecl, raises: [].}
|
||||
{.pragma: callback, cdecl, raises: [], gcsafe.}
|
||||
{.passc: "-fPIC".}
|
||||
|
||||
import std/[options, atomics, os, net, locks]
|
||||
import chronicles, chronos, chronos/threadsync, taskpools/channels_spsc_single, results
|
||||
import waku/factory/waku, ./inter_thread_communication/waku_thread_request, ../ffi_types
|
||||
|
||||
type WakuContext* = object
|
||||
thread: Thread[(ptr WakuContext)]
|
||||
lock: Lock
|
||||
reqChannel: ChannelSPSCSingle[ptr WakuThreadRequest]
|
||||
reqSignal: ThreadSignalPtr
|
||||
# to inform The Waku Thread (a.k.a TWT) that a new request is sent
|
||||
reqReceivedSignal: ThreadSignalPtr
|
||||
# to inform the main thread that the request is rx by TWT
|
||||
userData*: pointer
|
||||
eventCallback*: pointer
|
||||
eventUserdata*: pointer
|
||||
running: Atomic[bool] # To control when the thread is running
|
||||
|
||||
const git_version* {.strdefine.} = "n/a"
|
||||
const versionString = "version / git commit hash: " & waku.git_version
|
||||
|
||||
proc runWaku(ctx: ptr WakuContext) {.async.} =
|
||||
## This is the worker body. This runs the Waku node
|
||||
## and attends library user requests (stop, connect_to, etc.)
|
||||
|
||||
var waku: Waku
|
||||
|
||||
while true:
|
||||
await ctx.reqSignal.wait()
|
||||
|
||||
if ctx.running.load == false:
|
||||
break
|
||||
|
||||
## Trying to get a request from the libwaku requestor thread
|
||||
var request: ptr WakuThreadRequest
|
||||
let recvOk = ctx.reqChannel.tryRecv(request)
|
||||
if not recvOk:
|
||||
error "waku thread could not receive a request"
|
||||
continue
|
||||
|
||||
let fireRes = ctx.reqReceivedSignal.fireSync()
|
||||
if fireRes.isErr():
|
||||
error "could not fireSync back to requester thread", error = fireRes.error
|
||||
|
||||
## Handle the request
|
||||
asyncSpawn WakuThreadRequest.process(request, addr waku)
|
||||
|
||||
proc run(ctx: ptr WakuContext) {.thread.} =
|
||||
## Launch waku worker
|
||||
waitFor runWaku(ctx)
|
||||
|
||||
proc createWakuThread*(): Result[ptr WakuContext, string] =
|
||||
## This proc is called from the main thread and it creates
|
||||
## the Waku working thread.
|
||||
var ctx = createShared(WakuContext, 1)
|
||||
ctx.reqSignal = ThreadSignalPtr.new().valueOr:
|
||||
return err("couldn't create reqSignal ThreadSignalPtr")
|
||||
ctx.reqReceivedSignal = ThreadSignalPtr.new().valueOr:
|
||||
return err("couldn't create reqReceivedSignal ThreadSignalPtr")
|
||||
ctx.lock.initLock()
|
||||
|
||||
ctx.running.store(true)
|
||||
|
||||
try:
|
||||
createThread(ctx.thread, run, ctx)
|
||||
except ValueError, ResourceExhaustedError:
|
||||
# and freeShared for typed allocations!
|
||||
freeShared(ctx)
|
||||
|
||||
return err("failed to create the Waku thread: " & getCurrentExceptionMsg())
|
||||
|
||||
return ok(ctx)
|
||||
|
||||
proc destroyWakuThread*(ctx: ptr WakuContext): Result[void, string] =
|
||||
ctx.running.store(false)
|
||||
|
||||
let signaledOnTime = ctx.reqSignal.fireSync().valueOr:
|
||||
return err("error in destroyWakuThread: " & $error)
|
||||
if not signaledOnTime:
|
||||
return err("failed to signal reqSignal on time in destroyWakuThread")
|
||||
|
||||
joinThread(ctx.thread)
|
||||
ctx.lock.deinitLock()
|
||||
?ctx.reqSignal.close()
|
||||
?ctx.reqReceivedSignal.close()
|
||||
freeShared(ctx)
|
||||
|
||||
return ok()
|
||||
|
||||
proc sendRequestToWakuThread*(
|
||||
ctx: ptr WakuContext,
|
||||
reqType: RequestType,
|
||||
reqContent: pointer,
|
||||
callback: WakuCallBack,
|
||||
userData: pointer,
|
||||
): Result[void, string] =
|
||||
let req = WakuThreadRequest.createShared(reqType, reqContent, callback, userData)
|
||||
|
||||
# This lock is only necessary while we use a SP Channel and while the signalling
|
||||
# between threads assumes that there aren't concurrent requests.
|
||||
# Rearchitecting the signaling + migrating to a MP Channel will allow us to receive
|
||||
# requests concurrently and spare us the need of locks
|
||||
ctx.lock.acquire()
|
||||
defer:
|
||||
ctx.lock.release()
|
||||
## Sending the request
|
||||
let sentOk = ctx.reqChannel.trySend(req)
|
||||
if not sentOk:
|
||||
deallocShared(req)
|
||||
return err("Couldn't send a request to the waku thread: " & $req[])
|
||||
|
||||
let fireSyncRes = ctx.reqSignal.fireSync()
|
||||
if fireSyncRes.isErr():
|
||||
deallocShared(req)
|
||||
return err("failed fireSync: " & $fireSyncRes.error)
|
||||
|
||||
if fireSyncRes.get() == false:
|
||||
deallocShared(req)
|
||||
return err("Couldn't fireSync in time")
|
||||
|
||||
## wait until the Waku Thread properly received the request
|
||||
let res = ctx.reqReceivedSignal.waitSync()
|
||||
if res.isErr():
|
||||
deallocShared(req)
|
||||
return err("Couldn't receive reqReceivedSignal signal")
|
||||
|
||||
## Notice that in case of "ok", the deallocShared(req) is performed by the Waku Thread in the
|
||||
## process proc.
|
||||
ok()
|
||||
@ -1,11 +1,24 @@
import std/json
import chronicles, chronos, results, eth/p2p/discoveryv5/enr, strutils, libp2p/peerid
import ../../../../waku/factory/waku, ../../../../waku/node/waku_node
import
  chronicles,
  chronos,
  results,
  eth/p2p/discoveryv5/enr,
  strutils,
  libp2p/peerid,
  metrics
import
  ../../../waku/factory/waku,
  ../../../waku/node/waku_node,
  ../../../waku/node/health_monitor

type DebugNodeMsgType* = enum
  RETRIEVE_LISTENING_ADDRESSES
  RETRIEVE_MY_ENR
  RETRIEVE_MY_PEER_ID
  RETRIEVE_METRICS
  RETRIEVE_ONLINE_STATE
  CHECK_WAKU_NOT_BLOCKED

type DebugNodeRequest* = object
  operation: DebugNodeMsgType
@ -21,6 +34,10 @@ proc destroyShared(self: ptr DebugNodeRequest) =
proc getMultiaddresses(node: WakuNode): seq[string] =
  return node.info().listenAddresses

proc getMetrics(): string =
  {.gcsafe.}:
    return defaultRegistry.toText() ## defaultRegistry is {.global.} in metrics module

proc process*(
    self: ptr DebugNodeRequest, waku: Waku
): Future[Result[string, string]] {.async.} =
@ -35,6 +52,12 @@ proc process*(
    return ok(waku.node.enr.toURI())
  of RETRIEVE_MY_PEER_ID:
    return ok($waku.node.peerId())
  of RETRIEVE_METRICS:
    return ok(getMetrics())
  of RETRIEVE_ONLINE_STATE:
    return ok($waku.healthMonitor.onlineMonitor.amIOnline())
  of CHECK_WAKU_NOT_BLOCKED:
    return ok("waku thread is not blocked")

  error "unsupported operation in DebugNodeRequest"
  return err("unsupported operation in DebugNodeRequest")
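A short sketch (not from the diff) of how a caller might interpret the payloads produced by the request handler above: RETRIEVE_ONLINE_STATE returns the stringified bool, while RETRIEVE_METRICS returns the Prometheus text dump of defaultRegistry. The metric name used below is illustrative only.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* RETRIEVE_ONLINE_STATE: the Nim side stringifies a bool, so the payload is
   exactly "true" or "false". */
static bool parse_online_state(const char* msg, size_t len) {
  return len == 4 && memcmp(msg, "true", 4) == 0;
}

/* RETRIEVE_METRICS: scan a NUL-terminated copy of the Prometheus text
   exposition for one metric family and print its first line. */
static void print_one_metric(const char* metrics_text, const char* metric_name) {
  const char* line = strstr(metrics_text, metric_name);
  if (line != NULL)
    printf("%.*s\n", (int)strcspn(line, "\n"), line);
}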
@ -1,12 +1,12 @@
|
||||
import std/json
|
||||
import chronos, chronicles, results, strutils, libp2p/multiaddress
|
||||
import
|
||||
../../../../waku/factory/waku,
|
||||
../../../../waku/discovery/waku_dnsdisc,
|
||||
../../../../waku/discovery/waku_discv5,
|
||||
../../../../waku/waku_core/peers,
|
||||
../../../../waku/node/waku_node,
|
||||
../../../alloc
|
||||
../../../waku/factory/waku,
|
||||
../../../waku/discovery/waku_dnsdisc,
|
||||
../../../waku/discovery/waku_discv5,
|
||||
../../../waku/waku_core/peers,
|
||||
../../../waku/node/waku_node,
|
||||
../../alloc
|
||||
|
||||
type DiscoveryMsgType* = enum
|
||||
GET_BOOTSTRAP_NODES
|
||||
@ -2,13 +2,14 @@ import std/[options, json, strutils, net]
|
||||
import chronos, chronicles, results, confutils, confutils/std/net
|
||||
|
||||
import
|
||||
../../../../waku/node/peer_manager/peer_manager,
|
||||
../../../../waku/factory/external_config,
|
||||
../../../../waku/factory/waku,
|
||||
../../../../waku/factory/node_factory,
|
||||
../../../../waku/factory/networks_config,
|
||||
../../../../waku/factory/app_callbacks,
|
||||
../../../alloc
|
||||
../../../waku/node/peer_manager/peer_manager,
|
||||
../../../waku/factory/external_config,
|
||||
../../../waku/factory/waku,
|
||||
../../../waku/factory/node_factory,
|
||||
../../../waku/factory/networks_config,
|
||||
../../../waku/factory/app_callbacks,
|
||||
../../../waku/waku_api/rest/builder,
|
||||
../../alloc
|
||||
|
||||
type NodeLifecycleMsgType* = enum
|
||||
CREATE_NODE
|
||||
@ -73,9 +74,11 @@ proc createWaku(
|
||||
appCallbacks.topicHealthChangeHandler = nil
|
||||
|
||||
# TODO: Convert `confJson` directly to `WakuConf`
|
||||
let wakuConf = conf.toWakuConf().valueOr:
|
||||
var wakuConf = conf.toWakuConf().valueOr:
|
||||
return err("Configuration error: " & $error)
|
||||
|
||||
wakuConf.restServerConf = none(RestServerConf) ## don't want REST in libwaku
|
||||
|
||||
let wakuRes = Waku.new(wakuConf, appCallbacks).valueOr:
|
||||
error "waku initialization failed", error = error
|
||||
return err("Failed setting up Waku: " & $error)
|
||||
@ -1,10 +1,10 @@
|
||||
import std/[sequtils, strutils]
|
||||
import chronicles, chronos, results, options, json
|
||||
import
|
||||
../../../../waku/factory/waku,
|
||||
../../../../waku/node/waku_node,
|
||||
../../../alloc,
|
||||
../../../../waku/node/peer_manager
|
||||
../../../waku/factory/waku,
|
||||
../../../waku/node/waku_node,
|
||||
../../alloc,
|
||||
../../../waku/node/peer_manager
|
||||
|
||||
type PeerManagementMsgType* {.pure.} = enum
|
||||
CONNECT_TO
|
||||
@ -12,6 +12,7 @@ type PeerManagementMsgType* {.pure.} = enum
|
||||
GET_CONNECTED_PEERS_INFO
|
||||
GET_PEER_IDS_BY_PROTOCOL
|
||||
DISCONNECT_PEER_BY_ID
|
||||
DISCONNECT_ALL_PEERS
|
||||
DIAL_PEER
|
||||
DIAL_PEER_BY_ID
|
||||
GET_CONNECTED_PEERS
|
||||
@ -120,6 +121,9 @@ proc process*(
|
||||
return err($error)
|
||||
await waku.node.peerManager.disconnectNode(peerId)
|
||||
return ok("")
|
||||
of DISCONNECT_ALL_PEERS:
|
||||
await waku.node.peerManager.disconnectAllPeers()
|
||||
return ok("")
|
||||
of DIAL_PEER:
|
||||
let remotePeerInfo = parsePeerInfo($self[].peerMultiAddr).valueOr:
|
||||
error "DIAL_PEER failed", error = $error
|
||||
@ -1,7 +1,7 @@
|
||||
import std/[json, strutils]
|
||||
import chronos, results
|
||||
import libp2p/[protocols/ping, switch, multiaddress, multicodec]
|
||||
import ../../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../../alloc
|
||||
import ../../../waku/[factory/waku, waku_core/peers, node/waku_node], ../../alloc
|
||||
|
||||
type PingRequest* = object
|
||||
peerAddr: cstring
|
||||
@ -1,16 +1,16 @@
|
||||
import options, std/[strutils, sequtils]
|
||||
import chronicles, chronos, results
|
||||
import
|
||||
../../../../../waku/waku_filter_v2/client,
|
||||
../../../../../waku/waku_core/message/message,
|
||||
../../../../../waku/factory/waku,
|
||||
../../../../../waku/waku_filter_v2/common,
|
||||
../../../../../waku/waku_core/subscription/push_handler,
|
||||
../../../../../waku/node/peer_manager/peer_manager,
|
||||
../../../../../waku/node/waku_node,
|
||||
../../../../../waku/waku_core/topics/pubsub_topic,
|
||||
../../../../../waku/waku_core/topics/content_topic,
|
||||
../../../../alloc
|
||||
../../../../waku/waku_filter_v2/client,
|
||||
../../../../waku/waku_core/message/message,
|
||||
../../../../waku/factory/waku,
|
||||
../../../../waku/waku_filter_v2/common,
|
||||
../../../../waku/waku_core/subscription/push_handler,
|
||||
../../../../waku/node/peer_manager/peer_manager,
|
||||
../../../../waku/node/waku_node,
|
||||
../../../../waku/waku_core/topics/pubsub_topic,
|
||||
../../../../waku/waku_core/topics/content_topic,
|
||||
../../../alloc
|
||||
|
||||
type FilterMsgType* = enum
|
||||
SUBSCRIBE
|
||||
@ -1,16 +1,16 @@
|
||||
import options
|
||||
import chronicles, chronos, results
|
||||
import
|
||||
../../../../../waku/waku_core/message/message,
|
||||
../../../../../waku/waku_core/codecs,
|
||||
../../../../../waku/factory/waku,
|
||||
../../../../../waku/waku_core/message,
|
||||
../../../../../waku/waku_core/time, # Timestamp
|
||||
../../../../../waku/waku_core/topics/pubsub_topic,
|
||||
../../../../../waku/waku_lightpush_legacy/client,
|
||||
../../../../../waku/waku_lightpush_legacy/common,
|
||||
../../../../../waku/node/peer_manager/peer_manager,
|
||||
../../../../alloc
|
||||
../../../../waku/waku_core/message/message,
|
||||
../../../../waku/waku_core/codecs,
|
||||
../../../../waku/factory/waku,
|
||||
../../../../waku/waku_core/message,
|
||||
../../../../waku/waku_core/time, # Timestamp
|
||||
../../../../waku/waku_core/topics/pubsub_topic,
|
||||
../../../../waku/waku_lightpush_legacy/client,
|
||||
../../../../waku/waku_lightpush_legacy/common,
|
||||
../../../../waku/node/peer_manager/peer_manager,
|
||||
../../../alloc
|
||||
|
||||
type LightpushMsgType* = enum
|
||||
PUBLISH
|
||||
@ -1,16 +1,16 @@
|
||||
import std/[net, sequtils, strutils]
|
||||
import chronicles, chronos, stew/byteutils, results
|
||||
import
|
||||
../../../../../waku/waku_core/message/message,
|
||||
../../../../../waku/factory/[external_config, validator_signed, waku],
|
||||
../../../../../waku/waku_node,
|
||||
../../../../../waku/waku_core/message,
|
||||
../../../../../waku/waku_core/time, # Timestamp
|
||||
../../../../../waku/waku_core/topics/pubsub_topic,
|
||||
../../../../../waku/waku_core/topics,
|
||||
../../../../../waku/waku_relay/protocol,
|
||||
../../../../../waku/node/peer_manager,
|
||||
../../../../alloc
|
||||
../../../../waku/waku_core/message/message,
|
||||
../../../../waku/factory/[external_config, validator_signed, waku],
|
||||
../../../../waku/waku_node,
|
||||
../../../../waku/waku_core/message,
|
||||
../../../../waku/waku_core/time, # Timestamp
|
||||
../../../../waku/waku_core/topics/pubsub_topic,
|
||||
../../../../waku/waku_core/topics,
|
||||
../../../../waku/waku_relay/protocol,
|
||||
../../../../waku/node/peer_manager,
|
||||
../../../alloc
|
||||
|
||||
type RelayMsgType* = enum
|
||||
SUBSCRIBE
|
||||
@ -111,7 +111,7 @@ proc process*(
|
||||
of SUBSCRIBE:
|
||||
waku.node.subscribe(
|
||||
(kind: SubscriptionKind.PubsubSub, topic: $self.pubsubTopic),
|
||||
handler = some(self.relayEventCallback),
|
||||
handler = self.relayEventCallback,
|
||||
).isOkOr:
|
||||
error "SUBSCRIBE failed", error
|
||||
return err($error)
|
||||
@ -1,15 +1,15 @@
|
||||
import std/[json, sugar, strutils, options]
|
||||
import chronos, chronicles, results, stew/byteutils
|
||||
import
|
||||
../../../../../waku/factory/waku,
|
||||
../../../../alloc,
|
||||
../../../../utils,
|
||||
../../../../../waku/waku_core/peers,
|
||||
../../../../../waku/waku_core/time,
|
||||
../../../../../waku/waku_core/message/digest,
|
||||
../../../../../waku/waku_store/common,
|
||||
../../../../../waku/waku_store/client,
|
||||
../../../../../waku/common/paging
|
||||
../../../../waku/factory/waku,
|
||||
../../../alloc,
|
||||
../../../utils,
|
||||
../../../../waku/waku_core/peers,
|
||||
../../../../waku/waku_core/time,
|
||||
../../../../waku/waku_core/message/digest,
|
||||
../../../../waku/waku_store/common,
|
||||
../../../../waku/waku_store/client,
|
||||
../../../../waku/common/paging
|
||||
|
||||
type StoreReqType* = enum
|
||||
REMOTE_QUERY ## to perform a query to another Store node
|
||||
@ -5,8 +5,8 @@
|
||||
import std/json, results
|
||||
import chronos, chronos/threadsync
|
||||
import
|
||||
../../../waku/factory/waku,
|
||||
../../ffi_types,
|
||||
../../waku/factory/waku,
|
||||
../ffi_types,
|
||||
./requests/node_lifecycle_request,
|
||||
./requests/peer_manager_request,
|
||||
./requests/protocols/relay_request,
|
||||
@ -19,15 +19,14 @@ host_triplet=$(rustc --version --verbose | awk '/host:/{print $2}')
|
||||
|
||||
tarball="${host_triplet}"
|
||||
|
||||
# use arkzkey feature for v0.5.1
|
||||
# use arkzkey feature for v0.7.0
|
||||
# TODO: update this script in the future when arkzkey is default
|
||||
if [[ "${rln_version}" == "v0.5.1" ]]; then
|
||||
if [[ "${rln_version}" == "v0.7.0" ]]; then
|
||||
tarball+="-arkzkey-rln.tar.gz"
|
||||
else
|
||||
tarball+="-rln.tar.gz"
|
||||
fi
|
||||
|
||||
|
||||
# Download the prebuilt rln library if it is available
|
||||
if curl --silent --fail-with-body -L \
|
||||
"https://github.com/vacp2p/zerokit/releases/download/$rln_version/$tarball" \
|
||||
|
||||
@ -36,25 +36,28 @@ cd ../../../..
|
||||
|
||||
echo "6. -.-.-.- Building libunwind -.-.-.-"
|
||||
cd vendor/nim-libbacktrace
|
||||
execute_command "make all V=1"
|
||||
execute_command "make install/usr/lib/libunwind.a V=1"
|
||||
execute_command "make all V=1 -j8"
|
||||
execute_command "make install/usr/lib/libunwind.a V=1 -j8"
|
||||
cp ./vendor/libunwind/build/lib/libunwind.a install/usr/lib
|
||||
cd ../../
|
||||
|
||||
echo "7. -.-.-.- Building miniupnpc -.-.-.- "
|
||||
cd vendor/nim-nat-traversal/vendor/miniupnp/miniupnpc
|
||||
execute_command "git checkout little_chore_windows_support"
|
||||
execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1"
|
||||
execute_command "make -f Makefile.mingw CC=gcc CXX=g++ libminiupnpc.a V=1 -j8"
|
||||
cd ../../../../..
|
||||
|
||||
echo "8. -.-.-.- Building libnatpmp -.-.-.- "
|
||||
cd ./vendor/nim-nat-traversal/vendor/libnatpmp-upstream
|
||||
make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1
|
||||
make CC="gcc -fPIC -D_WIN32_WINNT=0x0600 -DNATPMP_STATICLIB" libnatpmp.a V=1 -j8
|
||||
cd ../../../../
|
||||
|
||||
echo "9. -.-.-.- Building wakunode2 -.-.-.- "
|
||||
execute_command "make wakunode2 LOG_LEVEL=DEBUG V=1 -j8"
|
||||
|
||||
echo "10. -.-.-.- Building libwaku -.-.-.- "
|
||||
execute_command "make libwaku STATIC=0 LOG_LEVEL=DEBUG V=1 -j8"
|
||||
|
||||
echo "Windows setup completed successfully!"
|
||||
echo "✓ Successful commands: $success_count"
|
||||
echo "✗ Failed commands: $failure_count"
|
||||
@ -2,13 +2,14 @@
|
||||
|
||||
# Install Anvil
|
||||
|
||||
if ! command -v anvil &> /dev/null; then
|
||||
BASE_DIR="${XDG_CONFIG_HOME:-$HOME}"
|
||||
FOUNDRY_DIR="${FOUNDRY_DIR:-"$BASE_DIR/.foundry"}"
|
||||
FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin"
|
||||
|
||||
BASE_DIR="${XDG_CONFIG_HOME:-$HOME}"
|
||||
FOUNDRY_DIR="${FOUNDRY_DIR-"$BASE_DIR/.foundry"}"
|
||||
FOUNDRY_BIN_DIR="$FOUNDRY_DIR/bin"
|
||||
|
||||
curl -L https://foundry.paradigm.xyz | bash
|
||||
# Extract the source path from the download result
|
||||
echo "foundryup_path: $FOUNDRY_BIN_DIR"
|
||||
# run foundryup
|
||||
$FOUNDRY_BIN_DIR/foundryup
|
||||
curl -L https://foundry.paradigm.xyz | bash
|
||||
# Extract the source path from the download result
|
||||
echo "foundryup_path: $FOUNDRY_BIN_DIR"
|
||||
# run foundryup
|
||||
$FOUNDRY_BIN_DIR/foundryup
|
||||
fi
|
||||
8
scripts/install_pnpm.sh
Executable file
@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Install pnpm
|
||||
if ! command -v pnpm &> /dev/null; then
|
||||
echo "pnpm is not installed, installing it now..."
|
||||
npm i pnpm --global
|
||||
fi
|
||||
|
||||
7
scripts/install_rln_tests_dependencies.sh
Executable file
@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# Install Anvil
|
||||
./scripts/install_anvil.sh
|
||||
|
||||
#Install pnpm
|
||||
./scripts/install_pnpm.sh
|
||||
@ -38,7 +38,8 @@ when os == "Linux" and
|
||||
#./waku_archive_legacy/test_driver_postgres_query,
|
||||
#./waku_archive_legacy/test_driver_postgres,
|
||||
./factory/test_node_factory,
|
||||
./wakunode_rest/test_rest_store
|
||||
./wakunode_rest/test_rest_store,
|
||||
./wakunode_rest/test_all
|
||||
|
||||
# Waku store test suite
|
||||
import
|
||||
@ -91,21 +92,7 @@ import
|
||||
# Waku Keystore test suite
|
||||
import ./test_waku_keystore_keyfile, ./test_waku_keystore
|
||||
|
||||
## Wakunode Rest API test suite
|
||||
import
|
||||
./wakunode_rest/test_rest_debug,
|
||||
./wakunode_rest/test_rest_debug_serdes,
|
||||
./wakunode_rest/test_rest_relay,
|
||||
./wakunode_rest/test_rest_relay_serdes,
|
||||
./wakunode_rest/test_rest_serdes,
|
||||
./wakunode_rest/test_rest_filter,
|
||||
./wakunode_rest/test_rest_lightpush,
|
||||
./wakunode_rest/test_rest_lightpush_legacy,
|
||||
./wakunode_rest/test_rest_admin,
|
||||
./wakunode_rest/test_rest_cors,
|
||||
./wakunode_rest/test_rest_health
|
||||
|
||||
import ./waku_rln_relay/test_all
|
||||
|
||||
# Node Factory
|
||||
import ./factory/test_external_config
|
||||
import ./factory/test_all
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
./test_base64_codec,
|
||||
./test_confutils_envvar,
|
||||
|
||||
@ -3,7 +3,6 @@
|
||||
import
|
||||
std/[os, options],
|
||||
results,
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
confutils,
|
||||
confutils/defs,
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{.used.}
|
||||
|
||||
import std/options, results, stew/shims/net, testutils/unittests
|
||||
import std/[options, net], results, testutils/unittests
|
||||
import waku/common/enr, ../testlib/wakucore
|
||||
|
||||
suite "nim-eth ENR - builder and typed record":
|
||||
|
||||
3
tests/factory/test_all.nim
Normal file
@ -0,0 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_external_config, ./test_node_factory, ./test_waku_conf
|
||||
@ -8,7 +8,8 @@ import
|
||||
libp2p/multiaddress,
|
||||
nimcrypto/utils,
|
||||
secp256k1,
|
||||
confutils
|
||||
confutils,
|
||||
stint
|
||||
import
|
||||
../../waku/factory/external_config,
|
||||
../../waku/factory/networks_config,
|
||||
@ -16,10 +17,46 @@ import
|
||||
../../waku/common/logging,
|
||||
../../waku/common/utils/parse_size_units
|
||||
|
||||
suite "Waku config - apply preset":
|
||||
test "Default preset is TWN":
|
||||
suite "Waku external config - default values":
|
||||
test "Default sharding value":
|
||||
## Setup
|
||||
let expectedConf = ClusterConf.TheWakuNetworkConf()
|
||||
let defaultShardingMode = AutoSharding
|
||||
let defaultNumShardsInCluster = 1.uint16
|
||||
let defaultSubscribeShards = @[0.uint16]
|
||||
|
||||
## Given
|
||||
let preConfig = defaultWakuNodeConf().get()
|
||||
|
||||
## When
|
||||
let res = preConfig.toWakuConf()
|
||||
assert res.isOk(), $res.error
|
||||
|
||||
## Then
|
||||
let conf = res.get()
|
||||
check conf.shardingConf.kind == defaultShardingMode
|
||||
check conf.shardingConf.numShardsInCluster == defaultNumShardsInCluster
|
||||
check conf.subscribeShards == defaultSubscribeShards
|
||||
|
||||
test "Default shards value in static sharding":
|
||||
## Setup
|
||||
let defaultSubscribeShards: seq[uint16] = @[]
|
||||
|
||||
## Given
|
||||
var preConfig = defaultWakuNodeConf().get()
|
||||
preConfig.numShardsInNetwork = 0.uint16
|
||||
|
||||
## When
|
||||
let res = preConfig.toWakuConf()
|
||||
assert res.isOk(), $res.error
|
||||
|
||||
## Then
|
||||
let conf = res.get()
|
||||
check conf.subscribeShards == defaultSubscribeShards
|
||||
|
||||
suite "Waku external config - apply preset":
|
||||
test "Preset is TWN":
|
||||
## Setup
|
||||
let expectedConf = NetworkConf.TheWakuNetworkConf()
|
||||
|
||||
## Given
|
||||
let preConfig = WakuNodeConf(
|
||||
@ -47,7 +84,9 @@ suite "Waku config - apply preset":
|
||||
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
|
||||
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
|
||||
check conf.shardingConf.kind == expectedConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
expectedConf.shardingConf.numShardsInCluster
|
||||
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
|
||||
if conf.discv5Conf.isSome():
|
||||
let discv5Conf = conf.discv5Conf.get()
|
||||
@ -55,7 +94,7 @@ suite "Waku config - apply preset":
|
||||
|
||||
test "Subscribes to all valid shards in twn":
|
||||
## Setup
|
||||
let expectedConf = ClusterConf.TheWakuNetworkConf()
|
||||
let expectedConf = NetworkConf.TheWakuNetworkConf()
|
||||
|
||||
## Given
|
||||
let shards: seq[uint16] = @[0, 1, 2, 3, 4, 5, 6, 7]
|
||||
@ -67,11 +106,11 @@ suite "Waku config - apply preset":
|
||||
|
||||
## Then
|
||||
let conf = res.get()
|
||||
check conf.shards.len == expectedConf.numShardsInNetwork.int
|
||||
check conf.subscribeShards.len == expectedConf.shardingConf.numShardsInCluster.int
|
||||
|
||||
test "Subscribes to some valid shards in twn":
|
||||
## Setup
|
||||
let expectedConf = ClusterConf.TheWakuNetworkConf()
|
||||
let expectedConf = NetworkConf.TheWakuNetworkConf()
|
||||
|
||||
## Given
|
||||
let shards: seq[uint16] = @[0, 4, 7]
|
||||
@ -83,9 +122,9 @@ suite "Waku config - apply preset":
|
||||
|
||||
## Then
|
||||
let conf = resConf.get()
|
||||
assert conf.shards.len() == shards.len()
|
||||
assert conf.subscribeShards.len() == shards.len()
|
||||
for index, shard in shards:
|
||||
assert shard in conf.shards
|
||||
assert shard in conf.subscribeShards
|
||||
|
||||
test "Subscribes to invalid shards in twn":
|
||||
## Setup
|
||||
@ -102,7 +141,7 @@ suite "Waku config - apply preset":
|
||||
|
||||
test "Apply TWN preset when cluster id = 1":
|
||||
## Setup
|
||||
let expectedConf = ClusterConf.TheWakuNetworkConf()
|
||||
let expectedConf = NetworkConf.TheWakuNetworkConf()
|
||||
|
||||
## Given
|
||||
let preConfig = WakuNodeConf(
|
||||
@ -130,13 +169,15 @@ suite "Waku config - apply preset":
|
||||
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
|
||||
check conf.numShardsInNetwork == expectedConf.numShardsInNetwork
|
||||
check conf.shardingConf.kind == expectedConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
expectedConf.shardingConf.numShardsInCluster
|
||||
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
|
||||
if conf.discv5Conf.isSome():
|
||||
let discv5Conf = conf.discv5Conf.get()
|
||||
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
|
||||
|
||||
suite "Waku config - node key":
|
||||
suite "Waku external config - node key":
|
||||
test "Passed node key is used":
|
||||
## Setup
|
||||
let nodeKeyStr =
|
||||
@ -157,13 +198,13 @@ suite "Waku config - node key":
|
||||
assert utils.toHex(resKey.getRawBytes().get()) ==
|
||||
utils.toHex(nodekey.getRawBytes().get())
|
||||
|
||||
suite "Waku config - Shards":
|
||||
suite "Waku external config - Shards":
|
||||
test "Shards are valid":
|
||||
## Setup
|
||||
|
||||
## Given
|
||||
let shards: seq[uint16] = @[0, 2, 4]
|
||||
let numShardsInNetwork = 5.uint32
|
||||
let numShardsInNetwork = 5.uint16
|
||||
let wakuNodeConf = WakuNodeConf(
|
||||
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
|
||||
)
|
||||
@ -182,7 +223,7 @@ suite "Waku config - Shards":
|
||||
|
||||
## Given
|
||||
let shards: seq[uint16] = @[0, 2, 5]
|
||||
let numShardsInNetwork = 5.uint32
|
||||
let numShardsInNetwork = 5.uint16
|
||||
let wakuNodeConf = WakuNodeConf(
|
||||
cmd: noCommand, shards: shards, numShardsInNetwork: numShardsInNetwork
|
||||
)
|
||||
@ -197,7 +238,7 @@ suite "Waku config - Shards":
|
||||
## Setup
|
||||
|
||||
## Given
|
||||
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
|
||||
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=0"])
|
||||
|
||||
## When
|
||||
let res = wakuNodeConf.toWakuConf()
|
||||
@ -206,3 +247,15 @@ suite "Waku config - Shards":
|
||||
let wakuConf = res.get()
|
||||
let vRes = wakuConf.validate()
|
||||
assert vRes.isOk(), $vRes.error
|
||||
|
||||
test "Imvalid shard is passed without num shards":
|
||||
## Setup
|
||||
|
||||
## Given
|
||||
let wakuNodeConf = WakuNodeConf.load(version = "", cmdLine = @["--shard=32"])
|
||||
|
||||
## When
|
||||
let res = wakuNodeConf.toWakuConf()
|
||||
|
||||
## Then
|
||||
assert res.isErr(), "Invalid shard was accepted"
|
||||
|
||||
@ -9,14 +9,14 @@ import
|
||||
testutils/unittests
|
||||
import
|
||||
waku/factory/waku_conf,
|
||||
waku/factory/waku_conf_builder,
|
||||
waku/factory/conf_builder/conf_builder,
|
||||
waku/factory/networks_config,
|
||||
waku/common/utils/parse_size_units
|
||||
|
||||
suite "Waku Conf - build with cluster conf":
|
||||
test "Cluster Conf is passed and relay is enabled":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
builder.discv5Conf.withUdpPort(9000)
|
||||
builder.withRelayServiceRatio("50:50")
|
||||
@ -24,8 +24,8 @@ suite "Waku Conf - build with cluster conf":
|
||||
let expectedShards = toSeq[0.uint16 .. 7.uint16]
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withRelay(true)
|
||||
builder.rlnRelayConf.withTreePath("/tmp/test-tree-path")
|
||||
|
||||
@ -37,27 +37,29 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == expectedShards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == expectedShards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
|
||||
if clusterConf.rlnRelay:
|
||||
if networkConf.rlnRelay:
|
||||
assert conf.rlnRelayConf.isSome(), "RLN Relay conf is disabled"
|
||||
|
||||
let rlnRelayConf = conf.rlnRelayConf.get()
|
||||
check rlnRelayConf.ethContractAddress.string ==
|
||||
clusterConf.rlnRelayEthContractAddress
|
||||
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
|
||||
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
|
||||
networkConf.rlnRelayEthContractAddress
|
||||
check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
|
||||
check rlnRelayConf.chainId == networkConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
|
||||
|
||||
test "Cluster Conf is passed, but relay is disabled":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
builder.withRelayServiceRatio("50:50")
|
||||
builder.discv5Conf.withUdpPort(9000)
|
||||
@ -65,8 +67,8 @@ suite "Waku Conf - build with cluster conf":
|
||||
let expectedShards = toSeq[0.uint16 .. 7.uint16]
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withRelay(false)
|
||||
|
||||
## When
|
||||
@ -77,26 +79,28 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == expectedShards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == expectedShards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
|
||||
assert conf.rlnRelayConf.isNone
|
||||
|
||||
test "Cluster Conf is passed, but rln relay is disabled":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
|
||||
let # Mount all shards in network
|
||||
expectedShards = toSeq[0.uint16 .. 7.uint16]
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.rlnRelayConf.withEnabled(false)
|
||||
|
||||
## When
|
||||
@ -107,24 +111,26 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == expectedShards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == expectedShards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
assert conf.rlnRelayConf.isNone
|
||||
|
||||
test "Cluster Conf is passed and valid shards are specified":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
let shards = @[2.uint16, 3.uint16]
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withShards(shards)
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withSubscribeShards(shards)
|
||||
|
||||
## When
|
||||
let resConf = builder.build()
|
||||
@ -134,23 +140,25 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == shards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == shards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
|
||||
test "Cluster Conf is passed and invalid shards are specified":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
let shards = @[2.uint16, 10.uint16]
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withShards(shards)
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withSubscribeShards(shards)
|
||||
|
||||
## When
|
||||
let resConf = builder.build()
|
||||
@ -158,11 +166,11 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
assert resConf.isErr(), "Invalid shard was accepted"
|
||||
|
||||
test "Cluster Conf is passed and RLN contract is overridden":
|
||||
test "Cluster Conf is passed and RLN contract is **not** overridden":
|
||||
## Setup
|
||||
let clusterConf = ClusterConf.TheWakuNetworkConf()
|
||||
let networkConf = NetworkConf.TheWakuNetworkConf()
|
||||
var builder = WakuConfBuilder.init()
|
||||
builder.rlnRelayConf.withEthClientAddress("https://my_eth_rpc_url/")
|
||||
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
|
||||
|
||||
# Mount all shards in network
|
||||
let expectedShards = toSeq[0.uint16 .. 7.uint16]
|
||||
@ -170,7 +178,7 @@ suite "Waku Conf - build with cluster conf":
|
||||
|
||||
## Given
|
||||
builder.rlnRelayConf.withEthContractAddress(contractAddress)
|
||||
builder.withClusterConf(clusterConf)
|
||||
builder.withNetworkConf(networkConf)
|
||||
builder.withRelay(true)
|
||||
builder.rlnRelayConf.withTreePath("/tmp/test")
|
||||
|
||||
@ -182,23 +190,26 @@ suite "Waku Conf - build with cluster conf":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check conf.clusterId == clusterConf.clusterId
|
||||
check conf.numShardsInNetwork == clusterConf.numShardsInNetwork
|
||||
check conf.shards == expectedShards
|
||||
check conf.clusterId == networkConf.clusterId
|
||||
check conf.shardingConf.kind == networkConf.shardingConf.kind
|
||||
check conf.shardingConf.numShardsInCluster ==
|
||||
networkConf.shardingConf.numShardsInCluster
|
||||
check conf.subscribeShards == expectedShards
|
||||
check conf.maxMessageSizeBytes ==
|
||||
uint64(parseCorrectMsgSize(clusterConf.maxMessageSize))
|
||||
check conf.discv5Conf.isSome == clusterConf.discv5Discovery
|
||||
check conf.discv5Conf.get().bootstrapNodes == clusterConf.discv5BootstrapNodes
|
||||
uint64(parseCorrectMsgSize(networkConf.maxMessageSize))
|
||||
check conf.discv5Conf.isSome == networkConf.discv5Discovery
|
||||
check conf.discv5Conf.get().bootstrapNodes == networkConf.discv5BootstrapNodes
|
||||
|
||||
if clusterConf.rlnRelay:
|
||||
if networkConf.rlnRelay:
|
||||
assert conf.rlnRelayConf.isSome
|
||||
|
||||
let rlnRelayConf = conf.rlnRelayConf.get()
|
||||
check rlnRelayConf.ethContractAddress.string == contractAddress
|
||||
check rlnRelayConf.dynamic == clusterConf.rlnRelayDynamic
|
||||
check rlnRelayConf.chainId == clusterConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == clusterConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == clusterConf.rlnRelayUserMessageLimit
|
||||
check rlnRelayConf.ethContractAddress.string ==
|
||||
networkConf.rlnRelayEthContractAddress
|
||||
check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
|
||||
check rlnRelayConf.chainId == networkConf.rlnRelayChainId
|
||||
check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
|
||||
check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
|
||||
|
||||
suite "Waku Conf - node key":
|
||||
test "Node key is generated":
|
||||
@ -263,10 +274,25 @@ suite "Waku Conf - extMultiaddrs":
|
||||
## Then
|
||||
let resValidate = conf.validate()
|
||||
assert resValidate.isOk(), $resValidate.error
|
||||
check multiaddrs.len == conf.networkConf.extMultiAddrs.len
|
||||
let resMultiaddrs = conf.networkConf.extMultiAddrs.map(
|
||||
check multiaddrs.len == conf.endpointConf.extMultiAddrs.len
|
||||
let resMultiaddrs = conf.endpointConf.extMultiAddrs.map(
|
||||
proc(m: MultiAddress): string =
|
||||
$m
|
||||
)
|
||||
for m in multiaddrs:
|
||||
check m in resMultiaddrs
|
||||
|
||||
suite "Waku Conf Builder - rate limits":
|
||||
test "Valid rate limit passed via string":
|
||||
## Setup
|
||||
var builder = RateLimitConfBuilder.init()
|
||||
|
||||
## Given
|
||||
let rateLimitsStr = @["lightpush:2/2ms", "10/2m", "store: 3/3s"]
|
||||
builder.withRateLimits(rateLimitsStr)
|
||||
|
||||
## When
|
||||
let res = builder.build()
|
||||
|
||||
## Then
|
||||
assert res.isOk(), $res.error
|
||||
|
||||
@ -1 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_rpc_codec, ./test_poc_eligibility, ./test_poc_reputation
|
||||
|
||||
@ -3,7 +3,6 @@ import
|
||||
testutils/unittests,
|
||||
libp2p/[multiaddress, peerid],
|
||||
libp2p/crypto/crypto,
|
||||
stew/shims/net,
|
||||
eth/keys,
|
||||
eth/p2p/discoveryv5/enr,
|
||||
nimcrypto/utils
|
||||
|
||||
@ -1,9 +1,4 @@
|
||||
import
|
||||
chronicles,
|
||||
std/[options, tables, strutils],
|
||||
stew/shims/net,
|
||||
chronos,
|
||||
testutils/unittests
|
||||
import chronicles, std/[options, tables, strutils], chronos, testutils/unittests
|
||||
|
||||
import
|
||||
waku/node/waku_node,
|
||||
@ -23,7 +18,7 @@ suite "Peer Manager":
|
||||
|
||||
asyncSetup:
|
||||
listenPort = Port(0)
|
||||
listenAddress = ValidIpAddress.init("0.0.0.0")
|
||||
listenAddress = parseIpAddress("0.0.0.0")
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
clusterId = 1
|
||||
|
||||
@ -1,3 +1,5 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
./test_wakunode_filter,
|
||||
./test_wakunode_legacy_lightpush,
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
std/[options, tables, sequtils, strutils, sets],
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
chronicles,
|
||||
|
||||
@ -1,8 +1,7 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[options, tempfiles],
|
||||
stew/shims/net as stewNet,
|
||||
std/[options, tempfiles, net],
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
std/strformat,
|
||||
@ -46,8 +45,8 @@ suite "Waku Legacy Lightpush - End To End":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
await allFutures(server.start(), client.start())
|
||||
await server.start()
|
||||
@ -70,7 +69,7 @@ suite "Waku Legacy Lightpush - End To End":
asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
# Given a light lightpush client
let lightpushClient =
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushClient.mountLegacyLightpushClient()

# When the client publishes a message
@ -129,8 +128,8 @@ suite "RLN Proofs as a Lightpush Service":
serverKey = generateSecp256k1Key()
clientKey = generateSecp256k1Key()

server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))

# mount rln-relay
let wakuRlnConfig = WakuRlnConfig(
@ -162,7 +161,7 @@ suite "RLN Proofs as a Lightpush Service":
asyncTest "Message is published when RLN enabled":
# Given a light lightpush client
let lightpushClient =
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
lightpushClient.mountLegacyLightPushClient()

# When the client publishes a message
@ -190,9 +189,9 @@ suite "Waku Legacy Lightpush message delivery":

await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())

(await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
(await destNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
(await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
(await bridgeNode.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
await bridgeNode.mountLegacyLightPush()
lightNode.mountLegacyLightPushClient()
@ -215,7 +214,7 @@ suite "Waku Legacy Lightpush message delivery":
msg == message
completionFutRelay.complete(true)

destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic:" & $error

# Wait for subscription to take effect
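
mountRelay is now called without the shard-list argument in these tests; the shards are subscribed to explicitly afterwards (a later relay peer-exchange hunk adds those subscribe calls). The error handling keeps the nim-results isOkOr idiom; a self-contained sketch of that pattern, with a made-up mountSomething proc standing in for the Waku call:

import results

proc mountSomething(): Result[void, string] =
  # placeholder for a mount call that can fail
  return ok()

mountSomething().isOkOr:
  # `error` is injected by isOkOr and holds the failure value
  assert false, "Failed to mount: " & $error
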
@ -1,11 +1,6 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/options,
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
libp2p/crypto/crypto
|
||||
import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
|
||||
|
||||
import
|
||||
waku/[
|
||||
@ -66,8 +61,8 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages)
|
||||
let mountArchiveResult = server.mountLegacyArchive(archiveDriver)
|
||||
@ -440,7 +435,7 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
newArchiveDriverWithMessages(pubsubTopic, archiveMessages)
|
||||
otherServerKey = generateSecp256k1Key()
|
||||
otherServer =
|
||||
newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountOtherArchiveResult =
|
||||
otherServer.mountLegacyArchive(otherArchiveDriverWithMessages)
|
||||
assert mountOtherArchiveResult.isOk()
|
||||
@ -522,8 +517,8 @@ suite "Waku Store - End to End - Unsorted Archive":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
let
|
||||
unsortedArchiveDriverWithMessages =
|
||||
@ -678,8 +673,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
let archiveDriver = newSqliteArchiveDriver()
|
||||
.put(pubsubTopic, archiveMessages[0 ..< 6])
|
||||
@ -927,7 +922,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
ephemeralServerKey = generateSecp256k1Key()
|
||||
ephemeralServer =
|
||||
newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountEphemeralArchiveResult =
|
||||
ephemeralServer.mountLegacyArchive(ephemeralArchiveDriver)
|
||||
assert mountEphemeralArchiveResult.isOk()
|
||||
@ -970,7 +965,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
mixedServerKey = generateSecp256k1Key()
|
||||
mixedServer =
|
||||
newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountMixedArchiveResult = mixedServer.mountLegacyArchive(mixedArchiveDriver)
|
||||
assert mountMixedArchiveResult.isOk()
|
||||
|
||||
@ -997,7 +992,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
emptyServerKey = generateSecp256k1Key()
|
||||
emptyServer =
|
||||
newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountEmptyArchiveResult = emptyServer.mountLegacyArchive(emptyArchiveDriver)
|
||||
assert mountEmptyArchiveResult.isOk()
|
||||
|
||||
@ -1028,7 +1023,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
voluminousServerKey = generateSecp256k1Key()
|
||||
voluminousServer =
|
||||
newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountVoluminousArchiveResult =
|
||||
voluminousServer.mountLegacyArchive(voluminousArchiveDriverWithMessages)
|
||||
assert mountVoluminousArchiveResult.isOk()
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
std/[options, tempfiles],
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
std/strformat,
|
||||
@ -40,8 +39,8 @@ suite "Waku Lightpush - End To End":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
await allFutures(server.start(), client.start())
|
||||
await server.start()
|
||||
@ -63,7 +62,7 @@ suite "Waku Lightpush - End To End":
|
||||
asyncTest "Via 11/WAKU2-RELAY from Relay/Full Node":
|
||||
# Given a light lightpush client
|
||||
let lightpushClient =
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
lightpushClient.mountLightpushClient()
|
||||
|
||||
# When the client publishes a message
@ -77,7 +76,7 @@ suite "Waku Lightpush - End To End":
# Then the message is not relayed but not due to RLN
assert publishResponse.isErr(), "We expect an error response"

assert (publishResponse.error.code == NO_PEERS_TO_RELAY),
assert (publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY),
"incorrect error response"

suite "Waku LightPush Validation Tests":
@ -94,7 +93,7 @@ suite "Waku Lightpush - End To End":

check:
publishResponse.isErr()
publishResponse.error.code == INVALID_MESSAGE_ERROR
publishResponse.error.code == LightPushErrorCode.INVALID_MESSAGE
publishResponse.error.desc ==
some(fmt"Message size exceeded maximum of {DefaultMaxWakuMessageSize} bytes")
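
The bare status constants (INVALID_MESSAGE_ERROR, NO_PEERS_TO_RELAY, and the PAYLOAD_TOO_LARGE / TOO_MANY_REQUESTS / BAD_REQUEST codes seen later in this diff) are replaced by names qualified under LightPushErrorCode. One possible way to group such codes, purely illustrative; the numeric values are made up and the real nwaku definition may differ:

type LightPushErrorCode {.pure.} = enum
  BAD_REQUEST = 400
  PAYLOAD_TOO_LARGE = 413
  INVALID_MESSAGE = 420
  TOO_MANY_REQUESTS = 429
  NO_PEERS_TO_RELAY = 503

# Qualification keeps the short names from colliding with other protocols' codes.
assert LightPushErrorCode.TOO_MANY_REQUESTS.ord == 429
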
@ -123,8 +122,8 @@ suite "RLN Proofs as a Lightpush Service":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
# mount rln-relay
|
||||
let wakuRlnConfig = WakuRlnConfig(
|
||||
@ -156,7 +155,7 @@ suite "RLN Proofs as a Lightpush Service":
|
||||
asyncTest "Message is published when RLN enabled":
|
||||
# Given a light lightpush client
|
||||
let lightpushClient =
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
lightpushClient.mountLightPushClient()
|
||||
|
||||
# When the client publishes a message
|
||||
@ -169,7 +168,7 @@ suite "RLN Proofs as a Lightpush Service":
|
||||
|
||||
# Then the message is not relayed but not due to RLN
|
||||
assert publishResponse.isErr(), "We expect an error response"
|
||||
check publishResponse.error.code == NO_PEERS_TO_RELAY
|
||||
check publishResponse.error.code == LightPushErrorCode.NO_PEERS_TO_RELAY
|
||||
|
||||
suite "Waku Lightpush message delivery":
|
||||
asyncTest "lightpush message flow succeed":
|
||||
@ -184,9 +183,9 @@ suite "Waku Lightpush message delivery":
|
||||
|
||||
await allFutures(destNode.start(), bridgeNode.start(), lightNode.start())
|
||||
|
||||
(await destNode.mountRelay(@[DefaultRelayShard])).isOkOr:
|
||||
(await destNode.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
(await bridgeNode.mountRelay(@[DefaultRelayShard])).isOkOr:
|
||||
(await bridgeNode.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
await bridgeNode.mountLightPush()
|
||||
lightNode.mountLightPushClient()
|
||||
@ -210,7 +209,7 @@ suite "Waku Lightpush message delivery":
|
||||
msg == message
|
||||
completionFutRelay.complete(true)
|
||||
|
||||
destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), some(relayHandler)).isOkOr:
|
||||
destNode.subscribe((kind: PubsubSub, topic: CustomPubsubTopic), relayHandler).isOkOr:
|
||||
assert false, "Failed to subscribe to relay"
|
||||
|
||||
# Wait for subscription to take effect
|
||||
|
||||
@ -5,7 +5,6 @@ import
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
chronicles,
|
||||
stew/shims/net,
|
||||
libp2p/switch,
|
||||
libp2p/peerId,
|
||||
libp2p/crypto/crypto,
|
||||
|
||||
@ -3,7 +3,6 @@
|
||||
import
|
||||
os,
|
||||
std/[options, tables],
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
# chronos/timer,
|
||||
@ -32,7 +31,7 @@ const DEFAULT_PROTOCOLS: seq[string] =
|
||||
@["/ipfs/id/1.0.0", "/libp2p/autonat/1.0.0", "/libp2p/circuit/relay/0.2.0/hop"]
|
||||
|
||||
let
|
||||
listenIp = ValidIpAddress.init("0.0.0.0")
|
||||
listenIp = parseIpAddress("0.0.0.0")
|
||||
listenPort = Port(0)
|
||||
|
||||
suite "Peer Manager":
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
std/[tempfiles, strutils, options],
|
||||
stew/shims/net as stewNet,
|
||||
stew/results,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
@ -121,8 +120,8 @@ suite "Waku RlnRelay - End to End - Static":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
await allFutures(server.start(), client.start())
|
||||
|
||||
@ -410,8 +409,8 @@ suite "Waku RlnRelay - End to End - OnChain":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
await allFutures(server.start(), client.start())
|
||||
|
||||
|
||||
@ -1,16 +1,10 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[options, sequtils, tempfiles],
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
chronicles,
|
||||
stew/shims/net as stewNet
|
||||
import std/[options, sequtils, tempfiles], testutils/unittests, chronos, chronicles
|
||||
|
||||
import
|
||||
std/[sequtils, tempfiles],
|
||||
stew/byteutils,
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
libp2p/switch,
|
||||
@ -35,7 +29,7 @@ import
|
||||
import waku_relay/protocol
|
||||
|
||||
const
|
||||
listenIp = ValidIpAddress.init("0.0.0.0")
|
||||
listenIp = parseIpAddress("0.0.0.0")
|
||||
listenPort = Port(0)
|
||||
|
||||
suite "Sharding":
|
||||
|
||||
@ -1,11 +1,6 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
std/[options, sequtils, sets],
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
libp2p/crypto/crypto
|
||||
import std/[options, sequtils, sets], testutils/unittests, chronos, libp2p/crypto/crypto
|
||||
|
||||
import
|
||||
waku/[
|
||||
@ -75,8 +70,8 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
archiveDriver = newArchiveDriverWithMessages(pubsubTopic, messages)
|
||||
let mountArchiveResult = server.mountArchive(archiveDriver)
|
||||
@ -480,7 +475,7 @@ suite "Waku Store - End to End - Sorted Archive":
|
||||
)
|
||||
otherServerKey = generateSecp256k1Key()
|
||||
otherServer =
|
||||
newTestWakuNode(otherServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(otherServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountOtherArchiveResult =
|
||||
otherServer.mountArchive(otherArchiveDriverWithMessages)
|
||||
assert mountOtherArchiveResult.isOk()
|
||||
@ -571,8 +566,8 @@ suite "Waku Store - End to End - Unsorted Archive":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
let
|
||||
unsortedArchiveDriverWithMessages =
|
||||
@ -788,8 +783,8 @@ suite "Waku Store - End to End - Unsorted Archive without provided Timestamp":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
let
|
||||
unsortedArchiveDriverWithMessages =
|
||||
@ -938,8 +933,8 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
serverKey = generateSecp256k1Key()
|
||||
clientKey = generateSecp256k1Key()
|
||||
|
||||
server = newTestWakuNode(serverKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
let archiveDriver = newSqliteArchiveDriver().put(pubsubTopic, messages[0 ..< 6]).put(
|
||||
pubsubTopicB, messages[6 ..< 10]
|
||||
@ -1189,7 +1184,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
ephemeralServerKey = generateSecp256k1Key()
|
||||
ephemeralServer =
|
||||
newTestWakuNode(ephemeralServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(ephemeralServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountEphemeralArchiveResult =
|
||||
ephemeralServer.mountArchive(ephemeralArchiveDriver)
|
||||
assert mountEphemeralArchiveResult.isOk()
|
||||
@ -1231,7 +1226,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
mixedServerKey = generateSecp256k1Key()
|
||||
mixedServer =
|
||||
newTestWakuNode(mixedServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(mixedServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountMixedArchiveResult = mixedServer.mountArchive(mixedArchiveDriver)
|
||||
assert mountMixedArchiveResult.isOk()
|
||||
|
||||
@ -1258,7 +1253,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
emptyServerKey = generateSecp256k1Key()
|
||||
emptyServer =
|
||||
newTestWakuNode(emptyServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(emptyServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountEmptyArchiveResult = emptyServer.mountArchive(emptyArchiveDriver)
|
||||
assert mountEmptyArchiveResult.isOk()
|
||||
|
||||
@ -1298,7 +1293,7 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
|
||||
let
|
||||
voluminousServerKey = generateSecp256k1Key()
|
||||
voluminousServer =
|
||||
newTestWakuNode(voluminousServerKey, ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(voluminousServerKey, parseIpAddress("0.0.0.0"), Port(0))
|
||||
mountVoluminousArchiveResult =
|
||||
voluminousServer.mountArchive(voluminousArchiveDriverWithMessages)
|
||||
assert mountVoluminousArchiveResult.isOk()
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
std/[sequtils, times, sugar, net],
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
json_rpc/rpcserver,
|
||||
@ -40,7 +39,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "connectPeer() works":
|
||||
# Create 2 nodes
|
||||
let nodes = toSeq(0 ..< 2).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
|
||||
@ -59,7 +58,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "dialPeer() works":
|
||||
# Create 2 nodes
|
||||
let nodes = toSeq(0 ..< 2).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
@ -94,7 +93,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "dialPeer() fails gracefully":
|
||||
# Create 2 nodes and start them
|
||||
let nodes = toSeq(0 ..< 2).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
await allFutures(nodes.mapIt(it.mountRelay()))
|
||||
@ -122,8 +121,7 @@ procSuite "Peer Manager":
|
||||
|
||||
asyncTest "Adding, selecting and filtering peers work":
|
||||
let
|
||||
node =
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
|
||||
# Create filter peer
|
||||
filterLoc = MultiAddress.init("/ip4/127.0.0.1/tcp/0").tryGet()
|
||||
@ -156,7 +154,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "Peer manager keeps track of connections":
|
||||
# Create 2 nodes
|
||||
let nodes = toSeq(0 ..< 2).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
@ -209,7 +207,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "Peer manager updates failed peers correctly":
|
||||
# Create 2 nodes
|
||||
let nodes = toSeq(0 ..< 2).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
await allFutures(nodes.mapIt(it.start()))
|
||||
@ -311,7 +309,7 @@ procSuite "Peer Manager":
|
||||
# Simulate restart by initialising a new node using the same storage
|
||||
let node3 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("127.0.0.1"),
|
||||
parseIpAddress("127.0.0.1"),
|
||||
Port(56037),
|
||||
peerStorage = storage,
|
||||
)
|
||||
@ -384,7 +382,7 @@ procSuite "Peer Manager":
|
||||
# Simulate restart by initialising a new node using the same storage
|
||||
let node3 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("127.0.0.1"),
|
||||
parseIpAddress("127.0.0.1"),
|
||||
Port(56037),
|
||||
peerStorage = storage,
|
||||
)
|
||||
@ -420,26 +418,26 @@ procSuite "Peer Manager":
|
||||
# different network
|
||||
node1 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("0.0.0.0"),
|
||||
parseIpAddress("0.0.0.0"),
|
||||
port,
|
||||
clusterId = 3,
|
||||
shards = @[uint16(0)],
|
||||
subscribeShards = @[uint16(0)],
|
||||
)
|
||||
|
||||
# same network
|
||||
node2 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("0.0.0.0"),
|
||||
parseIpAddress("0.0.0.0"),
|
||||
port,
|
||||
clusterId = 4,
|
||||
shards = @[uint16(0)],
|
||||
subscribeShards = @[uint16(0)],
|
||||
)
|
||||
node3 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("0.0.0.0"),
|
||||
parseIpAddress("0.0.0.0"),
|
||||
port,
|
||||
clusterId = 4,
|
||||
shards = @[uint16(0)],
|
||||
subscribeShards = @[uint16(0)],
|
||||
)
|
||||
|
||||
node1.mountMetadata(3).expect("Mounted Waku Metadata")
|
||||
@ -476,12 +474,12 @@ procSuite "Peer Manager":
|
||||
storage = WakuPeerStorage.new(database)[]
|
||||
node1 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("0.0.0.0"),
|
||||
parseIpAddress("0.0.0.0"),
|
||||
Port(0),
|
||||
peerStorage = storage,
|
||||
)
|
||||
node2 =
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
peerInfo2 = node2.switch.peerInfo
|
||||
betaCodec = "/vac/waku/relay/2.0.0-beta2"
|
||||
stableCodec = "/vac/waku/relay/2.0.0"
|
||||
@ -509,10 +507,7 @@ procSuite "Peer Manager":
|
||||
|
||||
# Simulate restart by initialising a new node using the same storage
|
||||
let node3 = newTestWakuNode(
|
||||
generateSecp256k1Key(),
|
||||
ValidIpAddress.init("0.0.0.0"),
|
||||
Port(0),
|
||||
peerStorage = storage,
|
||||
generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), peerStorage = storage
|
||||
)
|
||||
|
||||
(await node3.mountRelay()).isOkOr:
|
||||
@ -547,7 +542,7 @@ procSuite "Peer Manager":
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(
|
||||
nodeKey = generateSecp256k1Key(),
|
||||
bindIp = ValidIpAddress.init("0.0.0.0"),
|
||||
bindIp = parseIpAddress("0.0.0.0"),
|
||||
bindPort = Port(0),
|
||||
wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
|
||||
)
|
||||
@ -617,7 +612,7 @@ procSuite "Peer Manager":
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(
|
||||
nodeKey = generateSecp256k1Key(),
|
||||
bindIp = ValidIpAddress.init("0.0.0.0"),
|
||||
bindIp = parseIpAddress("0.0.0.0"),
|
||||
bindPort = Port(0),
|
||||
wakuFlags = some(CapabilitiesBitfield.init(@[Relay])),
|
||||
)
|
||||
@ -685,7 +680,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "Peer store keeps track of incoming connections":
|
||||
# Create 4 nodes
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
# Start them
|
||||
@ -779,8 +774,7 @@ procSuite "Peer Manager":
|
||||
let basePeerId = "16Uiu2HAm7QGEZKujdSbbo1aaQyfDPQ6Bw3ybQnj6fruH5Dxwd7D"
|
||||
|
||||
let
|
||||
node =
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
node = newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
peers = toSeq(1 .. 4)
|
||||
.mapIt(parsePeerInfo("/ip4/0.0.0.0/tcp/30300/p2p/" & basePeerId & $it))
|
||||
.filterIt(it.isOk())
|
||||
@ -819,7 +813,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "connectedPeers() returns expected number of connections per protocol":
|
||||
# Create 4 nodes
|
||||
let nodes = toSeq(0 ..< 4).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
# Start them with relay + filter
|
||||
@ -874,7 +868,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "getNumStreams() returns expected number of connections per protocol":
|
||||
# Create 2 nodes
|
||||
let nodes = toSeq(0 ..< 2).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
# Start them with relay + filter
|
||||
@ -1159,7 +1153,7 @@ procSuite "Peer Manager":
|
||||
asyncTest "colocationLimit is enforced by pruneConnsByIp()":
|
||||
# Create 5 nodes
|
||||
let nodes = toSeq(0 ..< 5).mapIt(
|
||||
newTestWakuNode(generateSecp256k1Key(), ValidIpAddress.init("0.0.0.0"), Port(0))
|
||||
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
|
||||
)
|
||||
|
||||
# Start them with relay + filter
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
std/[sequtils, options],
|
||||
stew/shims/net,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
libp2p/peerid,
|
||||
@ -23,9 +22,9 @@ procSuite "Relay (GossipSub) Peer Exchange":
|
||||
newTestWakuNode(node2Key, listenAddress, port, sendSignedPeerRecord = true)
|
||||
|
||||
# When both client and server mount relay without a handler
|
||||
(await node1.mountRelay(@[DefaultRelayShard])).isOkOr:
|
||||
(await node1.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
(await node2.mountRelay(@[DefaultRelayShard], none(RoutingRecordsHandler))).isOkOr:
|
||||
(await node2.mountRelay(none(RoutingRecordsHandler))).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
|
||||
# Then the relays are mounted without a handler
|
||||
@ -75,11 +74,11 @@ procSuite "Relay (GossipSub) Peer Exchange":
|
||||
peerExchangeHandle: RoutingRecordsHandler = peerExchangeHandler
|
||||
|
||||
# Givem the nodes mount relay with a peer exchange handler
|
||||
(await node1.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
|
||||
(await node1.mountRelay(some(emptyPeerExchangeHandle))).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
(await node2.mountRelay(@[DefaultRelayShard], some(emptyPeerExchangeHandle))).isOkOr:
|
||||
(await node2.mountRelay(some(emptyPeerExchangeHandle))).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
(await node3.mountRelay(@[DefaultRelayShard], some(peerExchangeHandle))).isOkOr:
|
||||
(await node3.mountRelay(some(peerExchangeHandle))).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
|
||||
# Ensure that node1 prunes all peers after the first connection
|
||||
@ -87,6 +86,19 @@ procSuite "Relay (GossipSub) Peer Exchange":
|
||||
|
||||
await allFutures([node1.start(), node2.start(), node3.start()])
|
||||
|
||||
# The three nodes should be subscribed to the same shard
|
||||
proc simpleHandler(
|
||||
topic: PubsubTopic, msg: WakuMessage
|
||||
): Future[void] {.async, gcsafe.} =
|
||||
await sleepAsync(0.milliseconds)
|
||||
|
||||
node1.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr:
|
||||
assert false, "Failed to subscribe to topic: " & $error
|
||||
node2.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr:
|
||||
assert false, "Failed to subscribe to topic: " & $error
|
||||
node3.subscribe((kind: PubsubSub, topic: $DefaultRelayShard), simpleHandler).isOkOr:
|
||||
assert false, "Failed to subscribe to topic: " & $error
|
||||
|
||||
# When nodes are connected
|
||||
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
|
||||
await node3.connectToNodes(@[node1.switch.peerInfo.toRemotePeerInfo()])
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
std/[sequtils, tables],
|
||||
stew/shims/net,
|
||||
results,
|
||||
stew/base32,
|
||||
testutils/unittests,
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
{.used.}
|
||||
|
||||
import
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
chronicles,
|
||||
@ -45,7 +44,10 @@ suite "Waku Keepalive":

await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

node1.startKeepalive(2.seconds)
let healthMonitor = NodeHealthMonitor()
healthMonitor.setNodeToHealthMonitor(node1)
healthMonitor.startKeepalive(2.seconds).isOkOr:
assert false, "Failed to start keepalive"

check:
(await completionFut.withTimeout(5.seconds)) == true
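
Keepalive moves off WakuNode.startKeepalive and onto a NodeHealthMonitor whose startKeepalive returns a Result. A rough chronos sketch of that shape; all names below are invented for illustration and do not reflect the nwaku internals:

import chronos, results

type HealthMonitor = ref object
  keepaliveFut: Future[void]

proc keepaliveLoop(interval: Duration) {.async.} =
  while true:
    # a real monitor would ping the node's connected peers here
    await sleepAsync(interval)

proc startKeepalive(m: HealthMonitor, interval: Duration): Result[void, string] =
  if not m.keepaliveFut.isNil:
    return err("keepalive already running")
  m.keepaliveFut = keepaliveLoop(interval)
  return ok()

let monitor = HealthMonitor()
monitor.startKeepalive(2.seconds).isOkOr:
  quit("Failed to start keepalive: " & error)
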
@ -5,7 +5,6 @@ import
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
chronicles,
|
||||
stew/shims/net,
|
||||
libp2p/switch,
|
||||
libp2p/peerId,
|
||||
libp2p/crypto/crypto,
|
||||
|
||||
@ -18,8 +18,8 @@ suite "Waku NetConfig":
|
||||
let wakuFlags = defaultTestWakuFlags()
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = none(IpAddress),
|
||||
extPort = none(Port),
|
||||
extMultiAddrs = @[],
|
||||
@ -46,7 +46,8 @@ suite "Waku NetConfig":
|
||||
let conf = defaultTestWakuConf()
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
@ -57,7 +58,9 @@ suite "Waku NetConfig":
|
||||
netConfig.announcedAddresses.len == 1 # Only bind address should be present
|
||||
netConfig.announcedAddresses[0] ==
|
||||
formatListenAddress(
|
||||
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.networkConf.p2pTcpPort)
|
||||
ip4TcpEndPoint(
|
||||
conf.endpointConf.p2pListenAddress, conf.endpointConf.p2pTcpPort
|
||||
)
|
||||
)
|
||||
|
||||
asyncTest "AnnouncedAddresses contains external address if extIp/Port are provided":
|
||||
@ -67,8 +70,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -88,8 +91,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -110,8 +113,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[ip4TcpEndPoint(extIp, extPort)]
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
)
|
||||
|
||||
@ -131,8 +134,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
@ -152,8 +155,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
var netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
@ -165,8 +168,9 @@ suite "Waku NetConfig":
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (
|
||||
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.webSocketConf.get().port) &
|
||||
wsFlag(wssEnabled)
|
||||
ip4TcpEndPoint(
|
||||
conf.endpointConf.p2pListenAddress, conf.webSocketConf.get().port
|
||||
) & wsFlag(wssEnabled)
|
||||
)
|
||||
|
||||
## Now try the same for the case of wssEnabled = true
|
||||
@ -174,8 +178,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = true
|
||||
|
||||
netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
wsEnabled = true,
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
@ -187,8 +191,9 @@ suite "Waku NetConfig":
|
||||
check:
|
||||
netConfig.announcedAddresses.len == 2 # Bind address + wsHostAddress
|
||||
netConfig.announcedAddresses[1] == (
|
||||
ip4TcpEndPoint(conf.networkConf.p2pListenAddress, conf.websocketConf.get().port) &
|
||||
wsFlag(wssEnabled)
|
||||
ip4TcpEndPoint(
|
||||
conf.endpointConf.p2pListenAddress, conf.websocketConf.get().port
|
||||
) & wsFlag(wssEnabled)
|
||||
)
|
||||
|
||||
asyncTest "Announced WebSocket address contains external IP if provided":
|
||||
@ -199,8 +204,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
wsEnabled = true,
|
||||
@ -224,8 +229,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort),
|
||||
wsEnabled = true,
|
||||
@ -252,8 +257,8 @@ suite "Waku NetConfig":
|
||||
wssEnabled = false
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
@ -277,7 +282,8 @@ suite "Waku NetConfig":
|
||||
let conf = defaultTestWakuConf()
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress, bindPort = conf.networkConf.p2pTcpPort
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
)
|
||||
|
||||
assert netConfigRes.isOk(), $netConfigRes.error
|
||||
@ -285,8 +291,8 @@ suite "Waku NetConfig":
|
||||
let netConfig = netConfigRes.get()
|
||||
|
||||
check:
|
||||
netConfig.enrIp.get() == conf.networkConf.p2pListenAddress
|
||||
netConfig.enrPort.get() == conf.networkConf.p2pTcpPort
|
||||
netConfig.enrIp.get() == conf.endpointConf.p2pListenAddress
|
||||
netConfig.enrPort.get() == conf.endpointConf.p2pTcpPort
|
||||
|
||||
asyncTest "ENR is set with extIp/Port if provided":
|
||||
let
|
||||
@ -295,8 +301,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extIp = some(extIp),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -316,8 +322,8 @@ suite "Waku NetConfig":
|
||||
extPort = Port(1234)
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
dns4DomainName = some(dns4DomainName),
|
||||
extPort = some(extPort),
|
||||
)
|
||||
@ -339,8 +345,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
|
||||
|
||||
var netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
wsEnabled = wsEnabled,
|
||||
)
|
||||
@ -358,8 +364,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[(ip4TcpEndPoint(extAddIp, extAddPort) & wsFlag(wssEnabled))]
|
||||
|
||||
netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
wssEnabled = wssEnabled,
|
||||
)
|
||||
@ -380,8 +386,8 @@ suite "Waku NetConfig":
|
||||
extMultiAddrs = @[ip4TcpEndPoint(extAddIp, extAddPort)]
|
||||
|
||||
let netConfigRes = NetConfig.init(
|
||||
bindIp = conf.networkConf.p2pListenAddress,
|
||||
bindPort = conf.networkConf.p2pTcpPort,
|
||||
bindIp = conf.endpointConf.p2pListenAddress,
|
||||
bindPort = conf.endpointConf.p2pTcpPort,
|
||||
extMultiAddrs = extMultiAddrs,
|
||||
extMultiAddrsOnly = true,
|
||||
)
|
||||
|
||||
@ -75,8 +75,15 @@ suite "Waku Switch":
completionFut = newFuture[bool]()
proto = new LPProtocol
proto.codec = customProtoCodec
proto.handler = proc(conn: Connection, proto: string) {.async.} =
assert (await conn.readLp(1024)) == msg.toBytes()
proto.handler = proc(
conn: Connection, proto: string
) {.async: (raises: [CancelledError]).} =
try:
assert (await conn.readLp(1024)) == msg.toBytes()
except LPStreamError:
error "Connection read error", error = getCurrentExceptionMsg()
assert false, getCurrentExceptionMsg()

completionFut.complete(true)

await proto.start()
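
The libp2p protocol handler gains an explicit chronos raises annotation, so stream errors must be handled inside the handler instead of escaping it. A minimal chronos example of the annotation, assuming chronos v4-style exception tracking:

import chronos

proc ping(): Future[void] {.async: (raises: [CancelledError]).} =
  # Only CancelledError may escape this proc; raising anything else
  # is rejected at compile time.
  await sleepAsync(10.milliseconds)

waitFor ping()
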
@ -3,7 +3,6 @@
|
||||
import
|
||||
std/[sequtils, strutils, net],
|
||||
stew/byteutils,
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronicles,
|
||||
chronos,
|
||||
@ -15,7 +14,8 @@ import
|
||||
libp2p/protocols/pubsub/pubsub,
|
||||
libp2p/protocols/pubsub/gossipsub,
|
||||
libp2p/nameresolving/mockresolver,
|
||||
eth/p2p/discoveryv5/enr
|
||||
eth/p2p/discoveryv5/enr,
|
||||
eth/net/utils
|
||||
import
|
||||
waku/[waku_core, waku_node, node/peer_manager], ./testlib/wakucore, ./testlib/wakunode
|
||||
|
||||
@ -34,14 +34,14 @@ suite "WakuNode":
|
||||
# Setup node 1 with stable codec "/vac/waku/relay/2.0.0"
|
||||
|
||||
await node1.start()
|
||||
(await node1.mountRelay(@[shard])).isOkOr:
|
||||
(await node1.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
node1.wakuRelay.codec = "/vac/waku/relay/2.0.0"
|
||||
|
||||
# Setup node 2 with beta codec "/vac/waku/relay/2.0.0-beta2"
|
||||
|
||||
await node2.start()
|
||||
(await node2.mountRelay(@[shard])).isOkOr:
|
||||
(await node2.mountRelay()).isOkOr:
|
||||
assert false, "Failed to mount relay"
|
||||
node2.wakuRelay.codec = "/vac/waku/relay/2.0.0-beta2"
|
||||
|
||||
@ -69,7 +69,7 @@ suite "WakuNode":
|
||||
assert false, "Failed to unsubscribe from topic: " & $error
|
||||
|
||||
## Subscribe to the relay topic to add the custom relay handler defined above
|
||||
node2.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
|
||||
node2.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
|
||||
assert false, "Failed to subscribe to topic"
|
||||
await sleepAsync(2000.millis)
|
||||
|
||||
|
||||
@ -1,7 +1,6 @@
|
||||
import
|
||||
std/options,
|
||||
results,
|
||||
stew/shims/net,
|
||||
chronos,
|
||||
libp2p/switch,
|
||||
libp2p/builders,
|
||||
@ -38,7 +37,7 @@ proc defaultTestWakuConfBuilder*(): WakuConfBuilder =
|
||||
builder.withRelayServiceRatio("60:40")
|
||||
builder.withMaxMessageSize("1024 KiB")
|
||||
builder.withClusterId(DefaultClusterId)
|
||||
builder.withShards(@[DefaultShardId])
|
||||
builder.withSubscribeShards(@[DefaultShardId])
|
||||
builder.withRelay(true)
|
||||
builder.withRendezvous(true)
|
||||
builder.storeServiceConf.withDbMigration(false)
|
||||
@ -73,7 +72,7 @@ proc newTestWakuNode*(
|
||||
agentString = none(string),
|
||||
peerStoreCapacity = none(int),
|
||||
clusterId = DefaultClusterId,
|
||||
shards = @[DefaultShardId],
|
||||
subscribeShards = @[DefaultShardId],
|
||||
): WakuNode =
|
||||
var resolvedExtIp = extIp
|
||||
|
||||
@ -87,7 +86,7 @@ proc newTestWakuNode*(
|
||||
var conf = defaultTestWakuConf()
|
||||
|
||||
conf.clusterId = clusterId
|
||||
conf.shards = shards
|
||||
conf.subscribeShards = subscribeShards
|
||||
|
||||
if dns4DomainName.isSome() and extIp.isNone():
|
||||
# If there's an error resolving the IP, an exception is thrown and test fails
|
||||
@ -115,7 +114,7 @@ proc newTestWakuNode*(
|
||||
var enrBuilder = EnrBuilder.init(nodeKey)
|
||||
|
||||
enrBuilder.withWakuRelaySharding(
|
||||
RelayShards(clusterId: conf.clusterId, shardIds: conf.shards)
|
||||
RelayShards(clusterId: conf.clusterId, shardIds: conf.subscribeShards)
|
||||
).isOkOr:
|
||||
raise newException(Defect, "Invalid record: " & $error)
|
||||
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
{.used.}
|
||||
|
||||
import stew/shims/net as stewNet, std/strutils, testutils/unittests
|
||||
import std/[strutils, net], testutils/unittests
|
||||
import ../testlib/wakucore, ../testlib/wakunode
|
||||
|
||||
suite "Waku Core - Published Address":
|
||||
|
||||
@ -503,7 +503,7 @@ suite "Waku Discovery v5":
|
||||
waku.dynamicBootstrapNodes,
|
||||
waku.rng,
|
||||
waku.conf.nodeKey,
|
||||
waku.conf.networkConf.p2pListenAddress,
|
||||
waku.conf.endpointConf.p2pListenAddress,
|
||||
waku.conf.portsShift,
|
||||
)
|
||||
|
||||
@ -534,7 +534,7 @@ suite "Waku Discovery v5":
|
||||
waku.dynamicBootstrapNodes,
|
||||
waku.rng,
|
||||
waku.conf.nodeKey,
|
||||
waku.conf.networkConf.p2pListenAddress,
|
||||
waku.conf.endpointConf.p2pListenAddress,
|
||||
waku.conf.portsShift,
|
||||
)
|
||||
|
||||
|
||||
@ -1,9 +1,4 @@
|
||||
import
|
||||
std/options,
|
||||
stew/shims/net,
|
||||
chronos,
|
||||
libp2p/crypto/crypto as libp2p_keys,
|
||||
eth/keys as eth_keys
|
||||
import std/options, chronos, libp2p/crypto/crypto as libp2p_keys, eth/keys as eth_keys
|
||||
|
||||
import
|
||||
waku/
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
stew/results,
|
||||
stew/shims/net,
|
||||
chronos,
|
||||
testutils/unittests,
|
||||
libp2p/crypto/crypto as libp2p_keys,
|
||||
|
||||
@ -2,7 +2,6 @@ import
|
||||
std/options,
|
||||
sequtils,
|
||||
results,
|
||||
stew/shims/net,
|
||||
chronos,
|
||||
libp2p/crypto/crypto as libp2p_keys,
|
||||
eth/keys as eth_keys
|
||||
|
||||
@ -20,8 +20,10 @@ proc newTestWakuLightpushNode*(
): Future[WakuLightPush] {.async.} =
let
peerManager = PeerManager.new(switch)
wakuSharding = Sharding(clusterId: 1, shardCountGenZero: 8)
proto = WakuLightPush.new(peerManager, rng, handler, wakuSharding, rateLimitSetting)
wakuAutoSharding = Sharding(clusterId: 1, shardCountGenZero: 8)
proto = WakuLightPush.new(
peerManager, rng, handler, some(wakuAutoSharding), rateLimitSetting
)

await proto.start()
switch.mount(proto)
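
WakuLightPush.new now takes the sharding configuration as an Option, so a node without autosharding can pass none. A small std/options sketch of that calling convention; the Sharding fields are guessed from the constructor call above and are illustrative only:

import std/options

type Sharding = object
  clusterId: uint16
  shardCountGenZero: uint32

proc describe(sharding: Option[Sharding]): string =
  if sharding.isSome():
    "autosharding with " & $sharding.get().shardCountGenZero & " shards"
  else:
    "no autosharding configured"

assert describe(some(Sharding(clusterId: 1, shardCountGenZero: 8))) ==
  "autosharding with 8 shards"
assert describe(none(Sharding)) == "no autosharding configured"
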
@ -1 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_client, ./test_ratelimit
|
||||
|
||||
@ -53,8 +53,9 @@ suite "Waku Lightpush Client":
|
||||
): Future[WakuLightPushResult] {.async.} =
|
||||
let msgLen = message.encode().buffer.len
|
||||
if msgLen > int(DefaultMaxWakuMessageSize) + 64 * 1024:
|
||||
return
|
||||
lighpushErrorResult(PAYLOAD_TOO_LARGE, "length greater than maxMessageSize")
|
||||
return lighpushErrorResult(
|
||||
LightPushErrorCode.PAYLOAD_TOO_LARGE, "length greater than maxMessageSize"
|
||||
)
|
||||
handlerFuture.complete((pubsubTopic, message))
|
||||
# return that we published the message to 1 peer.
|
||||
return ok(1)
|
||||
@ -294,7 +295,7 @@ suite "Waku Lightpush Client":
|
||||
# Then the message is not received by the server
|
||||
check:
|
||||
publishResponse5.isErr()
|
||||
publishResponse5.error.code == PAYLOAD_TOO_LARGE
|
||||
publishResponse5.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE
|
||||
(await handlerFuture.waitForResult()).isErr()
|
||||
|
||||
asyncTest "Invalid Encoding Payload":
|
||||
@ -307,7 +308,7 @@ suite "Waku Lightpush Client":
|
||||
# And the error is returned
|
||||
check:
|
||||
publishResponse.requestId == "N/A"
|
||||
publishResponse.statusCode == LightpushStatusCode.BAD_REQUEST.uint32
|
||||
publishResponse.statusCode == LightPushErrorCode.BAD_REQUEST
|
||||
publishResponse.statusDesc.isSome()
|
||||
scanf(publishResponse.statusDesc.get(), decodeRpcFailure)
|
||||
|
||||
@ -320,7 +321,7 @@ suite "Waku Lightpush Client":
|
||||
peer: PeerId, pubsubTopic: PubsubTopic, message: WakuMessage
|
||||
): Future[WakuLightPushResult] {.async.} =
|
||||
handlerFuture2.complete()
|
||||
return lighpushErrorResult(PAYLOAD_TOO_LARGE, handlerError)
|
||||
return lighpushErrorResult(LightPushErrorCode.PAYLOAD_TOO_LARGE, handlerError)
|
||||
|
||||
let
|
||||
serverSwitch2 = newTestSwitch()
|
||||
@ -336,7 +337,7 @@ suite "Waku Lightpush Client":
|
||||
|
||||
# Then the response is negative
|
||||
check:
|
||||
publishResponse.error.code == PAYLOAD_TOO_LARGE
|
||||
publishResponse.error.code == LightPushErrorCode.PAYLOAD_TOO_LARGE
|
||||
publishResponse.error.desc == some(handlerError)
|
||||
(await handlerFuture2.waitForResult()).isOk()
|
||||
|
||||
|
||||
@ -119,7 +119,7 @@ suite "Rate limited push service":
|
||||
|
||||
check:
|
||||
requestRes.isErr()
|
||||
requestRes.error.code == TOO_MANY_REQUESTS
|
||||
requestRes.error.code == LightPushErrorCode.TOO_MANY_REQUESTS
|
||||
requestRes.error.desc == some(TooManyRequestsMessage)
|
||||
|
||||
for testCnt in 0 .. 2:
|
||||
|
||||
@ -1 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_client, ./test_ratelimit
|
||||
|
||||
@ -1 +1,3 @@
|
||||
{.used.}
|
||||
|
||||
import ./test_protocol, ./test_rpc_codec
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
import
|
||||
unittest,
|
||||
results,
|
||||
stew/[shims/net, byteutils],
|
||||
stew/byteutils,
|
||||
nimcrypto/sha2,
|
||||
libp2p/protocols/pubsub/rpc/messages
|
||||
|
||||
|
||||
@ -2,7 +2,6 @@
|
||||
|
||||
import
|
||||
std/[options, strformat],
|
||||
stew/shims/net as stewNet,
|
||||
testutils/unittests,
|
||||
chronos,
|
||||
libp2p/protocols/pubsub/[pubsub, gossipsub],
|
||||
@ -78,7 +77,8 @@ suite "Waku Relay":
|
||||
|
||||
asyncTest "Publish with Subscription (Network Size: 1)":
|
||||
# When subscribing to a Pubsub Topic
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
|
||||
# Then the node is subscribed
|
||||
check:
|
||||
@ -112,7 +112,7 @@ suite "Waku Relay":
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
# When subscribing the second node to the Pubsub Topic
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
|
||||
# Then the second node is subscribed, but not the first one
|
||||
check:
|
||||
@ -173,8 +173,8 @@ suite "Waku Relay":
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
# When subscribing both nodes to the same Pubsub Topic
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
|
||||
# Then both nodes are subscribed
|
||||
check:
|
||||
@ -229,7 +229,7 @@ suite "Waku Relay":
|
||||
|
||||
asyncTest "Refreshing subscription":
|
||||
# Given a subscribed node
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
check:
|
||||
node.isSubscribed(pubsubTopic)
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
@ -245,7 +245,7 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
discard node.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
check:
|
||||
node.isSubscribed(pubsubTopic)
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
@ -292,14 +292,14 @@ suite "Waku Relay":
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
otherNode.addValidator(len4Validator)
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
await sleepAsync(500.millis)
|
||||
check:
|
||||
otherNode.isSubscribed(pubsubTopic)
|
||||
|
||||
# Given a subscribed node with a validator
|
||||
node.addValidator(len4Validator)
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
await sleepAsync(500.millis)
|
||||
check:
|
||||
node.isSubscribed(pubsubTopic)
|
||||
@ -381,8 +381,8 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
check:
|
||||
node.isSubscribed(pubsubTopic)
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
@ -465,8 +465,8 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
handlerFuture2.complete((topic, message))
|
||||
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
discard node.subscribe(pubsubTopicB, simpleFutureHandler2)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopicB, simpleFutureHandler2)
|
||||
|
||||
# Given the other nodes are subscribed to two pubsub topics
|
||||
var otherHandlerFuture1 = newPushHandlerFuture()
|
||||
@ -493,10 +493,10 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
anotherHandlerFuture2.complete((topic, message))
|
||||
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler1)
|
||||
discard otherNode.subscribe(pubsubTopicC, otherSimpleFutureHandler2)
|
||||
discard anotherNode.subscribe(pubsubTopicB, anotherSimpleFutureHandler1)
|
||||
discard anotherNode.subscribe(pubsubTopicC, anotherSimpleFutureHandler2)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler1)
|
||||
otherNode.subscribe(pubsubTopicC, otherSimpleFutureHandler2)
|
||||
anotherNode.subscribe(pubsubTopicB, anotherSimpleFutureHandler1)
|
||||
anotherNode.subscribe(pubsubTopicC, anotherSimpleFutureHandler2)
|
||||
await sleepAsync(500.millis)
|
||||
|
||||
# When publishing a message in node for each of the pubsub topics
|
||||
@ -736,15 +736,13 @@ suite "Waku Relay":
|
||||
otherSwitch = newTestSwitch()
|
||||
otherNode = await newTestWakuRelay(otherSwitch)
|
||||
await allFutures(otherSwitch.start(), otherNode.start())
|
||||
let otherTopicHandler: TopicHandler =
|
||||
otherNode.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
|
||||
# Given a node without a subscription
|
||||
check:
|
||||
node.subscribedTopics == []
|
||||
|
||||
# When unsubscribing from a pubsub topic from an unsubscribed topic handler
|
||||
node.unsubscribe(pubsubTopic, otherTopicHandler)
|
||||
node.unsubscribe(pubsubTopic)
|
||||
|
||||
# Then the node is still not subscribed
|
||||
check:
|
||||
@ -755,11 +753,11 @@ suite "Waku Relay":
|
||||
|
||||
asyncTest "Single Node with Single Pubsub Topic":
|
||||
# Given a node subscribed to a pubsub topic
|
||||
let topicHandler = node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
check node.subscribedTopics == pubsubTopicSeq
|
||||
|
||||
# When unsubscribing from the pubsub topic
|
||||
node.unsubscribe(pubsubTopic, topicHandler)
|
||||
node.unsubscribe(pubsubTopic)
|
||||
|
||||
# Then the node is not subscribed anymore
|
||||
check node.subscribedTopics == []
|
||||
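
In these relay tests, subscribe no longer returns a TopicHandler and unsubscribe takes only the topic, presumably dropping the handlers registered for it. An illustrative sketch of that kind of registry, unrelated to the actual WakuRelay internals:

import std/tables

type
  Handler = proc (msg: string) {.closure.}
  Relay = object
    handlers: Table[string, seq[Handler]]

proc subscribe(r: var Relay, topic: string, h: Handler) =
  r.handlers.mgetOrPut(topic, @[]).add(h)

proc unsubscribe(r: var Relay, topic: string) =
  # removing by topic alone drops every handler for that topic,
  # so callers no longer need to keep the handler around
  r.handlers.del(topic)

var relay = Relay(handlers: initTable[string, seq[Handler]]())
relay.subscribe("/waku/2/rs/0/0", proc (msg: string) = discard)
relay.unsubscribe("/waku/2/rs/0/0")
assert "/waku/2/rs/0/0" notin relay.handlers
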
@ -769,9 +767,8 @@ suite "Waku Relay":
|
||||
let pubsubTopicB = "/waku/2/rs/0/1"
|
||||
|
||||
# Given a node subscribed to multiple pubsub topics
|
||||
let
|
||||
topicHandler = node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
topicHandlerB = node.subscribe(pubsubTopicB, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopicB, simpleFutureHandler)
|
||||
|
||||
assert pubsubTopic in node.subscribedTopics,
|
||||
fmt"Node is not subscribed to {pubsubTopic}"
|
||||
@ -779,13 +776,13 @@ suite "Waku Relay":
|
||||
fmt"Node is not subscribed to {pubsubTopicB}"
|
||||
|
||||
# When unsubscribing from one of the pubsub topics
|
||||
node.unsubscribe(pubsubTopic, topicHandler)
|
||||
node.unsubscribe(pubsubTopic)
|
||||
|
||||
# Then the node is still subscribed to the other pubsub topic
|
||||
check node.subscribedTopics == @[pubsubTopicB]
|
||||
|
||||
# When unsubscribing from the other pubsub topic
|
||||
node.unsubscribe(pubsubTopicB, topicHandlerB)
|
||||
node.unsubscribe(pubsubTopicB)
|
||||
|
||||
# Then the node is not subscribed anymore
|
||||
check node.subscribedTopics == []
|
||||
@ -803,7 +800,7 @@ suite "Waku Relay":
|
||||
|
||||
asyncTest "Single Node with Single Pubsub Topic":
|
||||
# Given a node subscribed to a pubsub topic
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
check node.subscribedTopics == pubsubTopicSeq
|
||||
|
||||
# When unsubscribing from all pubsub topics
|
||||
@ -817,9 +814,9 @@ suite "Waku Relay":
|
||||
let pubsubTopicB = "/waku/2/rs/0/1"
|
||||
|
||||
# Given a node subscribed to multiple pubsub topics
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
discard node.subscribe(pubsubTopicB, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
node.subscribe(pubsubTopicB, simpleFutureHandler)
|
||||
|
||||
assert pubsubTopic in node.subscribedTopics,
|
||||
fmt"Node is not subscribed to {pubsubTopic}"
|
||||
@ -856,8 +853,8 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
check:
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
otherNode.subscribedTopics == pubsubTopicSeq
|
||||
@ -1022,8 +1019,8 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
check:
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
otherNode.subscribedTopics == pubsubTopicSeq
|
||||
@ -1164,8 +1161,8 @@ suite "Waku Relay":
|
||||
otherMessageSeq.add((topic, message))
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
discard node.subscribe(pubsubTopic, thisSimpleFutureHandler)
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, thisSimpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
check:
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
otherNode.subscribedTopics == pubsubTopicSeq
|
||||
@ -1238,8 +1235,8 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
check:
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
otherNode.subscribedTopics == pubsubTopicSeq
|
||||
@ -1333,8 +1330,8 @@ suite "Waku Relay":
|
||||
) {.async, gcsafe.} =
|
||||
otherHandlerFuture.complete((topic, message))
|
||||
|
||||
discard otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
discard node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
otherNode.subscribe(pubsubTopic, otherSimpleFutureHandler)
|
||||
node.subscribe(pubsubTopic, simpleFutureHandler)
|
||||
check:
|
||||
node.subscribedTopics == pubsubTopicSeq
|
||||
otherNode.subscribedTopics == pubsubTopicSeq
|
||||
|
||||
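The "Waku Relay" hunks above all reflect the same API simplification: subscribe no longer returns a topic handler that has to be stored, and unsubscribe identifies the subscription by the pubsub topic alone rather than by a (topic, handler) pair. A minimal illustrative sketch of the resulting pattern (not a verbatim excerpt), assuming a `node: WakuRelay` plus the `pubsubTopic`, `pubsubTopicSeq` and `simpleFutureHandler` fixtures used throughout this suite:

  # subscribe registers the handler internally; nothing needs to be kept for later
  node.subscribe(pubsubTopic, simpleFutureHandler)
  check node.subscribedTopics == pubsubTopicSeq

  # unsubscribing now needs only the pubsub topic, not the original handler
  node.unsubscribe(pubsubTopic)
  check node.subscribedTopics == []
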
@ -3,7 +3,6 @@
import
std/[os, sequtils, sysrand, math],
stew/byteutils,
stew/shims/net as stewNet,
testutils/unittests,
chronos,
libp2p/switch,
@ -71,15 +70,15 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)

await node1.start()
(await node1.mountRelay(@[shard])).isOkOr:
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node2.start()
(await node2.mountRelay(@[shard])).isOkOr:
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node3.start()
(await node3.mountRelay(@[shard])).isOkOr:
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await allFutures(
@ -98,13 +97,19 @@ suite "WakuNode - Relay":
msg.timestamp > 0
completionFut.complete(true)

## The following unsubscription is necessary to remove the default relay handler, which is
## added when mountRelay is called.
node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
assert false, "Failed to unsubscribe from topic: " & $error
proc simpleHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
await sleepAsync(0.milliseconds)

## node1 and node2 explicitly subscribe to the same shard as node3
node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error

## Subscribe to the relay topic to add the custom relay handler defined above
node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)

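The same change shows up one level higher in the "WakuNode - Relay" suite: mountRelay() is now called without a shard list, the default handler it installs is removed with a PubsubUnsub event, and subscribe takes the handler directly instead of wrapping it in some(...). A sketch of that sequence under the same assumptions as the test above (a started node3, a shard, and a relayHandler already defined); it mirrors the hunks rather than adding new behaviour:

  (await node3.mountRelay()).isOkOr:
    assert false, "Failed to mount relay"

  # remove the default relay handler added by mountRelay
  node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
    assert false, "Failed to unsubscribe from topic: " & $error

  # re-subscribe with the custom handler, passed directly (no some(...))
  node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
    assert false, "Failed to subscribe to topic: " & $error
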
@ -148,15 +153,15 @@ suite "WakuNode - Relay":

# start all the nodes
await node1.start()
(await node1.mountRelay(@[shard])).isOkOr:
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node2.start()
(await node2.mountRelay(@[shard])).isOkOr:
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node3.start()
(await node3.mountRelay(@[shard])).isOkOr:
(await node3.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -194,13 +199,19 @@ suite "WakuNode - Relay":
# relay handler is called
completionFut.complete(true)

## The following unsubscription is necessary to remove the default relay handler, which is
## added when mountRelay is called.
node3.unsubscribe((kind: PubsubUnsub, topic: $shard)).isOkOr:
assert false, "Failed to unsubscribe from topic: " & $error
proc simpleHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
await sleepAsync(0.milliseconds)

## node1 and node2 explicitly subscribe to the same shard as node3
node1.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
node2.subscribe((kind: PubsubSub, topic: $shard), simpleHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error

## Subscribe to the relay topic to add the custom relay handler defined above
node3.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
node3.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)

@ -288,11 +299,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)

await node1.start()
(await node1.mountRelay(@[shard])).isOkOr:
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node2.start()
(await node2.mountRelay(@[shard])).isOkOr:
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -314,7 +325,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error

## Subscribe to the relay topic to add the custom relay handler defined above
node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)

@ -346,11 +357,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)

await node1.start()
(await node1.mountRelay(@[shard])).isOkOr:
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node2.start()
(await node2.mountRelay(@[shard])).isOkOr:
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -372,7 +383,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error

## Subscribe to the relay topic to add the custom relay handler defined above
node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)

@ -404,11 +415,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)

await node1.start()
(await node1.mountRelay(@[shard])).isOkOr:
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node2.start()
(await node2.mountRelay(@[shard])).isOkOr:
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

#delete websocket peer address
@ -434,7 +445,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error

## Subscribe to the relay topic to add the custom relay handler defined above
node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)

@ -468,11 +479,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)

await node1.start()
(await node1.mountRelay(@[shard])).isOkOr:
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node2.start()
(await node2.mountRelay(@[shard])).isOkOr:
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -494,7 +505,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error

## Subscribe to the relay topic to add the custom relay handler defined above
node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)

@ -536,11 +547,11 @@ suite "WakuNode - Relay":
message = WakuMessage(payload: payload, contentTopic: contentTopic)

await node1.start()
(await node1.mountRelay(@[shard])).isOkOr:
(await node1.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node2.start()
(await node2.mountRelay(@[shard])).isOkOr:
(await node2.mountRelay()).isOkOr:
assert false, "Failed to mount relay"

await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])
@ -562,7 +573,7 @@ suite "WakuNode - Relay":
assert false, "Failed to unsubscribe from topic: " & $error

## Subscribe to the relay topic to add the custom relay handler defined above
node1.subscribe((kind: PubsubSub, topic: $shard), some(relayHandler)).isOkOr:
node1.subscribe((kind: PubsubSub, topic: $shard), relayHandler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
await sleepAsync(500.millis)

@ -584,10 +595,15 @@ suite "WakuNode - Relay":
await allFutures(nodes.mapIt(it.start()))
await allFutures(nodes.mapIt(it.mountRelay()))

proc simpleHandler(
topic: PubsubTopic, msg: WakuMessage
): Future[void] {.async, gcsafe.} =
await sleepAsync(0.milliseconds)

# subscribe all nodes to a topic
let topic = "topic"
for node in nodes:
discard node.wakuRelay.subscribe(topic, nil)
node.wakuRelay.subscribe(topic, simpleHandler)
await sleepAsync(500.millis)

# connect nodes in full mesh
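Note that in the hunk above the lower-level wakuRelay.subscribe call no longer takes nil; a test that only needs the gossipsub mesh to form now passes a no-op async handler. A hypothetical minimal handler for that purpose, matching the signature used in these tests and assuming the same nodes sequence:

  proc noopHandler(
      topic: PubsubTopic, msg: WakuMessage
  ): Future[void] {.async, gcsafe.} =
    # intentionally does nothing; the subscription only shapes the mesh
    await sleepAsync(0.milliseconds)

  for node in nodes:
    node.wakuRelay.subscribe("topic", noopHandler)
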
@ -632,7 +648,7 @@ suite "WakuNode - Relay":
# Stop all nodes
await allFutures(nodes.mapIt(it.stop()))

asyncTest "Only one subscription is allowed for contenttopics that generate the same shard":
asyncTest "Multiple subscription calls are allowed for contenttopics that generate the same shard":
## Setup
let
nodeKey = generateSecp256k1Key()
@ -641,7 +657,7 @@ suite "WakuNode - Relay":
await node.start()
(await node.mountRelay()).isOkOr:
assert false, "Failed to mount relay"
require node.mountSharding(1, 1).isOk
require node.mountAutoSharding(1, 1).isOk

## Given
let
@ -654,27 +670,35 @@ suite "WakuNode - Relay":
): Future[void] {.gcsafe, raises: [Defect].} =
discard pubsubTopic
discard message
assert shard == node.wakuSharding.getShard(contentTopicA).expect("Valid Topic"),
assert shard ==
node.wakuAutoSharding.get().getShard(contentTopicA).expect("Valid Topic"),
"topic must use the same shard"
assert shard == node.wakuSharding.getShard(contentTopicB).expect("Valid Topic"),
assert shard ==
node.wakuAutoSharding.get().getShard(contentTopicB).expect("Valid Topic"),
"topic must use the same shard"
assert shard == node.wakuSharding.getShard(contentTopicC).expect("Valid Topic"),
assert shard ==
node.wakuAutoSharding.get().getShard(contentTopicC).expect("Valid Topic"),
"topic must use the same shard"

## When
node.subscribe((kind: ContentSub, topic: contentTopicA), some(handler)).isOkOr:
node.subscribe((kind: ContentSub, topic: contentTopicA), handler).isOkOr:
assert false, "Failed to subscribe to topic: " & $error
node.subscribe((kind: ContentSub, topic: contentTopicB), some(handler)).isErrOr:
node.subscribe((kind: ContentSub, topic: contentTopicB), handler).isOkOr:
assert false,
"The subscription should fail because is already subscribe to that shard"
node.subscribe((kind: ContentSub, topic: contentTopicC), some(handler)).isErrOr:
"The subscription call shouldn't error even though it's already subscribed to that shard"
node.subscribe((kind: ContentSub, topic: contentTopicC), handler).isOkOr:
assert false,
"The subscription should fail because is already subscribe to that shard"
"The subscription call shouldn't error even though it's already subscribed to that shard"

## The node should be subscribed to the shard
check node.wakuRelay.isSubscribed(shard)

## Then
node.unsubscribe((kind: ContentUnsub, topic: contentTopicB)).isOkOr:
assert false, "Failed to unsubscribe to topic: " & $error
check node.wakuRelay.isSubscribed(shard)

## After unsubcription, the node should not be subscribed to the shard anymore
check not node.wakuRelay.isSubscribed(shard)

## Cleanup
await node.stop()
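The last test also reflects the rename from mountSharding to mountAutoSharding and the move of the auto-sharding object behind an Option, so shard lookups go through node.wakuAutoSharding.get(). A sketch of that lookup, assuming a node with relay mounted and two content topics that auto-shard onto the same shard, as in the test above:

  require node.mountAutoSharding(1, 1).isOk

  let
    shardA = node.wakuAutoSharding.get().getShard(contentTopicA).expect("Valid Topic")
    shardB = node.wakuAutoSharding.get().getShard(contentTopicB).expect("Valid Topic")

  # with a single cluster and a single shard, both content topics map to the same
  # shard, so repeated ContentSub subscriptions reuse one pubsub subscription
  assert shardA == shardB, "topic must use the same shard"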