Merge branch 'master' into release/v0.38

This commit is contained in:
darshankabariya 2026-04-08 00:55:39 +05:30
commit b2e46b6e91
No known key found for this signature in database
GPG Key ID: 9A92CCD9899F0D22
160 changed files with 3988 additions and 18443 deletions

View File

@ -8,7 +8,7 @@ assignees: ''
---
<!--
Add appropriate release number and adjust the target fleet in the tittle!
Add appropriate release number and adjust the target fleet in the title!
-->
### Link to the Release PR
@ -20,17 +20,22 @@ Kindly add a link to the release PR where we have a sign-off from QA. At this ti
### Items to complete, in order
<!--
You can release into either waku.sanbox, status.prod, or both.
For status.prod it is crucial to coordinate such deployment with status friends.
You can release into either waku.sandbox, status.prod, or both. Both cases require coordination with Infra Team.
waku.sandbox must be considered a prod fleet as it is used by external parties.
For status.prod it is crucial to coordinate such deployment with the Status Team.
The following points should be followed in order.
-->
- [ ] Receive sign-off from DST.
- [ ] Inform the DST team about the expectations for this release. For example, whether we expect higher, same or lower bandwidth consumption, a new protocol appearing, etc.
- [ ] Ask DST to add a comment approving this deployment and add a link to the analysis report.
- [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
- [ ] Deploy to waku.sandbox
- [ ] Coordinate with Infra Team about possible changes in CI behavior
- [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
- [ ] Deploy to status.prod
- [ ] Coordinate with Infra Team about possible changes in CI behavior
- [ ] Ask a Status admin to add a comment approving this deployment to happen now.
- [ ] Update status.prod with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-status-prod/).
@ -42,7 +47,7 @@ For status.prod it is crucial to coordinate such deployment with status friends.
- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md)
- [Infra-role-nim-waku](https://github.com/status-im/infra-role-nim-waku)
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
- [Infra-waku](https://github.com/status-im/infra-waku)
- [Infra-Status](https://github.com/status-im/infra-status)
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
- [Fleets](https://fleets.waku.org/)

4
.nph.toml Normal file
View File

@ -0,0 +1,4 @@
extend-exclude = [
"vendor",
"nimbledeps",
]

View File

@ -68,6 +68,22 @@
| [`66/WAKU2-METADATA`](https://github.com/waku-org/specs/blob/master/standards/core/metadata.md) | `raw` | `/vac/waku/metadata/1.0.0` |
| [`WAKU-SYNC`](https://github.com/waku-org/specs/blob/master/standards/core/sync.md) | `draft` | `/vac/waku/sync/1.0.0` |
## v0.37.4 (2026-04-03)
### Changes
- Optimize release builds for speed ([#3735](https://github.com/logos-messaging/logos-delivery/pull/3735)) ([#3777](https://github.com/logos-messaging/logos-delivery/pull/3777))
### Bug Fixes
- Properly add DEBUG flag into Dockerfile
## v0.37.3 (2026-03-25)
### Features
- Allow override user-message-rate-limit ([#3778](https://github.com/logos-messaging/logos-delivery/pull/3778))
## v0.37.2 (2026-03-19)
### Features
@ -77,7 +93,6 @@
### Bug Fixes
- Bump nim-http-utils to v0.4.1 to allow accepting <:><space><(> as a valid header and tests to validate html rfc7230 ([#43](https://github.com/status-im/nim-http-utils/pull/43))
- Force FINALIZE partition detach after detecting shorter error ([#3728](https://github.com/logos-messaging/logos-delivery/pull/3766))
## v0.37.1 (2026-03-12)

View File

@ -144,7 +144,7 @@ deps: | deps-common nat-libs waku.nims
# "-d:release" implies "--stacktrace:off" and it cannot be added to config.nims
ifeq ($(DEBUG), 0)
NIM_PARAMS := $(NIM_PARAMS) -d:release
NIM_PARAMS := $(NIM_PARAMS) -d:release -d:lto_incremental -d:strip
else
NIM_PARAMS := $(NIM_PARAMS) -d:debug
endif

View File

@ -36,7 +36,6 @@ import
waku_lightpush_legacy/rpc,
waku_enr,
discovery/waku_dnsdisc,
waku_store_legacy,
waku_node,
node/waku_metrics,
node/peer_manager,
@ -50,8 +49,7 @@ import
import libp2p/protocols/pubsub/rpc/messages, libp2p/protocols/pubsub/pubsub
import ../../waku/waku_rln_relay
const Help =
"""
const Help = """
Commands: /[?|help|connect|nick|exit]
help: Prints this help
connect: dials a remote peer
@ -337,16 +335,16 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
builder.withRecord(record)
builder
.withNetworkConfigurationDetails(
conf.listenAddress,
Port(uint16(conf.tcpPort) + conf.portsShift),
extIp,
extTcpPort,
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
wsEnabled = conf.websocketSupport,
wssEnabled = conf.websocketSecureSupport,
)
.tryGet()
.withNetworkConfigurationDetails(
conf.listenAddress,
Port(uint16(conf.tcpPort) + conf.portsShift),
extIp,
extTcpPort,
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
wsEnabled = conf.websocketSupport,
wssEnabled = conf.websocketSecureSupport,
)
.tryGet()
builder.build().tryGet()
await node.start()

View File

@ -127,8 +127,10 @@ proc toMatterbridge(
assert chat2Msg.isOk
if not cmb.mbClient
.postMessage(text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick)
.containsValue(true):
.postMessage(
text = string.fromBytes(chat2Msg[].payload), username = chat2Msg[].nick
)
.containsValue(true):
chat2_mb_dropped.inc(labelValues = ["duplicate"])
error "Matterbridge host unreachable. Dropping message."
@ -175,10 +177,10 @@ proc new*(
builder.withNodeKey(nodev2Key)
builder
.withNetworkConfigurationDetails(
nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort
)
.tryGet()
.withNetworkConfigurationDetails(
nodev2BindIp, nodev2BindPort, nodev2ExtIp, nodev2ExtPort
)
.tryGet()
builder.build().tryGet()
return Chat2MatterBridge(

View File

@ -57,8 +57,7 @@ import ../../waku/waku_rln_relay
logScope:
topics = "chat2 mix"
const Help =
"""
const Help = """
Commands: /[?|help|connect|nick|exit]
help: Prints this help
connect: dials a remote peer
@ -429,16 +428,16 @@ proc processInput(rfd: AsyncFD, rng: ref HmacDrbgContext) {.async.} =
builder.withRecord(record)
builder
.withNetworkConfigurationDetails(
conf.listenAddress,
Port(uint16(conf.tcpPort) + conf.portsShift),
extIp,
extTcpPort,
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
wsEnabled = conf.websocketSupport,
wssEnabled = conf.websocketSecureSupport,
)
.tryGet()
.withNetworkConfigurationDetails(
conf.listenAddress,
Port(uint16(conf.tcpPort) + conf.portsShift),
extIp,
extTcpPort,
wsBindPort = Port(uint16(conf.websocketPort) + conf.portsShift),
wsEnabled = conf.websocketSupport,
wssEnabled = conf.websocketSecureSupport,
)
.tryGet()
builder.build().tryGet()
node.mountAutoSharding(conf.clusterId, conf.numShardsInNetwork).isOkOr:

View File

@ -113,17 +113,16 @@ type
shards* {.
desc:
"Shards index to subscribe to [0..NUM_SHARDS_IN_NETWORK-1]. Argument may be repeated.",
defaultValue:
@[
uint16(0),
uint16(1),
uint16(2),
uint16(3),
uint16(4),
uint16(5),
uint16(6),
uint16(7),
],
defaultValue: @[
uint16(0),
uint16(1),
uint16(2),
uint16(3),
uint16(4),
uint16(5),
uint16(6),
uint16(7),
],
name: "shard"
.}: seq[uint16]

View File

@ -6,12 +6,15 @@ import
os
import
libp2p/protocols/ping,
libp2p/protocols/protocol,
libp2p/crypto/[crypto, secp],
libp2p/nameresolving/dnsresolver,
libp2p/multicodec
import
./certsgenerator,
waku/[waku_enr, node/peer_manager, waku_core, waku_node, factory/builder]
waku/[waku_enr, node/peer_manager, waku_core, waku_node, factory/builder],
waku/waku_metadata/protocol,
waku/common/callbacks
# protocols and their tag
const ProtocolsTable = {
@ -45,7 +48,7 @@ type WakuCanaryConf* = object
timeout* {.
desc: "Timeout to consider that the connection failed",
defaultValue: chronos.seconds(10),
defaultValue: chronos.seconds(20),
name: "timeout",
abbr: "t"
.}: chronos.Duration
@ -161,11 +164,10 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
# create dns resolver
let
nameServers =
@[
initTAddress(parseIpAddress("1.1.1.1"), Port(53)),
initTAddress(parseIpAddress("1.0.0.1"), Port(53)),
]
nameServers = @[
initTAddress(parseIpAddress("1.1.1.1"), Port(53)),
initTAddress(parseIpAddress("1.0.0.1"), Port(53)),
]
resolver: DnsResolver = DnsResolver.new(nameServers)
if conf.logLevel != LogLevel.NONE:
@ -252,12 +254,26 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
error "failed to mount libp2p ping protocol: " & getCurrentExceptionMsg()
quit(QuitFailure)
node.mountMetadata(conf.clusterId, conf.shards).isOkOr:
error "failed to mount metadata protocol", error
# Mount metadata with a custom getter that returns CLI shards directly,
# since the canary doesn't mount relay (which is what the default getter reads from).
# Without this fix, the canary always sends remoteShards=[] in metadata requests.
let cliShards = conf.shards
let shardsGetter: GetShards = proc(): seq[uint16] {.closure, gcsafe, raises: [].} =
return cliShards
let metadata = WakuMetadata.new(conf.clusterId, shardsGetter)
node.wakuMetadata = metadata
node.peerManager.wakuMetadata = metadata
let mountRes = catch:
node.switch.mount(metadata, protocolMatcher(WakuMetadataCodec))
mountRes.isOkOr:
error "failed to mount metadata protocol", error = error.msg
quit(QuitFailure)
await node.start()
debug "Connecting to peer", peer = peer, timeout = conf.timeout
var pingFut: Future[bool]
if conf.ping:
pingFut = pingNode(node, peer).withTimeout(conf.timeout)
@ -267,8 +283,18 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
error "Timedout after", timeout = conf.timeout
quit(QuitFailure)
# Clean disconnect with defer so the remote node doesn't see
# "Stream Underlying Connection Closed!" when we exit
defer:
debug "Cleanly disconnecting from peer", peerId = peer.peerId
await node.peerManager.disconnectNode(peer.peerId)
await node.stop()
debug "Connected, checking connection status", peerId = peer.peerId
let lp2pPeerStore = node.switch.peerStore
let conStatus = node.peerManager.switch.peerStore[ConnectionBook][peer.peerId]
debug "Connection status", peerId = peer.peerId, conStatus = conStatus
var pingSuccess = true
if conf.ping:
@ -284,14 +310,15 @@ proc main(rng: ref HmacDrbgContext): Future[int] {.async.} =
if conStatus in [Connected, CanConnect]:
let nodeProtocols = lp2pPeerStore[ProtoBook][peer.peerId]
debug "Peer protocols", peerId = peer.peerId, protocols = nodeProtocols
if not areProtocolsSupported(conf.protocols, nodeProtocols):
error "Not all protocols are supported",
expected = conf.protocols, supported = nodeProtocols
quit(QuitFailure)
return 1
elif conStatus == CannotConnect:
error "Could not connect", peerId = peer.peerId
quit(QuitFailure)
return 1
return 0
when isMainModule:

View File

@ -26,10 +26,6 @@ if defined(windows):
# set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM
switch("passL", "-Wl,--large-address-aware")
# The dynamic Chronicles output currently prevents us from using colors on Windows
# because these require direct manipulations of the stdout File object.
switch("define", "chronicles_colors=off")
# https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#ssse3-supplemental-sse3
# suggests that SHA256 hashing with SSSE3 is 20% faster than without SSSE3, so
# given its near-ubiquity in the x86 installed base, it renders a distribution
@ -52,9 +48,10 @@ if defined(disableMarchNative):
switch("passL", "-march=haswell -mtune=generic")
else:
if defined(marchOptimized):
# https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#bmi2--adx
switch("passC", "-march=broadwell -mtune=generic")
switch("passL", "-march=broadwell -mtune=generic")
# -march=broadwell: https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#bmi2--adx
# Changed to x86-64-v2 for broader support
switch("passC", "-march=x86-64-v2 -mtune=generic")
switch("passL", "-march=x86-64-v2 -mtune=generic")
else:
switch("passC", "-mssse3")
switch("passL", "-mssse3")
@ -76,6 +73,7 @@ else:
on
--opt:
speed
--excessiveStackTrace:
on
# enable metric collection
@ -85,8 +83,6 @@ else:
--define:
nimTypeNames
switch("define", "withoutPCRE")
# the default open files limit is too low on macOS (512), breaking the
# "--debugger:native" build. It can be increased with `ulimit -n 1024`.
if not defined(macosx) and not defined(android):

View File

@ -7,6 +7,10 @@
};
inputs = {
# Ensure Nix fetches git submodules (vendor/*) when evaluating this flake.
# Requires Nix >= 2.27. Consumers no longer need '?submodules=1' in the URL.
self.submodules = true;
# We are pinning the commit because ultimately we want to use same commit across different projects.
# A commit from nixpkgs 24.11 release : https://github.com/NixOS/nixpkgs/tree/release-24.11
nixpkgs.url = "github:NixOS/nixpkgs/0ef228213045d2cdb5a169a95d63ded38670b293";

View File

@ -45,7 +45,9 @@ proc logosdelivery_get_available_configs(
for meta in optionMetas:
configOptionDetails.add(
%*{meta.fieldName: meta.typeName & "(" & meta.defaultValue & ")", "desc": meta.desc}
%*{
meta.fieldName: meta.typeName & "(" & meta.defaultValue & ")", "desc": meta.desc
}
)
var jsonNode = newJObject()

View File

@ -7,13 +7,9 @@ import ../../waku/api/types
type JsonConnectionStatusChangeEvent* = ref object of JsonEvent
status*: ConnectionStatus
proc new*(
T: type JsonConnectionStatusChangeEvent, status: ConnectionStatus
): T =
return JsonConnectionStatusChangeEvent(
eventType: "node_health_change",
status: status
)
proc new*(T: type JsonConnectionStatusChangeEvent, status: ConnectionStatus): T =
return
JsonConnectionStatusChangeEvent(eventType: "node_health_change", status: status)
method `$`*(event: JsonConnectionStatusChangeEvent): string =
$(%*event)

View File

@ -72,7 +72,7 @@ proc waku_new(
relayHandler: onReceivedMessage(ctx),
topicHealthChangeHandler: onTopicHealthChange(ctx),
connectionChangeHandler: onConnectionChange(ctx),
connectionStatusChangeHandler: onConnectionStatusChange(ctx)
connectionStatusChangeHandler: onConnectionStatusChange(ctx),
)
ffi.sendRequestToFFIThread(

View File

@ -1,5 +1,4 @@
const ContentScriptVersion_1* =
"""
const ContentScriptVersion_1* = """
CREATE TABLE IF NOT EXISTS messages (
pubsubTopic VARCHAR NOT NULL,
contentTopic VARCHAR NOT NULL,

View File

@ -1,5 +1,4 @@
const ContentScriptVersion_2* =
"""
const ContentScriptVersion_2* = """
ALTER TABLE IF EXISTS messages_backup RENAME TO messages;
ALTER TABLE messages RENAME TO messages_backup;
ALTER TABLE messages_backup DROP CONSTRAINT messageIndex;

View File

@ -1,5 +1,4 @@
const ContentScriptVersion_3* =
"""
const ContentScriptVersion_3* = """
CREATE INDEX IF NOT EXISTS i_query ON messages
(contentTopic, pubsubTopic, storedAt, id);

View File

@ -1,5 +1,4 @@
const ContentScriptVersion_4* =
"""
const ContentScriptVersion_4* = """
ALTER TABLE messages ADD meta VARCHAR default null;
CREATE INDEX IF NOT EXISTS i_query ON messages (contentTopic, pubsubTopic, storedAt, id);

View File

@ -1,5 +1,4 @@
const ContentScriptVersion_5* =
"""
const ContentScriptVersion_5* = """
CREATE INDEX IF NOT EXISTS i_query_storedAt ON messages (storedAt, id);
UPDATE version SET version = 5 WHERE version = 4;

View File

@ -1,5 +1,4 @@
const ContentScriptVersion_6* =
"""
const ContentScriptVersion_6* = """
-- we can drop the timestamp column because this data is also kept in the storedAt column
ALTER TABLE messages DROP COLUMN timestamp;

View File

@ -1,5 +1,4 @@
const ContentScriptVersion_7* =
"""
const ContentScriptVersion_7* = """
-- Create lookup table
CREATE TABLE IF NOT EXISTS messages_lookup (

View File

@ -10,16 +10,15 @@ type MigrationScript* = object
proc init*(T: type MigrationScript, targetVersion: int, scriptContent: string): T =
return MigrationScript(targetVersion: targetVersion, scriptContent: scriptContent)
const PgMigrationScripts* =
@[
MigrationScript(version: 1, scriptContent: ContentScriptVersion_1),
MigrationScript(version: 2, scriptContent: ContentScriptVersion_2),
MigrationScript(version: 3, scriptContent: ContentScriptVersion_3),
MigrationScript(version: 4, scriptContent: ContentScriptVersion_4),
MigrationScript(version: 5, scriptContent: ContentScriptVersion_5),
MigrationScript(version: 6, scriptContent: ContentScriptVersion_6),
MigrationScript(version: 7, scriptContent: ContentScriptVersion_7),
]
const PgMigrationScripts* = @[
MigrationScript(version: 1, scriptContent: ContentScriptVersion_1),
MigrationScript(version: 2, scriptContent: ContentScriptVersion_2),
MigrationScript(version: 3, scriptContent: ContentScriptVersion_3),
MigrationScript(version: 4, scriptContent: ContentScriptVersion_4),
MigrationScript(version: 5, scriptContent: ContentScriptVersion_5),
MigrationScript(version: 6, scriptContent: ContentScriptVersion_6),
MigrationScript(version: 7, scriptContent: ContentScriptVersion_7),
]
proc getMigrationScripts*(currentVersion: int64, targetVersion: int64): seq[string] =
var ret = newSeq[string]()

View File

@ -12,7 +12,7 @@
zerokitRln,
}:
assert pkgs.lib.assertMsg ((src.submodules or true) == true)
assert pkgs.lib.assertMsg (builtins.pathExists "${src}/vendor/nimbus-build-system/scripts")
"Unable to build without submodules. Append '?submodules=1#' to the URI.";
let

View File

@ -20,14 +20,7 @@ import
./waku_archive/test_driver_sqlite,
./waku_archive/test_retention_policy,
./waku_archive/test_waku_archive,
./waku_archive/test_partition_manager,
./waku_archive_legacy/test_driver_queue_index,
./waku_archive_legacy/test_driver_queue_pagination,
./waku_archive_legacy/test_driver_queue_query,
./waku_archive_legacy/test_driver_queue,
./waku_archive_legacy/test_driver_sqlite_query,
./waku_archive_legacy/test_driver_sqlite,
./waku_archive_legacy/test_waku_archive
./waku_archive/test_partition_manager
const os* {.strdefine.} = ""
when os == "Linux" and
@ -37,8 +30,6 @@ when os == "Linux" and
import
./waku_archive/test_driver_postgres_query,
./waku_archive/test_driver_postgres,
#./waku_archive_legacy/test_driver_postgres_query,
#./waku_archive_legacy/test_driver_postgres,
./factory/test_node_factory,
./wakunode_rest/test_rest_store,
./wakunode_rest/test_all
@ -50,20 +41,9 @@ import
./waku_store/test_waku_store,
./waku_store/test_wakunode_store
# Waku legacy store test suite
import
./waku_store_legacy/test_client,
./waku_store_legacy/test_rpc_codec,
./waku_store_legacy/test_waku_store,
./waku_store_legacy/test_wakunode_store
# Waku store sync suite
import ./waku_store_sync/test_all
when defined(waku_exp_store_resume):
# TODO: Review store resume test cases (#1282)
import ./waku_store_legacy/test_resume
import
./node/test_all,
./waku_filter_v2/test_all,

View File

@ -88,7 +88,7 @@ proc waitForEvents(
return await allFutures(
manager.sentFuture, manager.propagatedFuture, manager.errorFuture
)
.withTimeout(timeout)
.withTimeout(timeout)
proc outcomes(manager: SendEventListenerManager): set[SendEventOutcome] =
if manager.sentFuture.completed():

View File

@ -1,6 +1,6 @@
{.used.}
import std/[strutils, net, options, sets]
import std/[strutils, sequtils, net, options, sets, tables]
import chronos, testutils/unittests, stew/byteutils
import libp2p/[peerid, peerinfo, multiaddress, crypto/crypto]
import ../testlib/[common, wakucore, wakunode, testasync]
@ -13,12 +13,12 @@ import
common/broker/broker_context,
events/message_events,
waku_relay/protocol,
node/kernel_api/filter,
node/delivery_service/subscription_manager,
]
import waku/factory/waku_conf
import tools/confutils/cli_args
# TODO: Edge testing (after MAPI edge support is completed)
const TestTimeout = chronos.seconds(10)
const NegativeTestTimeout = chronos.seconds(2)
@ -60,8 +60,10 @@ proc waitForEvents(
return await manager.receivedEvent.wait().withTimeout(timeout)
type TestNetwork = ref object
publisher: WakuNode
publisher: WakuNode # Relay node that publishes messages in tests.
meshBuddy: WakuNode # Extra relay peer for publisher's mesh (Edge tests only).
subscriber: Waku
# The receiver node in tests. Edge node in edge tests, Core node in relay tests.
publisherPeerInfo: RemotePeerInfo
proc createApiNodeConf(
@ -94,8 +96,12 @@ proc setupNetwork(
lockNewGlobalBrokerContext:
net.publisher =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
net.publisher.mountMetadata(3, @[0'u16]).expect("Failed to mount metadata")
net.publisher.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata"
)
(await net.publisher.mountRelay()).expect("Failed to mount relay")
if mode == cli_args.WakuMode.Edge:
await net.publisher.mountFilter()
await net.publisher.mountLibp2pPing()
await net.publisher.start()
@ -104,16 +110,32 @@ proc setupNetwork(
proc dummyHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
discard
# Subscribe the publisher to all shards to guarantee a GossipSub mesh with the subscriber.
# Currently, Core/Relay nodes auto-subscribe to all network shards on boot, but if
# that changes, this will be needed to cause the publisher to have shard interest
# for any shards the subscriber may want to use, which is required for waitForMesh to work.
var shards: seq[PubsubTopic]
for i in 0 ..< numShards.int:
let shard = PubsubTopic("/waku/2/rs/3/" & $i)
shards.add(PubsubTopic("/waku/2/rs/3/" & $i))
for shard in shards:
net.publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub publisher"
)
if mode == cli_args.WakuMode.Edge:
lockNewGlobalBrokerContext:
net.meshBuddy =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
net.meshBuddy.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on meshBuddy"
)
(await net.meshBuddy.mountRelay()).expect("Failed to mount relay on meshBuddy")
await net.meshBuddy.start()
for shard in shards:
net.meshBuddy.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub meshBuddy"
)
await net.meshBuddy.connectToNodes(@[net.publisherPeerInfo])
net.subscriber = await setupSubscriberNode(createApiNodeConf(mode, numShards))
await net.subscriber.node.connectToNodes(@[net.publisherPeerInfo])
@ -125,6 +147,10 @@ proc teardown(net: TestNetwork) {.async.} =
(await net.subscriber.stop()).expect("Failed to stop subscriber node")
net.subscriber = nil
if not isNil(net.meshBuddy):
await net.meshBuddy.stop()
net.meshBuddy = nil
if not isNil(net.publisher):
await net.publisher.stop()
net.publisher = nil
@ -141,18 +167,34 @@ proc waitForMesh(node: WakuNode, shard: PubsubTopic) {.async.} =
await sleepAsync(100.milliseconds)
raise newException(ValueError, "GossipSub Mesh failed to stabilize on " & shard)
proc waitForEdgeSubs(w: Waku, shard: PubsubTopic) {.async.} =
let sm = w.deliveryService.subscriptionManager
for _ in 0 ..< 50:
if sm.edgeFilterPeerCount(shard) > 0:
return
await sleepAsync(100.milliseconds)
raise newException(ValueError, "Edge filter subscription failed on " & shard)
proc publishToMesh(
net: TestNetwork, contentTopic: ContentTopic, payload: seq[byte]
): Future[Result[int, string]] {.async.} =
# Publishes a message from "publisher" via relay into the gossipsub mesh.
let shard = net.subscriber.node.getRelayShard(contentTopic)
await waitForMesh(net.publisher, shard)
let msg = WakuMessage(
payload: payload, contentTopic: contentTopic, version: 0, timestamp: now()
)
return await net.publisher.publish(some(shard), msg)
proc publishToMeshAfterEdgeReady(
net: TestNetwork, contentTopic: ContentTopic, payload: seq[byte]
): Future[Result[int, string]] {.async.} =
# First, ensure "subscriber" node (an edge node) is subscribed and ready to receive.
# Afterwards, "publisher" (relay node) sends the message in the gossipsub network.
let shard = net.subscriber.node.getRelayShard(contentTopic)
await waitForEdgeSubs(net.subscriber, shard)
return await net.publishToMesh(contentTopic, payload)
suite "Messaging API, SubscriptionManager":
asyncTest "Subscription API, relay node auto subscribe and receive message":
let net = await setupNetwork(1)
@ -398,3 +440,370 @@ suite "Messaging API, SubscriptionManager":
activeSubs.add(t)
await verifyNetworkState(activeSubs)
asyncTest "Subscription API, edge node subscribe and receive message":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let testTopic = ContentTopic("/waku/2/test-content/proto")
(await net.subscriber.subscribe(testTopic)).expect("failed to subscribe")
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
discard (await net.publishToMeshAfterEdgeReady(testTopic, "Hello, edge!".toBytes())).expect(
"Publish failed"
)
require await eventManager.waitForEvents(TestTimeout)
require eventManager.receivedMessages.len == 1
check eventManager.receivedMessages[0].contentTopic == testTopic
asyncTest "Subscription API, edge node ignores unsubscribed content topics":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let subbedTopic = ContentTopic("/waku/2/subbed-topic/proto")
let ignoredTopic = ContentTopic("/waku/2/ignored-topic/proto")
(await net.subscriber.subscribe(subbedTopic)).expect("failed to subscribe")
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
discard (await net.publishToMesh(ignoredTopic, "Ghost Msg".toBytes())).expect(
"Publish failed"
)
check not await eventManager.waitForEvents(NegativeTestTimeout)
check eventManager.receivedMessages.len == 0
asyncTest "Subscription API, edge node unsubscribe stops message receipt":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let testTopic = ContentTopic("/waku/2/unsub-test/proto")
(await net.subscriber.subscribe(testTopic)).expect("failed to subscribe")
net.subscriber.unsubscribe(testTopic).expect("failed to unsubscribe")
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
discard (await net.publishToMesh(testTopic, "Should be dropped".toBytes())).expect(
"Publish failed"
)
check not await eventManager.waitForEvents(NegativeTestTimeout)
check eventManager.receivedMessages.len == 0
asyncTest "Subscription API, edge node overlapping topics isolation":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let topicA = ContentTopic("/waku/2/topic-a/proto")
let topicB = ContentTopic("/waku/2/topic-b/proto")
(await net.subscriber.subscribe(topicA)).expect("failed to sub A")
(await net.subscriber.subscribe(topicB)).expect("failed to sub B")
let shard = net.subscriber.node.getRelayShard(topicA)
await waitForEdgeSubs(net.subscriber, shard)
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
net.subscriber.unsubscribe(topicA).expect("failed to unsub A")
discard (await net.publishToMesh(topicA, "Dropped Message".toBytes())).expect(
"Publish A failed"
)
discard
(await net.publishToMesh(topicB, "Kept Msg".toBytes())).expect("Publish B failed")
require await eventManager.waitForEvents(TestTimeout)
require eventManager.receivedMessages.len == 1
check eventManager.receivedMessages[0].contentTopic == topicB
asyncTest "Subscription API, edge node resubscribe after unsubscribe":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let testTopic = ContentTopic("/waku/2/resub-test/proto")
(await net.subscriber.subscribe(testTopic)).expect("Initial sub failed")
var eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
discard (await net.publishToMeshAfterEdgeReady(testTopic, "Msg 1".toBytes())).expect(
"Pub 1 failed"
)
require await eventManager.waitForEvents(TestTimeout)
eventManager.teardown()
net.subscriber.unsubscribe(testTopic).expect("Unsub failed")
eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
discard
(await net.publishToMesh(testTopic, "Ghost".toBytes())).expect("Ghost pub failed")
check not await eventManager.waitForEvents(NegativeTestTimeout)
eventManager.teardown()
(await net.subscriber.subscribe(testTopic)).expect("Resub failed")
eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
discard (await net.publishToMeshAfterEdgeReady(testTopic, "Msg 2".toBytes())).expect(
"Pub 2 failed"
)
require await eventManager.waitForEvents(TestTimeout)
check eventManager.receivedMessages[0].payload == "Msg 2".toBytes()
asyncTest "Subscription API, edge node failover after service peer dies":
# NOTE: This test is a bit more verbose because it defines a custom topology.
# It doesn't use the shared TestNetwork helper.
# This mounts two service peers for the edge node then fails one.
let numShards: uint16 = 1
let shards = @[PubsubTopic("/waku/2/rs/3/0")]
proc dummyHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
discard
var publisher: WakuNode
lockNewGlobalBrokerContext:
publisher =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
publisher.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on publisher"
)
(await publisher.mountRelay()).expect("Failed to mount relay on publisher")
await publisher.mountFilter()
await publisher.mountLibp2pPing()
await publisher.start()
for shard in shards:
publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub publisher"
)
let publisherPeerInfo = publisher.peerInfo.toRemotePeerInfo()
var meshBuddy: WakuNode
lockNewGlobalBrokerContext:
meshBuddy =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
meshBuddy.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on meshBuddy"
)
(await meshBuddy.mountRelay()).expect("Failed to mount relay on meshBuddy")
await meshBuddy.mountFilter()
await meshBuddy.mountLibp2pPing()
await meshBuddy.start()
for shard in shards:
meshBuddy.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub meshBuddy"
)
let meshBuddyPeerInfo = meshBuddy.peerInfo.toRemotePeerInfo()
await meshBuddy.connectToNodes(@[publisherPeerInfo])
let conf = createApiNodeConf(cli_args.WakuMode.Edge, numShards)
var subscriber: Waku
lockNewGlobalBrokerContext:
subscriber = (await createNode(conf)).expect("Failed to create edge subscriber")
(await startWaku(addr subscriber)).expect("Failed to start edge subscriber")
# Connect edge subscriber to both filter servers so selectPeers finds both
await subscriber.node.connectToNodes(@[publisherPeerInfo, meshBuddyPeerInfo])
let testTopic = ContentTopic("/waku/2/failover-test/proto")
let shard = subscriber.node.getRelayShard(testTopic)
(await subscriber.subscribe(testTopic)).expect("Failed to subscribe")
# Wait for dialing both filter servers (HealthyThreshold = 2)
for _ in 0 ..< 100:
if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2:
break
await sleepAsync(100.milliseconds)
check subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2
# Verify message delivery with both servers alive
await waitForMesh(publisher, shard)
var eventManager = newReceiveEventListenerManager(subscriber.brokerCtx, 1)
let msg1 = WakuMessage(
payload: "Before failover".toBytes(),
contentTopic: testTopic,
version: 0,
timestamp: now(),
)
discard (await publisher.publish(some(shard), msg1)).expect("Publish 1 failed")
require await eventManager.waitForEvents(TestTimeout)
check eventManager.receivedMessages[0].payload == "Before failover".toBytes()
eventManager.teardown()
# Disconnect meshBuddy from edge (keeps relay mesh alive for publishing)
await subscriber.node.disconnectNode(meshBuddyPeerInfo)
# Wait for the dead peer to be pruned
for _ in 0 ..< 50:
if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) < 2:
break
await sleepAsync(100.milliseconds)
check subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 1
# Verify messages still arrive through the surviving filter server (publisher)
eventManager = newReceiveEventListenerManager(subscriber.brokerCtx, 1)
let msg2 = WakuMessage(
payload: "After failover".toBytes(),
contentTopic: testTopic,
version: 0,
timestamp: now(),
)
discard (await publisher.publish(some(shard), msg2)).expect("Publish 2 failed")
require await eventManager.waitForEvents(TestTimeout)
check eventManager.receivedMessages[0].payload == "After failover".toBytes()
eventManager.teardown()
(await subscriber.stop()).expect("Failed to stop subscriber")
await meshBuddy.stop()
await publisher.stop()
asyncTest "Subscription API, edge node dials replacement after peer eviction":
  # Scenario: three filter-capable service peers (publisher, meshBuddy,
  # sparePeer). The edge subscriber confirms subscriptions on 2 of them
  # (HealthyThreshold). After one confirmed peer (meshBuddy) is disconnected,
  # the subscription maintenance loop must detect the loss and dial the spare
  # peer so the confirmed-peer count recovers back to the threshold.
  let numShards: uint16 = 1
  let shards = @[PubsubTopic("/waku/2/rs/3/0")]

  # No-op relay handler: service peers only need to participate in the mesh,
  # they do not inspect the messages themselves.
  proc dummyHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
    discard

  # --- publisher: relay + filter service node; also the message source ---
  var publisher: WakuNode
  lockNewGlobalBrokerContext:
    publisher =
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    publisher.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
      "Failed to mount metadata on publisher"
    )
    (await publisher.mountRelay()).expect("Failed to mount relay on publisher")
    await publisher.mountFilter()
    await publisher.mountLibp2pPing()
    await publisher.start()
    for shard in shards:
      publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
        "Failed to sub publisher"
      )
  let publisherPeerInfo = publisher.peerInfo.toRemotePeerInfo()

  # --- meshBuddy: second filter server; later disconnected to force failover ---
  var meshBuddy: WakuNode
  lockNewGlobalBrokerContext:
    meshBuddy =
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    meshBuddy.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
      "Failed to mount metadata on meshBuddy"
    )
    (await meshBuddy.mountRelay()).expect("Failed to mount relay on meshBuddy")
    await meshBuddy.mountFilter()
    await meshBuddy.mountLibp2pPing()
    await meshBuddy.start()
    for shard in shards:
      meshBuddy.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
        "Failed to sub meshBuddy"
      )
  let meshBuddyPeerInfo = meshBuddy.peerInfo.toRemotePeerInfo()

  # --- sparePeer: third filter server; the expected replacement target ---
  var sparePeer: WakuNode
  lockNewGlobalBrokerContext:
    sparePeer =
      newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
    sparePeer.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
      "Failed to mount metadata on sparePeer"
    )
    (await sparePeer.mountRelay()).expect("Failed to mount relay on sparePeer")
    await sparePeer.mountFilter()
    await sparePeer.mountLibp2pPing()
    await sparePeer.start()
    for shard in shards:
      sparePeer.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
        "Failed to sub sparePeer"
      )
  let sparePeerInfo = sparePeer.peerInfo.toRemotePeerInfo()

  # Wire the service peers to the publisher so a relay mesh forms for the shard.
  await meshBuddy.connectToNodes(@[publisherPeerInfo])
  await sparePeer.connectToNodes(@[publisherPeerInfo])

  # --- edge subscriber under test ---
  let conf = createApiNodeConf(cli_args.WakuMode.Edge, numShards)
  var subscriber: Waku
  lockNewGlobalBrokerContext:
    subscriber = (await createNode(conf)).expect("Failed to create edge subscriber")
    (await startWaku(addr subscriber)).expect("Failed to start edge subscriber")
  # Connect to all three servers so the peer selection can see every candidate.
  await subscriber.node.connectToNodes(
    @[publisherPeerInfo, meshBuddyPeerInfo, sparePeerInfo]
  )

  let testTopic = ContentTopic("/waku/2/replacement-test/proto")
  let shard = subscriber.node.getRelayShard(testTopic)
  (await subscriber.subscribe(testTopic)).expect("Failed to subscribe")

  # Wait (up to ~10 s) for 2 confirmed filter peers (HealthyThreshold).
  # The 3rd candidate is available but intentionally not dialed yet.
  for _ in 0 ..< 100:
    if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2:
      break
    await sleepAsync(100.milliseconds)
  require subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) ==
    2

  # Drop one of the two confirmed filter servers to simulate peer loss.
  await subscriber.node.disconnectNode(meshBuddyPeerInfo)

  # Wait for the sub loop to detect the loss and dial a replacement,
  # restoring the confirmed-peer count to the threshold.
  for _ in 0 ..< 100:
    if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2:
      break
    await sleepAsync(100.milliseconds)
  check subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2

  # End-to-end check: a message published after the replacement must still be
  # delivered to the edge subscriber via filter.
  await waitForMesh(publisher, shard)
  var eventManager = newReceiveEventListenerManager(subscriber.brokerCtx, 1)
  let msg = WakuMessage(
    payload: "After replacement".toBytes(),
    contentTopic: testTopic,
    version: 0,
    timestamp: now(),
  )
  discard (await publisher.publish(some(shard), msg)).expect("Publish failed")
  require await eventManager.waitForEvents(TestTimeout)
  check eventManager.receivedMessages[0].payload == "After replacement".toBytes()
  eventManager.teardown()

  # Teardown in reverse dependency order: subscriber first, then service peers.
  (await subscriber.stop()).expect("Failed to stop subscriber")
  await sparePeer.stop()
  await meshBuddy.stop()
  await publisher.stop()

View File

@ -126,12 +126,11 @@ suite "Entry Nodes Classification":
suite "Entry Nodes Processing":
test "Process mixed entry nodes":
let entryNodes =
@[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g",
]
let entryNodes = @[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g",
]
let result = processEntryNodes(entryNodes)
check:
@ -147,11 +146,10 @@ suite "Entry Nodes Processing":
staticNodes[0] == entryNodes[1] # multiaddr added to static
test "Process only ENRTree nodes":
let entryNodes =
@[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"enrtree://ANOTHER_TREE@example.com",
]
let entryNodes = @[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"enrtree://ANOTHER_TREE@example.com",
]
let result = processEntryNodes(entryNodes)
check:
@ -165,11 +163,10 @@ suite "Entry Nodes Processing":
enrTreeUrls == entryNodes
test "Process only multiaddresses":
let entryNodes =
@[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"/ip4/192.168.1.1/tcp/60001/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd",
]
let entryNodes = @[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"/ip4/192.168.1.1/tcp/60001/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd",
]
let result = processEntryNodes(entryNodes)
check:
@ -183,11 +180,10 @@ suite "Entry Nodes Processing":
staticNodes == entryNodes
test "Process only ENR nodes":
let entryNodes =
@[
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g",
"enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9MBgmlkgnY0iXNlY3AyNTZrMaEDk49D8JjMSns4p1XVNBvJquOUzT4PENSJknkROspfAFGg3RjcIJ2X4N1ZHCCd2g",
]
let entryNodes = @[
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g",
"enr:-QEkuECnZ3IbVAgkOzv-QLnKC4dRKAPRY80m1-R7G8jZ7yfT3ipEfBrhKN7ARcQgQ-vg-h40AQzyvAkPYlHPaFKk6u9MBgmlkgnY0iXNlY3AyNTZrMaEDk49D8JjMSns4p1XVNBvJquOUzT4PENSJknkROspfAFGg3RjcIJ2X4N1ZHCCd2g",
]
let result = processEntryNodes(entryNodes)
check:
@ -224,13 +220,12 @@ suite "Entry Nodes Processing":
"Entry node error: Unrecognized entry node format. Must start with 'enrtree:', 'enr:', or '/'"
test "Process different multiaddr formats":
let entryNodes =
@[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"/ip6/::1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd",
"/dns4/example.com/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYe",
"/dns/node.example.org/tcp/443/wss/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYf",
]
let entryNodes = @[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"/ip6/::1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYd",
"/dns4/example.com/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYe",
"/dns/node.example.org/tcp/443/wss/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYf",
]
let result = processEntryNodes(entryNodes)
check:
@ -244,13 +239,12 @@ suite "Entry Nodes Processing":
staticNodes == entryNodes
test "Process with duplicate entries":
let entryNodes =
@[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
]
let entryNodes = @[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
]
let result = processEntryNodes(entryNodes)
check:

View File

@ -4,23 +4,22 @@ import std/strutils, results, stew/byteutils, testutils/unittests
import waku/common/base64
suite "Waku Common - stew base64 wrapper":
const TestData =
@[
# Test vectors from RFC 4648
# See: https://datatracker.ietf.org/doc/html/rfc4648#section-10
("", Base64String("")),
("f", Base64String("Zg==")),
("fo", Base64String("Zm8=")),
("foo", Base64String("Zm9v")),
("foob", Base64String("Zm9vYg==")),
("fooba", Base64String("Zm9vYmE=")),
("foobar", Base64String("Zm9vYmFy")),
const TestData = @[
# Test vectors from RFC 4648
# See: https://datatracker.ietf.org/doc/html/rfc4648#section-10
("", Base64String("")),
("f", Base64String("Zg==")),
("fo", Base64String("Zm8=")),
("foo", Base64String("Zm9v")),
("foob", Base64String("Zm9vYg==")),
("fooba", Base64String("Zm9vYmE=")),
("foobar", Base64String("Zm9vYmFy")),
# Custom test vectors
("\x01", Base64String("AQ==")),
("\x13", Base64String("Ew==")),
("\x01\x02\x03\x04", Base64String("AQIDBA==")),
]
# Custom test vectors
("\x01", Base64String("AQ==")),
("\x13", Base64String("Ew==")),
("\x01\x02\x03\x04", Base64String("AQIDBA==")),
]
for (plaintext, encoded) in TestData:
test "encode into base64 (" & escape(plaintext) & " -> \"" & string(encoded) & "\")":

View File

@ -25,7 +25,6 @@ suite "RateLimitSetting":
test "Parse rate limit setting - ok":
let test1 = "10/2m"
let test2 = " store : 10 /1h"
let test2a = "storev2 : 10 /1h"
let test2b = "storeV3: 12 /1s"
let test3 = "LIGHTPUSH: 10/ 1m"
let test4 = "px:10/2 s "
@ -34,7 +33,6 @@ suite "RateLimitSetting":
let expU = UnlimitedRateLimit
let exp1: RateLimitSetting = (10, 2.minutes)
let exp2: RateLimitSetting = (10, 1.hours)
let exp2a: RateLimitSetting = (10, 1.hours)
let exp2b: RateLimitSetting = (12, 1.seconds)
let exp3: RateLimitSetting = (10, 1.minutes)
let exp4: RateLimitSetting = (10, 2.seconds)
@ -42,7 +40,6 @@ suite "RateLimitSetting":
let res1 = ProtocolRateLimitSettings.parse(@[test1])
let res2 = ProtocolRateLimitSettings.parse(@[test2])
let res2a = ProtocolRateLimitSettings.parse(@[test2a])
let res2b = ProtocolRateLimitSettings.parse(@[test2b])
let res3 = ProtocolRateLimitSettings.parse(@[test3])
let res4 = ProtocolRateLimitSettings.parse(@[test4])
@ -53,15 +50,7 @@ suite "RateLimitSetting":
res1.get() == {GLOBAL: exp1, FILTER: FilterDefaultPerPeerRateLimit}.toTable()
res2.isOk()
res2.get() ==
{
GLOBAL: expU,
FILTER: FilterDefaultPerPeerRateLimit,
STOREV2: exp2,
STOREV3: exp2,
}.toTable()
res2a.isOk()
res2a.get() ==
{GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV2: exp2a}.toTable()
{GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV3: exp2}.toTable()
res2b.isOk()
res2b.get() ==
{GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV3: exp2b}.toTable()
@ -77,7 +66,6 @@ suite "RateLimitSetting":
test "Parse rate limit setting - err":
let test1 = "10/2d"
let test2 = " stre : 10 /1h"
let test2a = "storev2 10 /1h"
let test2b = "storev3: 12 1s"
let test3 = "somethingelse: 10/ 1m"
let test4 = ":px:10/2 s "
@ -85,7 +73,6 @@ suite "RateLimitSetting":
let res1 = ProtocolRateLimitSettings.parse(@[test1])
let res2 = ProtocolRateLimitSettings.parse(@[test2])
let res2a = ProtocolRateLimitSettings.parse(@[test2a])
let res2b = ProtocolRateLimitSettings.parse(@[test2b])
let res3 = ProtocolRateLimitSettings.parse(@[test3])
let res4 = ProtocolRateLimitSettings.parse(@[test4])
@ -94,7 +81,6 @@ suite "RateLimitSetting":
check:
res1.isErr()
res2.isErr()
res2a.isErr()
res2b.isErr()
res3.isErr()
res4.isErr()
@ -103,13 +89,12 @@ suite "RateLimitSetting":
test "Parse rate limit setting - complex":
let expU = UnlimitedRateLimit
let test1 = @["lightpush:2/2ms", "10/2m", " store: 3/3s", " storev2:12/12s"]
let test1 = @["lightpush:2/2ms", "10/2m", " store: 3/3s"]
let exp1 = {
GLOBAL: (10, 2.minutes),
FILTER: FilterDefaultPerPeerRateLimit,
LIGHTPUSH: (2, 2.milliseconds),
STOREV3: (3, 3.seconds),
STOREV2: (12, 12.seconds),
}.toTable()
let res1 = ProtocolRateLimitSettings.parse(test1)
@ -118,7 +103,6 @@ suite "RateLimitSetting":
res1.isOk()
res1.get() == exp1
res1.get().getSetting(PEEREXCHG) == (10, 2.minutes)
res1.get().getSetting(STOREV2) == (12, 12.seconds)
res1.get().getSetting(STOREV3) == (3, 3.seconds)
res1.get().getSetting(LIGHTPUSH) == (2, 2.milliseconds)
@ -127,7 +111,6 @@ suite "RateLimitSetting":
GLOBAL: expU,
LIGHTPUSH: (2, 2.milliseconds),
STOREV3: (3, 3.seconds),
STOREV2: (3, 3.seconds),
FILTER: (4, 42.milliseconds),
PEEREXCHG: (10, 10.hours),
}.toTable()
@ -138,13 +121,9 @@ suite "RateLimitSetting":
res2.isOk()
res2.get() == exp2
let test3 =
@["storev2:1/1s", "store:3/3s", "storev3:4/42ms", "storev3:5/5s", "storev3:6/6s"]
let test3 = @["store:3/3s", "storev3:4/42ms", "storev3:5/5s", "storev3:6/6s"]
let exp3 = {
GLOBAL: expU,
FILTER: FilterDefaultPerPeerRateLimit,
STOREV3: (6, 6.seconds),
STOREV2: (1, 1.seconds),
GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV3: (6, 6.seconds)
}.toTable()
let res3 = ProtocolRateLimitSettings.parse(test3)

View File

@ -45,11 +45,11 @@ static:
suite "RequestBroker macro (async mode)":
test "serves zero-argument providers":
check SimpleResponse
.setProvider(
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "hi"))
)
.isOk()
.setProvider(
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "hi"))
)
.isOk()
let res = waitFor SimpleResponse.request()
check res.isOk()
@ -65,12 +65,14 @@ suite "RequestBroker macro (async mode)":
test "serves input-based providers":
var seen: seq[string] = @[]
check KeyedResponse
.setProvider(
proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} =
seen.add(key)
ok(KeyedResponse(key: key, payload: key & "-payload+" & $subKey))
)
.isOk()
.setProvider(
proc(
key: string, subKey: int
): Future[Result[KeyedResponse, string]] {.async.} =
seen.add(key)
ok(KeyedResponse(key: key, payload: key & "-payload+" & $subKey))
)
.isOk()
let res = waitFor KeyedResponse.request("topic", 1)
check res.isOk()
@ -82,11 +84,13 @@ suite "RequestBroker macro (async mode)":
test "catches provider exception":
check KeyedResponse
.setProvider(
proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} =
raise newException(ValueError, "simulated failure")
)
.isOk()
.setProvider(
proc(
key: string, subKey: int
): Future[Result[KeyedResponse, string]] {.async.} =
raise newException(ValueError, "simulated failure")
)
.isOk()
let res = waitFor KeyedResponse.request("neglected", 11)
check res.isErr()
@ -101,18 +105,18 @@ suite "RequestBroker macro (async mode)":
test "supports both provider types simultaneously":
check DualResponse
.setProvider(
proc(): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base", count: 1))
)
.isOk()
.setProvider(
proc(): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base", count: 1))
)
.isOk()
check DualResponse
.setProvider(
proc(suffix: string): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base" & suffix, count: suffix.len))
)
.isOk()
.setProvider(
proc(suffix: string): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base" & suffix, count: suffix.len))
)
.isOk()
let noInput = waitFor DualResponse.request()
check noInput.isOk()
@ -127,11 +131,11 @@ suite "RequestBroker macro (async mode)":
test "clearProvider resets both entries":
check DualResponse
.setProvider(
proc(): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "temp", count: 0))
)
.isOk()
.setProvider(
proc(): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "temp", count: 0))
)
.isOk()
DualResponse.clearProvider()
let res = waitFor DualResponse.request()
@ -139,11 +143,11 @@ suite "RequestBroker macro (async mode)":
test "implicit zero-argument provider works by default":
check ImplicitResponse
.setProvider(
proc(): Future[Result[ImplicitResponse, string]] {.async.} =
ok(ImplicitResponse(note: "auto"))
)
.isOk()
.setProvider(
proc(): Future[Result[ImplicitResponse, string]] {.async.} =
ok(ImplicitResponse(note: "auto"))
)
.isOk()
let res = waitFor ImplicitResponse.request()
check res.isOk()
@ -158,18 +162,18 @@ suite "RequestBroker macro (async mode)":
test "no provider override":
check DualResponse
.setProvider(
proc(): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base", count: 1))
)
.isOk()
.setProvider(
proc(): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base", count: 1))
)
.isOk()
check DualResponse
.setProvider(
proc(suffix: string): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base" & suffix, count: suffix.len))
)
.isOk()
.setProvider(
proc(suffix: string): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "base" & suffix, count: suffix.len))
)
.isOk()
let overrideProc = proc(): Future[Result[DualResponse, string]] {.async.} =
ok(DualResponse(note: "something else", count: 1))
@ -207,27 +211,27 @@ suite "RequestBroker macro (async mode)":
SimpleResponse.clearProvider()
check SimpleResponse
.setProvider(
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "default"))
)
.isOk()
.setProvider(
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "default"))
)
.isOk()
check SimpleResponse
.setProvider(
BrokerContext(0x11111111'u32),
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "one")),
)
.isOk()
.setProvider(
BrokerContext(0x11111111'u32),
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "one")),
)
.isOk()
check SimpleResponse
.setProvider(
BrokerContext(0x22222222'u32),
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "two")),
)
.isOk()
.setProvider(
BrokerContext(0x22222222'u32),
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "two")),
)
.isOk()
let defaultRes = waitFor SimpleResponse.request()
check defaultRes.isOk()
@ -246,12 +250,12 @@ suite "RequestBroker macro (async mode)":
check missing.error.contains("no provider registered for broker context")
check SimpleResponse
.setProvider(
BrokerContext(0x11111111'u32),
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "dup")),
)
.isErr()
.setProvider(
BrokerContext(0x11111111'u32),
proc(): Future[Result[SimpleResponse, string]] {.async.} =
ok(SimpleResponse(value: "dup")),
)
.isErr()
SimpleResponse.clearProvider()
@ -259,27 +263,33 @@ suite "RequestBroker macro (async mode)":
KeyedResponse.clearProvider()
check KeyedResponse
.setProvider(
proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} =
ok(KeyedResponse(key: "default-" & key, payload: $subKey))
)
.isOk()
.setProvider(
proc(
key: string, subKey: int
): Future[Result[KeyedResponse, string]] {.async.} =
ok(KeyedResponse(key: "default-" & key, payload: $subKey))
)
.isOk()
check KeyedResponse
.setProvider(
BrokerContext(0xABCDEF01'u32),
proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} =
ok(KeyedResponse(key: "k1-" & key, payload: "p" & $subKey)),
)
.isOk()
.setProvider(
BrokerContext(0xABCDEF01'u32),
proc(
key: string, subKey: int
): Future[Result[KeyedResponse, string]] {.async.} =
ok(KeyedResponse(key: "k1-" & key, payload: "p" & $subKey)),
)
.isOk()
check KeyedResponse
.setProvider(
BrokerContext(0xABCDEF02'u32),
proc(key: string, subKey: int): Future[Result[KeyedResponse, string]] {.async.} =
ok(KeyedResponse(key: "k2-" & key, payload: "q" & $subKey)),
)
.isOk()
.setProvider(
BrokerContext(0xABCDEF02'u32),
proc(
key: string, subKey: int
): Future[Result[KeyedResponse, string]] {.async.} =
ok(KeyedResponse(key: "k2-" & key, payload: "q" & $subKey)),
)
.isOk()
let d = waitFor KeyedResponse.request("topic", 7)
check d.isOk()
@ -343,11 +353,11 @@ static:
suite "RequestBroker macro (sync mode)":
test "serves zero-argument providers (sync)":
check SimpleResponseSync
.setProvider(
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "hi"))
)
.isOk()
.setProvider(
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "hi"))
)
.isOk()
let res = SimpleResponseSync.request()
check res.isOk()
@ -363,12 +373,12 @@ suite "RequestBroker macro (sync mode)":
test "serves input-based providers (sync)":
var seen: seq[string] = @[]
check KeyedResponseSync
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
seen.add(key)
ok(KeyedResponseSync(key: key, payload: key & "-payload+" & $subKey))
)
.isOk()
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
seen.add(key)
ok(KeyedResponseSync(key: key, payload: key & "-payload+" & $subKey))
)
.isOk()
let res = KeyedResponseSync.request("topic", 1)
check res.isOk()
@ -380,11 +390,11 @@ suite "RequestBroker macro (sync mode)":
test "catches provider exception (sync)":
check KeyedResponseSync
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
let res = KeyedResponseSync.request("neglected", 11)
check res.isErr()
@ -399,18 +409,18 @@ suite "RequestBroker macro (sync mode)":
test "supports both provider types simultaneously (sync)":
check DualResponseSync
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base", count: 1))
)
.isOk()
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base", count: 1))
)
.isOk()
check DualResponseSync
.setProvider(
proc(suffix: string): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base" & suffix, count: suffix.len))
)
.isOk()
.setProvider(
proc(suffix: string): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "base" & suffix, count: suffix.len))
)
.isOk()
let noInput = DualResponseSync.request()
check noInput.isOk()
@ -425,11 +435,11 @@ suite "RequestBroker macro (sync mode)":
test "clearProvider resets both entries (sync)":
check DualResponseSync
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "temp", count: 0))
)
.isOk()
.setProvider(
proc(): Result[DualResponseSync, string] =
ok(DualResponseSync(note: "temp", count: 0))
)
.isOk()
DualResponseSync.clearProvider()
let res = DualResponseSync.request()
@ -437,11 +447,11 @@ suite "RequestBroker macro (sync mode)":
test "implicit zero-argument provider works by default (sync)":
check ImplicitResponseSync
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
ok(ImplicitResponseSync(note: "auto"))
)
.isOk()
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
ok(ImplicitResponseSync(note: "auto"))
)
.isOk()
let res = ImplicitResponseSync.request()
check res.isOk()
@ -456,11 +466,11 @@ suite "RequestBroker macro (sync mode)":
test "implicit zero-argument provider raises error (sync)":
check ImplicitResponseSync
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
.setProvider(
proc(): Result[ImplicitResponseSync, string] =
raise newException(ValueError, "simulated failure")
)
.isOk()
let res = ImplicitResponseSync.request()
check res.isErr()
@ -472,19 +482,19 @@ suite "RequestBroker macro (sync mode)":
SimpleResponseSync.clearProvider()
check SimpleResponseSync
.setProvider(
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "default"))
)
.isOk()
.setProvider(
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "default"))
)
.isOk()
check SimpleResponseSync
.setProvider(
BrokerContext(0x10101010'u32),
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "ten")),
)
.isOk()
.setProvider(
BrokerContext(0x10101010'u32),
proc(): Result[SimpleResponseSync, string] =
ok(SimpleResponseSync(value: "ten")),
)
.isOk()
let defaultRes = SimpleResponseSync.request()
check defaultRes.isOk()
@ -504,19 +514,19 @@ suite "RequestBroker macro (sync mode)":
KeyedResponseSync.clearProvider()
check KeyedResponseSync
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
ok(KeyedResponseSync(key: "default-" & key, payload: $subKey))
)
.isOk()
.setProvider(
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
ok(KeyedResponseSync(key: "default-" & key, payload: $subKey))
)
.isOk()
check KeyedResponseSync
.setProvider(
BrokerContext(0xA0A0A0A0'u32),
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
ok(KeyedResponseSync(key: "k-" & key, payload: "p" & $subKey)),
)
.isOk()
.setProvider(
BrokerContext(0xA0A0A0A0'u32),
proc(key: string, subKey: int): Result[KeyedResponseSync, string] =
ok(KeyedResponseSync(key: "k-" & key, payload: "p" & $subKey)),
)
.isOk()
let d = KeyedResponseSync.request("topic", 2)
check d.isOk()
@ -576,11 +586,11 @@ RequestBroker(sync):
suite "RequestBroker macro (POD/external types)":
test "supports non-object response types (async)":
check PodResponse
.setProvider(
proc(): Future[Result[PodResponse, string]] {.async.} =
ok(PodResponse(123))
)
.isOk()
.setProvider(
proc(): Future[Result[PodResponse, string]] {.async.} =
ok(PodResponse(123))
)
.isOk()
let res = waitFor PodResponse.request()
check res.isOk()
@ -590,11 +600,11 @@ suite "RequestBroker macro (POD/external types)":
test "supports aliased external types (async)":
check ExternalAliasedResponse
.setProvider(
proc(): Future[Result[ExternalAliasedResponse, string]] {.async.} =
ok(ExternalAliasedResponse(ExternalDefinedTypeAsync(label: "ext")))
)
.isOk()
.setProvider(
proc(): Future[Result[ExternalAliasedResponse, string]] {.async.} =
ok(ExternalAliasedResponse(ExternalDefinedTypeAsync(label: "ext")))
)
.isOk()
let res = waitFor ExternalAliasedResponse.request()
check res.isOk()
@ -604,11 +614,11 @@ suite "RequestBroker macro (POD/external types)":
test "supports aliased external types (sync)":
check ExternalAliasedResponseSync
.setProvider(
proc(): Result[ExternalAliasedResponseSync, string] =
ok(ExternalAliasedResponseSync(ExternalDefinedTypeSync(label: "ext")))
)
.isOk()
.setProvider(
proc(): Result[ExternalAliasedResponseSync, string] =
ok(ExternalAliasedResponseSync(ExternalDefinedTypeSync(label: "ext")))
)
.isOk()
let res = ExternalAliasedResponseSync.request()
check res.isOk()
@ -618,32 +628,32 @@ suite "RequestBroker macro (POD/external types)":
test "distinct response types avoid overload ambiguity (sync)":
check DistinctStringResponseA
.setProvider(
proc(): Result[DistinctStringResponseA, string] =
ok(DistinctStringResponseA("a"))
)
.isOk()
.setProvider(
proc(): Result[DistinctStringResponseA, string] =
ok(DistinctStringResponseA("a"))
)
.isOk()
check DistinctStringResponseB
.setProvider(
proc(): Result[DistinctStringResponseB, string] =
ok(DistinctStringResponseB("b"))
)
.isOk()
.setProvider(
proc(): Result[DistinctStringResponseB, string] =
ok(DistinctStringResponseB("b"))
)
.isOk()
check ExternalDistinctResponseA
.setProvider(
proc(): Result[ExternalDistinctResponseA, string] =
ok(ExternalDistinctResponseA(ExternalDefinedTypeShared(label: "ea")))
)
.isOk()
.setProvider(
proc(): Result[ExternalDistinctResponseA, string] =
ok(ExternalDistinctResponseA(ExternalDefinedTypeShared(label: "ea")))
)
.isOk()
check ExternalDistinctResponseB
.setProvider(
proc(): Result[ExternalDistinctResponseB, string] =
ok(ExternalDistinctResponseB(ExternalDefinedTypeShared(label: "eb")))
)
.isOk()
.setProvider(
proc(): Result[ExternalDistinctResponseB, string] =
ok(ExternalDistinctResponseB(ExternalDefinedTypeShared(label: "eb")))
)
.isOk()
let resA = DistinctStringResponseA.request()
let resB = DistinctStringResponseB.request()

View File

@ -29,17 +29,16 @@ suite "SQLite - migrations":
test "filter and order migration script file paths":
## Given
let paths =
@[
sourceDir / "00001_valid.up.sql",
sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL",
sourceDir / "00007_unorderedValid.up.sql",
sourceDir / "00003_validRepeated.up.sql",
sourceDir / "00003_validRepeated.up.sql",
sourceDir / "00666_noMigrationScript.bmp",
sourceDir / "00X00_invalidVersion.down.sql",
sourceDir / "00008_notWithinVersionRange.up.sql",
]
let paths = @[
sourceDir / "00001_valid.up.sql",
sourceDir / "00002_alsoValidWithUpperCaseExtension.UP.SQL",
sourceDir / "00007_unorderedValid.up.sql",
sourceDir / "00003_validRepeated.up.sql",
sourceDir / "00003_validRepeated.up.sql",
sourceDir / "00666_noMigrationScript.bmp",
sourceDir / "00X00_invalidVersion.down.sql",
sourceDir / "00008_notWithinVersionRange.up.sql",
]
let
lowerVersion = 0
@ -64,16 +63,14 @@ suite "SQLite - migrations":
test "break migration scripts into queries":
## Given
let statement1 =
"""CREATE TABLE contacts1 (
let statement1 = """CREATE TABLE contacts1 (
contact_id INTEGER PRIMARY KEY,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
email TEXT NOT NULL UNIQUE,
phone TEXT NOT NULL UNIQUE
);"""
let statement2 =
"""CREATE TABLE contacts2 (
let statement2 = """CREATE TABLE contacts2 (
contact_id INTEGER PRIMARY KEY,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
@ -91,16 +88,14 @@ suite "SQLite - migrations":
test "break statements script into queries - empty statements":
## Given
let statement1 =
"""CREATE TABLE contacts1 (
let statement1 = """CREATE TABLE contacts1 (
contact_id INTEGER PRIMARY KEY,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
email TEXT NOT NULL UNIQUE,
phone TEXT NOT NULL UNIQUE
);"""
let statement2 =
"""CREATE TABLE contacts2 (
let statement2 = """CREATE TABLE contacts2 (
contact_id INTEGER PRIMARY KEY,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,

View File

@ -4,7 +4,7 @@ import
libp2p/crypto/[crypto, secp],
libp2p/multiaddress,
nimcrypto/utils,
std/[options, sequtils],
std/[options, random, sequtils],
results,
testutils/unittests
import
@ -22,11 +22,13 @@ suite "Waku Conf - build with cluster conf":
builder.withRelayServiceRatio("50:50")
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
let userMessageLimit = rand(1 .. 1000).uint64
## Given
builder.rlnRelayConf.withEthClientUrls(@["https://my_eth_rpc_url/"])
builder.withNetworkConf(networkConf)
builder.withRelay(true)
builder.rlnRelayConf.withUserMessageLimit(userMessageLimit)
## When
let resConf = builder.build()
@ -54,7 +56,7 @@ suite "Waku Conf - build with cluster conf":
check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
check rlnRelayConf.chainId == networkConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
check rlnRelayConf.userMessageLimit == userMessageLimit.uint
test "Cluster Conf is passed, but relay is disabled":
## Setup
@ -174,11 +176,13 @@ suite "Waku Conf - build with cluster conf":
# Mount all shards in network
let expectedShards = toSeq[0.uint16 .. 7.uint16]
let contractAddress = "0x0123456789ABCDEF"
let userMessageLimit = rand(1 .. 1000).uint64
## Given
builder.rlnRelayConf.withEthContractAddress(contractAddress)
builder.withNetworkConf(networkConf)
builder.withRelay(true)
builder.rlnRelayConf.withUserMessageLimit(userMessageLimit)
## When
let resConf = builder.build()
@ -207,7 +211,7 @@ suite "Waku Conf - build with cluster conf":
check rlnRelayConf.dynamic == networkConf.rlnRelayDynamic
check rlnRelayConf.chainId == networkConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == networkConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == networkConf.rlnRelayUserMessageLimit
check rlnRelayConf.userMessageLimit == userMessageLimit.uint
suite "Waku Conf - node key":
test "Node key is generated":

View File

@ -10,22 +10,21 @@ import
import waku/waku_core/peers, waku/node/peer_manager/peer_store/waku_peer_storage
proc `==`(a, b: RemotePeerInfo): bool =
let comparisons =
@[
a.peerId == b.peerId,
a.addrs == b.addrs,
a.enr == b.enr,
a.protocols == b.protocols,
a.agent == b.agent,
a.protoVersion == b.protoVersion,
a.publicKey == b.publicKey,
a.connectedness == b.connectedness,
a.disconnectTime == b.disconnectTime,
a.origin == b.origin,
a.direction == b.direction,
a.lastFailedConn == b.lastFailedConn,
a.numberFailedConn == b.numberFailedConn,
]
let comparisons = @[
a.peerId == b.peerId,
a.addrs == b.addrs,
a.enr == b.enr,
a.protocols == b.protocols,
a.agent == b.agent,
a.protoVersion == b.protoVersion,
a.publicKey == b.publicKey,
a.connectedness == b.connectedness,
a.disconnectTime == b.disconnectTime,
a.origin == b.origin,
a.direction == b.direction,
a.lastFailedConn == b.lastFailedConn,
a.numberFailedConn == b.numberFailedConn,
]
allIt(comparisons, it == true)
@ -61,18 +60,17 @@ suite "Protobuf Serialisation":
suite "encode":
test "simple":
# Given the expected bytes representation of a valid RemotePeerInfo
let expectedBuffer: seq[byte] =
@[
10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145,
217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74,
141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8,
3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134,
72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245,
213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1,
216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81,
81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247,
124, 64, 158, 98, 40, 0, 48, 0,
]
let expectedBuffer: seq[byte] = @[
10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145,
217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74,
141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8,
3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134, 72,
206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245, 213,
48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1, 216, 230,
236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81, 81, 12, 9,
142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247, 124, 64, 158,
98, 40, 0, 48, 0,
]
# When converting a valid RemotePeerInfo to a ProtoBuffer
let encodedRemotePeerInfo = encode(remotePeerInfo).get()
@ -87,18 +85,17 @@ suite "Protobuf Serialisation":
suite "decode":
test "simple":
# Given the bytes representation of a valid RemotePeerInfo
let buffer: seq[byte] =
@[
10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145,
217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74,
141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8,
3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134,
72, 206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245,
213, 48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1,
216, 230, 236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81,
81, 12, 9, 142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247,
124, 64, 158, 98, 40, 0, 48, 0,
]
let buffer: seq[byte] = @[
10, 39, 0, 37, 8, 2, 18, 33, 3, 43, 246, 238, 219, 109, 147, 79, 129, 40, 145,
217, 209, 109, 105, 185, 186, 200, 180, 203, 72, 166, 220, 196, 232, 170, 74,
141, 125, 255, 112, 238, 204, 18, 8, 4, 192, 168, 0, 1, 6, 31, 144, 34, 95, 8,
3, 18, 91, 48, 89, 48, 19, 6, 7, 42, 134, 72, 206, 61, 2, 1, 6, 8, 42, 134, 72,
206, 61, 3, 1, 7, 3, 66, 0, 4, 222, 61, 48, 15, 163, 106, 224, 232, 245, 213,
48, 137, 157, 131, 171, 171, 68, 171, 243, 22, 31, 22, 42, 75, 201, 1, 216, 230,
236, 218, 2, 14, 139, 109, 95, 141, 163, 5, 37, 231, 29, 104, 81, 81, 12, 9,
142, 92, 71, 198, 70, 165, 151, 251, 77, 206, 192, 52, 233, 247, 124, 64, 158,
98, 40, 0, 48, 0,
]
# When converting a valid buffer to RemotePeerInfo
let decodedRemotePeerInfo = RemotePeerInfo.decode(buffer).get()

View File

@ -6,6 +6,5 @@ import
./test_wakunode_lightpush,
./test_wakunode_peer_exchange,
./test_wakunode_store,
./test_wakunode_legacy_store,
./test_wakunode_peer_manager,
./test_wakunode_health_monitor

View File

@ -12,12 +12,18 @@ import
node/health_monitor/health_status,
node/health_monitor/connection_status,
node/health_monitor/protocol_health,
node/health_monitor/topic_health,
node/health_monitor/node_health_monitor,
node/delivery_service/delivery_service,
node/delivery_service/subscription_manager,
node/kernel_api/relay,
node/kernel_api/store,
node/kernel_api/lightpush,
node/kernel_api/filter,
events/health_events,
events/peer_events,
waku_archive,
common/broker/broker_context,
]
import ../testlib/[wakunode, wakucore], ../waku_archive/archive_utils
@ -35,13 +41,12 @@ proc protoHealthMock(kind: WakuProtocol, health: HealthStatus): ProtocolHealth =
suite "Health Monitor - health state calculation":
test "Disconnected, zero peers":
let protocols =
@[
protoHealthMock(RelayProtocol, HealthStatus.NOT_READY),
protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY),
protoHealthMock(FilterClientProtocol, HealthStatus.NOT_READY),
protoHealthMock(LightpushClientProtocol, HealthStatus.NOT_READY),
]
let protocols = @[
protoHealthMock(RelayProtocol, HealthStatus.NOT_READY),
protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY),
protoHealthMock(FilterClientProtocol, HealthStatus.NOT_READY),
protoHealthMock(LightpushClientProtocol, HealthStatus.NOT_READY),
]
let strength = initTable[WakuProtocol, int]()
let state = calculateConnectionState(protocols, strength, some(MockDLow))
check state == ConnectionStatus.Disconnected
@ -64,13 +69,12 @@ suite "Health Monitor - health state calculation":
check state == ConnectionStatus.Connected
test "Connected, robust edge":
let protocols =
@[
protoHealthMock(RelayProtocol, HealthStatus.NOT_MOUNTED),
protoHealthMock(LightpushClientProtocol, HealthStatus.READY),
protoHealthMock(FilterClientProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.READY),
]
let protocols = @[
protoHealthMock(RelayProtocol, HealthStatus.NOT_MOUNTED),
protoHealthMock(LightpushClientProtocol, HealthStatus.READY),
protoHealthMock(FilterClientProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.READY),
]
var strength = initTable[WakuProtocol, int]()
strength[LightpushClientProtocol] = HealthyThreshold
strength[FilterClientProtocol] = HealthyThreshold
@ -79,12 +83,11 @@ suite "Health Monitor - health state calculation":
check state == ConnectionStatus.Connected
test "Disconnected, edge missing store":
let protocols =
@[
protoHealthMock(LightpushClientProtocol, HealthStatus.READY),
protoHealthMock(FilterClientProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY),
]
let protocols = @[
protoHealthMock(LightpushClientProtocol, HealthStatus.READY),
protoHealthMock(FilterClientProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY),
]
var strength = initTable[WakuProtocol, int]()
strength[LightpushClientProtocol] = HealthyThreshold
strength[FilterClientProtocol] = HealthyThreshold
@ -94,12 +97,11 @@ suite "Health Monitor - health state calculation":
test "PartiallyConnected, edge meets minimum failover requirement":
let weakCount = max(1, HealthyThreshold - 1)
let protocols =
@[
protoHealthMock(LightpushClientProtocol, HealthStatus.READY),
protoHealthMock(FilterClientProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.READY),
]
let protocols = @[
protoHealthMock(LightpushClientProtocol, HealthStatus.READY),
protoHealthMock(FilterClientProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.READY),
]
var strength = initTable[WakuProtocol, int]()
strength[LightpushClientProtocol] = weakCount
strength[FilterClientProtocol] = weakCount
@ -108,11 +110,10 @@ suite "Health Monitor - health state calculation":
check state == ConnectionStatus.PartiallyConnected
test "Connected, robust relay ignores store server":
let protocols =
@[
protoHealthMock(RelayProtocol, HealthStatus.READY),
protoHealthMock(StoreProtocol, HealthStatus.READY),
]
let protocols = @[
protoHealthMock(RelayProtocol, HealthStatus.READY),
protoHealthMock(StoreProtocol, HealthStatus.READY),
]
var strength = initTable[WakuProtocol, int]()
strength[RelayProtocol] = MockDLow
strength[StoreProtocol] = 0
@ -120,12 +121,11 @@ suite "Health Monitor - health state calculation":
check state == ConnectionStatus.Connected
test "Connected, robust relay ignores store client":
let protocols =
@[
protoHealthMock(RelayProtocol, HealthStatus.READY),
protoHealthMock(StoreProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY),
]
let protocols = @[
protoHealthMock(RelayProtocol, HealthStatus.READY),
protoHealthMock(StoreProtocol, HealthStatus.READY),
protoHealthMock(StoreClientProtocol, HealthStatus.NOT_READY),
]
var strength = initTable[WakuProtocol, int]()
strength[RelayProtocol] = MockDLow
strength[StoreProtocol] = 0
@ -135,13 +135,12 @@ suite "Health Monitor - health state calculation":
suite "Health Monitor - events":
asyncTest "Core (relay) health update":
let
nodeAKey = generateSecp256k1Key()
var nodeA: WakuNode
lockNewGlobalBrokerContext:
let nodeAKey = generateSecp256k1Key()
nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0))
(await nodeA.mountRelay()).expect("Node A failed to mount Relay")
await nodeA.start()
(await nodeA.mountRelay()).expect("Node A failed to mount Relay")
await nodeA.start()
let monitorA = NodeHealthMonitor.new(nodeA)
@ -157,17 +156,15 @@ suite "Health Monitor - events":
monitorA.startHealthMonitor().expect("Health monitor failed to start")
let
nodeBKey = generateSecp256k1Key()
var nodeB: WakuNode
lockNewGlobalBrokerContext:
let nodeBKey = generateSecp256k1Key()
nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0))
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
await nodeB.mountStore()
await nodeB.start()
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
await nodeB.mountStore()
await nodeB.start()
await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()])
@ -220,15 +217,20 @@ suite "Health Monitor - events":
await nodeA.stop()
asyncTest "Edge (light client) health update":
let
nodeAKey = generateSecp256k1Key()
var nodeA: WakuNode
lockNewGlobalBrokerContext:
let nodeAKey = generateSecp256k1Key()
nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0))
nodeA.mountLightpushClient()
await nodeA.mountFilterClient()
nodeA.mountStoreClient()
require nodeA.mountAutoSharding(1, 8).isOk
nodeA.mountMetadata(1, @[0'u16]).expect("Node A failed to mount metadata")
await nodeA.start()
nodeA.mountLightpushClient()
await nodeA.mountFilterClient()
nodeA.mountStoreClient()
await nodeA.start()
let ds =
DeliveryService.new(false, nodeA).expect("Failed to create DeliveryService")
ds.startDeliveryService().expect("Failed to start DeliveryService")
let monitorA = NodeHealthMonitor.new(nodeA)
@ -244,23 +246,40 @@ suite "Health Monitor - events":
monitorA.startHealthMonitor().expect("Health monitor failed to start")
let
nodeBKey = generateSecp256k1Key()
var nodeB: WakuNode
lockNewGlobalBrokerContext:
let nodeBKey = generateSecp256k1Key()
nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0))
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
(await nodeB.mountLightpush()).expect("Node B failed to mount lightpush")
await nodeB.mountFilter()
await nodeB.mountStore()
require nodeB.mountAutoSharding(1, 8).isOk
nodeB.mountMetadata(1, toSeq(0'u16 ..< 8'u16)).expect(
"Node B failed to mount metadata"
)
await nodeB.start()
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
(await nodeB.mountLightpush()).expect("Node B failed to mount lightpush")
await nodeB.mountFilter()
await nodeB.mountStore()
await nodeB.start()
var metadataFut = newFuture[void]("waitForMetadata")
let metadataLis = WakuPeerEvent
.listen(
nodeA.brokerCtx,
proc(evt: WakuPeerEvent): Future[void] {.async: (raises: []), gcsafe.} =
if not metadataFut.finished and
evt.kind == WakuPeerEventKind.EventMetadataUpdated:
metadataFut.complete()
,
)
.expect("Failed to listen for metadata")
await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()])
let metadataOk = await metadataFut.withTimeout(TestConnectivityTimeLimit)
WakuPeerEvent.dropListener(nodeA.brokerCtx, metadataLis)
require metadataOk
let connectTimeLimit = Moment.now() + TestConnectivityTimeLimit
var gotConnected = false
@ -298,4 +317,118 @@ suite "Health Monitor - events":
lastStatus == ConnectionStatus.Disconnected
await monitorA.stopHealthMonitor()
await ds.stopDeliveryService()
await nodeA.stop()
asyncTest "Edge health driven by confirmed filter subscriptions":
var nodeA: WakuNode
lockNewGlobalBrokerContext:
let nodeAKey = generateSecp256k1Key()
nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0))
await nodeA.mountFilterClient()
nodeA.mountLightpushClient()
nodeA.mountStoreClient()
require nodeA.mountAutoSharding(1, 8).isOk
nodeA.mountMetadata(1, @[0'u16]).expect("Node A failed to mount metadata")
await nodeA.start()
let ds =
DeliveryService.new(false, nodeA).expect("Failed to create DeliveryService")
ds.startDeliveryService().expect("Failed to start DeliveryService")
let subMgr = ds.subscriptionManager
var nodeB: WakuNode
lockNewGlobalBrokerContext:
let nodeBKey = generateSecp256k1Key()
nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0))
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
(await nodeB.mountLightpush()).expect("Node B failed to mount lightpush")
await nodeB.mountFilter()
await nodeB.mountStore()
require nodeB.mountAutoSharding(1, 8).isOk
nodeB.mountMetadata(1, toSeq(0'u16 ..< 8'u16)).expect(
"Node B failed to mount metadata"
)
await nodeB.start()
let monitorA = NodeHealthMonitor.new(nodeA)
var
lastStatus = ConnectionStatus.Disconnected
healthSignal = newAsyncEvent()
monitorA.onConnectionStatusChange = proc(status: ConnectionStatus) {.async.} =
lastStatus = status
healthSignal.fire()
monitorA.startHealthMonitor().expect("Health monitor failed to start")
var metadataFut = newFuture[void]("waitForMetadata")
let metadataLis = WakuPeerEvent
.listen(
nodeA.brokerCtx,
proc(evt: WakuPeerEvent): Future[void] {.async: (raises: []), gcsafe.} =
if not metadataFut.finished and
evt.kind == WakuPeerEventKind.EventMetadataUpdated:
metadataFut.complete()
,
)
.expect("Failed to listen for metadata")
await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()])
let metadataOk = await metadataFut.withTimeout(TestConnectivityTimeLimit)
WakuPeerEvent.dropListener(nodeA.brokerCtx, metadataLis)
require metadataOk
var deadline = Moment.now() + TestConnectivityTimeLimit
while Moment.now() < deadline:
if lastStatus == ConnectionStatus.PartiallyConnected:
break
if await healthSignal.wait().withTimeout(deadline - Moment.now()):
healthSignal.clear()
check lastStatus == ConnectionStatus.PartiallyConnected
var shardHealthFut = newFuture[EventShardTopicHealthChange]("waitForShardHealth")
let shardHealthLis = EventShardTopicHealthChange
.listen(
nodeA.brokerCtx,
proc(
evt: EventShardTopicHealthChange
): Future[void] {.async: (raises: []), gcsafe.} =
if not shardHealthFut.finished and (
evt.health == TopicHealth.MINIMALLY_HEALTHY or
evt.health == TopicHealth.SUFFICIENTLY_HEALTHY
):
shardHealthFut.complete(evt)
,
)
.expect("Failed to listen for shard health")
let contentTopic = ContentTopic("/waku/2/default-content/proto")
subMgr.subscribe(contentTopic).expect("Failed to subscribe")
let shardHealthOk = await shardHealthFut.withTimeout(TestConnectivityTimeLimit)
EventShardTopicHealthChange.dropListener(nodeA.brokerCtx, shardHealthLis)
check shardHealthOk == true
check subMgr.edgeFilterSubStates.len > 0
healthSignal.clear()
deadline = Moment.now() + TestConnectivityTimeLimit
while Moment.now() < deadline:
if lastStatus == ConnectionStatus.PartiallyConnected:
break
if await healthSignal.wait().withTimeout(deadline - Moment.now()):
healthSignal.clear()
check lastStatus == ConnectionStatus.PartiallyConnected
await ds.stopDeliveryService()
await monitorA.stopHealthMonitor()
await nodeB.stop()
await nodeA.stop()

File diff suppressed because it is too large Load Diff

View File

@ -283,31 +283,31 @@ suite "Waku RlnRelay - End to End - Static":
doAssert(
client.wakuRlnRelay
.appendRLNProof(
message1b, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 0)
)
.isOk()
.appendRLNProof(
message1b, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 0)
)
.isOk()
)
doAssert(
client.wakuRlnRelay
.appendRLNProof(
message1kib, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 1)
)
.isOk()
.appendRLNProof(
message1kib, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 1)
)
.isOk()
)
doAssert(
client.wakuRlnRelay
.appendRLNProof(
message150kib, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 2)
)
.isOk()
.appendRLNProof(
message150kib, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 2)
)
.isOk()
)
doAssert(
client.wakuRlnRelay
.appendRLNProof(
message151kibPlus, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 3)
)
.isOk()
.appendRLNProof(
message151kibPlus, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 3)
)
.isOk()
)
# When sending the 1B message
@ -372,10 +372,10 @@ suite "Waku RlnRelay - End to End - Static":
doAssert(
client.wakuRlnRelay
.appendRLNProof(
message151kibPlus, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 3)
)
.isOk()
.appendRLNProof(
message151kibPlus, epoch + float64(client.wakuRlnRelay.rlnEpochSizeSec * 3)
)
.isOk()
)
# When sending the 150KiB plus message
@ -496,11 +496,11 @@ suite "Waku RlnRelay - End to End - OnChain":
# However, it doesn't reduce the retries against the blockchain that the mounting rln process attempts (until it accepts failure).
# Note: These retries might be an unintended library issue.
discard await server
.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1)
.withTimeout(FUTURE_TIMEOUT)
.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig1)
.withTimeout(FUTURE_TIMEOUT)
discard await client
.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2)
.withTimeout(FUTURE_TIMEOUT)
.setupRelayWithOnChainRln(@[pubsubTopic], wakuRlnConfig2)
.withTimeout(FUTURE_TIMEOUT)
check:
(await serverErrorFuture.waitForResult()).get() ==

View File

@ -14,7 +14,6 @@ import
waku/[
waku_core/topics/pubsub_topic,
waku_core/topics/sharding,
waku_store_legacy/common,
node/waku_node,
node/kernel_api,
common/paging,
@ -434,18 +433,16 @@ suite "Sharding":
contentTopicShort = "/toychat/2/huilong/proto"
contentTopicFull = "/0/toychat/2/huilong/proto"
pubsubTopic = "/waku/2/rs/0/58355"
archiveMessages1 =
@[
fakeWakuMessage(
@[byte 00], ts = ts(00, timeOrigin), contentTopic = contentTopicShort
)
]
archiveMessages2 =
@[
fakeWakuMessage(
@[byte 01], ts = ts(10, timeOrigin), contentTopic = contentTopicFull
)
]
archiveMessages1 = @[
fakeWakuMessage(
@[byte 00], ts = ts(00, timeOrigin), contentTopic = contentTopicShort
)
]
archiveMessages2 = @[
fakeWakuMessage(
@[byte 01], ts = ts(10, timeOrigin), contentTopic = contentTopicFull
)
]
archiveDriver = newArchiveDriverWithMessages(pubsubTopic, archiveMessages1)
discard archiveDriver.put(pubsubTopic, archiveMessages2)
let mountArchiveResult = server.mountArchive(archiveDriver)
@ -456,29 +453,33 @@ suite "Sharding":
# Given one query for each content topic format
let
historyQuery1 = HistoryQuery(
storeQuery1 = StoreQueryRequest(
contentTopics: @[contentTopicShort],
direction: PagingDirection.Forward,
pageSize: 3,
paginationForward: PagingDirection.Forward,
paginationLimit: some(3'u64),
includeData: true,
)
historyQuery2 = HistoryQuery(
storeQuery2 = StoreQueryRequest(
contentTopics: @[contentTopicFull],
direction: PagingDirection.Forward,
pageSize: 3,
paginationForward: PagingDirection.Forward,
paginationLimit: some(3'u64),
includeData: true,
)
# When the client queries the server for the messages
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
queryResponse1 = await client.query(storeQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(storeQuery2, serverRemotePeerInfo)
assertResultOk(queryResponse1)
assertResultOk(queryResponse2)
# Then the responses of both queries should contain all the messages
check:
queryResponse1.get().messages == archiveMessages1 & archiveMessages2
queryResponse2.get().messages == archiveMessages1 & archiveMessages2
queryResponse1.get().messages.mapIt(it.message.get()) ==
archiveMessages1 & archiveMessages2
queryResponse2.get().messages.mapIt(it.message.get()) ==
archiveMessages1 & archiveMessages2
asyncTest "relay - exclusion (automatic sharding filtering)":
# Given a connected server and client subscribed to different content topics
@ -597,18 +598,16 @@ suite "Sharding":
contentTopic2 = "/0/toychat2/2/huilong/proto"
pubsubTopic2 = "/waku/2/rs/0/23286"
# Automatically generated from the contentTopic above
archiveMessages1 =
@[
fakeWakuMessage(
@[byte 00], ts = ts(00, timeOrigin), contentTopic = contentTopic1
)
]
archiveMessages2 =
@[
fakeWakuMessage(
@[byte 01], ts = ts(10, timeOrigin), contentTopic = contentTopic2
)
]
archiveMessages1 = @[
fakeWakuMessage(
@[byte 00], ts = ts(00, timeOrigin), contentTopic = contentTopic1
)
]
archiveMessages2 = @[
fakeWakuMessage(
@[byte 01], ts = ts(10, timeOrigin), contentTopic = contentTopic2
)
]
archiveDriver = newArchiveDriverWithMessages(pubsubTopic1, archiveMessages1)
discard archiveDriver.put(pubsubTopic2, archiveMessages2)
let mountArchiveResult = server.mountArchive(archiveDriver)
@ -619,29 +618,31 @@ suite "Sharding":
# Given one query for each content topic
let
historyQuery1 = HistoryQuery(
storeQuery1 = StoreQueryRequest(
contentTopics: @[contentTopic1],
direction: PagingDirection.Forward,
pageSize: 2,
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
historyQuery2 = HistoryQuery(
storeQuery2 = StoreQueryRequest(
contentTopics: @[contentTopic2],
direction: PagingDirection.Forward,
pageSize: 2,
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
# When the client queries the server for the messages
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
queryResponse1 = await client.query(storeQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(storeQuery2, serverRemotePeerInfo)
assertResultOk(queryResponse1)
assertResultOk(queryResponse2)
# Then each response should contain only the messages of the corresponding content topic
check:
queryResponse1.get().messages == archiveMessages1
queryResponse2.get().messages == archiveMessages2
queryResponse1.get().messages.mapIt(it.message.get()) == archiveMessages1
queryResponse2.get().messages.mapIt(it.message.get()) == archiveMessages2
suite "Specific Tests":
asyncTest "Configure Node with Multiple PubSub Topics":
@ -1007,22 +1008,30 @@ suite "Sharding":
# Given one query for each pubsub topic
let
historyQuery1 = HistoryQuery(
pubsubTopic: some(topic1), direction: PagingDirection.Forward, pageSize: 2
storeQuery1 = StoreQueryRequest(
pubsubTopic: some(topic1),
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
historyQuery2 = HistoryQuery(
pubsubTopic: some(topic2), direction: PagingDirection.Forward, pageSize: 2
storeQuery2 = StoreQueryRequest(
pubsubTopic: some(topic2),
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
# When the client queries the server for the messages
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
queryResponse1 = await client.query(storeQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(storeQuery2, serverRemotePeerInfo)
assertResultOk(queryResponse1)
assertResultOk(queryResponse2)
# Then each response should contain only the messages of the corresponding pubsub topic
check:
queryResponse1.get().messages == archiveMessages1[0 ..< 1]
queryResponse2.get().messages == archiveMessages2[0 ..< 1]
queryResponse1.get().messages.mapIt(it.message.get()) ==
archiveMessages1[0 ..< 1]
queryResponse2.get().messages.mapIt(it.message.get()) ==
archiveMessages2[0 ..< 1]

View File

@ -38,19 +38,18 @@ suite "Waku Store - End to End - Sorted Archive":
contentTopicSeq = @[contentTopic]
let timeOrigin = now()
let messages =
@[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let messages = @[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
archiveMessages = messages.mapIt(
WakuMessageKeyValue(
messageHash: computeMessageHash(pubsubTopic, it),
@ -542,19 +541,18 @@ suite "Waku Store - End to End - Unsorted Archive":
)
let timeOrigin = now()
let messages =
@[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)),
]
let messages = @[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(20, timeOrigin)),
]
unsortedArchiveMessages = messages.mapIt(
WakuMessageKeyValue(
messageHash: computeMessageHash(pubsubTopic, it),
@ -759,19 +757,19 @@ suite "Waku Store - End to End - Unsorted Archive without provided Timestamp":
paginationLimit: some(uint64(5)),
)
let messages =
@[ # Not providing explicit timestamp means it will be set in "arrive" order
fakeWakuMessage(@[byte 09]),
fakeWakuMessage(@[byte 07]),
fakeWakuMessage(@[byte 05]),
fakeWakuMessage(@[byte 03]),
fakeWakuMessage(@[byte 01]),
fakeWakuMessage(@[byte 00]),
fakeWakuMessage(@[byte 02]),
fakeWakuMessage(@[byte 04]),
fakeWakuMessage(@[byte 06]),
fakeWakuMessage(@[byte 08]),
]
let messages = @[
# Not providing explicit timestamp means it will be set in "arrive" order
fakeWakuMessage(@[byte 09]),
fakeWakuMessage(@[byte 07]),
fakeWakuMessage(@[byte 05]),
fakeWakuMessage(@[byte 03]),
fakeWakuMessage(@[byte 01]),
fakeWakuMessage(@[byte 00]),
fakeWakuMessage(@[byte 02]),
fakeWakuMessage(@[byte 04]),
fakeWakuMessage(@[byte 06]),
fakeWakuMessage(@[byte 08]),
]
unsortedArchiveMessages = messages.mapIt(
WakuMessageKeyValue(
messageHash: computeMessageHash(pubsubTopic, it),
@ -900,21 +898,20 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
originTs = proc(offset = 0): Timestamp {.gcsafe, raises: [].} =
ts(offset, timeOrigin)
let messages =
@[
fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic),
fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB),
fakeWakuMessage(@[byte 02], ts = originTs(20), contentTopic = contentTopicC),
fakeWakuMessage(@[byte 03], ts = originTs(30), contentTopic = contentTopic),
fakeWakuMessage(@[byte 04], ts = originTs(40), contentTopic = contentTopicB),
fakeWakuMessage(@[byte 05], ts = originTs(50), contentTopic = contentTopicC),
fakeWakuMessage(@[byte 06], ts = originTs(60), contentTopic = contentTopic),
fakeWakuMessage(@[byte 07], ts = originTs(70), contentTopic = contentTopicB),
fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC),
fakeWakuMessage(
@[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials
),
]
let messages = @[
fakeWakuMessage(@[byte 00], ts = originTs(00), contentTopic = contentTopic),
fakeWakuMessage(@[byte 01], ts = originTs(10), contentTopic = contentTopicB),
fakeWakuMessage(@[byte 02], ts = originTs(20), contentTopic = contentTopicC),
fakeWakuMessage(@[byte 03], ts = originTs(30), contentTopic = contentTopic),
fakeWakuMessage(@[byte 04], ts = originTs(40), contentTopic = contentTopicB),
fakeWakuMessage(@[byte 05], ts = originTs(50), contentTopic = contentTopicC),
fakeWakuMessage(@[byte 06], ts = originTs(60), contentTopic = contentTopic),
fakeWakuMessage(@[byte 07], ts = originTs(70), contentTopic = contentTopicB),
fakeWakuMessage(@[byte 08], ts = originTs(80), contentTopic = contentTopicC),
fakeWakuMessage(
@[byte 09], ts = originTs(90), contentTopic = contentTopicSpecials
),
]
archiveMessages = messages.mapIt(
WakuMessageKeyValue(
@ -1172,12 +1169,11 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
xasyncTest "Only ephemeral Messages:":
# Given an archive with only ephemeral messages
let
ephemeralMessages =
@[
fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true),
fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true),
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true),
]
ephemeralMessages = @[
fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true),
fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true),
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true),
]
ephemeralArchiveDriver =
newSqliteArchiveDriver().put(pubsubTopic, ephemeralMessages)
@ -1207,18 +1203,16 @@ suite "Waku Store - End to End - Archive with Multiple Topics":
xasyncTest "Mixed messages":
# Given an archive with both ephemeral and non-ephemeral messages
let
ephemeralMessages =
@[
fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true),
fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true),
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true),
]
nonEphemeralMessages =
@[
fakeWakuMessage(@[byte 03], ts = ts(30), ephemeral = false),
fakeWakuMessage(@[byte 04], ts = ts(40), ephemeral = false),
fakeWakuMessage(@[byte 05], ts = ts(50), ephemeral = false),
]
ephemeralMessages = @[
fakeWakuMessage(@[byte 00], ts = ts(00), ephemeral = true),
fakeWakuMessage(@[byte 01], ts = ts(10), ephemeral = true),
fakeWakuMessage(@[byte 02], ts = ts(20), ephemeral = true),
]
nonEphemeralMessages = @[
fakeWakuMessage(@[byte 03], ts = ts(30), ephemeral = false),
fakeWakuMessage(@[byte 04], ts = ts(40), ephemeral = false),
fakeWakuMessage(@[byte 05], ts = ts(50), ephemeral = false),
]
mixedArchiveDriver = newSqliteArchiveDriver()
.put(pubsubTopic, ephemeralMessages)
.put(pubsubTopic, nonEphemeralMessages)

View File

@ -8,8 +8,7 @@ const
EMOJI* =
"😀 😃 😄 😁 😆 😅 🤣 😂 🙂 🙃 😉 😊 😇 🥰 😍 🤩 😘 😗 😚 😙"
CODE* = "def main():\n\tprint('Hello, world!')"
QUERY* =
"""
QUERY* = """
SELECT
u.id,
u.name,
@ -30,8 +29,7 @@ const
u.id = 1
"""
TEXT_SMALL* = "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
TEXT_LARGE* =
"""
TEXT_LARGE* = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Cras gravida vulputate semper. Proin
eleifend varius cursus. Morbi lacinia posuere quam sit amet pretium. Sed non metus fermentum,
venenatis nisl id, vestibulum eros. Quisque non lorem sit amet lectus faucibus elementum eu

View File

@ -1216,30 +1216,29 @@ procSuite "Peer Manager":
shardId1 = 1.uint16
# Create 3 nodes with different shards
let nodes =
@[
newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
subscribeShards = @[shardId0],
),
newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
subscribeShards = @[shardId1],
),
newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
subscribeShards = @[shardId0],
),
]
let nodes = @[
newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
subscribeShards = @[shardId0],
),
newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
subscribeShards = @[shardId1],
),
newTestWakuNode(
generateSecp256k1Key(),
parseIpAddress("0.0.0.0"),
Port(0),
clusterId = clusterId,
subscribeShards = @[shardId0],
),
]
await allFutures(nodes.mapIt(it.start()))
for node in nodes:
@ -1364,13 +1363,12 @@ procSuite "Peer Manager":
node.peerManager.switch.peerStore[ProtoBook][peerInfo.peerId] = @[WakuRelayCodec]
## When: selectPeer is called with malformed pubsub topic
let invalidTopics =
@[
some(PubsubTopic("invalid-topic")),
some(PubsubTopic("/waku/2/invalid")),
some(PubsubTopic("/waku/2/rs/abc/0")), # non-numeric cluster
some(PubsubTopic("")), # empty topic
]
let invalidTopics = @[
some(PubsubTopic("invalid-topic")),
some(PubsubTopic("/waku/2/invalid")),
some(PubsubTopic("/waku/2/rs/abc/0")), # non-numeric cluster
some(PubsubTopic("")), # empty topic
]
## Then: Returns none(RemotePeerInfo) without crashing
for invalidTopic in invalidTopics:

View File

@ -35,14 +35,12 @@ suite "Waku API - Create node":
nodeConf.rest = false
nodeConf.numShardsInNetwork = 16
nodeConf.maxMessageSize = "1024 KiB"
nodeConf.entryNodes =
@[
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g"
]
nodeConf.staticnodes =
@[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
]
nodeConf.entryNodes = @[
"enr:-QESuEC1p_s3xJzAC_XlOuuNrhVUETmfhbm1wxRGis0f7DlqGSw2FM-p2Vn7gmfkTTnAe8Ys2cgGBN8ufJnvzKQFZqFMBgmlkgnY0iXNlY3AyNTZrMaEDS8-D878DrdbNwcuY-3p1qdDp5MOoCurhdsNPJTXZ3c5g3RjcIJ2X4N1ZHCCd2g"
]
nodeConf.staticnodes = @[
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc"
]
## When
let node = (await createNode(nodeConf)).valueOr:
@ -67,11 +65,10 @@ suite "Waku API - Create node":
nodeConf.mode = Core
nodeConf.clusterId = 42'u16
nodeConf.rest = false
nodeConf.entryNodes =
@[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
]
nodeConf.entryNodes = @[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im",
"/ip4/127.0.0.1/tcp/60000/p2p/16Uuu2HBmAcHvhLqQKwSSbX6BG5JLWUDRcaLVrehUVqpw7fz1hbYc",
]
## When
let node = (await createNode(nodeConf)).valueOr:

View File

@ -307,30 +307,29 @@ suite "KeyFile test suite (adapted from nim-eth keyfile tests)":
# but the last byte of mac is changed to 00.
# While ciphertext is the correct encryption of priv under password,
# mac verfication should fail and nothing will be decrypted
let keyfileWrongMac =
%*{
"keyfile": {
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {"iv": "6087dab2f9fdbbfaddc31a909735c1e6"},
"ciphertext":
"5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf": "pbkdf2",
"kdfparams": {
"c": 262144,
"dklen": 32,
"prf": "hmac-sha256",
"salt": "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd",
},
"mac": "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e900",
let keyfileWrongMac = %*{
"keyfile": {
"crypto": {
"cipher": "aes-128-ctr",
"cipherparams": {"iv": "6087dab2f9fdbbfaddc31a909735c1e6"},
"ciphertext":
"5318b4d5bcd28de64ee5559e671353e16f075ecae9f99c7a79a38af5f869aa46",
"kdf": "pbkdf2",
"kdfparams": {
"c": 262144,
"dklen": 32,
"prf": "hmac-sha256",
"salt": "ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd",
},
"id": "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version": 3,
"mac": "517ead924a9d0dc3124507e3393d175ce3ff7c1e96529c6c555ce9e51205e900",
},
"name": "test1",
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d",
}
"id": "3198bc9c-6672-5ab3-d995-4942343ae5b6",
"version": 3,
},
"name": "test1",
"password": "testpassword",
"priv": "7a28b5ba57c53603b0b07b56bba752f7784bf506fa95edc395f5cf6c7514fe9d",
}
# Decryption with correct password
let expectedSecret = decodeHex(keyfileWrongMac.getOrDefault("priv").getStr())

View File

@ -669,11 +669,10 @@ procSuite "Waku Noise":
# <- s
# ...
# So we define accordingly the sequence of the pre-message public keys
let preMessagePKs: seq[NoisePublicKey] =
@[
toNoisePublicKey(getPublicKey(aliceStaticKey)),
toNoisePublicKey(getPublicKey(bobStaticKey)),
]
let preMessagePKs: seq[NoisePublicKey] = @[
toNoisePublicKey(getPublicKey(aliceStaticKey)),
toNoisePublicKey(getPublicKey(bobStaticKey)),
]
var aliceHS = initialize(
hsPattern = hsPattern,

View File

@ -117,11 +117,10 @@ procSuite "Waku Rendezvous":
## Given: A light client node with no relay protocol
let
clusterId = 10.uint16
configuredShards =
@[
RelayShard(clusterId: clusterId, shardId: 0),
RelayShard(clusterId: clusterId, shardId: 1),
]
configuredShards = @[
RelayShard(clusterId: clusterId, shardId: 0),
RelayShard(clusterId: clusterId, shardId: 1),
]
let lightClient = newTestWakuNode(
generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0), clusterId = clusterId

View File

@ -12,14 +12,14 @@ import waku/node/waku_switch, ./testlib/common, ./testlib/wakucore
proc newCircuitRelayClientSwitch(relayClient: RelayClient): Switch =
SwitchBuilder
.new()
.withRng(rng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(relayClient)
.build()
.new()
.withRng(rng())
.withAddresses(@[MultiAddress.init("/ip4/0.0.0.0/tcp/0").tryGet()])
.withTcpTransport()
.withMplex()
.withNoise()
.withCircuitRelay(relayClient)
.build()
suite "Waku Switch":
asyncTest "Waku Switch works with AutoNat":

View File

@ -1,6 +1,6 @@
import chronos
import waku/[waku_core/message, waku_store, waku_store_legacy]
import waku/[waku_core/message, waku_store]
const
FUTURE_TIMEOUT* = 1.seconds
@ -18,9 +18,6 @@ proc newBoolFuture*(): Future[bool] =
proc newHistoryFuture*(): Future[StoreQueryRequest] =
newFuture[StoreQueryRequest]()
proc newLegacyHistoryFuture*(): Future[waku_store_legacy.HistoryQuery] =
newFuture[waku_store_legacy.HistoryQuery]()
proc toResult*[T](future: Future[T]): Result[T, string] =
if future.cancelled():
return chronos.err("Future timeouted before completing.")

View File

@ -1,27 +0,0 @@
import chronicles, chronos
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver as driver_module,
waku/waku_archive_legacy/driver/builder,
waku/waku_archive_legacy/driver/postgres_driver
const storeMessageDbUrl = "postgres://postgres:test123@localhost:5432/postgres"
proc newTestPostgresDriver*(): Future[Result[ArchiveDriver, string]] {.
async, deprecated
.} =
proc onErr(errMsg: string) {.gcsafe, closure.} =
error "error creating ArchiveDriver", error = errMsg
quit(QuitFailure)
let
vacuum = false
migrate = true
maxNumConn = 50
let driverRes =
await ArchiveDriver.new(storeMessageDbUrl, vacuum, migrate, maxNumConn, onErr)
if driverRes.isErr():
onErr("could not create archive driver: " & driverRes.error)
return ok(driverRes.get())

View File

@ -42,7 +42,6 @@ proc defaultTestWakuConfBuilder*(): WakuConfBuilder =
builder.withRelay(true)
builder.withRendezvous(true)
builder.storeServiceConf.withDbMigration(false)
builder.storeServiceConf.withSupportV2(false)
return builder
proc defaultTestWakuConf*(): WakuConf =

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -121,16 +121,15 @@ suite "Waku Archive - Retention policy":
retentionPolicy: RetentionPolicy =
CapacityRetentionPolicy.new(capacity = capacity)
let messages =
@[
fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(0)),
fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(1)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(2)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(3)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(4)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(5)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(6)),
]
let messages = @[
fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(0)),
fakeWakuMessage(contentTopic = DefaultContentTopic, ts = ts(1)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(2)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(3)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(4)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(5)),
fakeWakuMessage(contentTopic = contentTopic, ts = ts(6)),
]
## When
for msg in messages:

View File

@ -36,14 +36,13 @@ suite "Waku Archive - message handling":
let archive = newWakuArchive(driver)
## Given
let msgList =
@[
fakeWakuMessage(ephemeral = false, payload = "1"),
fakeWakuMessage(ephemeral = true, payload = "2"),
fakeWakuMessage(ephemeral = true, payload = "3"),
fakeWakuMessage(ephemeral = true, payload = "4"),
fakeWakuMessage(ephemeral = false, payload = "5"),
]
let msgList = @[
fakeWakuMessage(ephemeral = false, payload = "1"),
fakeWakuMessage(ephemeral = true, payload = "2"),
fakeWakuMessage(ephemeral = true, payload = "3"),
fakeWakuMessage(ephemeral = true, payload = "4"),
fakeWakuMessage(ephemeral = false, payload = "5"),
]
## When
for msg in msgList:
@ -127,39 +126,38 @@ suite "Waku Archive - message handling":
procSuite "Waku Archive - find messages":
## Fixtures
let timeOrigin = now()
let msgListA =
@[
fakeWakuMessage(
@[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin)
),
fakeWakuMessage(
@[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin)
),
fakeWakuMessage(
@[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin)
),
fakeWakuMessage(
@[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin)
),
fakeWakuMessage(
@[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin)
),
fakeWakuMessage(
@[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin)
),
fakeWakuMessage(
@[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin)
),
fakeWakuMessage(
@[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin)
),
fakeWakuMessage(
@[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin)
),
fakeWakuMessage(
@[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin)
),
]
let msgListA = @[
fakeWakuMessage(
@[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin)
),
fakeWakuMessage(
@[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin)
),
fakeWakuMessage(
@[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin)
),
fakeWakuMessage(
@[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin)
),
fakeWakuMessage(
@[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin)
),
fakeWakuMessage(
@[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin)
),
fakeWakuMessage(
@[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin)
),
fakeWakuMessage(
@[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin)
),
fakeWakuMessage(
@[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin)
),
fakeWakuMessage(
@[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin)
),
]
let archiveA = block:
let
@ -446,19 +444,18 @@ procSuite "Waku Archive - find messages":
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2")),
fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 5], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 6], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 7], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 8], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2")),
]
let msgList = @[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2")),
fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 5], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 6], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 7], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 8], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2")),
]
for msg in msgList:
require (

View File

@ -1,55 +0,0 @@
{.used.}
import std/options, results, chronos, libp2p/crypto/crypto
import
waku/[
node/peer_manager,
waku_core,
waku_archive_legacy,
waku_archive_legacy/common,
waku_archive_legacy/driver/sqlite_driver,
waku_archive_legacy/driver/sqlite_driver/migrations,
common/databases/db_sqlite,
],
../testlib/[wakucore]
proc newSqliteDatabase*(path: Option[string] = string.none()): SqliteDatabase =
SqliteDatabase.new(path.get(":memory:")).tryGet()
proc newSqliteArchiveDriver*(): ArchiveDriver =
let database = newSqliteDatabase()
migrate(database).tryGet()
return SqliteDriver.new(database).tryGet()
proc newWakuArchive*(driver: ArchiveDriver): WakuArchive =
WakuArchive.new(driver).get()
proc computeArchiveCursor*(
pubsubTopic: PubsubTopic, message: WakuMessage
): ArchiveCursor =
ArchiveCursor(
pubsubTopic: pubsubTopic,
senderTime: message.timestamp,
storeTime: message.timestamp,
digest: computeDigest(message),
hash: computeMessageHash(pubsubTopic, message),
)
proc put*(
driver: ArchiveDriver, pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
): ArchiveDriver =
for msg in msgList:
let
msgDigest = computeDigest(msg)
msgHash = computeMessageHash(pubsubTopic, msg)
_ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp)
# discard crashes
return driver
proc newArchiveDriverWithMessages*(
pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
): ArchiveDriver =
var driver = newSqliteArchiveDriver()
driver = driver.put(pubsubTopic, msgList)
return driver

View File

@ -1,13 +0,0 @@
{.used.}
import
./test_driver_postgres_query,
./test_driver_postgres,
./test_driver_queue_index,
./test_driver_queue_pagination,
./test_driver_queue_query,
./test_driver_queue,
./test_driver_sqlite_query,
./test_driver_sqlite,
./test_retention_policy,
./test_waku_archive

View File

@ -1,220 +0,0 @@
{.used.}
import std/[sequtils, options], testutils/unittests, chronos
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/postgres_driver,
waku/waku_archive/driver/postgres_driver as new_postgres_driver,
waku/waku_core,
waku/waku_core/message/digest,
../testlib/wakucore,
../testlib/testasync,
../testlib/postgres_legacy,
../testlib/postgres as new_postgres
proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
ArchiveCursor(
pubsubTopic: pubsubTopic,
senderTime: message.timestamp,
storeTime: message.timestamp,
digest: computeDigest(message),
hash: computeMessageHash(pubsubTopic, message),
)
suite "Postgres driver":
## Unique driver instance
var driver {.threadvar.}: postgres_driver.PostgresDriver
## We need to artificially create an instance of the "newDriver"
## because this is the only one in charge of creating partitions
## We will clean legacy store soon and this file will get removed.
var newDriver {.threadvar.}: new_postgres_driver.PostgresDriver
asyncSetup:
let driverRes = await postgres_legacy.newTestPostgresDriver()
if driverRes.isErr():
assert false, driverRes.error
driver = postgres_driver.PostgresDriver(driverRes.get())
let newDriverRes = await new_postgres.newTestPostgresDriver()
if driverRes.isErr():
assert false, driverRes.error
newDriver = new_postgres_driver.PostgresDriver(newDriverRes.get())
asyncTeardown:
var resetRes = await driver.reset()
if resetRes.isErr():
assert false, resetRes.error
(await driver.close()).expect("driver to close")
resetRes = await newDriver.reset()
if resetRes.isErr():
assert false, resetRes.error
(await newDriver.close()).expect("driver to close")
asyncTest "Asynchronous queries":
var futures = newSeq[Future[ArchiveDriverResult[void]]](0)
let beforeSleep = now()
for _ in 1 .. 100:
futures.add(driver.sleep(1))
await allFutures(futures)
let diff = now() - beforeSleep
# Actually, the diff randomly goes between 1 and 2 seconds.
# although in theory it should spend 1s because we establish 100
# connections and we spawn 100 tasks that spend ~1s each.
assert diff < 20_000_000_000
asyncTest "Insert a message":
const contentTopic = "test-content-topic"
const meta = "test meta"
let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta)
let computedDigest = computeDigest(msg)
let computedHash = computeMessageHash(DefaultPubsubTopic, msg)
let putRes = await driver.put(
DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp
)
assert putRes.isOk(), putRes.error
let storedMsg = (await driver.getAllMessages()).tryGet()
assert storedMsg.len == 1
let (pubsubTopic, actualMsg, digest, _, hash) = storedMsg[0]
assert actualMsg.contentTopic == contentTopic
assert pubsubTopic == DefaultPubsubTopic
assert toHex(computedDigest.data) == toHex(digest)
assert toHex(actualMsg.payload) == toHex(msg.payload)
assert toHex(computedHash) == toHex(hash)
assert toHex(actualMsg.meta) == toHex(msg.meta)
asyncTest "Insert and query message":
const contentTopic1 = "test-content-topic-1"
const contentTopic2 = "test-content-topic-2"
const pubsubTopic1 = "pubsubtopic-1"
const pubsubTopic2 = "pubsubtopic-2"
let msg1 = fakeWakuMessage(contentTopic = contentTopic1)
var putRes = await driver.put(
pubsubTopic1,
msg1,
computeDigest(msg1),
computeMessageHash(pubsubTopic1, msg1),
msg1.timestamp,
)
assert putRes.isOk(), putRes.error
let msg2 = fakeWakuMessage(contentTopic = contentTopic2)
putRes = await driver.put(
pubsubTopic2,
msg2,
computeDigest(msg2),
computeMessageHash(pubsubTopic2, msg2),
msg2.timestamp,
)
assert putRes.isOk(), putRes.error
let countMessagesRes = await driver.getMessagesCount()
assert countMessagesRes.isOk(), $countMessagesRes.error
assert countMessagesRes.get() == 2
var messagesRes = await driver.getMessages(contentTopic = @[contentTopic1])
assert messagesRes.isOk(), $messagesRes.error
assert messagesRes.get().len == 1
# Get both content topics, check ordering
messagesRes =
await driver.getMessages(contentTopic = @[contentTopic1, contentTopic2])
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 2
assert messagesRes.get()[0][1].contentTopic == contentTopic1
# Descending order
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2], ascendingOrder = false
)
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 2
assert messagesRes.get()[0][1].contentTopic == contentTopic2
# cursor
# Get both content topics
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2],
cursor = some(computeTestCursor(pubsubTopic1, messagesRes.get()[1][1])),
)
assert messagesRes.isOk()
assert messagesRes.get().len == 1
# Get both content topics but one pubsub topic
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
)
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 1
assert messagesRes.get()[0][1].contentTopic == contentTopic1
# Limit
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2], maxPageSize = 1
)
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 1
asyncTest "Insert true duplicated messages":
# Validates that two completely equal messages can not be stored.
let now = now()
let msg1 = fakeWakuMessage(ts = now)
let msg2 = fakeWakuMessage(ts = now)
let initialNumMsgs = (await driver.getMessagesCount()).valueOr:
raiseAssert "could not get num mgs correctly: " & $error
var putRes = await driver.put(
DefaultPubsubTopic,
msg1,
computeDigest(msg1),
computeMessageHash(DefaultPubsubTopic, msg1),
msg1.timestamp,
)
assert putRes.isOk(), putRes.error
var newNumMsgs = (await driver.getMessagesCount()).valueOr:
raiseAssert "could not get num mgs correctly: " & $error
assert newNumMsgs == (initialNumMsgs + 1.int64),
"wrong number of messages: " & $newNumMsgs
putRes = await driver.put(
DefaultPubsubTopic,
msg2,
computeDigest(msg2),
computeMessageHash(DefaultPubsubTopic, msg2),
msg2.timestamp,
)
assert putRes.isOk()
newNumMsgs = (await driver.getMessagesCount()).valueOr:
raiseAssert "could not get num mgs correctly: " & $error
assert newNumMsgs == (initialNumMsgs + 1.int64),
"wrong number of messages: " & $newNumMsgs

File diff suppressed because it is too large Load Diff

View File

@ -1,182 +0,0 @@
{.used.}
import std/options, results, testutils/unittests
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.},
waku/waku_archive_legacy/driver/queue_driver/index,
waku/waku_core
# Helper functions
proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
## Use i to generate an Index WakuMessage
var data {.noinit.}: array[32, byte]
for x in data.mitems:
x = i.byte
let
message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
topic = "test-pubsub-topic"
cursor = Index(
receiverTime: Timestamp(i),
senderTime: Timestamp(i),
digest: MessageDigest(data: data),
pubsubTopic: topic,
hash: computeMessageHash(topic, message),
)
(cursor, message)
proc getPrepopulatedTestQueue(unsortedSet: auto, capacity: int): QueueDriver =
let driver = QueueDriver.new(capacity)
for i in unsortedSet:
let (index, message) = genIndexedWakuMessage(i.int8)
discard driver.add(index, message)
driver
procSuite "Sorted driver queue":
test "queue capacity - add a message over the limit":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
# Fill up the queue
for i in 1 .. capacity:
let (index, message) = genIndexedWakuMessage(i.int8)
require(driver.add(index, message).isOk())
# Add one more. Capacity should not be exceeded
let (index, message) = genIndexedWakuMessage(capacity.int8 + 1)
require(driver.add(index, message).isOk())
## Then
check:
driver.len == capacity
test "queue capacity - add message older than oldest in the queue":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
# Fill up the queue
for i in 1 .. capacity:
let (index, message) = genIndexedWakuMessage(i.int8)
require(driver.add(index, message).isOk())
# Attempt to add message with older value than oldest in queue should fail
let
oldestTimestamp = driver.first().get().senderTime
(index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1)
addRes = driver.add(index, message)
## Then
check:
addRes.isErr()
addRes.error() == "too_old"
check:
driver.len == capacity
test "queue sort-on-insert":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
# Walk forward through the set and verify ascending order
var (prevSmaller, _) = genIndexedWakuMessage(min(unsortedSet).int8 - 1)
for i in driver.fwdIterator:
let (index, _) = i
check cmp(index, prevSmaller) > 0
prevSmaller = index
# Walk backward through the set and verify descending order
var (prevLarger, _) = genIndexedWakuMessage(max(unsortedSet).int8 + 1)
for i in driver.bwdIterator:
let (index, _) = i
check cmp(index, prevLarger) < 0
prevLarger = index
test "access first item from queue":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
## When
let firstRes = driver.first()
## Then
check:
firstRes.isOk()
let first = firstRes.tryGet()
check:
first.senderTime == Timestamp(1)
test "get first item from empty queue should fail":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
let firstRes = driver.first()
## Then
check:
firstRes.isErr()
firstRes.error() == "Not found"
test "access last item from queue":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
## When
let lastRes = driver.last()
## Then
check:
lastRes.isOk()
let last = lastRes.tryGet()
check:
last.senderTime == Timestamp(5)
test "get last item from empty queue should fail":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
let lastRes = driver.last()
## Then
check:
lastRes.isErr()
lastRes.error() == "Not found"
test "verify if queue contains an index":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
let
(existingIndex, _) = genIndexedWakuMessage(4)
(nonExistingIndex, _) = genIndexedWakuMessage(99)
## Then
check:
driver.contains(existingIndex) == true
driver.contains(nonExistingIndex) == false

View File

@ -1,219 +0,0 @@
{.used.}
import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto
import waku/waku_core, waku/waku_archive_legacy/driver/queue_driver/index
var rng = initRand()
## Helpers
proc getTestTimestamp(offset = 0): Timestamp =
let now = getNanosecondTime(epochTime() + float(offset))
Timestamp(now)
proc hashFromStr(input: string): MDigest[256] =
var ctx: sha256
ctx.init()
ctx.update(input.toBytes())
let hashed = ctx.finish()
ctx.clear()
return hashed
proc randomHash(): WakuMessageHash =
var hash: WakuMessageHash
for i in 0 ..< hash.len:
let numb: byte = byte(rng.next())
hash[i] = numb
hash
suite "Queue Driver - index":
## Test vars
let
smallIndex1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
smallIndex2 = Index(
digest: hashFromStr("1234567"), # digest is less significant than senderTime
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
largeIndex1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(9000),
hash: randomHash(),
) # only senderTime differ from smallIndex1
largeIndex2 = Index(
digest: hashFromStr("12345"), # only digest differs from smallIndex1
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
eqIndex1 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
eqIndex2 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
eqIndex3 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(9999),
# receiverTime difference should have no effect on comparisons
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
diffPsTopic = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(1100),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime2 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(10000),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime3 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(1200),
senderTime: getNanosecondTime(0),
pubsubTopic: "aaaa",
hash: randomHash(),
)
noSenderTime4 = Index(
digest: hashFromStr("0"),
receiverTime: getNanosecondTime(1200),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
test "Index comparison":
# Index comparison with senderTime diff
check:
cmp(smallIndex1, largeIndex1) < 0
cmp(smallIndex2, largeIndex1) < 0
# Index comparison with digest diff
check:
cmp(smallIndex1, smallIndex2) < 0
cmp(smallIndex1, largeIndex2) < 0
cmp(smallIndex2, largeIndex2) > 0
cmp(largeIndex1, largeIndex2) > 0
# Index comparison when equal
check:
cmp(eqIndex1, eqIndex2) == 0
# pubsubTopic difference
check:
cmp(smallIndex1, diffPsTopic) < 0
# receiverTime diff plays no role when senderTime set
check:
cmp(eqIndex1, eqIndex3) == 0
# receiverTime diff plays no role when digest/pubsubTopic equal
check:
cmp(noSenderTime1, noSenderTime2) == 0
# sort on receiverTime with no senderTimestamp and unequal pubsubTopic
check:
cmp(noSenderTime1, noSenderTime3) < 0
# sort on receiverTime with no senderTimestamp and unequal digest
check:
cmp(noSenderTime1, noSenderTime4) < 0
# sort on receiverTime if no senderTimestamp on only one side
check:
cmp(smallIndex1, noSenderTime1) < 0
cmp(noSenderTime1, smallIndex1) > 0 # Test symmetry
cmp(noSenderTime2, eqIndex3) < 0
cmp(eqIndex3, noSenderTime2) > 0 # Test symmetry
test "Index equality":
# Exactly equal
check:
eqIndex1 == eqIndex2
# Receiver time plays no role, even without sender time
check:
eqIndex1 == eqIndex3
noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal
noSenderTime1 != noSenderTime3 # pubsubTopics differ
noSenderTime1 != noSenderTime4 # digests differ
# Unequal sender time
check:
smallIndex1 != largeIndex1
# Unequal digest
check:
smallIndex1 != smallIndex2
# Unequal hash and digest
check:
smallIndex1 != eqIndex1
# Unequal pubsubTopic
check:
smallIndex1 != diffPsTopic
test "Index computation should not be empty":
## Given
let ts = getTestTimestamp()
let wm = WakuMessage(payload: @[byte 1, 2, 3], timestamp: ts)
## When
let ts2 = getTestTimestamp() + 10
let index = Index.compute(wm, ts2, DefaultContentTopic)
## Then
check:
index.digest.data.len != 0
index.digest.data.len == 32 # sha2 output length in bytes
index.receiverTime == ts2 # the receiver timestamp should be a non-zero value
index.senderTime == ts
index.pubsubTopic == DefaultContentTopic
test "Index digest of two identical messages should be the same":
## Given
let topic = ContentTopic("test-content-topic")
let
wm1 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
wm2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
## When
let ts = getTestTimestamp()
let
index1 = Index.compute(wm1, ts, DefaultPubsubTopic)
index2 = Index.compute(wm2, ts, DefaultPubsubTopic)
## Then
check:
index1.digest == index2.digest

View File

@ -1,405 +0,0 @@
{.used.}
import
std/[options, sequtils, algorithm], testutils/unittests, libp2p/protobuf/minprotobuf
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.},
waku/waku_archive_legacy/driver/queue_driver/index,
waku/waku_core,
../testlib/wakucore
proc getTestQueueDriver(numMessages: int): QueueDriver =
let testQueueDriver = QueueDriver.new(numMessages)
var data {.noinit.}: array[32, byte]
for x in data.mitems:
x = 1
for i in 0 ..< numMessages:
let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
let index = Index(
receiverTime: Timestamp(i),
senderTime: Timestamp(i),
digest: MessageDigest(data: data),
hash: computeMessageHash(DefaultPubsubTopic, msg),
)
discard testQueueDriver.add(index, msg)
return testQueueDriver
procSuite "Queue driver - pagination":
let driver = getTestQueueDriver(10)
let
indexList: seq[Index] = toSeq(driver.fwdIterator()).mapIt(it[0])
msgList: seq[WakuMessage] = toSeq(driver.fwdIterator()).mapIt(it[1])
test "Forward pagination - normal pagination":
## Given
let
pageSize: uint = 2
cursor: Option[Index] = some(indexList[3])
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 2
data == msgList[4 .. 5]
test "Forward pagination - initial pagination request with an empty cursor":
## Given
let
pageSize: uint = 2
cursor: Option[Index] = none(Index)
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 2
data == msgList[0 .. 1]
test "Forward pagination - initial pagination request with an empty cursor to fetch the entire history":
## Given
let
pageSize: uint = 13
cursor: Option[Index] = none(Index)
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 10
data == msgList[0 .. 9]
test "Forward pagination - empty msgList":
## Given
let driver = getTestQueueDriver(0)
let
pageSize: uint = 2
cursor: Option[Index] = none(Index)
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 0
test "Forward pagination - page size larger than the remaining messages":
## Given
let
pageSize: uint = 10
cursor: Option[Index] = some(indexList[3])
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 6
data == msgList[4 .. 9]
test "Forward pagination - page size larger than the maximum allowed page size":
## Given
let
pageSize: uint = MaxPageSize + 1
cursor: Option[Index] = some(indexList[3])
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
uint(data.len) <= MaxPageSize
test "Forward pagination - cursor pointing to the end of the message list":
## Given
let
pageSize: uint = 10
cursor: Option[Index] = some(indexList[9])
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 0
test "Forward pagination - invalid cursor":
## Given
let msg = fakeWakuMessage(payload = @[byte 10])
let index = ArchiveCursor(
pubsubTopic: DefaultPubsubTopic,
senderTime: msg.timestamp,
storeTime: msg.timestamp,
digest: computeDigest(msg),
).toIndex()
let
pageSize: uint = 10
cursor: Option[Index] = some(index)
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let error = page.tryError()
check:
error == QueueDriverErrorKind.INVALID_CURSOR
test "Forward pagination - initial paging query over a message list with one message":
## Given
let driver = getTestQueueDriver(1)
let
pageSize: uint = 10
cursor: Option[Index] = none(Index)
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 1
test "Forward pagination - pagination over a message list with one message":
## Given
let driver = getTestQueueDriver(1)
let
pageSize: uint = 10
cursor: Option[Index] = some(indexList[0])
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 0
test "Forward pagination - with predicate":
## Given
let
pageSize: uint = 3
cursor: Option[Index] = none(Index)
forward = true
proc onlyEvenTimes(index: Index, msg: WakuMessage): bool =
msg.timestamp.int64 mod 2 == 0
## When
let page = driver.getPage(
pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyEvenTimes
)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.mapIt(it.timestamp.int) == @[0, 2, 4]
test "Backward pagination - normal pagination":
## Given
let
pageSize: uint = 2
cursor: Option[Index] = some(indexList[3])
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data == msgList[1 .. 2].reversed
test "Backward pagination - empty msgList":
## Given
let driver = getTestQueueDriver(0)
let
pageSize: uint = 2
cursor: Option[Index] = none(Index)
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 0
test "Backward pagination - initial pagination request with an empty cursor":
## Given
let
pageSize: uint = 2
cursor: Option[Index] = none(Index)
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 2
data == msgList[8 .. 9].reversed
test "Backward pagination - initial pagination request with an empty cursor to fetch the entire history":
## Given
let
pageSize: uint = 13
cursor: Option[Index] = none(Index)
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 10
data == msgList[0 .. 9].reversed
test "Backward pagination - page size larger than the remaining messages":
## Given
let
pageSize: uint = 5
cursor: Option[Index] = some(indexList[3])
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data == msgList[0 .. 2].reversed
test "Backward pagination - page size larger than the maximum allowed page size":
## Given
let
pageSize: uint = MaxPageSize + 1
cursor: Option[Index] = some(indexList[3])
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
uint(data.len) <= MaxPageSize
test "Backward pagination - cursor pointing to the beginning of the message list":
## Given
let
pageSize: uint = 5
cursor: Option[Index] = some(indexList[0])
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 0
test "Backward pagination - invalid cursor":
## Given
let msg = fakeWakuMessage(payload = @[byte 10])
let index = ArchiveCursor(
pubsubTopic: DefaultPubsubTopic,
senderTime: msg.timestamp,
storeTime: msg.timestamp,
digest: computeDigest(msg),
).toIndex()
let
pageSize: uint = 2
cursor: Option[Index] = some(index)
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let error = page.tryError()
check:
error == QueueDriverErrorKind.INVALID_CURSOR
test "Backward pagination - initial paging query over a message list with one message":
## Given
let driver = getTestQueueDriver(1)
let
pageSize: uint = 10
cursor: Option[Index] = none(Index)
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 1
test "Backward pagination - paging query over a message list with one message":
## Given
let driver = getTestQueueDriver(1)
let
pageSize: uint = 10
cursor: Option[Index] = some(indexList[0])
forward: bool = false
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 0
test "Backward pagination - with predicate":
## Given
let
pageSize: uint = 3
cursor: Option[Index] = none(Index)
forward = false
proc onlyOddTimes(index: Index, msg: WakuMessage): bool =
msg.timestamp.int64 mod 2 != 0
## When
let page = driver.getPage(
pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyOddTimes
)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.mapIt(it.timestamp.int) == @[5, 7, 9].reversed

File diff suppressed because it is too large Load Diff

View File

@ -1,58 +0,0 @@
{.used.}
import std/sequtils, testutils/unittests, chronos
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/sqlite_driver,
waku/waku_core,
../waku_archive_legacy/archive_utils,
../testlib/wakucore
suite "SQLite driver":
test "init driver and database":
## Given
let database = newSqliteDatabase()
## When
let driverRes = SqliteDriver.new(database)
## Then
check:
driverRes.isOk()
let driver: ArchiveDriver = driverRes.tryGet()
check:
not driver.isNil()
## Cleanup
(waitFor driver.close()).expect("driver to close")
test "insert a message":
## Given
const contentTopic = "test-content-topic"
const meta = "test meta"
let driver = newSqliteArchiveDriver()
let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta)
let msgHash = computeMessageHash(DefaultPubsubTopic, msg)
## When
let putRes = waitFor driver.put(
DefaultPubsubTopic, msg, computeDigest(msg), msgHash, msg.timestamp
)
## Then
check:
putRes.isOk()
let storedMsg = (waitFor driver.getAllMessages()).tryGet()
check:
storedMsg.len == 1
storedMsg.all do(item: auto) -> bool:
let (pubsubTopic, actualMsg, _, _, hash) = item
actualMsg.contentTopic == contentTopic and pubsubTopic == DefaultPubsubTopic and
hash == msgHash and msg.meta == actualMsg.meta
## Cleanup
(waitFor driver.close()).expect("driver to close")

File diff suppressed because it is too large Load Diff

View File

@ -1,535 +0,0 @@
{.used.}
import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/common/paging,
waku/waku_core,
waku/waku_core/message/digest,
waku/waku_archive_legacy,
../waku_archive_legacy/archive_utils,
../testlib/wakucore
suite "Waku Archive - message handling":
test "it should archive a valid and non-ephemeral message":
## Setup
let driver = newSqliteArchiveDriver()
let archive = newWakuArchive(driver)
## Given
let validSenderTime = now()
let message = fakeWakuMessage(ephemeral = false, ts = validSenderTime)
## When
waitFor archive.handleMessage(DefaultPubSubTopic, message)
## Then
check:
(waitFor driver.getMessagesCount()).tryGet() == 1
test "it should not archive ephemeral messages":
## Setup
let driver = newSqliteArchiveDriver()
let archive = newWakuArchive(driver)
## Given
let msgList =
@[
fakeWakuMessage(ephemeral = false, payload = "1"),
fakeWakuMessage(ephemeral = true, payload = "2"),
fakeWakuMessage(ephemeral = true, payload = "3"),
fakeWakuMessage(ephemeral = true, payload = "4"),
fakeWakuMessage(ephemeral = false, payload = "5"),
]
## When
for msg in msgList:
waitFor archive.handleMessage(DefaultPubsubTopic, msg)
## Then
check:
(waitFor driver.getMessagesCount()).tryGet() == 2
test "it should archive a message with no sender timestamp":
## Setup
let driver = newSqliteArchiveDriver()
let archive = newWakuArchive(driver)
## Given
let invalidSenderTime = 0
let message = fakeWakuMessage(ts = invalidSenderTime)
## When
waitFor archive.handleMessage(DefaultPubSubTopic, message)
## Then
check:
(waitFor driver.getMessagesCount()).tryGet() == 1
test "it should not archive a message with a sender time variance greater than max time variance (future)":
## Setup
let driver = newSqliteArchiveDriver()
let archive = newWakuArchive(driver)
## Given
let
now = now()
invalidSenderTime = now + MaxMessageTimestampVariance + 1_000_000_000
# 1 second over the max variance
let message = fakeWakuMessage(ts = invalidSenderTime)
## When
waitFor archive.handleMessage(DefaultPubSubTopic, message)
## Then
check:
(waitFor driver.getMessagesCount()).tryGet() == 0
test "it should not archive a message with a sender time variance greater than max time variance (past)":
## Setup
let driver = newSqliteArchiveDriver()
let archive = newWakuArchive(driver)
## Given
let
now = now()
invalidSenderTime = now - MaxMessageTimestampVariance - 1
let message = fakeWakuMessage(ts = invalidSenderTime)
## When
waitFor archive.handleMessage(DefaultPubSubTopic, message)
## Then
check:
(waitFor driver.getMessagesCount()).tryGet() == 0
procSuite "Waku Archive - find messages":
## Fixtures
let timeOrigin = now()
let msgListA =
@[
fakeWakuMessage(
@[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin)
),
fakeWakuMessage(
@[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin)
),
fakeWakuMessage(
@[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin)
),
fakeWakuMessage(
@[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin)
),
fakeWakuMessage(
@[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin)
),
fakeWakuMessage(
@[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin)
),
fakeWakuMessage(
@[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin)
),
fakeWakuMessage(
@[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin)
),
fakeWakuMessage(
@[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin)
),
fakeWakuMessage(
@[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin)
),
]
let archiveA = block:
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
for msg in msgListA:
require (
waitFor driver.put(
DefaultPubsubTopic,
msg,
computeDigest(msg),
computeMessageHash(DefaultPubsubTopic, msg),
msg.timestamp,
)
).isOk()
archive
test "handle query":
## Setup
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let topic = ContentTopic("1")
let
msg1 = fakeWakuMessage(contentTopic = topic)
msg2 = fakeWakuMessage()
waitFor archive.handleMessage("foo", msg1)
waitFor archive.handleMessage("foo", msg2)
## Given
let req = ArchiveQuery(includeData: true, contentTopics: @[topic])
## When
let queryRes = waitFor archive.findMessages(req)
## Then
check:
queryRes.isOk()
let response = queryRes.tryGet()
check:
response.messages.len == 1
response.messages == @[msg1]
test "handle query with multiple content filters":
## Setup
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let
topic1 = ContentTopic("1")
topic2 = ContentTopic("2")
topic3 = ContentTopic("3")
let
msg1 = fakeWakuMessage(contentTopic = topic1)
msg2 = fakeWakuMessage(contentTopic = topic2)
msg3 = fakeWakuMessage(contentTopic = topic3)
waitFor archive.handleMessage("foo", msg1)
waitFor archive.handleMessage("foo", msg2)
waitFor archive.handleMessage("foo", msg3)
## Given
let req = ArchiveQuery(includeData: true, contentTopics: @[topic1, topic3])
## When
let queryRes = waitFor archive.findMessages(req)
## Then
check:
queryRes.isOk()
let response = queryRes.tryGet()
check:
response.messages.len() == 2
response.messages.anyIt(it == msg1)
response.messages.anyIt(it == msg3)
test "handle query with more than 10 content filters":
## Setup
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let queryTopics = toSeq(1 .. 15).mapIt(ContentTopic($it))
## Given
let req = ArchiveQuery(contentTopics: queryTopics)
## When
let queryRes = waitFor archive.findMessages(req)
## Then
check:
queryRes.isErr()
let error = queryRes.tryError()
check:
error.kind == ArchiveErrorKind.INVALID_QUERY
error.cause == "too many content topics"
test "handle query with pubsub topic filter":
## Setup
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let
pubsubTopic1 = "queried-topic"
pubsubTopic2 = "non-queried-topic"
let
contentTopic1 = ContentTopic("1")
contentTopic2 = ContentTopic("2")
contentTopic3 = ContentTopic("3")
let
msg1 = fakeWakuMessage(contentTopic = contentTopic1)
msg2 = fakeWakuMessage(contentTopic = contentTopic2)
msg3 = fakeWakuMessage(contentTopic = contentTopic3)
waitFor archive.handleMessage(pubsubtopic1, msg1)
waitFor archive.handleMessage(pubsubtopic2, msg2)
waitFor archive.handleMessage(pubsubtopic2, msg3)
## Given
# This query targets: pubsubtopic1 AND (contentTopic1 OR contentTopic3)
let req = ArchiveQuery(
includeData: true,
pubsubTopic: some(pubsubTopic1),
contentTopics: @[contentTopic1, contentTopic3],
)
## When
let queryRes = waitFor archive.findMessages(req)
## Then
check:
queryRes.isOk()
let response = queryRes.tryGet()
check:
response.messages.len() == 1
response.messages.anyIt(it == msg1)
test "handle query with pubsub topic filter - no match":
## Setup
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let
pubsubtopic1 = "queried-topic"
pubsubtopic2 = "non-queried-topic"
let
msg1 = fakeWakuMessage()
msg2 = fakeWakuMessage()
msg3 = fakeWakuMessage()
waitFor archive.handleMessage(pubsubtopic2, msg1)
waitFor archive.handleMessage(pubsubtopic2, msg2)
waitFor archive.handleMessage(pubsubtopic2, msg3)
## Given
let req = ArchiveQuery(pubsubTopic: some(pubsubTopic1))
## When
let res = waitFor archive.findMessages(req)
## Then
check:
res.isOk()
let response = res.tryGet()
check:
response.messages.len() == 0
test "handle query with pubsub topic filter - match the entire stored messages":
## Setup
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let pubsubTopic = "queried-topic"
let
msg1 = fakeWakuMessage(payload = "TEST-1")
msg2 = fakeWakuMessage(payload = "TEST-2")
msg3 = fakeWakuMessage(payload = "TEST-3")
waitFor archive.handleMessage(pubsubTopic, msg1)
waitFor archive.handleMessage(pubsubTopic, msg2)
waitFor archive.handleMessage(pubsubTopic, msg3)
## Given
let req = ArchiveQuery(includeData: true, pubsubTopic: some(pubsubTopic))
## When
let res = waitFor archive.findMessages(req)
## Then
check:
res.isOk()
let response = res.tryGet()
check:
response.messages.len() == 3
response.messages.anyIt(it == msg1)
response.messages.anyIt(it == msg2)
response.messages.anyIt(it == msg3)
test "handle query with forward pagination":
## Given
let req =
ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.FORWARD)
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](3)
var cursors = newSeq[Option[ArchiveCursor]](3)
for i in 0 ..< 3:
let res = waitFor archiveA.findMessages(nextReq)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[3]))
cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[7]))
cursors[2] == none(ArchiveCursor)
check:
pages[0] == msgListA[0 .. 3]
pages[1] == msgListA[4 .. 7]
pages[2] == msgListA[8 .. 9]
test "handle query with backward pagination":
## Given
let req =
ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.BACKWARD)
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](3)
var cursors = newSeq[Option[ArchiveCursor]](3)
for i in 0 ..< 3:
let res = waitFor archiveA.findMessages(nextReq)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[6]))
cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[2]))
cursors[2] == none(ArchiveCursor)
check:
pages[0] == msgListA[6 .. 9]
pages[1] == msgListA[2 .. 5]
pages[2] == msgListA[0 .. 1]
test "handle query with no paging info - auto-pagination":
## Setup
let
driver = newSqliteArchiveDriver()
archive = newWakuArchive(driver)
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2")),
fakeWakuMessage(@[byte 1], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 2], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 3], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 4], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 5], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 6], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 7], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 8], contentTopic = DefaultContentTopic),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2")),
]
for msg in msgList:
require (
waitFor driver.put(
DefaultPubsubTopic,
msg,
computeDigest(msg),
computeMessageHash(DefaultPubsubTopic, msg),
msg.timestamp,
)
).isOk()
## Given
let req = ArchiveQuery(includeData: true, contentTopics: @[DefaultContentTopic])
## When
let res = waitFor archive.findMessages(req)
## Then
check:
res.isOk()
let response = res.tryGet()
check:
## No pagination specified. Response will be auto-paginated with
## up to MaxPageSize messages per page.
response.messages.len() == 8
response.cursor.isNone()
test "handle temporal history query with a valid time window":
## Given
let req = ArchiveQuery(
includeData: true,
contentTopics: @[ContentTopic("1")],
startTime: some(ts(15, timeOrigin)),
endTime: some(ts(55, timeOrigin)),
direction: PagingDirection.FORWARD,
)
## When
let res = waitFor archiveA.findMessages(req)
## Then
check res.isOk()
let response = res.tryGet()
check:
response.messages.len() == 2
response.messages.mapIt(it.timestamp) == @[ts(30, timeOrigin), ts(50, timeOrigin)]
test "handle temporal history query with a zero-size time window":
## A zero-size window results in an empty list of history messages
## Given
let req = ArchiveQuery(
contentTopics: @[ContentTopic("1")],
startTime: some(Timestamp(2)),
endTime: some(Timestamp(2)),
)
## When
let res = waitFor archiveA.findMessages(req)
## Then
check res.isOk()
let response = res.tryGet()
check:
response.messages.len == 0
test "handle temporal history query with an invalid time window":
## A history query with an invalid time range results in an empty list of history messages
## Given
let req = ArchiveQuery(
contentTopics: @[ContentTopic("1")],
startTime: some(Timestamp(5)),
endTime: some(Timestamp(2)),
)
## When
let res = waitFor archiveA.findMessages(req)
## Then
check res.isOk()
let response = res.tryGet()
check:
response.messages.len == 0

View File

@ -140,14 +140,13 @@ suite "Discovery Mechanisms for Shards":
test "Bit Vector Representation":
# Given a valid bit vector and its representation
let
bitVector: seq[byte] =
@[
0, 73, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]
bitVector: seq[byte] = @[
0, 73, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
]
clusterId: uint16 = 73 # bitVector's clusterId
shardIds: seq[uint16] = @[1u16, 10u16] # bitVector's shardIds

View File

@ -79,11 +79,10 @@ suite "Waku rln relay":
let rln = rlnInstance.get()
# prepare the input
let msg =
@[
"126f4c026cd731979365f79bd345a46d673c5a3f6f588bdc718e6356d02b6fdc".toBytes(),
"1f0e5db2b69d599166ab16219a97b82b662085c93220382b39f9f911d3b943b1".toBytes(),
]
let msg = @[
"126f4c026cd731979365f79bd345a46d673c5a3f6f588bdc718e6356d02b6fdc".toBytes(),
"1f0e5db2b69d599166ab16219a97b82b662085c93220382b39f9f911d3b943b1".toBytes(),
]
let hashRes = poseidon(msg)
@ -348,8 +347,7 @@ suite "Waku rln relay":
let idCredentials1 = generateCredentials()
(waitFor manager1.register(idCredentials1, UserMessageLimit(20))).isOkOr:
assert false,
"error returned when calling register: " & error
assert false, "error returned when calling register: " & error
let index2 = MembershipIndex(6)
let rlnConf2 = getWakuRlnConfig(manager = manager, index = index2)
@ -362,8 +360,7 @@ suite "Waku rln relay":
let idCredentials2 = generateCredentials()
(waitFor manager2.register(idCredentials2, UserMessageLimit(20))).isOkOr:
assert false,
"error returned when calling register: " & error
assert false, "error returned when calling register: " & error
# get the current epoch time
let epoch = wakuRlnRelay1.getCurrentEpoch()
@ -447,7 +444,7 @@ suite "Waku rln relay":
password = password,
appInfo = RLNAppInfo,
)
.isOk()
.isOk()
let readKeystoreRes = getMembershipCredentials(
path = filepath,

View File

@ -135,8 +135,10 @@ procSuite "WakuNode - RLN relay":
WakuMessage(payload: @payload, contentTopic: contentTopic, timestamp: now())
doAssert(
node1.wakuRlnRelay
.unsafeAppendRLNProof(message, node1.wakuRlnRelay.getCurrentEpoch(), MessageId(0))
.isOk()
.unsafeAppendRLNProof(
message, node1.wakuRlnRelay.getCurrentEpoch(), MessageId(0)
)
.isOk()
)
info " Nodes participating in the test",
@ -211,11 +213,10 @@ procSuite "WakuNode - RLN relay":
let shards =
@[RelayShard(clusterId: 0, shardId: 0), RelayShard(clusterId: 0, shardId: 1)]
let contentTopics =
@[
ContentTopic("/waku/2/content-topic-a/proto"),
ContentTopic("/waku/2/content-topic-b/proto"),
]
let contentTopics = @[
ContentTopic("/waku/2/content-topic-a/proto"),
ContentTopic("/waku/2/content-topic-b/proto"),
]
# connect them together
await node1.connectToNodes(@[node2.switch.peerInfo.toRemotePeerInfo()])

View File

@ -535,22 +535,21 @@ proc runAnvil*(
let anvilPath = getAnvilPath()
info "Anvil path", anvilPath
var args =
@[
"--port",
$port,
"--gas-limit",
"30000000",
"--gas-price",
"7",
"--base-fee",
"7",
"--balance",
"10000000000",
"--chain-id",
$chainId,
"--disable-min-priority-fee",
]
var args = @[
"--port",
$port,
"--gas-limit",
"30000000",
"--gas-price",
"7",
"--base-fee",
"7",
"--balance",
"10000000000",
"--chain-id",
$chainId,
"--disable-min-priority-fee",
]
# Add state file argument if provided
if stateFile.isSome():

View File

@ -35,24 +35,23 @@ suite "Store Client":
hash1 = computeMessageHash(DefaultPubsubTopic, message1)
hash2 = computeMessageHash(DefaultPubsubTopic, message2)
hash3 = computeMessageHash(DefaultPubsubTopic, message3)
messageSeq =
@[
WakuMessageKeyValue(
messageHash: hash1,
message: some(message1),
pubsubTopic: some(DefaultPubsubTopic),
),
WakuMessageKeyValue(
messageHash: hash2,
message: some(message2),
pubsubTopic: some(DefaultPubsubTopic),
),
WakuMessageKeyValue(
messageHash: hash3,
message: some(message3),
pubsubTopic: some(DefaultPubsubTopic),
),
]
messageSeq = @[
WakuMessageKeyValue(
messageHash: hash1,
message: some(message1),
pubsubTopic: some(DefaultPubsubTopic),
),
WakuMessageKeyValue(
messageHash: hash2,
message: some(message2),
pubsubTopic: some(DefaultPubsubTopic),
),
WakuMessageKeyValue(
messageHash: hash3,
message: some(message3),
pubsubTopic: some(DefaultPubsubTopic),
),
]
handlerFuture = newHistoryFuture()
handler = proc(req: StoreQueryRequest): Future[StoreQueryResult] {.async, gcsafe.} =
var request = req

View File

@ -50,19 +50,18 @@ suite "Store Resume - End to End":
var clientDriver {.threadvar.}: ArchiveDriver
asyncSetup:
let messages =
@[
fakeWakuMessage(@[byte 00]),
fakeWakuMessage(@[byte 01]),
fakeWakuMessage(@[byte 02]),
fakeWakuMessage(@[byte 03]),
fakeWakuMessage(@[byte 04]),
fakeWakuMessage(@[byte 05]),
fakeWakuMessage(@[byte 06]),
fakeWakuMessage(@[byte 07]),
fakeWakuMessage(@[byte 08]),
fakeWakuMessage(@[byte 09]),
]
let messages = @[
fakeWakuMessage(@[byte 00]),
fakeWakuMessage(@[byte 01]),
fakeWakuMessage(@[byte 02]),
fakeWakuMessage(@[byte 03]),
fakeWakuMessage(@[byte 04]),
fakeWakuMessage(@[byte 05]),
fakeWakuMessage(@[byte 06]),
fakeWakuMessage(@[byte 07]),
fakeWakuMessage(@[byte 08]),
fakeWakuMessage(@[byte 09]),
]
let
serverKey = generateSecp256k1Key()

View File

@ -32,19 +32,18 @@ import
procSuite "WakuNode - Store":
## Fixtures
let timeOrigin = now()
let msgListA =
@[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let msgListA = @[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let hashes = msgListA.mapIt(computeMessageHash(DefaultPubsubTopic, it))

View File

@ -1,33 +0,0 @@
{.used.}
import std/options, chronos
import
waku/[node/peer_manager, waku_core, waku_store_legacy, waku_store_legacy/client],
../testlib/[common, wakucore]
proc newTestWakuStore*(
switch: Switch, handler: HistoryQueryHandler
): Future[WakuStore] {.async.} =
let
peerManager = PeerManager.new(switch)
proto = WakuStore.new(peerManager, rng, handler)
await proto.start()
switch.mount(proto)
return proto
proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient =
let peerManager = PeerManager.new(switch)
WakuStoreClient.new(peerManager, rng)
proc computeHistoryCursor*(
pubsubTopic: PubsubTopic, message: WakuMessage
): HistoryCursor =
HistoryCursor(
pubsubTopic: pubsubTopic,
senderTime: message.timestamp,
storeTime: message.timestamp,
digest: computeDigest(message),
)

View File

@ -1,8 +0,0 @@
{.used.}
import
./test_client,
./test_resume,
./test_rpc_codec,
./test_waku_store,
./test_wakunode_store

View File

@ -1,214 +0,0 @@
{.used.}
import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
node/peer_manager,
waku_core,
waku_store_legacy,
waku_store_legacy/client,
common/paging,
],
../testlib/[wakucore, testasync, futures],
./store_utils
suite "Store Client":
var message1 {.threadvar.}: WakuMessage
var message2 {.threadvar.}: WakuMessage
var message3 {.threadvar.}: WakuMessage
var messageSeq {.threadvar.}: seq[WakuMessage]
var handlerFuture {.threadvar.}: Future[HistoryQuery]
var handler {.threadvar.}: HistoryQueryHandler
var historyQuery {.threadvar.}: HistoryQuery
var serverSwitch {.threadvar.}: Switch
var clientSwitch {.threadvar.}: Switch
var server {.threadvar.}: WakuStore
var client {.threadvar.}: WakuStoreClient
var serverPeerInfo {.threadvar.}: RemotePeerInfo
var clientPeerInfo {.threadvar.}: RemotePeerInfo
asyncSetup:
message1 = fakeWakuMessage(contentTopic = DefaultContentTopic)
message2 = fakeWakuMessage(contentTopic = DefaultContentTopic)
message3 = fakeWakuMessage(contentTopic = DefaultContentTopic)
messageSeq = @[message1, message2, message3]
handlerFuture = newLegacyHistoryFuture()
handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} =
handlerFuture.complete(req)
return ok(HistoryResponse(messages: messageSeq))
historyQuery = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
requestId: "customRequestId",
)
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
server = await newTestWakuStore(serverSwitch, handler = handler)
client = newTestWakuStoreClient(clientSwitch)
await allFutures(serverSwitch.start(), clientSwitch.start())
## The following sleep is aimed at preventing macOS failures in CI
#[
2024-05-16T13:24:45.5106200Z INF 2024-05-16 13:24:45.509+00:00 Stopping AutonatService topics="libp2p autonatservice" tid=53712 file=service.nim:203
2024-05-16T13:24:45.5107960Z WRN 2024-05-16 13:24:45.509+00:00 service is already stopped topics="libp2p switch" tid=53712 file=switch.nim:86
2024-05-16T13:24:45.5109010Z . (1.68s)
2024-05-16T13:24:45.5109320Z Store Client (0.00s)
2024-05-16T13:24:45.5109870Z SIGSEGV: Illegal storage access. (Attempt to read from nil?)
2024-05-16T13:24:45.5111470Z stack trace: (most recent call last)
]#
await sleepAsync(500.millis)
serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo()
asyncTeardown:
await allFutures(serverSwitch.stop(), clientSwitch.stop())
suite "HistoryQuery Creation and Execution":
asyncTest "Valid Queries":
# When a valid query is sent to the server
let queryResponse = await client.query(historyQuery, peer = serverPeerInfo)
# Then the query is processed successfully
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == historyQuery
queryResponse.get().messages == messageSeq
asyncTest "Invalid Queries":
# TODO: IMPROVE: We can't test "actual" invalid queries because
# validation depends directly on the handler implementation; to achieve
# proper coverage we'd need an example implementation.
# Given some invalid queries
let
invalidQuery1 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[],
direction: PagingDirection.FORWARD,
requestId: "reqId1",
)
invalidQuery2 = HistoryQuery(
pubsubTopic: PubsubTopic.none(),
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
requestId: "reqId2",
)
invalidQuery3 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
pageSize: 0,
requestId: "reqId3",
)
invalidQuery4 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
pageSize: 0,
requestId: "reqId4",
)
invalidQuery5 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
startTime: some(0.Timestamp),
endTime: some(0.Timestamp),
requestId: "reqId5",
)
invalidQuery6 = HistoryQuery(
pubsubTopic: some(DefaultPubsubTopic),
contentTopics: @[DefaultContentTopic],
startTime: some(0.Timestamp),
endTime: some(-1.Timestamp),
requestId: "reqId6",
)
# When the query is sent to the server
let queryResponse1 = await client.query(invalidQuery1, peer = serverPeerInfo)
# Then the query is still processed (the test handler performs no validation)
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery1
queryResponse1.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse2 = await client.query(invalidQuery2, peer = serverPeerInfo)
# Then the query is still processed (the test handler performs no validation)
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery2
queryResponse2.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse3 = await client.query(invalidQuery3, peer = serverPeerInfo)
# Then the query is still processed (the test handler performs no validation)
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery3
queryResponse3.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse4 = await client.query(invalidQuery4, peer = serverPeerInfo)
# Then the query is still processed (the test handler performs no validation)
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery4
queryResponse4.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse5 = await client.query(invalidQuery5, peer = serverPeerInfo)
# Then the query is still processed (the test handler performs no validation)
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery5
queryResponse5.get().messages == messageSeq
# When the query is sent to the server
handlerFuture = newLegacyHistoryFuture()
let queryResponse6 = await client.query(invalidQuery6, peer = serverPeerInfo)
# Then the query is still processed (the test handler performs no validation)
assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
check:
handlerFuture.read() == invalidQuery6
queryResponse6.get().messages == messageSeq
suite "Verification of HistoryResponse Payload":
asyncTest "Positive Responses":
# When a valid query is sent to the server
let queryResponse = await client.query(historyQuery, peer = serverPeerInfo)
# Then the query is processed successfully, and is of the expected type
check:
await handlerFuture.withTimeout(FUTURE_TIMEOUT)
type(queryResponse.get()) is HistoryResponse
asyncTest "Negative Responses - PeerDialFailure":
# Given a stopped peer
let
otherServerSwitch = newTestSwitch()
otherServerPeerInfo = otherServerSwitch.peerInfo.toRemotePeerInfo()
# When a query is sent to the stopped peer
let queryResponse = await client.query(historyQuery, peer = otherServerPeerInfo)
# Then the query is not processed
check:
not await handlerFuture.withTimeout(FUTURE_TIMEOUT)
queryResponse.isErr()
queryResponse.error.kind == HistoryErrorKind.PEER_DIAL_FAILURE

View File

@ -1,342 +0,0 @@
{.used.}
when defined(waku_exp_store_resume):
# TODO: Review store resume test cases (#1282)
# Ongoing changes to the test code base have broken this test in the meantime; needs investigation and a fix
import
std/[options, tables, sets],
testutils/unittests,
chronos,
chronicles,
libp2p/crypto/crypto
import
waku/[
common/databases/db_sqlite,
waku_archive_legacy/driver,
waku_archive_legacy/driver/sqlite_driver/sqlite_driver,
node/peer_manager,
waku_core,
waku_core/message/digest,
waku_store_legacy,
],
../waku_store_legacy/store_utils,
../waku_archive_legacy/archive_utils,
./testlib/common,
./testlib/switch
procSuite "Waku Store - resume store":
## Fixtures
let storeA = block:
let store = newTestMessageStore()
let msgList =
@[
fakeWakuMessage(
payload = @[byte 0], contentTopic = ContentTopic("2"), ts = ts(0)
),
fakeWakuMessage(
payload = @[byte 1], contentTopic = ContentTopic("1"), ts = ts(1)
),
fakeWakuMessage(
payload = @[byte 2], contentTopic = ContentTopic("2"), ts = ts(2)
),
fakeWakuMessage(
payload = @[byte 3], contentTopic = ContentTopic("1"), ts = ts(3)
),
fakeWakuMessage(
payload = @[byte 4], contentTopic = ContentTopic("2"), ts = ts(4)
),
fakeWakuMessage(
payload = @[byte 5], contentTopic = ContentTopic("1"), ts = ts(5)
),
fakeWakuMessage(
payload = @[byte 6], contentTopic = ContentTopic("2"), ts = ts(6)
),
fakeWakuMessage(
payload = @[byte 7], contentTopic = ContentTopic("1"), ts = ts(7)
),
fakeWakuMessage(
payload = @[byte 8], contentTopic = ContentTopic("2"), ts = ts(8)
),
fakeWakuMessage(
payload = @[byte 9], contentTopic = ContentTopic("1"), ts = ts(9)
),
]
for msg in msgList:
require store
.put(
DefaultPubsubTopic,
msg,
computeDigest(msg),
computeMessageHash(DefaultPubsubTopic, msg),
msg.timestamp,
)
.isOk()
store
let storeB = block:
let store = newTestMessageStore()
let msgList2 =
@[
fakeWakuMessage(
payload = @[byte 0], contentTopic = ContentTopic("2"), ts = ts(0)
),
fakeWakuMessage(
payload = @[byte 11], contentTopic = ContentTopic("1"), ts = ts(1)
),
fakeWakuMessage(
payload = @[byte 12], contentTopic = ContentTopic("2"), ts = ts(2)
),
fakeWakuMessage(
payload = @[byte 3], contentTopic = ContentTopic("1"), ts = ts(3)
),
fakeWakuMessage(
payload = @[byte 4], contentTopic = ContentTopic("2"), ts = ts(4)
),
fakeWakuMessage(
payload = @[byte 5], contentTopic = ContentTopic("1"), ts = ts(5)
),
fakeWakuMessage(
payload = @[byte 13], contentTopic = ContentTopic("2"), ts = ts(6)
),
fakeWakuMessage(
payload = @[byte 14], contentTopic = ContentTopic("1"), ts = ts(7)
),
]
for msg in msgList2:
require store
.put(
DefaultPubsubTopic,
msg,
computeDigest(msg),
computeMessageHash(DefaultPubsubTopic, msg),
msg.timestamp,
)
.isOk()
store
asyncTest "multiple query to multiple peers with pagination":
## Setup
let
serverSwitchA = newTestSwitch()
serverSwitchB = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(
serverSwitchA.start(), serverSwitchB.start(), clientSwitch.start()
)
let
serverA = await newTestWakuStoreNode(serverSwitchA, store = testStore)
serverB = await newTestWakuStoreNode(serverSwitchB, store = testStore)
client = newTestWakuStoreClient(clientSwitch)
## Given
let peers =
@[
serverSwitchA.peerInfo.toRemotePeerInfo(),
serverSwitchB.peerInfo.toRemotePeerInfo(),
]
let req = HistoryQuery(contentTopics: @[DefaultContentTopic], pageSize: 5)
## When
let res = await client.queryLoop(req, peers)
## Then
check:
res.isOk()
let response = res.tryGet()
check:
response.len == 10
## Cleanup
await allFutures(clientSwitch.stop(), serverSwitchA.stop(), serverSwitchB.stop())
asyncTest "resume message history":
## Setup
let
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(serverSwitch.start(), clientSwitch.start())
let
server = await newTestWakuStore(serverSwitch, store = storeA)
client = await newTestWakuStore(clientSwitch)
client.setPeer(serverSwitch.peerInfo.toRemotePeerInfo())
## When
let res = await client.resume()
## Then
check res.isOk()
let resumedMessagesCount = res.tryGet()
let storedMessagesCount = client.store.getMessagesCount().tryGet()
check:
resumedMessagesCount == 10
storedMessagesCount == 10
## Cleanup
await allFutures(clientSwitch.stop(), serverSwitch.stop())
asyncTest "resume history from a list of candidates - offline peer":
## Setup
let
clientSwitch = newTestSwitch()
offlineSwitch = newTestSwitch()
await clientSwitch.start()
let client = await newTestWakuStore(clientSwitch)
## Given
let peers = @[offlineSwitch.peerInfo.toRemotePeerInfo()]
## When
let res = await client.resume(some(peers))
## Then
check res.isErr()
## Cleanup
await clientSwitch.stop()
asyncTest "resume history from a list of candidates - online and offline peers":
## Setup
let
offlineSwitch = newTestSwitch()
serverASwitch = newTestSwitch()
serverBSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(
serverASwitch.start(), serverBSwitch.start(), clientSwitch.start()
)
let
serverA = await newTestWakuStore(serverASwitch, store = storeA)
serverB = await newTestWakuStore(serverBSwitch, store = storeB)
client = await newTestWakuStore(clientSwitch)
## Given
let peers =
@[
offlineSwitch.peerInfo.toRemotePeerInfo(),
serverASwitch.peerInfo.toRemotePeerInfo(),
serverBSwitch.peerInfo.toRemotePeerInfo(),
]
## When
let res = await client.resume(some(peers))
## Then
# `client` is expected to retrieve 14 messages:
# - The store mounted on `serverA` holds 10 messages (see `storeA` fixture)
# - The store mounted on `serverB` holds 7 messages (see `storeB` fixture)
# Both stores share 3 messages, resulting in 14 unique messages in total
check res.isOk()
let restoredMessagesCount = res.tryGet()
let storedMessagesCount = client.store.getMessagesCount().tryGet()
check:
restoredMessagesCount == 14
storedMessagesCount == 14
## Cleanup
await allFutures(serverASwitch.stop(), serverBSwitch.stop(), clientSwitch.stop())
suite "WakuNode - waku store":
asyncTest "Resume proc fetches the history":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(client.start(), server.start())
let driver = newSqliteArchiveDriver()
server.mountArchive(some(driver), none(MessageValidator), none(RetentionPolicy))
await server.mountStore()
let clientStore = StoreQueueRef.new()
await client.mountStore(store = clientStore)
client.mountStoreClient(store = clientStore)
## Given
let message = fakeWakuMessage()
require server.wakuStore.store.put(DefaultPubsubTopic, message).isOk()
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
await client.resume(some(@[serverPeer]))
# Then
check:
client.wakuStore.store.getMessagesCount().tryGet() == 1
## Cleanup
await allFutures(client.stop(), server.stop())
asyncTest "Resume proc discards duplicate messages":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(server.start(), client.start())
await server.mountStore(store = StoreQueueRef.new())
let clientStore = StoreQueueRef.new()
await client.mountStore(store = clientStore)
client.mountStoreClient(store = clientStore)
## Given
let timeOrigin = now()
let
msg1 = fakeWakuMessage(
payload = "hello world1", ts = (timeOrigin + getNanoSecondTime(1))
)
msg2 = fakeWakuMessage(
payload = "hello world2", ts = (timeOrigin + getNanoSecondTime(2))
)
msg3 = fakeWakuMessage(
payload = "hello world3", ts = (timeOrigin + getNanoSecondTime(3))
)
require server.wakuStore.store.put(DefaultPubsubTopic, msg1).isOk()
require server.wakuStore.store.put(DefaultPubsubTopic, msg2).isOk()
# Insert the same message in both nodes' stores
let
receivedTime3 = now() + getNanosecondTime(10)
digest3 = computeDigest(msg3)
require server.wakuStore.store
.put(DefaultPubsubTopic, msg3, digest3, receivedTime3)
.isOk()
require client.wakuStore.store
.put(DefaultPubsubTopic, msg3, digest3, receivedTime3)
.isOk()
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
await client.resume(some(@[serverPeer]))
## Then
check:
# If the duplicates are discarded properly, then the total number of messages after resume should be 3
client.wakuStore.store.getMessagesCount().tryGet() == 3
await allFutures(client.stop(), server.stop())

View File

@ -1,185 +0,0 @@
{.used.}
import std/options, testutils/unittests, chronos
import
waku/[
common/protobuf,
common/paging,
waku_core,
waku_store_legacy/rpc,
waku_store_legacy/rpc_codec,
],
../testlib/wakucore
procSuite "Waku Store - RPC codec":
test "PagingIndexRPC protobuf codec":
## Given
let index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
## When
let encodedIndex = index.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# The fields of decodedIndex must be the same as the original index
decodedIndex == index
test "PagingIndexRPC protobuf codec - empty index":
## Given
let emptyIndex = PagingIndexRPC()
let encodedIndex = emptyIndex.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# Check the correctness of init and encode for an empty PagingIndexRPC
decodedIndex == emptyIndex
test "PagingInfoRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.FORWARD),
)
## When
let pb = pagingInfo.encode()
let decodedPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedPagingInfo.isOk()
check:
# The fields of decodedPagingInfo must be the same as the original pagingInfo
decodedPagingInfo.value == pagingInfo
decodedPagingInfo.value.direction == pagingInfo.direction
test "PagingInfoRPC protobuf codec - empty paging info":
## Given
let emptyPagingInfo = PagingInfoRPC()
## When
let pb = emptyPagingInfo.encode()
let decodedEmptyPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedEmptyPagingInfo.isOk()
check:
# check the correctness of init and encode for an empty PagingInfoRPC
decodedEmptyPagingInfo.value == emptyPagingInfo
test "HistoryQueryRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
query = HistoryQueryRPC(
contentFilters:
@[
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
],
pagingInfo: some(pagingInfo),
startTime: some(Timestamp(10)),
endTime: some(Timestamp(11)),
)
## When
let pb = query.encode()
let decodedQuery = HistoryQueryRPC.decode(pb.buffer)
## Then
check:
decodedQuery.isOk()
check:
# the fields of the decoded query (decodedQuery) must be the same as those of the original query
decodedQuery.value == query
test "HistoryQueryRPC protobuf codec - empty history query":
## Given
let emptyQuery = HistoryQueryRPC()
## When
let pb = emptyQuery.encode()
let decodedEmptyQuery = HistoryQueryRPC.decode(pb.buffer)
## Then
check:
decodedEmptyQuery.isOk()
check:
# check the correctness of init and encode for an empty HistoryQueryRPC
decodedEmptyQuery.value == emptyQuery
test "HistoryResponseRPC protobuf codec":
## Given
let
message = fakeWakuMessage()
index = PagingIndexRPC.compute(
message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
res = HistoryResponseRPC(
messages: @[message],
pagingInfo: some(pagingInfo),
error: HistoryResponseErrorRPC.INVALID_CURSOR,
)
## When
let pb = res.encode()
let decodedRes = HistoryResponseRPC.decode(pb.buffer)
## Then
check:
decodedRes.isOk()
check:
# the fields of the decoded response (decodedRes) must be the same as those of the original response (res)
decodedRes.value == res
test "HistoryResponseRPC protobuf codec - empty history response":
## Given
let emptyRes = HistoryResponseRPC()
## When
let pb = emptyRes.encode()
let decodedEmptyRes = HistoryResponseRPC.decode(pb.buffer)
## Then
check:
decodedEmptyRes.isOk()
check:
# check the correctness of init and encode for an empty HistoryResponseRPC
decodedEmptyRes.value == emptyRes

View File

@ -1,113 +0,0 @@
{.used.}
import testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
common/paging,
node/peer_manager,
waku_core,
waku_store_legacy,
waku_store_legacy/client,
],
../testlib/wakucore,
./store_utils
suite "Waku Store - query handler legacy":
asyncTest "history query handler should be called":
## Setup
let
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(serverSwitch.start(), clientSwitch.start())
## Given
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
let msg = fakeWakuMessage(contentTopic = DefaultContentTopic)
var queryHandlerFut = newFuture[(HistoryQuery)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return ok(HistoryResponse(messages: @[msg]))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
requestId: "reqId",
)
## When
let queryRes = await client.query(req, peer = serverPeerInfo)
## Then
check:
not queryHandlerFut.failed()
queryRes.isOk()
let request = queryHandlerFut.read()
check:
request == req
let response = queryRes.tryGet()
check:
response.messages.len == 1
response.messages == @[msg]
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())
asyncTest "history query handler should be called and return an error":
## Setup
let
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(serverSwitch.start(), clientSwitch.start())
## Given
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
var queryHandlerFut = newFuture[(HistoryQuery)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
requestId: "reqId",
)
## When
let queryRes = await client.query(req, peer = serverPeerInfo)
## Then
check:
not queryHandlerFut.failed()
queryRes.isErr()
let request = queryHandlerFut.read()
check:
request == req
let error = queryRes.tryError()
check:
error.kind == HistoryErrorKind.BAD_REQUEST
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())

View File

@ -1,316 +0,0 @@
{.used.}
import
std/net,
testutils/unittests,
chronos,
libp2p/crypto/crypto,
libp2p/peerid,
libp2p/multiaddress,
libp2p/switch,
libp2p/protocols/pubsub/pubsub,
libp2p/protocols/pubsub/gossipsub
import
waku/[
common/paging,
waku_core,
waku_core/message/digest,
node/peer_manager,
waku_archive_legacy,
waku_filter_v2,
waku_filter_v2/client,
waku_store_legacy,
waku_node,
],
../waku_store_legacy/store_utils,
../waku_archive_legacy/archive_utils,
../testlib/wakucore,
../testlib/wakunode
procSuite "WakuNode - Store Legacy":
## Fixtures
let timeOrigin = now()
let msgListA =
@[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let archiveA = block:
let driver = newSqliteArchiveDriver()
for msg in msgListA:
let msg_digest = waku_archive_legacy.computeDigest(msg)
let msg_hash = computeMessageHash(DefaultPubsubTopic, msg)
require (
waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp)
).isOk()
driver
test "Store protocol returns expected messages":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic])
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
let queryRes = waitFor client.query(req, peer = serverPeer)
## Then
check queryRes.isOk()
let response = queryRes.get()
check:
response.messages == msgListA
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store node history response - forward pagination":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.FORWARD,
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[6]))
cursors[1] == none(HistoryCursor)
check:
pages[0] == msgListA[0 .. 6]
pages[1] == msgListA[7 .. 9]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store node history response - backward pagination":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.BACKWARD,
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[3]))
cursors[1] == none(HistoryCursor)
check:
pages[0] == msgListA[3 .. 9]
pages[1] == msgListA[0 .. 2]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store protocol returns expected message when relay is disabled and filter enabled":
## See nwaku issue #937: 'Store: ability to decouple store from relay'
## Setup
let
filterSourceKey = generateSecp256k1Key()
filterSource =
newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0))
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start(), filterSource.start())
waitFor filterSource.mountFilter()
let driver = newSqliteArchiveDriver()
let mountArchiveRes = server.mountLegacyArchive(driver)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
waitFor server.mountFilterClient()
client.mountLegacyStoreClient()
## Given
let message = fakeWakuMessage()
let
serverPeer = server.peerInfo.toRemotePeerInfo()
filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo()
## When
let filterFut = newFuture[(PubsubTopic, WakuMessage)]()
proc filterHandler(
pubsubTopic: PubsubTopic, msg: WakuMessage
) {.async, gcsafe, closure.} =
await server.wakuLegacyArchive.handleMessage(pubsubTopic, msg)
filterFut.complete((pubsubTopic, msg))
server.wakuFilterClient.registerPushHandler(filterHandler)
let resp = waitFor server.filterSubscribe(
some(DefaultPubsubTopic), DefaultContentTopic, peer = filterSourcePeer
)
waitFor sleepAsync(100.millis)
waitFor filterSource.wakuFilter.handleMessage(DefaultPubsubTopic, message)
# Wait for the server filter to receive the push message
require waitFor filterFut.withTimeout(5.seconds)
let res = waitFor client.query(
HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer
)
## Then
check res.isOk()
let response = res.get()
check:
response.messages.len == 1
response.messages[0] == message
let (handledPubsubTopic, handledMsg) = filterFut.read()
check:
handledPubsubTopic == DefaultPubsubTopic
handledMsg == message
## Cleanup
waitFor allFutures(client.stop(), server.stop(), filterSource.stop())
test "history query should return INVALID_CURSOR if the cursor has empty data in the request":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Forcing a bad cursor with empty digest data
var data: array[32, byte] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
]
let cursor = HistoryCursor(
pubsubTopic: "pubsubTopic",
senderTime: now(),
storeTime: now(),
digest: waku_archive_legacy.MessageDigest(data: data),
)
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor))
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
let queryRes = waitFor client.query(req, peer = serverPeer)
## Then
check not queryRes.isOk()
check queryRes.error ==
"legacy store client query error: BAD_REQUEST: invalid cursor"
# Cleanup
waitFor allFutures(client.stop(), server.stop())

View File

@ -119,12 +119,11 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(wholeRange, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
wholeRange, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
fingerprints: @[
remote.computeFingerprint(
wholeRange, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
itemSets: @[],
)
@ -180,12 +179,11 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(sliceWhole, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
sliceWhole, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
fingerprints: @[
remote.computeFingerprint(
sliceWhole, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
itemSets: @[],
)
@ -207,12 +205,11 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(subSlice, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
subSlice, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
fingerprints: @[
remote.computeFingerprint(
subSlice, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
itemSets: @[],
)
@ -272,12 +269,9 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(slice, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
slice, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
fingerprints: @[
remote.computeFingerprint(slice, @[DefaultPubsubTopic], @[DefaultContentTopic])
],
itemSets: @[],
)

View File

@ -44,12 +44,9 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(whole, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
whole, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
fingerprints: @[
remote.computeFingerprint(whole, @[DefaultPubsubTopic], @[DefaultContentTopic])
],
itemSets: @[],
)
let rep1 = local.processPayload(p1, s1, r1)
@ -131,15 +128,10 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(sliceA, RangeType.Fingerprint), (sliceB, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
sliceA, @[DefaultPubsubTopic], @[DefaultContentTopic]
),
remote.computeFingerprint(
sliceB, @[DefaultPubsubTopic], @[DefaultContentTopic]
),
],
fingerprints: @[
remote.computeFingerprint(sliceA, @[DefaultPubsubTopic], @[DefaultContentTopic]),
remote.computeFingerprint(sliceB, @[DefaultPubsubTopic], @[DefaultContentTopic]),
],
itemSets: @[],
)
let reply = local.processPayload(payload, s, r)
@ -180,12 +172,9 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(slice, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
slice, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
fingerprints: @[
remote.computeFingerprint(slice, @[DefaultPubsubTopic], @[DefaultContentTopic])
],
itemSets: @[],
)
let reply = local.processPayload(p, toS, toR)
@ -236,12 +225,9 @@ suite "Waku Sync reconciliation":
pubsubTopics: @[DefaultPubsubTopic],
contentTopics: @[DefaultContentTopic],
ranges: @[(s, RangeType.Fingerprint)],
fingerprints:
@[
remote.computeFingerprint(
s, @[DefaultPubsubTopic], @[DefaultContentTopic]
)
],
fingerprints: @[
remote.computeFingerprint(s, @[DefaultPubsubTopic], @[DefaultContentTopic])
],
itemSets: @[],
),
sendQ,

View File

@ -141,43 +141,6 @@ suite "Waku external config - apply preset":
## Then
assert res.isErr(), "Invalid shard was accepted"
test "Apply TWN preset when cluster id = 1":
## Setup
let expectedConf = NetworkConf.TheWakuNetworkConf()
## Given
let preConfig = WakuNodeConf(
cmd: noCommand,
clusterId: 1.uint16,
relay: true,
ethClientUrls: @["http://someaddress".EthRpcUrl],
)
## When
let res = preConfig.toWakuConf()
assert res.isOk(), $res.error
## Then
let conf = res.get()
check conf.maxMessageSizeBytes ==
uint64(parseCorrectMsgSize(expectedConf.maxMessageSize))
check conf.clusterId == expectedConf.clusterId
check conf.rlnRelayConf.isSome() == expectedConf.rlnRelay
if conf.rlnRelayConf.isSome():
let rlnRelayConf = conf.rlnRelayConf.get()
check rlnRelayConf.ethContractAddress == expectedConf.rlnRelayEthContractAddress
check rlnRelayConf.dynamic == expectedConf.rlnRelayDynamic
check rlnRelayConf.chainId == expectedConf.rlnRelayChainId
check rlnRelayConf.epochSizeSec == expectedConf.rlnEpochSizeSec
check rlnRelayConf.userMessageLimit == expectedConf.rlnRelayUserMessageLimit
check conf.shardingConf.kind == expectedConf.shardingConf.kind
check conf.shardingConf.numShardsInCluster ==
expectedConf.shardingConf.numShardsInCluster
check conf.discv5Conf.isSome() == expectedConf.discv5Discovery
if conf.discv5Conf.isSome():
let discv5Conf = conf.discv5Conf.get()
check discv5Conf.bootstrapNodes == expectedConf.discv5BootstrapNodes
suite "Waku external config - node key":
test "Passed node key is used":
## Setup

View File

@ -176,12 +176,11 @@ suite "Waku v2 Rest API - Filter V2":
)
discard await restFilterTest.client.filterPostSubscriptions(requestBody)
let contentFilters =
@[
ContentTopic("1"),
ContentTopic("2"),
ContentTopic("3"), # ,ContentTopic("4") # Keep this subscription for check
]
let contentFilters = @[
ContentTopic("1"),
ContentTopic("2"),
ContentTopic("3"), # ,ContentTopic("4") # Keep this subscription for check
]
let requestBodyUnsub = FilterUnsubscribeRequest(
requestId: "4321",

View File

@ -86,7 +86,7 @@ suite "Waku v2 REST API - health":
response.status == 200
$response.contentType == $MIMETYPE_JSON
report.nodeHealth == HealthStatus.READY
report.protocolsHealth.len() == 15
report.protocolsHealth.len() == 13
report.getHealth(RelayProtocol).health == HealthStatus.NOT_READY
report.getHealth(RelayProtocol).desc == some("No connected peers")
@ -97,7 +97,6 @@ suite "Waku v2 REST API - health":
report.getHealth(LegacyLightpushProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(FilterProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(StoreProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(LegacyStoreProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(PeerExchangeProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(RendezvousProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(MixProtocol).health == HealthStatus.NOT_MOUNTED
@ -108,7 +107,6 @@ suite "Waku v2 REST API - health":
report.getHealth(LegacyLightpushClientProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(StoreClientProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(LegacyStoreClientProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(FilterClientProtocol).health == HealthStatus.NOT_READY
report.getHealth(FilterClientProtocol).desc ==

View File

@ -193,15 +193,14 @@ suite "Waku v2 Rest API - Relay":
let pubSubTopic = "/waku/2/rs/0/0"
var messages =
@[
fakeWakuMessage(
contentTopic = "content-topic-x",
payload = toBytes("TEST-1"),
meta = toBytes("test-meta"),
ephemeral = true,
)
]
var messages = @[
fakeWakuMessage(
contentTopic = "content-topic-x",
payload = toBytes("TEST-1"),
meta = toBytes("test-meta"),
ephemeral = true,
)
]
# Prevent duplicate messages
for i in 0 ..< 2:
@ -345,12 +344,11 @@ suite "Waku v2 Rest API - Relay":
installRelayApiHandlers(restServer.router, node, cache)
restServer.start()
let contentTopics =
@[
ContentTopic("/app-1/2/default-content/proto"),
ContentTopic("/app-2/2/default-content/proto"),
ContentTopic("/app-3/2/default-content/proto"),
]
let contentTopics = @[
ContentTopic("/app-1/2/default-content/proto"),
ContentTopic("/app-2/2/default-content/proto"),
ContentTopic("/app-3/2/default-content/proto"),
]
# When
let client = newRestHttpClient(initTAddress(restAddress, restPort))
@ -391,13 +389,12 @@ suite "Waku v2 Rest API - Relay":
restPort = restServer.httpServer.address.port # update with bound port for client use
let contentTopics =
@[
ContentTopic("/waku/2/default-content1/proto"),
ContentTopic("/waku/2/default-content2/proto"),
ContentTopic("/waku/2/default-content3/proto"),
ContentTopic("/waku/2/default-contentX/proto"),
]
let contentTopics = @[
ContentTopic("/waku/2/default-content1/proto"),
ContentTopic("/waku/2/default-content2/proto"),
ContentTopic("/waku/2/default-content3/proto"),
ContentTopic("/waku/2/default-contentX/proto"),
]
let cache = MessageCache.init()
cache.contentSubscribe(contentTopics[0])
@ -451,10 +448,9 @@ suite "Waku v2 Rest API - Relay":
let contentTopic = DefaultContentTopic
var messages =
@[
fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1"))
]
var messages = @[
fakeWakuMessage(contentTopic = DefaultContentTopic, payload = toBytes("TEST-1"))
]
# Prevent duplicate messages
for i in 0 ..< 2:

View File

@ -115,17 +115,16 @@ procSuite "Waku Rest API - Store v3":
await sleepAsync(1.seconds())
# Now prime it with some history before tests
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 1, byte 2], ts = 2),
fakeWakuMessage(@[byte 1], ts = 3),
fakeWakuMessage(@[byte 1], ts = 4),
fakeWakuMessage(@[byte 1], ts = 5),
fakeWakuMessage(@[byte 1], ts = 6),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("c2"), ts = 9),
]
let msgList = @[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 1, byte 2], ts = 2),
fakeWakuMessage(@[byte 1], ts = 3),
fakeWakuMessage(@[byte 1], ts = 4),
fakeWakuMessage(@[byte 1], ts = 5),
fakeWakuMessage(@[byte 1], ts = 6),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("c2"), ts = 9),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -191,17 +190,16 @@ procSuite "Waku Rest API - Store v3":
peerSwitch.mount(node.wakuStore)
# Now prime it with some history before tests
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 1, byte 2], ts = 2),
fakeWakuMessage(@[byte 1], ts = 3),
fakeWakuMessage(@[byte 1], ts = 4),
fakeWakuMessage(@[byte 1], ts = 5),
fakeWakuMessage(@[byte 1], ts = 6),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("c2"), ts = 9),
]
let msgList = @[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 1, byte 2], ts = 2),
fakeWakuMessage(@[byte 1], ts = 3),
fakeWakuMessage(@[byte 1], ts = 4),
fakeWakuMessage(@[byte 1], ts = 5),
fakeWakuMessage(@[byte 1], ts = 6),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("c2"), ts = 9),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -262,19 +260,18 @@ procSuite "Waku Rest API - Store v3":
# Now prime it with some history before tests
let timeOrigin = wakucore.now()
let msgList =
@[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let msgList = @[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -357,12 +354,11 @@ procSuite "Waku Rest API - Store v3":
peerSwitch.mount(node.wakuStore)
# Now prime it with some history before tests
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2"), ts = 9),
]
let msgList = @[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("2"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("2"), ts = 9),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -431,12 +427,11 @@ procSuite "Waku Rest API - Store v3":
peerSwitch.mount(node.wakuStore)
# Now prime it with some history before tests
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
let msgList = @[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -521,12 +516,11 @@ procSuite "Waku Rest API - Store v3":
peerSwitch.mount(node.wakuStore)
# Now prime it with some history before tests
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
let msgList = @[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -594,12 +588,11 @@ procSuite "Waku Rest API - Store v3":
await node.mountStore()
# Now prime it with some history before tests
let msgList =
@[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
let msgList = @[
fakeWakuMessage(@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -640,14 +633,13 @@ procSuite "Waku Rest API - Store v3":
await node.mountStore()
# Now prime it with some history before tests
let msgList =
@[
fakeWakuMessage(
@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0, meta = (@[byte 8])
),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
let msgList = @[
fakeWakuMessage(
@[byte 0], contentTopic = ContentTopic("ct1"), ts = 0, meta = (@[byte 8])
),
fakeWakuMessage(@[byte 1], ts = 1),
fakeWakuMessage(@[byte 9], contentTopic = ContentTopic("ct2"), ts = 9),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()
@ -757,19 +749,18 @@ procSuite "Waku Rest API - Store v3":
# Now prime it with some history before tests
let timeOrigin = wakucore.now()
let msgList =
@[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let msgList = @[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
for msg in msgList:
require (await driver.put(DefaultPubsubTopic, msg)).isOk()

View File

@ -50,13 +50,13 @@ type ConfResult*[T] = Result[T, string]
type EthRpcUrl* = distinct string
type StartUpCommand* = enum
noCommand # default, runs waku
noCommand # default, runs waku
generateRlnKeystore # generates a new RLN keystore
type WakuMode* {.pure.} = enum
noMode # default - use explicit CLI flags as-is
Core # full service node
Edge # client-only node
Core # full service node
Edge # client-only node
type WakuNodeConf* = object
configFile* {.
@ -183,7 +183,8 @@ type WakuNodeConf* = object
name: "agent-string"
.}: string
nodekey* {.desc: "P2P node private key as 64 char hex string.", name: "nodekey".}:
nodekey* {.desc: "P2P node private key as 64 char hex string.",
name: "nodekey".}:
Option[PrivateKey]
listenAddress* {.
@ -192,11 +193,13 @@ type WakuNodeConf* = object
name: "listen-address"
.}: IpAddress
tcpPort* {.desc: "TCP listening port.", defaultValue: 60000, name: "tcp-port".}:
tcpPort* {.desc: "TCP listening port.", defaultValue: 60000,
name: "tcp-port".}:
Port
portsShift* {.
desc: "Add a shift to all port numbers.", defaultValue: 0, name: "ports-shift"
desc: "Add a shift to all port numbers.", defaultValue: 0,
name: "ports-shift"
.}: uint16
nat* {.
@ -240,11 +243,13 @@ type WakuNodeConf* = object
.}: int
peerStoreCapacity* {.
desc: "Maximum stored peers in the peerstore.", name: "peer-store-capacity"
desc: "Maximum stored peers in the peerstore.",
name: "peer-store-capacity"
.}: Option[int]
peerPersistence* {.
desc: "Enable peer persistence.", defaultValue: false, name: "peer-persistence"
desc: "Enable peer persistence.", defaultValue: false,
name: "peer-persistence"
.}: bool
## DNS addrs config
@ -263,8 +268,7 @@ type WakuNodeConf* = object
## Circuit-relay config
isRelayClient* {.
desc:
"""Set the node as a relay-client.
desc: """Set the node as a relay-client.
Set it to true for nodes that run behind a NAT or firewall and
hence would have reachability issues.""",
defaultValue: false,
@ -349,12 +353,6 @@ hence would have reachability issues.""",
desc: "Enable/disable waku store protocol", defaultValue: false, name: "store"
.}: bool
legacyStore* {.
desc: "Enable/disable support of Waku Store v2 as a service",
defaultValue: false,
name: "legacy-store"
.}: bool
storenode* {.
desc: "Peer multiaddress to query for storage",
defaultValue: "",
@ -408,7 +406,7 @@ hence would have reachability issues.""",
storeSyncInterval* {.
desc: "Interval between store sync attempts. In seconds.",
defaultValue: 300, # 5 minutes
defaultValue: 300, # 5 minutes
name: "store-sync-interval"
.}: uint32
@ -439,7 +437,7 @@ hence would have reachability issues.""",
filterSubscriptionTimeout* {.
desc:
"Timeout for filter subscription without ping or refresh it, in seconds. Only for v2 filter protocol.",
defaultValue: 300, # 5 minutes
defaultValue: 300, # 5 minutes
name: "filter-subscription-timeout"
.}: uint16
@ -666,7 +664,8 @@ with the drawback of consuming some more bandwidth.""",
.}: bool
websocketPort* {.
desc: "WebSocket listening port.", defaultValue: 8000, name: "websocket-port"
desc: "WebSocket listening port.", defaultValue: 8000,
name: "websocket-port"
.}: Port
websocketSecureSupport* {.
@ -692,7 +691,7 @@ with the drawback of consuming some more bandwidth.""",
desc:
"Rate limit settings for different protocols." &
"Format: protocol:volume/period<unit>" &
" Where 'protocol' can be one of: <store|storev2|storev3|lightpush|px|filter> if not defined it means a global setting" &
" Where 'protocol' can be one of: <store|storev3|lightpush|px|filter> if not defined it means a global setting" &
" 'volume' and period must be an integer value. " &
" 'unit' must be one of <h|m|s|ms> - hours, minutes, seconds, milliseconds respectively. " &
"Argument may be repeated.",
@ -763,7 +762,8 @@ proc parseCmdArg*(T: type ProtectedShard, p: string): T =
raise newException(ValueError, "Invalid public key")
if isNumber(elements[0]):
return ProtectedShard(shard: uint16.parseCmdArg(elements[0]), key: publicKey)
return ProtectedShard(shard: uint16.parseCmdArg(elements[0]),
key: publicKey)
# TODO: Remove when removing protected-topic configuration
let shard = RelayShard.parse(elements[0]).valueOr:
@ -891,11 +891,11 @@ proc load*(T: type WakuNodeConf, version = ""): ConfResult[T] =
secondarySources = proc(
conf: WakuNodeConf, sources: auto
) {.gcsafe, raises: [ConfigurationError].} =
sources.addConfigFile(Envvar, InputFile("wakunode2"))
sources.addConfigFile(Envvar, InputFile("wakunode2"))
if conf.configFile.isSome():
sources.addConfigFile(Toml, conf.configFile.get())
,
if conf.configFile.isSome():
sources.addConfigFile(Toml, conf.configFile.get())
,
)
ok(conf)
@ -949,6 +949,12 @@ proc toNetworkConf(
proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
var b = WakuConfBuilder.init()
let networkConf = toNetworkConf(n.preset, some(n.clusterId)).valueOr:
return err("Error determining cluster from preset: " & $error)
if networkConf.isSome():
b.withNetworkConf(networkConf.get())
b.withLogLevel(n.logLevel)
b.withLogFormat(n.logFormat)
@ -977,12 +983,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withProtectedShards(n.protectedShards)
b.withClusterId(n.clusterId)
let networkConf = toNetworkConf(n.preset, some(n.clusterId)).valueOr:
return err("Error determining cluster from preset: " & $error)
if networkConf.isSome():
b.withNetworkConf(networkConf.get())
b.withAgentString(n.agentString)
if n.nodeKey.isSome():
@ -1046,7 +1046,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withContentTopics(n.contentTopics)
b.storeServiceConf.withEnabled(n.store)
b.storeServiceConf.withSupportV2(n.legacyStore)
b.storeServiceConf.withRetentionPolicies(n.storeMessageRetentionPolicy)
b.storeServiceConf.withDbUrl(n.storeMessageDbUrl)
b.storeServiceConf.withDbVacuum(n.storeMessageDbVacuum)

2
vendor/nph vendored

@ -1 +1 @@
Subproject commit c6e03162dc2820d3088660f644818d7040e95791
Subproject commit 2cacf6cc28116e4046e0b67a13545af5c4e756bd

View File

@ -73,11 +73,11 @@ proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
" --threads:on --app:staticlib --opt:speed --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
else:
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
" --threads:on --app:lib --opt:speed --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
proc buildMobileAndroid(srcDir = ".", params = "") =
@ -93,7 +93,7 @@ proc buildMobileAndroid(srcDir = ".", params = "") =
extra_params &= " " & paramStr(i)
exec "nim c" & " --out:" & outDir &
"/libwaku.so --threads:on --app:lib --opt:size --noMain --mm:refc -d:chronicles_sinks=textlines[dynamic] --header -d:chronosEventEngine=epoll --passL:-L" &
"/libwaku.so --threads:on --app:lib --opt:speed --noMain --mm:refc -d:chronicles_sinks=textlines[dynamic] --header -d:chronosEventEngine=epoll --passL:-L" &
outdir & " --passL:-lrln --passL:-llog --cpu:" & cpu & " --os:android -d:androidNDK " &
extra_params & " " & srcDir & "/libwaku.nim"
@ -266,7 +266,7 @@ proc buildMobileIOS(srcDir = ".", params = "") =
" --os:ios --cpu:" & cpu &
" --compileOnly:on" &
" --noMain --mm:refc" &
" --threads:on --opt:size --header" &
" --threads:on --opt:speed --header" &
" -d:metrics -d:discv5_protocol_id=d5waku" &
" --nimMainPrefix:libwaku --skipParentCfg:on" &
" --cc:clang" &

View File

@ -62,10 +62,9 @@ proc init*(
)
const TheWakuNetworkPreset* = ProtocolsConfig(
entryNodes:
@[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
],
entryNodes: @[
"enrtree://AIRVQ5DDA4FFWLRBCHJWUWOO6X6S4ZTZ5B667LQ6AJU6PEYDLRD5O@sandbox.waku.nodes.status.im"
],
staticStoreNodes: @[],
clusterId: 1,
autoShardingConfig: AutoShardingConfig(numShardsInCluster: 8),

View File

@ -151,14 +151,13 @@ macro EventBroker*(body: untyped): untyped =
proc `accessProcIdent`(): `brokerTypeIdent` =
if `globalVarIdent`.isNil():
new(`globalVarIdent`)
`globalVarIdent`.buckets =
@[
`bucketTypeIdent`(
brokerCtx: DefaultBrokerContext,
listeners: initTable[uint64, `handlerProcIdent`](),
nextId: 1'u64,
)
]
`globalVarIdent`.buckets = @[
`bucketTypeIdent`(
brokerCtx: DefaultBrokerContext,
listeners: initTable[uint64, `handlerProcIdent`](),
nextId: 1'u64,
)
]
`globalVarIdent`
)

View File

@ -211,11 +211,10 @@ proc waitQueryToFinish(
pqclear(pqResult)
proc containsRiskyPatterns(input: string): bool =
let riskyPatterns =
@[
" OR ", " AND ", " UNION ", " SELECT ", "INSERT ", "DELETE ", "UPDATE ", "DROP ",
"EXEC ", "--", "/*", "*/",
]
let riskyPatterns = @[
" OR ", " AND ", " UNION ", " SELECT ", "INSERT ", "DELETE ", "UPDATE ", "DROP ",
"EXEC ", "--", "/*", "*/",
]
for pattern in riskyPatterns:
if pattern.toLowerAscii() in input.toLowerAscii():

View File

@ -7,7 +7,6 @@ type RateLimitSetting* = tuple[volume: int, period: Duration]
type RateLimitedProtocol* = enum
GLOBAL
STOREV2
STOREV3
LIGHTPUSH
PEEREXCHG
@ -47,8 +46,6 @@ proc translate(sProtocol: string): RateLimitedProtocol {.raises: [ValueError].}
case sProtocol
of "global":
return GLOBAL
of "storev2":
return STOREV2
of "storev3":
return STOREV3
of "lightpush":
@ -65,7 +62,6 @@ proc fillSettingTable(
) {.raises: [ValueError].} =
if sProtocol == "store":
# generic store will only applies to version which is not listed directly
discard t.hasKeyOrPut(STOREV2, setting)
discard t.hasKeyOrPut(STOREV3, setting)
else:
let protocol = translate(sProtocol)
@ -87,7 +83,7 @@ proc parse*(
## group4: Unit of period - only h:hour, m:minute, s:second, ms:millisecond allowed
## whitespaces are allowed lazily
const parseRegex =
"""^\s*((store|storev2|storev3|lightpush|px|filter)\s*:)?\s*(\d+)\s*\/\s*(\d+)\s*(s|h|m|ms)\s*$"""
"""^\s*((store|storev3|lightpush|px|filter)\s*:)?\s*(\d+)\s*\/\s*(\d+)\s*(s|h|m|ms)\s*$"""
const regexParseSize = re2(parseRegex)
for settingStr in settings:
let aSetting = settingStr.toLower()

View File

@ -106,16 +106,8 @@ proc mgetOrPut*[K, V](t: var TimedMap[K, V], k: K, v: V, now = Moment.now()): va
let
previous = t.del(k) # Refresh existing item
addedAt =
if previous.isSome():
previous[].addedAt
else:
now
value =
if previous.isSome():
previous[].value
else:
v
addedAt = if previous.isSome(): previous[].addedAt else: now
value = if previous.isSome(): previous[].value else: v
let node =
TimedEntry[K, V](key: k, value: value, addedAt: addedAt, expiresAt: now + t.timeout)

View File

@ -4,7 +4,6 @@ type WakuProtocol* {.pure.} = enum
RelayProtocol = "Relay"
RlnRelayProtocol = "Rln Relay"
StoreProtocol = "Store"
LegacyStoreProtocol = "Legacy Store"
FilterProtocol = "Filter"
LightpushProtocol = "Lightpush"
LegacyLightpushProtocol = "Legacy Lightpush"
@ -12,13 +11,12 @@ type WakuProtocol* {.pure.} = enum
RendezvousProtocol = "Rendezvous"
MixProtocol = "Mix"
StoreClientProtocol = "Store Client"
LegacyStoreClientProtocol = "Legacy Store Client"
FilterClientProtocol = "Filter Client"
LightpushClientProtocol = "Lightpush Client"
LegacyLightpushClientProtocol = "Legacy Lightpush Client"
const
RelayProtocols* = {RelayProtocol}
StoreClientProtocols* = {StoreClientProtocol, LegacyStoreClientProtocol}
StoreClientProtocols* = {StoreClientProtocol}
LightpushClientProtocols* = {LightpushClientProtocol, LegacyLightpushClientProtocol}
FilterClientProtocols* = {FilterClientProtocol}

View File

@ -8,6 +8,6 @@ type WakuPeerEventKind* {.pure.} = enum
EventMetadataUpdated
EventBroker:
type EventWakuPeer* = object
type WakuPeerEvent* = object
peerId*: PeerId
kind*: WakuPeerEventKind

View File

@ -84,20 +84,19 @@ proc withNetworkConfigurationDetails*(
): WakuNodeBuilderResult {.
deprecated: "use 'builder.withNetworkConfiguration()' instead"
.} =
let netConfig =
?NetConfig.init(
bindIp = bindIp,
bindPort = bindPort,
extIp = extIp,
extPort = extPort,
extMultiAddrs = extMultiAddrs,
wsBindPort = some(wsBindPort),
wsEnabled = wsEnabled,
wssEnabled = wssEnabled,
wakuFlags = wakuFlags,
dns4DomainName = dns4DomainName,
dnsNameServers = dnsNameServers,
)
let netConfig = ?NetConfig.init(
bindIp = bindIp,
bindPort = bindPort,
extIp = extIp,
extPort = extPort,
extMultiAddrs = extMultiAddrs,
wsBindPort = some(wsBindPort),
wsEnabled = wsEnabled,
wssEnabled = wssEnabled,
wakuFlags = wakuFlags,
dns4DomainName = dns4DomainName,
dnsNameServers = dnsNameServers,
)
builder.withNetworkConfiguration(netConfig)
ok()

View File

@ -14,7 +14,6 @@ type StoreServiceConfBuilder* = object
dbMigration*: Option[bool]
dbURl*: Option[string]
dbVacuum*: Option[bool]
supportV2*: Option[bool]
maxNumDbConnections*: Option[int]
retentionPolicies*: seq[string]
resume*: Option[bool]
@ -35,9 +34,6 @@ proc withDbUrl*(b: var StoreServiceConfBuilder, dbUrl: string) =
proc withDbVacuum*(b: var StoreServiceConfBuilder, dbVacuum: bool) =
b.dbVacuum = some(dbVacuum)
proc withSupportV2*(b: var StoreServiceConfBuilder, supportV2: bool) =
b.supportV2 = some(supportV2)
proc withMaxNumDbConnections*(
b: var StoreServiceConfBuilder, maxNumDbConnections: int
) =
@ -104,7 +100,6 @@ proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string
dbMigration: b.dbMigration.get(true),
dbURl: b.dbUrl.get(),
dbVacuum: b.dbVacuum.get(false),
supportV2: b.supportV2.get(false),
maxNumDbConnections: b.maxNumDbConnections.get(50),
retentionPolicies: retentionPolicies,
resume: b.resume.get(false),

View File

@ -353,10 +353,13 @@ proc applyNetworkConf(builder: var WakuConfBuilder) =
builder.rlnRelayConf.withEpochSizeSec(networkConf.rlnEpochSizeSec)
if builder.rlnRelayConf.userMessageLimit.isSome():
warn "RLN Relay Dynamic was provided alongside a network conf",
warn "RLN Relay User Message Limit was provided alongside a network conf",
used = networkConf.rlnRelayUserMessageLimit,
discarded = builder.rlnRelayConf.userMessageLimit
builder.rlnRelayConf.withUserMessageLimit(networkConf.rlnRelayUserMessageLimit)
if builder.rlnRelayConf.userMessageLimit.get(0) == 0:
## only override with the "preset" value if there was not explicit set value
builder.rlnRelayConf.withUserMessageLimit(networkConf.rlnRelayUserMessageLimit)
# End Apply relay parameters
case builder.maxMessageSize.kind

View File

@ -56,12 +56,11 @@ proc TheWakuNetworkConf*(T: type NetworkConf): NetworkConf =
mix: false,
p2pReliability: false,
discv5Discovery: true,
discv5BootstrapNodes:
@[
"enr:-QESuED0qW1BCmF-oH_ARGPr97Nv767bl_43uoy70vrbah3EaCAdK3Q0iRQ6wkSTTpdrg_dU_NC2ydO8leSlRpBX4pxiAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOTd-h5owwj-cx7xrmbvQKU8CV3Fomfdvcv1MBc-67T5oN0Y3CCdl-DdWRwgiMohXdha3UyDw",
"enr:-QEkuED9X80QF_jcN9gA2ZRhhmwVEeJnsg_Hyg7IFCTYnZD0BDI7a8HArE61NhJZFwygpHCWkgwSt2vqiABXkBxzIqZBAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPFAS8zz2cg1QQhxMaK8CzkGQ5wdHvPJcrgLzJGOiHpwYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
"enr:-QEkuEBfEzJm_kigJ2HoSS_RBFJYhKHocGdkhhBr6jSUAWjLdFPp6Pj1l4yiTQp7TGHyu1kC6FyaU573VN8klLsEm-XuAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOwsS69tgD7u1K50r5-qG5hweuTwa0W26aYPnvivpNlrYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
],
discv5BootstrapNodes: @[
"enr:-QESuED0qW1BCmF-oH_ARGPr97Nv767bl_43uoy70vrbah3EaCAdK3Q0iRQ6wkSTTpdrg_dU_NC2ydO8leSlRpBX4pxiAYJpZIJ2NIJpcIRA4VDAim11bHRpYWRkcnO4XAArNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwAtNiZub2RlLTAxLmRvLWFtczMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOTd-h5owwj-cx7xrmbvQKU8CV3Fomfdvcv1MBc-67T5oN0Y3CCdl-DdWRwgiMohXdha3UyDw",
"enr:-QEkuED9X80QF_jcN9gA2ZRhhmwVEeJnsg_Hyg7IFCTYnZD0BDI7a8HArE61NhJZFwygpHCWkgwSt2vqiABXkBxzIqZBAYJpZIJ2NIJpcIQiQlleim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmdjLXVzLWNlbnRyYWwxLWEud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQPFAS8zz2cg1QQhxMaK8CzkGQ5wdHvPJcrgLzJGOiHpwYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
"enr:-QEkuEBfEzJm_kigJ2HoSS_RBFJYhKHocGdkhhBr6jSUAWjLdFPp6Pj1l4yiTQp7TGHyu1kC6FyaU573VN8klLsEm-XuAYJpZIJ2NIJpcIQI2SVcim11bHRpYWRkcnO4bgA0Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQZ2XwA2Ni9ub2RlLTAxLmFjLWNuLWhvbmdrb25nLWMud2FrdS5zYW5kYm94LnN0YXR1cy5pbQYfQN4DgnJzkwABCAAAAAEAAgADAAQABQAGAAeJc2VjcDI1NmsxoQOwsS69tgD7u1K50r5-qG5hweuTwa0W26aYPnvivpNlrYN0Y3CCdl-DdWRwgiMohXdha3UyDw",
],
)
# cluster-id=2 (Logos Dev Network)
@ -83,15 +82,14 @@ proc LogosDevConf*(T: type NetworkConf): NetworkConf =
p2pReliability: true,
discv5Discovery: true,
discv5BootstrapNodes: @[],
entryNodes:
@[
"/dns4/delivery-01.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmTUbnxLGT9JvV6mu9oPyDjqHK4Phs1VDJNUgESgNSkuby",
"/dns4/delivery-02.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmMK7PYygBtKUQ8EHp7EfaD3bCEsJrkFooK8RQ2PVpJprH",
"/dns4/delivery-01.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm4S1JYkuzDKLKQvwgAhZKs9otxXqt8SCGtB4hoJP1S397",
"/dns4/delivery-02.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8Y9kgBNtjxvCnf1X6gnZJW5EGE4UwwCL3CCm55TwqBiH",
"/dns4/delivery-01.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8YokiNun9BkeA1ZRmhLbtNUvcwRr64F69tYj9fkGyuEP",
"/dns4/delivery-02.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAkvwhGHKNry6LACrB8TmEFoCJKEX29XR5dDUzk3UT3UNSE",
],
entryNodes: @[
"/dns4/delivery-01.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmTUbnxLGT9JvV6mu9oPyDjqHK4Phs1VDJNUgESgNSkuby",
"/dns4/delivery-02.do-ams3.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAmMK7PYygBtKUQ8EHp7EfaD3bCEsJrkFooK8RQ2PVpJprH",
"/dns4/delivery-01.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm4S1JYkuzDKLKQvwgAhZKs9otxXqt8SCGtB4hoJP1S397",
"/dns4/delivery-02.gc-us-central1-a.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8Y9kgBNtjxvCnf1X6gnZJW5EGE4UwwCL3CCm55TwqBiH",
"/dns4/delivery-01.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAm8YokiNun9BkeA1ZRmhLbtNUvcwRr64F69tYj9fkGyuEP",
"/dns4/delivery-02.ac-cn-hongkong-c.logos.dev.status.im/tcp/30303/p2p/16Uiu2HAkvwhGHKNry6LACrB8TmEFoCJKEX29XR5dDUzk3UT3UNSE",
],
)
proc validateShards*(

Some files were not shown because too many files have changed in this diff Show More