Merge branch 'master' into update_canary

This commit is contained in:
Darshan 2026-04-06 15:44:58 +05:30 committed by GitHub
commit f87c997ce6
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
89 changed files with 1208 additions and 14799 deletions

View File

@ -8,7 +8,7 @@ assignees: ''
---
<!--
Add appropriate release number and adjust the target fleet in the tittle!
Add appropriate release number and adjust the target fleet in the title!
-->
### Link to the Release PR
@ -20,17 +20,22 @@ Kindly add a link to the release PR where we have a sign-off from QA. At this ti
### Items to complete, in order
<!--
You can release into either waku.sanbox, status.prod, or both.
For status.prod it is crucial to coordinate such deployment with status friends.
You can release into either waku.sandbox, status.prod, or both. Both cases require coordination with the Infra Team.
waku.sandbox must be considered a prod fleet as it is used by external parties.
For status.prod it is crucial to coordinate such a deployment with the Status Team.
The following points should be followed in order.
-->
- [ ] Receive sign-off from DST.
- [ ] Inform the DST team about the expectations for this release. For example, whether we expect higher, the same, or lower bandwidth consumption, or whether a new protocol appears, etc.
- [ ] Ask DST to add a comment approving this deployment and add a link to the analysis report.
- [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
- [ ] Deploy to waku.sandbox
- [ ] Coordinate with Infra Team about possible changes in CI behavior
- [ ] Update waku.sandbox with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-waku-sandbox/).
- [ ] Deploy to status.prod
- [ ] Coordinate with Infra Team about possible changes in CI behavior
- [ ] Ask a Status admin to add a comment approving this deployment to happen now.
- [ ] Update status.prod with [this deployment job](https://ci.infra.status.im/job/nim-waku/job/deploy-status-prod/).
@ -42,7 +47,7 @@ For status.prod it is crucial to coordinate such deployment with status friends.
- [Release process](https://github.com/logos-messaging/logos-delivery/blob/master/docs/contributors/release-process.md)
- [Release notes](https://github.com/logos-messaging/logos-delivery/blob/master/CHANGELOG.md)
- [Infra-role-nim-waku](https://github.com/status-im/infra-role-nim-waku)
- [Infra-nim-waku](https://github.com/status-im/infra-nim-waku)
- [Infra-waku](https://github.com/status-im/infra-waku)
- [Infra-Status](https://github.com/status-im/infra-status)
- [Jenkins](https://ci.infra.status.im/job/nim-waku/)
- [Fleets](https://fleets.waku.org/)

View File

@ -1,3 +1,9 @@
## v0.37.3 (2026-03-25)
### Features
- Allow override user-message-rate-limit ([#3778](https://github.com/logos-messaging/logos-delivery/pull/3778))
## v0.37.2 (2026-03-19)
### Features

View File

@ -144,7 +144,7 @@ deps: | deps-common nat-libs waku.nims
# "-d:release" implies "--stacktrace:off" and it cannot be added to config.nims
ifeq ($(DEBUG), 0)
NIM_PARAMS := $(NIM_PARAMS) -d:release
NIM_PARAMS := $(NIM_PARAMS) -d:release -d:lto_incremental -d:strip
else
NIM_PARAMS := $(NIM_PARAMS) -d:debug
endif

View File

@ -36,7 +36,6 @@ import
waku_lightpush_legacy/rpc,
waku_enr,
discovery/waku_dnsdisc,
waku_store_legacy,
waku_node,
node/waku_metrics,
node/peer_manager,

View File

@ -26,10 +26,6 @@ if defined(windows):
# set the IMAGE_FILE_LARGE_ADDRESS_AWARE flag so we can use PAE, if enabled, and access more than 2 GiB of RAM
switch("passL", "-Wl,--large-address-aware")
# The dynamic Chronicles output currently prevents us from using colors on Windows
# because these require direct manipulations of the stdout File object.
switch("define", "chronicles_colors=off")
# https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#ssse3-supplemental-sse3
# suggests that SHA256 hashing with SSSE3 is 20% faster than without SSSE3, so
# given its near-ubiquity in the x86 installed base, it renders a distribution
@ -52,9 +48,10 @@ if defined(disableMarchNative):
switch("passL", "-march=haswell -mtune=generic")
else:
if defined(marchOptimized):
# https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#bmi2--adx
switch("passC", "-march=broadwell -mtune=generic")
switch("passL", "-march=broadwell -mtune=generic")
# -march=broadwell: https://github.com/status-im/nimbus-eth2/blob/stable/docs/cpu_features.md#bmi2--adx
# Changed to x86-64-v2 for broader support
switch("passC", "-march=x86-64-v2 -mtune=generic")
switch("passL", "-march=x86-64-v2 -mtune=generic")
else:
switch("passC", "-mssse3")
switch("passL", "-mssse3")
@ -76,6 +73,7 @@ else:
on
--opt:
speed
--excessiveStackTrace:
on
# enable metric collection
@ -85,8 +83,6 @@ else:
--define:
nimTypeNames
switch("define", "withoutPCRE")
# the default open files limit is too low on macOS (512), breaking the
# "--debugger:native" build. It can be increased with `ulimit -n 1024`.
if not defined(macosx) and not defined(android):

View File

@ -20,14 +20,7 @@ import
./waku_archive/test_driver_sqlite,
./waku_archive/test_retention_policy,
./waku_archive/test_waku_archive,
./waku_archive/test_partition_manager,
./waku_archive_legacy/test_driver_queue_index,
./waku_archive_legacy/test_driver_queue_pagination,
./waku_archive_legacy/test_driver_queue_query,
./waku_archive_legacy/test_driver_queue,
./waku_archive_legacy/test_driver_sqlite_query,
./waku_archive_legacy/test_driver_sqlite,
./waku_archive_legacy/test_waku_archive
./waku_archive/test_partition_manager
const os* {.strdefine.} = ""
when os == "Linux" and
@ -37,8 +30,6 @@ when os == "Linux" and
import
./waku_archive/test_driver_postgres_query,
./waku_archive/test_driver_postgres,
#./waku_archive_legacy/test_driver_postgres_query,
#./waku_archive_legacy/test_driver_postgres,
./factory/test_node_factory,
./wakunode_rest/test_rest_store,
./wakunode_rest/test_all
@ -50,20 +41,9 @@ import
./waku_store/test_waku_store,
./waku_store/test_wakunode_store
# Waku legacy store test suite
import
./waku_store_legacy/test_client,
./waku_store_legacy/test_rpc_codec,
./waku_store_legacy/test_waku_store,
./waku_store_legacy/test_wakunode_store
# Waku store sync suite
import ./waku_store_sync/test_all
when defined(waku_exp_store_resume):
# TODO: Review store resume test cases (#1282)
import ./waku_store_legacy/test_resume
import
./node/test_all,
./waku_filter_v2/test_all,

View File

@ -1,6 +1,6 @@
{.used.}
import std/[strutils, net, options, sets]
import std/[strutils, sequtils, net, options, sets, tables]
import chronos, testutils/unittests, stew/byteutils
import libp2p/[peerid, peerinfo, multiaddress, crypto/crypto]
import ../testlib/[common, wakucore, wakunode, testasync]
@ -13,12 +13,12 @@ import
common/broker/broker_context,
events/message_events,
waku_relay/protocol,
node/kernel_api/filter,
node/delivery_service/subscription_manager,
]
import waku/factory/waku_conf
import tools/confutils/cli_args
# TODO: Edge testing (after MAPI edge support is completed)
const TestTimeout = chronos.seconds(10)
const NegativeTestTimeout = chronos.seconds(2)
@ -60,8 +60,10 @@ proc waitForEvents(
return await manager.receivedEvent.wait().withTimeout(timeout)
type TestNetwork = ref object
publisher: WakuNode
publisher: WakuNode # Relay node that publishes messages in tests.
meshBuddy: WakuNode # Extra relay peer for publisher's mesh (Edge tests only).
subscriber: Waku
# The receiver node in tests. Edge node in edge tests, Core node in relay tests.
publisherPeerInfo: RemotePeerInfo
proc createApiNodeConf(
@ -94,8 +96,12 @@ proc setupNetwork(
lockNewGlobalBrokerContext:
net.publisher =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
net.publisher.mountMetadata(3, @[0'u16]).expect("Failed to mount metadata")
net.publisher.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata"
)
(await net.publisher.mountRelay()).expect("Failed to mount relay")
if mode == cli_args.WakuMode.Edge:
await net.publisher.mountFilter()
await net.publisher.mountLibp2pPing()
await net.publisher.start()
@ -104,16 +110,32 @@ proc setupNetwork(
proc dummyHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
discard
# Subscribe the publisher to all shards to guarantee a GossipSub mesh with the subscriber.
# Currently, Core/Relay nodes auto-subscribe to all network shards on boot, but if
# that changes, this will be needed to cause the publisher to have shard interest
# for any shards the subscriber may want to use, which is required for waitForMesh to work.
var shards: seq[PubsubTopic]
for i in 0 ..< numShards.int:
let shard = PubsubTopic("/waku/2/rs/3/" & $i)
shards.add(PubsubTopic("/waku/2/rs/3/" & $i))
for shard in shards:
net.publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub publisher"
)
if mode == cli_args.WakuMode.Edge:
lockNewGlobalBrokerContext:
net.meshBuddy =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
net.meshBuddy.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on meshBuddy"
)
(await net.meshBuddy.mountRelay()).expect("Failed to mount relay on meshBuddy")
await net.meshBuddy.start()
for shard in shards:
net.meshBuddy.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub meshBuddy"
)
await net.meshBuddy.connectToNodes(@[net.publisherPeerInfo])
net.subscriber = await setupSubscriberNode(createApiNodeConf(mode, numShards))
await net.subscriber.node.connectToNodes(@[net.publisherPeerInfo])
@ -125,6 +147,10 @@ proc teardown(net: TestNetwork) {.async.} =
(await net.subscriber.stop()).expect("Failed to stop subscriber node")
net.subscriber = nil
if not isNil(net.meshBuddy):
await net.meshBuddy.stop()
net.meshBuddy = nil
if not isNil(net.publisher):
await net.publisher.stop()
net.publisher = nil
@ -141,18 +167,34 @@ proc waitForMesh(node: WakuNode, shard: PubsubTopic) {.async.} =
await sleepAsync(100.milliseconds)
raise newException(ValueError, "GossipSub Mesh failed to stabilize on " & shard)
proc waitForEdgeSubs(w: Waku, shard: PubsubTopic) {.async.} =
let sm = w.deliveryService.subscriptionManager
for _ in 0 ..< 50:
if sm.edgeFilterPeerCount(shard) > 0:
return
await sleepAsync(100.milliseconds)
raise newException(ValueError, "Edge filter subscription failed on " & shard)
proc publishToMesh(
net: TestNetwork, contentTopic: ContentTopic, payload: seq[byte]
): Future[Result[int, string]] {.async.} =
# Publishes a message from "publisher" via relay into the gossipsub mesh.
let shard = net.subscriber.node.getRelayShard(contentTopic)
await waitForMesh(net.publisher, shard)
let msg = WakuMessage(
payload: payload, contentTopic: contentTopic, version: 0, timestamp: now()
)
return await net.publisher.publish(some(shard), msg)
proc publishToMeshAfterEdgeReady(
net: TestNetwork, contentTopic: ContentTopic, payload: seq[byte]
): Future[Result[int, string]] {.async.} =
# First, ensure "subscriber" node (an edge node) is subscribed and ready to receive.
# Afterwards, "publisher" (relay node) sends the message in the gossipsub network.
let shard = net.subscriber.node.getRelayShard(contentTopic)
await waitForEdgeSubs(net.subscriber, shard)
return await net.publishToMesh(contentTopic, payload)
suite "Messaging API, SubscriptionManager":
asyncTest "Subscription API, relay node auto subscribe and receive message":
let net = await setupNetwork(1)
@ -398,3 +440,370 @@ suite "Messaging API, SubscriptionManager":
activeSubs.add(t)
await verifyNetworkState(activeSubs)
asyncTest "Subscription API, edge node subscribe and receive message":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let testTopic = ContentTopic("/waku/2/test-content/proto")
(await net.subscriber.subscribe(testTopic)).expect("failed to subscribe")
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
discard (await net.publishToMeshAfterEdgeReady(testTopic, "Hello, edge!".toBytes())).expect(
"Publish failed"
)
require await eventManager.waitForEvents(TestTimeout)
require eventManager.receivedMessages.len == 1
check eventManager.receivedMessages[0].contentTopic == testTopic
asyncTest "Subscription API, edge node ignores unsubscribed content topics":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let subbedTopic = ContentTopic("/waku/2/subbed-topic/proto")
let ignoredTopic = ContentTopic("/waku/2/ignored-topic/proto")
(await net.subscriber.subscribe(subbedTopic)).expect("failed to subscribe")
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
discard (await net.publishToMesh(ignoredTopic, "Ghost Msg".toBytes())).expect(
"Publish failed"
)
check not await eventManager.waitForEvents(NegativeTestTimeout)
check eventManager.receivedMessages.len == 0
asyncTest "Subscription API, edge node unsubscribe stops message receipt":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let testTopic = ContentTopic("/waku/2/unsub-test/proto")
(await net.subscriber.subscribe(testTopic)).expect("failed to subscribe")
net.subscriber.unsubscribe(testTopic).expect("failed to unsubscribe")
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
discard (await net.publishToMesh(testTopic, "Should be dropped".toBytes())).expect(
"Publish failed"
)
check not await eventManager.waitForEvents(NegativeTestTimeout)
check eventManager.receivedMessages.len == 0
asyncTest "Subscription API, edge node overlapping topics isolation":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let topicA = ContentTopic("/waku/2/topic-a/proto")
let topicB = ContentTopic("/waku/2/topic-b/proto")
(await net.subscriber.subscribe(topicA)).expect("failed to sub A")
(await net.subscriber.subscribe(topicB)).expect("failed to sub B")
let shard = net.subscriber.node.getRelayShard(topicA)
await waitForEdgeSubs(net.subscriber, shard)
let eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
defer:
eventManager.teardown()
net.subscriber.unsubscribe(topicA).expect("failed to unsub A")
discard (await net.publishToMesh(topicA, "Dropped Message".toBytes())).expect(
"Publish A failed"
)
discard
(await net.publishToMesh(topicB, "Kept Msg".toBytes())).expect("Publish B failed")
require await eventManager.waitForEvents(TestTimeout)
require eventManager.receivedMessages.len == 1
check eventManager.receivedMessages[0].contentTopic == topicB
asyncTest "Subscription API, edge node resubscribe after unsubscribe":
let net = await setupNetwork(1, cli_args.WakuMode.Edge)
defer:
await net.teardown()
let testTopic = ContentTopic("/waku/2/resub-test/proto")
(await net.subscriber.subscribe(testTopic)).expect("Initial sub failed")
var eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
discard (await net.publishToMeshAfterEdgeReady(testTopic, "Msg 1".toBytes())).expect(
"Pub 1 failed"
)
require await eventManager.waitForEvents(TestTimeout)
eventManager.teardown()
net.subscriber.unsubscribe(testTopic).expect("Unsub failed")
eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
discard
(await net.publishToMesh(testTopic, "Ghost".toBytes())).expect("Ghost pub failed")
check not await eventManager.waitForEvents(NegativeTestTimeout)
eventManager.teardown()
(await net.subscriber.subscribe(testTopic)).expect("Resub failed")
eventManager = newReceiveEventListenerManager(net.subscriber.brokerCtx, 1)
discard (await net.publishToMeshAfterEdgeReady(testTopic, "Msg 2".toBytes())).expect(
"Pub 2 failed"
)
require await eventManager.waitForEvents(TestTimeout)
check eventManager.receivedMessages[0].payload == "Msg 2".toBytes()
asyncTest "Subscription API, edge node failover after service peer dies":
# NOTE: This test is a bit more verbose because it defines a custom topology.
# It doesn't use the shared TestNetwork helper.
# This mounts two service peers for the edge node then fails one.
let numShards: uint16 = 1
let shards = @[PubsubTopic("/waku/2/rs/3/0")]
proc dummyHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
discard
var publisher: WakuNode
lockNewGlobalBrokerContext:
publisher =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
publisher.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on publisher"
)
(await publisher.mountRelay()).expect("Failed to mount relay on publisher")
await publisher.mountFilter()
await publisher.mountLibp2pPing()
await publisher.start()
for shard in shards:
publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub publisher"
)
let publisherPeerInfo = publisher.peerInfo.toRemotePeerInfo()
var meshBuddy: WakuNode
lockNewGlobalBrokerContext:
meshBuddy =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
meshBuddy.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on meshBuddy"
)
(await meshBuddy.mountRelay()).expect("Failed to mount relay on meshBuddy")
await meshBuddy.mountFilter()
await meshBuddy.mountLibp2pPing()
await meshBuddy.start()
for shard in shards:
meshBuddy.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub meshBuddy"
)
let meshBuddyPeerInfo = meshBuddy.peerInfo.toRemotePeerInfo()
await meshBuddy.connectToNodes(@[publisherPeerInfo])
let conf = createApiNodeConf(cli_args.WakuMode.Edge, numShards)
var subscriber: Waku
lockNewGlobalBrokerContext:
subscriber = (await createNode(conf)).expect("Failed to create edge subscriber")
(await startWaku(addr subscriber)).expect("Failed to start edge subscriber")
# Connect edge subscriber to both filter servers so selectPeers finds both
await subscriber.node.connectToNodes(@[publisherPeerInfo, meshBuddyPeerInfo])
let testTopic = ContentTopic("/waku/2/failover-test/proto")
let shard = subscriber.node.getRelayShard(testTopic)
(await subscriber.subscribe(testTopic)).expect("Failed to subscribe")
# Wait for dialing both filter servers (HealthyThreshold = 2)
for _ in 0 ..< 100:
if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2:
break
await sleepAsync(100.milliseconds)
check subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2
# Verify message delivery with both servers alive
await waitForMesh(publisher, shard)
var eventManager = newReceiveEventListenerManager(subscriber.brokerCtx, 1)
let msg1 = WakuMessage(
payload: "Before failover".toBytes(),
contentTopic: testTopic,
version: 0,
timestamp: now(),
)
discard (await publisher.publish(some(shard), msg1)).expect("Publish 1 failed")
require await eventManager.waitForEvents(TestTimeout)
check eventManager.receivedMessages[0].payload == "Before failover".toBytes()
eventManager.teardown()
# Disconnect meshBuddy from edge (keeps relay mesh alive for publishing)
await subscriber.node.disconnectNode(meshBuddyPeerInfo)
# Wait for the dead peer to be pruned
for _ in 0 ..< 50:
if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) < 2:
break
await sleepAsync(100.milliseconds)
check subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 1
# Verify messages still arrive through the surviving filter server (publisher)
eventManager = newReceiveEventListenerManager(subscriber.brokerCtx, 1)
let msg2 = WakuMessage(
payload: "After failover".toBytes(),
contentTopic: testTopic,
version: 0,
timestamp: now(),
)
discard (await publisher.publish(some(shard), msg2)).expect("Publish 2 failed")
require await eventManager.waitForEvents(TestTimeout)
check eventManager.receivedMessages[0].payload == "After failover".toBytes()
eventManager.teardown()
(await subscriber.stop()).expect("Failed to stop subscriber")
await meshBuddy.stop()
await publisher.stop()
asyncTest "Subscription API, edge node dials replacement after peer eviction":
# 3 service peers: publisher, meshBuddy, sparePeer. Edge subscribes and
# confirms 2 (HealthyThreshold). After one is disconnected, the sub loop
# should detect the loss and dial the spare to recover back to threshold.
let numShards: uint16 = 1
let shards = @[PubsubTopic("/waku/2/rs/3/0")]
proc dummyHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
discard
var publisher: WakuNode
lockNewGlobalBrokerContext:
publisher =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
publisher.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on publisher"
)
(await publisher.mountRelay()).expect("Failed to mount relay on publisher")
await publisher.mountFilter()
await publisher.mountLibp2pPing()
await publisher.start()
for shard in shards:
publisher.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub publisher"
)
let publisherPeerInfo = publisher.peerInfo.toRemotePeerInfo()
var meshBuddy: WakuNode
lockNewGlobalBrokerContext:
meshBuddy =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
meshBuddy.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on meshBuddy"
)
(await meshBuddy.mountRelay()).expect("Failed to mount relay on meshBuddy")
await meshBuddy.mountFilter()
await meshBuddy.mountLibp2pPing()
await meshBuddy.start()
for shard in shards:
meshBuddy.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub meshBuddy"
)
let meshBuddyPeerInfo = meshBuddy.peerInfo.toRemotePeerInfo()
var sparePeer: WakuNode
lockNewGlobalBrokerContext:
sparePeer =
newTestWakuNode(generateSecp256k1Key(), parseIpAddress("0.0.0.0"), Port(0))
sparePeer.mountMetadata(3, toSeq(0'u16 ..< numShards)).expect(
"Failed to mount metadata on sparePeer"
)
(await sparePeer.mountRelay()).expect("Failed to mount relay on sparePeer")
await sparePeer.mountFilter()
await sparePeer.mountLibp2pPing()
await sparePeer.start()
for shard in shards:
sparePeer.subscribe((kind: PubsubSub, topic: shard), dummyHandler).expect(
"Failed to sub sparePeer"
)
let sparePeerInfo = sparePeer.peerInfo.toRemotePeerInfo()
await meshBuddy.connectToNodes(@[publisherPeerInfo])
await sparePeer.connectToNodes(@[publisherPeerInfo])
let conf = createApiNodeConf(cli_args.WakuMode.Edge, numShards)
var subscriber: Waku
lockNewGlobalBrokerContext:
subscriber = (await createNode(conf)).expect("Failed to create edge subscriber")
(await startWaku(addr subscriber)).expect("Failed to start edge subscriber")
await subscriber.node.connectToNodes(
@[publisherPeerInfo, meshBuddyPeerInfo, sparePeerInfo]
)
let testTopic = ContentTopic("/waku/2/replacement-test/proto")
let shard = subscriber.node.getRelayShard(testTopic)
(await subscriber.subscribe(testTopic)).expect("Failed to subscribe")
# Wait for 2 confirmed peers (HealthyThreshold). The 3rd is available but not dialed.
for _ in 0 ..< 100:
if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2:
break
await sleepAsync(100.milliseconds)
require subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) ==
2
await subscriber.node.disconnectNode(meshBuddyPeerInfo)
# Wait for the sub loop to detect the loss and dial a replacement
for _ in 0 ..< 100:
if subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2:
break
await sleepAsync(100.milliseconds)
check subscriber.deliveryService.subscriptionManager.edgeFilterPeerCount(shard) >= 2
await waitForMesh(publisher, shard)
var eventManager = newReceiveEventListenerManager(subscriber.brokerCtx, 1)
let msg = WakuMessage(
payload: "After replacement".toBytes(),
contentTopic: testTopic,
version: 0,
timestamp: now(),
)
discard (await publisher.publish(some(shard), msg)).expect("Publish failed")
require await eventManager.waitForEvents(TestTimeout)
check eventManager.receivedMessages[0].payload == "After replacement".toBytes()
eventManager.teardown()
(await subscriber.stop()).expect("Failed to stop subscriber")
await sparePeer.stop()
await meshBuddy.stop()
await publisher.stop()

View File

@ -25,7 +25,6 @@ suite "RateLimitSetting":
test "Parse rate limit setting - ok":
let test1 = "10/2m"
let test2 = " store : 10 /1h"
let test2a = "storev2 : 10 /1h"
let test2b = "storeV3: 12 /1s"
let test3 = "LIGHTPUSH: 10/ 1m"
let test4 = "px:10/2 s "
@ -34,7 +33,6 @@ suite "RateLimitSetting":
let expU = UnlimitedRateLimit
let exp1: RateLimitSetting = (10, 2.minutes)
let exp2: RateLimitSetting = (10, 1.hours)
let exp2a: RateLimitSetting = (10, 1.hours)
let exp2b: RateLimitSetting = (12, 1.seconds)
let exp3: RateLimitSetting = (10, 1.minutes)
let exp4: RateLimitSetting = (10, 2.seconds)
@ -42,7 +40,6 @@ suite "RateLimitSetting":
let res1 = ProtocolRateLimitSettings.parse(@[test1])
let res2 = ProtocolRateLimitSettings.parse(@[test2])
let res2a = ProtocolRateLimitSettings.parse(@[test2a])
let res2b = ProtocolRateLimitSettings.parse(@[test2b])
let res3 = ProtocolRateLimitSettings.parse(@[test3])
let res4 = ProtocolRateLimitSettings.parse(@[test4])
@ -53,15 +50,7 @@ suite "RateLimitSetting":
res1.get() == {GLOBAL: exp1, FILTER: FilterDefaultPerPeerRateLimit}.toTable()
res2.isOk()
res2.get() ==
{
GLOBAL: expU,
FILTER: FilterDefaultPerPeerRateLimit,
STOREV2: exp2,
STOREV3: exp2,
}.toTable()
res2a.isOk()
res2a.get() ==
{GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV2: exp2a}.toTable()
{GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV3: exp2}.toTable()
res2b.isOk()
res2b.get() ==
{GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV3: exp2b}.toTable()
@ -77,7 +66,6 @@ suite "RateLimitSetting":
test "Parse rate limit setting - err":
let test1 = "10/2d"
let test2 = " stre : 10 /1h"
let test2a = "storev2 10 /1h"
let test2b = "storev3: 12 1s"
let test3 = "somethingelse: 10/ 1m"
let test4 = ":px:10/2 s "
@ -85,7 +73,6 @@ suite "RateLimitSetting":
let res1 = ProtocolRateLimitSettings.parse(@[test1])
let res2 = ProtocolRateLimitSettings.parse(@[test2])
let res2a = ProtocolRateLimitSettings.parse(@[test2a])
let res2b = ProtocolRateLimitSettings.parse(@[test2b])
let res3 = ProtocolRateLimitSettings.parse(@[test3])
let res4 = ProtocolRateLimitSettings.parse(@[test4])
@ -94,7 +81,6 @@ suite "RateLimitSetting":
check:
res1.isErr()
res2.isErr()
res2a.isErr()
res2b.isErr()
res3.isErr()
res4.isErr()
@ -103,13 +89,12 @@ suite "RateLimitSetting":
test "Parse rate limit setting - complex":
let expU = UnlimitedRateLimit
let test1 = @["lightpush:2/2ms", "10/2m", " store: 3/3s", " storev2:12/12s"]
let test1 = @["lightpush:2/2ms", "10/2m", " store: 3/3s"]
let exp1 = {
GLOBAL: (10, 2.minutes),
FILTER: FilterDefaultPerPeerRateLimit,
LIGHTPUSH: (2, 2.milliseconds),
STOREV3: (3, 3.seconds),
STOREV2: (12, 12.seconds),
}.toTable()
let res1 = ProtocolRateLimitSettings.parse(test1)
@ -118,7 +103,6 @@ suite "RateLimitSetting":
res1.isOk()
res1.get() == exp1
res1.get().getSetting(PEEREXCHG) == (10, 2.minutes)
res1.get().getSetting(STOREV2) == (12, 12.seconds)
res1.get().getSetting(STOREV3) == (3, 3.seconds)
res1.get().getSetting(LIGHTPUSH) == (2, 2.milliseconds)
@ -127,7 +111,6 @@ suite "RateLimitSetting":
GLOBAL: expU,
LIGHTPUSH: (2, 2.milliseconds),
STOREV3: (3, 3.seconds),
STOREV2: (3, 3.seconds),
FILTER: (4, 42.milliseconds),
PEEREXCHG: (10, 10.hours),
}.toTable()
@ -138,13 +121,9 @@ suite "RateLimitSetting":
res2.isOk()
res2.get() == exp2
let test3 =
@["storev2:1/1s", "store:3/3s", "storev3:4/42ms", "storev3:5/5s", "storev3:6/6s"]
let test3 = @["store:3/3s", "storev3:4/42ms", "storev3:5/5s", "storev3:6/6s"]
let exp3 = {
GLOBAL: expU,
FILTER: FilterDefaultPerPeerRateLimit,
STOREV3: (6, 6.seconds),
STOREV2: (1, 1.seconds),
GLOBAL: expU, FILTER: FilterDefaultPerPeerRateLimit, STOREV3: (6, 6.seconds)
}.toTable()
let res3 = ProtocolRateLimitSettings.parse(test3)

View File

@ -6,6 +6,5 @@ import
./test_wakunode_lightpush,
./test_wakunode_peer_exchange,
./test_wakunode_store,
./test_wakunode_legacy_store,
./test_wakunode_peer_manager,
./test_wakunode_health_monitor

View File

@ -12,12 +12,18 @@ import
node/health_monitor/health_status,
node/health_monitor/connection_status,
node/health_monitor/protocol_health,
node/health_monitor/topic_health,
node/health_monitor/node_health_monitor,
node/delivery_service/delivery_service,
node/delivery_service/subscription_manager,
node/kernel_api/relay,
node/kernel_api/store,
node/kernel_api/lightpush,
node/kernel_api/filter,
events/health_events,
events/peer_events,
waku_archive,
common/broker/broker_context,
]
import ../testlib/[wakunode, wakucore], ../waku_archive/archive_utils
@ -129,13 +135,12 @@ suite "Health Monitor - health state calculation":
suite "Health Monitor - events":
asyncTest "Core (relay) health update":
let
nodeAKey = generateSecp256k1Key()
var nodeA: WakuNode
lockNewGlobalBrokerContext:
let nodeAKey = generateSecp256k1Key()
nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0))
(await nodeA.mountRelay()).expect("Node A failed to mount Relay")
await nodeA.start()
(await nodeA.mountRelay()).expect("Node A failed to mount Relay")
await nodeA.start()
let monitorA = NodeHealthMonitor.new(nodeA)
@ -151,17 +156,15 @@ suite "Health Monitor - events":
monitorA.startHealthMonitor().expect("Health monitor failed to start")
let
nodeBKey = generateSecp256k1Key()
var nodeB: WakuNode
lockNewGlobalBrokerContext:
let nodeBKey = generateSecp256k1Key()
nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0))
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
await nodeB.mountStore()
await nodeB.start()
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
await nodeB.mountStore()
await nodeB.start()
await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()])
@ -214,15 +217,20 @@ suite "Health Monitor - events":
await nodeA.stop()
asyncTest "Edge (light client) health update":
let
nodeAKey = generateSecp256k1Key()
var nodeA: WakuNode
lockNewGlobalBrokerContext:
let nodeAKey = generateSecp256k1Key()
nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0))
nodeA.mountLightpushClient()
await nodeA.mountFilterClient()
nodeA.mountStoreClient()
require nodeA.mountAutoSharding(1, 8).isOk
nodeA.mountMetadata(1, @[0'u16]).expect("Node A failed to mount metadata")
await nodeA.start()
nodeA.mountLightpushClient()
await nodeA.mountFilterClient()
nodeA.mountStoreClient()
await nodeA.start()
let ds =
DeliveryService.new(false, nodeA).expect("Failed to create DeliveryService")
ds.startDeliveryService().expect("Failed to start DeliveryService")
let monitorA = NodeHealthMonitor.new(nodeA)
@ -238,23 +246,40 @@ suite "Health Monitor - events":
monitorA.startHealthMonitor().expect("Health monitor failed to start")
let
nodeBKey = generateSecp256k1Key()
var nodeB: WakuNode
lockNewGlobalBrokerContext:
let nodeBKey = generateSecp256k1Key()
nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0))
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
(await nodeB.mountLightpush()).expect("Node B failed to mount lightpush")
await nodeB.mountFilter()
await nodeB.mountStore()
require nodeB.mountAutoSharding(1, 8).isOk
nodeB.mountMetadata(1, toSeq(0'u16 ..< 8'u16)).expect(
"Node B failed to mount metadata"
)
await nodeB.start()
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
(await nodeB.mountLightpush()).expect("Node B failed to mount lightpush")
await nodeB.mountFilter()
await nodeB.mountStore()
await nodeB.start()
var metadataFut = newFuture[void]("waitForMetadata")
let metadataLis = WakuPeerEvent
.listen(
nodeA.brokerCtx,
proc(evt: WakuPeerEvent): Future[void] {.async: (raises: []), gcsafe.} =
if not metadataFut.finished and
evt.kind == WakuPeerEventKind.EventMetadataUpdated:
metadataFut.complete()
,
)
.expect("Failed to listen for metadata")
await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()])
let metadataOk = await metadataFut.withTimeout(TestConnectivityTimeLimit)
WakuPeerEvent.dropListener(nodeA.brokerCtx, metadataLis)
require metadataOk
let connectTimeLimit = Moment.now() + TestConnectivityTimeLimit
var gotConnected = false
@ -292,4 +317,118 @@ suite "Health Monitor - events":
lastStatus == ConnectionStatus.Disconnected
await monitorA.stopHealthMonitor()
await ds.stopDeliveryService()
await nodeA.stop()
asyncTest "Edge health driven by confirmed filter subscriptions":
var nodeA: WakuNode
lockNewGlobalBrokerContext:
let nodeAKey = generateSecp256k1Key()
nodeA = newTestWakuNode(nodeAKey, parseIpAddress("127.0.0.1"), Port(0))
await nodeA.mountFilterClient()
nodeA.mountLightpushClient()
nodeA.mountStoreClient()
require nodeA.mountAutoSharding(1, 8).isOk
nodeA.mountMetadata(1, @[0'u16]).expect("Node A failed to mount metadata")
await nodeA.start()
let ds =
DeliveryService.new(false, nodeA).expect("Failed to create DeliveryService")
ds.startDeliveryService().expect("Failed to start DeliveryService")
let subMgr = ds.subscriptionManager
var nodeB: WakuNode
lockNewGlobalBrokerContext:
let nodeBKey = generateSecp256k1Key()
nodeB = newTestWakuNode(nodeBKey, parseIpAddress("127.0.0.1"), Port(0))
let driver = newSqliteArchiveDriver()
nodeB.mountArchive(driver).expect("Node B failed to mount archive")
(await nodeB.mountRelay()).expect("Node B failed to mount relay")
(await nodeB.mountLightpush()).expect("Node B failed to mount lightpush")
await nodeB.mountFilter()
await nodeB.mountStore()
require nodeB.mountAutoSharding(1, 8).isOk
nodeB.mountMetadata(1, toSeq(0'u16 ..< 8'u16)).expect(
"Node B failed to mount metadata"
)
await nodeB.start()
let monitorA = NodeHealthMonitor.new(nodeA)
var
lastStatus = ConnectionStatus.Disconnected
healthSignal = newAsyncEvent()
monitorA.onConnectionStatusChange = proc(status: ConnectionStatus) {.async.} =
lastStatus = status
healthSignal.fire()
monitorA.startHealthMonitor().expect("Health monitor failed to start")
var metadataFut = newFuture[void]("waitForMetadata")
let metadataLis = WakuPeerEvent
.listen(
nodeA.brokerCtx,
proc(evt: WakuPeerEvent): Future[void] {.async: (raises: []), gcsafe.} =
if not metadataFut.finished and
evt.kind == WakuPeerEventKind.EventMetadataUpdated:
metadataFut.complete()
,
)
.expect("Failed to listen for metadata")
await nodeA.connectToNodes(@[nodeB.switch.peerInfo.toRemotePeerInfo()])
let metadataOk = await metadataFut.withTimeout(TestConnectivityTimeLimit)
WakuPeerEvent.dropListener(nodeA.brokerCtx, metadataLis)
require metadataOk
var deadline = Moment.now() + TestConnectivityTimeLimit
while Moment.now() < deadline:
if lastStatus == ConnectionStatus.PartiallyConnected:
break
if await healthSignal.wait().withTimeout(deadline - Moment.now()):
healthSignal.clear()
check lastStatus == ConnectionStatus.PartiallyConnected
var shardHealthFut = newFuture[EventShardTopicHealthChange]("waitForShardHealth")
let shardHealthLis = EventShardTopicHealthChange
.listen(
nodeA.brokerCtx,
proc(
evt: EventShardTopicHealthChange
): Future[void] {.async: (raises: []), gcsafe.} =
if not shardHealthFut.finished and (
evt.health == TopicHealth.MINIMALLY_HEALTHY or
evt.health == TopicHealth.SUFFICIENTLY_HEALTHY
):
shardHealthFut.complete(evt)
,
)
.expect("Failed to listen for shard health")
let contentTopic = ContentTopic("/waku/2/default-content/proto")
subMgr.subscribe(contentTopic).expect("Failed to subscribe")
let shardHealthOk = await shardHealthFut.withTimeout(TestConnectivityTimeLimit)
EventShardTopicHealthChange.dropListener(nodeA.brokerCtx, shardHealthLis)
check shardHealthOk == true
check subMgr.edgeFilterSubStates.len > 0
healthSignal.clear()
deadline = Moment.now() + TestConnectivityTimeLimit
while Moment.now() < deadline:
if lastStatus == ConnectionStatus.PartiallyConnected:
break
if await healthSignal.wait().withTimeout(deadline - Moment.now()):
healthSignal.clear()
check lastStatus == ConnectionStatus.PartiallyConnected
await ds.stopDeliveryService()
await monitorA.stopHealthMonitor()
await nodeB.stop()
await nodeA.stop()

File diff suppressed because it is too large Load Diff

View File

@ -14,7 +14,6 @@ import
waku/[
waku_core/topics/pubsub_topic,
waku_core/topics/sharding,
waku_store_legacy/common,
node/waku_node,
node/kernel_api,
common/paging,
@ -454,29 +453,33 @@ suite "Sharding":
# Given one query for each content topic format
let
historyQuery1 = HistoryQuery(
storeQuery1 = StoreQueryRequest(
contentTopics: @[contentTopicShort],
direction: PagingDirection.Forward,
pageSize: 3,
paginationForward: PagingDirection.Forward,
paginationLimit: some(3'u64),
includeData: true,
)
historyQuery2 = HistoryQuery(
storeQuery2 = StoreQueryRequest(
contentTopics: @[contentTopicFull],
direction: PagingDirection.Forward,
pageSize: 3,
paginationForward: PagingDirection.Forward,
paginationLimit: some(3'u64),
includeData: true,
)
# When the client queries the server for the messages
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
queryResponse1 = await client.query(storeQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(storeQuery2, serverRemotePeerInfo)
assertResultOk(queryResponse1)
assertResultOk(queryResponse2)
# Then the responses of both queries should contain all the messages
check:
queryResponse1.get().messages == archiveMessages1 & archiveMessages2
queryResponse2.get().messages == archiveMessages1 & archiveMessages2
queryResponse1.get().messages.mapIt(it.message.get()) ==
archiveMessages1 & archiveMessages2
queryResponse2.get().messages.mapIt(it.message.get()) ==
archiveMessages1 & archiveMessages2
asyncTest "relay - exclusion (automatic sharding filtering)":
# Given a connected server and client subscribed to different content topics
@ -615,29 +618,31 @@ suite "Sharding":
# Given one query for each content topic
let
historyQuery1 = HistoryQuery(
storeQuery1 = StoreQueryRequest(
contentTopics: @[contentTopic1],
direction: PagingDirection.Forward,
pageSize: 2,
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
historyQuery2 = HistoryQuery(
storeQuery2 = StoreQueryRequest(
contentTopics: @[contentTopic2],
direction: PagingDirection.Forward,
pageSize: 2,
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
# When the client queries the server for the messages
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
queryResponse1 = await client.query(storeQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(storeQuery2, serverRemotePeerInfo)
assertResultOk(queryResponse1)
assertResultOk(queryResponse2)
# Then each response should contain only the messages of the corresponding content topic
check:
queryResponse1.get().messages == archiveMessages1
queryResponse2.get().messages == archiveMessages2
queryResponse1.get().messages.mapIt(it.message.get()) == archiveMessages1
queryResponse2.get().messages.mapIt(it.message.get()) == archiveMessages2
suite "Specific Tests":
asyncTest "Configure Node with Multiple PubSub Topics":
@ -1003,22 +1008,30 @@ suite "Sharding":
# Given one query for each pubsub topic
let
historyQuery1 = HistoryQuery(
pubsubTopic: some(topic1), direction: PagingDirection.Forward, pageSize: 2
storeQuery1 = StoreQueryRequest(
pubsubTopic: some(topic1),
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
historyQuery2 = HistoryQuery(
pubsubTopic: some(topic2), direction: PagingDirection.Forward, pageSize: 2
storeQuery2 = StoreQueryRequest(
pubsubTopic: some(topic2),
paginationForward: PagingDirection.Forward,
paginationLimit: some(2'u64),
includeData: true,
)
# When the client queries the server for the messages
let
serverRemotePeerInfo = server.switch.peerInfo.toRemotePeerInfo()
queryResponse1 = await client.query(historyQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(historyQuery2, serverRemotePeerInfo)
queryResponse1 = await client.query(storeQuery1, serverRemotePeerInfo)
queryResponse2 = await client.query(storeQuery2, serverRemotePeerInfo)
assertResultOk(queryResponse1)
assertResultOk(queryResponse2)
# Then each response should contain only the messages of the corresponding pubsub topic
check:
queryResponse1.get().messages == archiveMessages1[0 ..< 1]
queryResponse2.get().messages == archiveMessages2[0 ..< 1]
queryResponse1.get().messages.mapIt(it.message.get()) ==
archiveMessages1[0 ..< 1]
queryResponse2.get().messages.mapIt(it.message.get()) ==
archiveMessages2[0 ..< 1]

View File

@ -1,6 +1,6 @@
import chronos
import waku/[waku_core/message, waku_store, waku_store_legacy]
import waku/[waku_core/message, waku_store]
const
FUTURE_TIMEOUT* = 1.seconds
@ -18,9 +18,6 @@ proc newBoolFuture*(): Future[bool] =
proc newHistoryFuture*(): Future[StoreQueryRequest] =
newFuture[StoreQueryRequest]()
proc newLegacyHistoryFuture*(): Future[waku_store_legacy.HistoryQuery] =
newFuture[waku_store_legacy.HistoryQuery]()
proc toResult*[T](future: Future[T]): Result[T, string] =
if future.cancelled():
return chronos.err("Future timeouted before completing.")

View File

@ -1,27 +0,0 @@
import chronicles, chronos
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver as driver_module,
waku/waku_archive_legacy/driver/builder,
waku/waku_archive_legacy/driver/postgres_driver
const storeMessageDbUrl = "postgres://postgres:test123@localhost:5432/postgres"
proc newTestPostgresDriver*(): Future[Result[ArchiveDriver, string]] {.
async, deprecated
.} =
proc onErr(errMsg: string) {.gcsafe, closure.} =
error "error creating ArchiveDriver", error = errMsg
quit(QuitFailure)
let
vacuum = false
migrate = true
maxNumConn = 50
let driverRes =
await ArchiveDriver.new(storeMessageDbUrl, vacuum, migrate, maxNumConn, onErr)
if driverRes.isErr():
onErr("could not create archive driver: " & driverRes.error)
return ok(driverRes.get())

View File

@ -42,7 +42,6 @@ proc defaultTestWakuConfBuilder*(): WakuConfBuilder =
builder.withRelay(true)
builder.withRendezvous(true)
builder.storeServiceConf.withDbMigration(false)
builder.storeServiceConf.withSupportV2(false)
return builder
proc defaultTestWakuConf*(): WakuConf =

View File

@ -1,55 +0,0 @@
{.used.}
import std/options, results, chronos, libp2p/crypto/crypto
import
waku/[
node/peer_manager,
waku_core,
waku_archive_legacy,
waku_archive_legacy/common,
waku_archive_legacy/driver/sqlite_driver,
waku_archive_legacy/driver/sqlite_driver/migrations,
common/databases/db_sqlite,
],
../testlib/[wakucore]
proc newSqliteDatabase*(path: Option[string] = string.none()): SqliteDatabase =
SqliteDatabase.new(path.get(":memory:")).tryGet()
proc newSqliteArchiveDriver*(): ArchiveDriver =
let database = newSqliteDatabase()
migrate(database).tryGet()
return SqliteDriver.new(database).tryGet()
proc newWakuArchive*(driver: ArchiveDriver): WakuArchive =
WakuArchive.new(driver).get()
proc computeArchiveCursor*(
pubsubTopic: PubsubTopic, message: WakuMessage
): ArchiveCursor =
ArchiveCursor(
pubsubTopic: pubsubTopic,
senderTime: message.timestamp,
storeTime: message.timestamp,
digest: computeDigest(message),
hash: computeMessageHash(pubsubTopic, message),
)
proc put*(
driver: ArchiveDriver, pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
): ArchiveDriver =
for msg in msgList:
let
msgDigest = computeDigest(msg)
msgHash = computeMessageHash(pubsubTopic, msg)
_ = waitFor driver.put(pubsubTopic, msg, msgDigest, msgHash, msg.timestamp)
# discard crashes
return driver
proc newArchiveDriverWithMessages*(
pubsubTopic: PubSubTopic, msgList: seq[WakuMessage]
): ArchiveDriver =
var driver = newSqliteArchiveDriver()
driver = driver.put(pubsubTopic, msgList)
return driver

View File

@ -1,13 +0,0 @@
{.used.}
import
./test_driver_postgres_query,
./test_driver_postgres,
./test_driver_queue_index,
./test_driver_queue_pagination,
./test_driver_queue_query,
./test_driver_queue,
./test_driver_sqlite_query,
./test_driver_sqlite,
./test_retention_policy,
./test_waku_archive

View File

@ -1,220 +0,0 @@
{.used.}
import std/[sequtils, options], testutils/unittests, chronos
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/postgres_driver,
waku/waku_archive/driver/postgres_driver as new_postgres_driver,
waku/waku_core,
waku/waku_core/message/digest,
../testlib/wakucore,
../testlib/testasync,
../testlib/postgres_legacy,
../testlib/postgres as new_postgres
proc computeTestCursor(pubsubTopic: PubsubTopic, message: WakuMessage): ArchiveCursor =
ArchiveCursor(
pubsubTopic: pubsubTopic,
senderTime: message.timestamp,
storeTime: message.timestamp,
digest: computeDigest(message),
hash: computeMessageHash(pubsubTopic, message),
)
suite "Postgres driver":
## Unique driver instance
var driver {.threadvar.}: postgres_driver.PostgresDriver
## We need to artificially create an instance of the "newDriver"
## because this is the only one in charge of creating partitions
## We will clean legacy store soon and this file will get removed.
var newDriver {.threadvar.}: new_postgres_driver.PostgresDriver
asyncSetup:
let driverRes = await postgres_legacy.newTestPostgresDriver()
if driverRes.isErr():
assert false, driverRes.error
driver = postgres_driver.PostgresDriver(driverRes.get())
let newDriverRes = await new_postgres.newTestPostgresDriver()
if driverRes.isErr():
assert false, driverRes.error
newDriver = new_postgres_driver.PostgresDriver(newDriverRes.get())
asyncTeardown:
var resetRes = await driver.reset()
if resetRes.isErr():
assert false, resetRes.error
(await driver.close()).expect("driver to close")
resetRes = await newDriver.reset()
if resetRes.isErr():
assert false, resetRes.error
(await newDriver.close()).expect("driver to close")
asyncTest "Asynchronous queries":
var futures = newSeq[Future[ArchiveDriverResult[void]]](0)
let beforeSleep = now()
for _ in 1 .. 100:
futures.add(driver.sleep(1))
await allFutures(futures)
let diff = now() - beforeSleep
# Actually, the diff randomly goes between 1 and 2 seconds.
# although in theory it should spend 1s because we establish 100
# connections and we spawn 100 tasks that spend ~1s each.
assert diff < 20_000_000_000
asyncTest "Insert a message":
const contentTopic = "test-content-topic"
const meta = "test meta"
let msg = fakeWakuMessage(contentTopic = contentTopic, meta = meta)
let computedDigest = computeDigest(msg)
let computedHash = computeMessageHash(DefaultPubsubTopic, msg)
let putRes = await driver.put(
DefaultPubsubTopic, msg, computedDigest, computedHash, msg.timestamp
)
assert putRes.isOk(), putRes.error
let storedMsg = (await driver.getAllMessages()).tryGet()
assert storedMsg.len == 1
let (pubsubTopic, actualMsg, digest, _, hash) = storedMsg[0]
assert actualMsg.contentTopic == contentTopic
assert pubsubTopic == DefaultPubsubTopic
assert toHex(computedDigest.data) == toHex(digest)
assert toHex(actualMsg.payload) == toHex(msg.payload)
assert toHex(computedHash) == toHex(hash)
assert toHex(actualMsg.meta) == toHex(msg.meta)
asyncTest "Insert and query message":
const contentTopic1 = "test-content-topic-1"
const contentTopic2 = "test-content-topic-2"
const pubsubTopic1 = "pubsubtopic-1"
const pubsubTopic2 = "pubsubtopic-2"
let msg1 = fakeWakuMessage(contentTopic = contentTopic1)
var putRes = await driver.put(
pubsubTopic1,
msg1,
computeDigest(msg1),
computeMessageHash(pubsubTopic1, msg1),
msg1.timestamp,
)
assert putRes.isOk(), putRes.error
let msg2 = fakeWakuMessage(contentTopic = contentTopic2)
putRes = await driver.put(
pubsubTopic2,
msg2,
computeDigest(msg2),
computeMessageHash(pubsubTopic2, msg2),
msg2.timestamp,
)
assert putRes.isOk(), putRes.error
let countMessagesRes = await driver.getMessagesCount()
assert countMessagesRes.isOk(), $countMessagesRes.error
assert countMessagesRes.get() == 2
var messagesRes = await driver.getMessages(contentTopic = @[contentTopic1])
assert messagesRes.isOk(), $messagesRes.error
assert messagesRes.get().len == 1
# Get both content topics, check ordering
messagesRes =
await driver.getMessages(contentTopic = @[contentTopic1, contentTopic2])
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 2
assert messagesRes.get()[0][1].contentTopic == contentTopic1
# Descending order
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2], ascendingOrder = false
)
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 2
assert messagesRes.get()[0][1].contentTopic == contentTopic2
# cursor
# Get both content topics
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2],
cursor = some(computeTestCursor(pubsubTopic1, messagesRes.get()[1][1])),
)
assert messagesRes.isOk()
assert messagesRes.get().len == 1
# Get both content topics but one pubsub topic
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2], pubsubTopic = some(pubsubTopic1)
)
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 1
assert messagesRes.get()[0][1].contentTopic == contentTopic1
# Limit
messagesRes = await driver.getMessages(
contentTopic = @[contentTopic1, contentTopic2], maxPageSize = 1
)
assert messagesRes.isOk(), messagesRes.error
assert messagesRes.get().len == 1
asyncTest "Insert true duplicated messages":
# Validates that two completely equal messages can not be stored.
let now = now()
let msg1 = fakeWakuMessage(ts = now)
let msg2 = fakeWakuMessage(ts = now)
let initialNumMsgs = (await driver.getMessagesCount()).valueOr:
raiseAssert "could not get num mgs correctly: " & $error
var putRes = await driver.put(
DefaultPubsubTopic,
msg1,
computeDigest(msg1),
computeMessageHash(DefaultPubsubTopic, msg1),
msg1.timestamp,
)
assert putRes.isOk(), putRes.error
var newNumMsgs = (await driver.getMessagesCount()).valueOr:
raiseAssert "could not get num mgs correctly: " & $error
assert newNumMsgs == (initialNumMsgs + 1.int64),
"wrong number of messages: " & $newNumMsgs
putRes = await driver.put(
DefaultPubsubTopic,
msg2,
computeDigest(msg2),
computeMessageHash(DefaultPubsubTopic, msg2),
msg2.timestamp,
)
assert putRes.isOk()
newNumMsgs = (await driver.getMessagesCount()).valueOr:
raiseAssert "could not get num mgs correctly: " & $error
assert newNumMsgs == (initialNumMsgs + 1.int64),
"wrong number of messages: " & $newNumMsgs

File diff suppressed because it is too large Load Diff

View File

@ -1,182 +0,0 @@
{.used.}
import std/options, results, testutils/unittests
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.},
waku/waku_archive_legacy/driver/queue_driver/index,
waku/waku_core
# Helper functions
proc genIndexedWakuMessage(i: int8): (Index, WakuMessage) =
## Use i to generate an Index WakuMessage
var data {.noinit.}: array[32, byte]
for x in data.mitems:
x = i.byte
let
message = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
topic = "test-pubsub-topic"
cursor = Index(
receiverTime: Timestamp(i),
senderTime: Timestamp(i),
digest: MessageDigest(data: data),
pubsubTopic: topic,
hash: computeMessageHash(topic, message),
)
(cursor, message)
proc getPrepopulatedTestQueue(unsortedSet: auto, capacity: int): QueueDriver =
let driver = QueueDriver.new(capacity)
for i in unsortedSet:
let (index, message) = genIndexedWakuMessage(i.int8)
discard driver.add(index, message)
driver
procSuite "Sorted driver queue":
test "queue capacity - add a message over the limit":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
# Fill up the queue
for i in 1 .. capacity:
let (index, message) = genIndexedWakuMessage(i.int8)
require(driver.add(index, message).isOk())
# Add one more. Capacity should not be exceeded
let (index, message) = genIndexedWakuMessage(capacity.int8 + 1)
require(driver.add(index, message).isOk())
## Then
check:
driver.len == capacity
test "queue capacity - add message older than oldest in the queue":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
# Fill up the queue
for i in 1 .. capacity:
let (index, message) = genIndexedWakuMessage(i.int8)
require(driver.add(index, message).isOk())
# Attempt to add message with older value than oldest in queue should fail
let
oldestTimestamp = driver.first().get().senderTime
(index, message) = genIndexedWakuMessage(oldestTimestamp.int8 - 1)
addRes = driver.add(index, message)
## Then
check:
addRes.isErr()
addRes.error() == "too_old"
check:
driver.len == capacity
test "queue sort-on-insert":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
# Walk forward through the set and verify ascending order
var (prevSmaller, _) = genIndexedWakuMessage(min(unsortedSet).int8 - 1)
for i in driver.fwdIterator:
let (index, _) = i
check cmp(index, prevSmaller) > 0
prevSmaller = index
# Walk backward through the set and verify descending order
var (prevLarger, _) = genIndexedWakuMessage(max(unsortedSet).int8 + 1)
for i in driver.bwdIterator:
let (index, _) = i
check cmp(index, prevLarger) < 0
prevLarger = index
test "access first item from queue":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
## When
let firstRes = driver.first()
## Then
check:
firstRes.isOk()
let first = firstRes.tryGet()
check:
first.senderTime == Timestamp(1)
test "get first item from empty queue should fail":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
let firstRes = driver.first()
## Then
check:
firstRes.isErr()
firstRes.error() == "Not found"
test "access last item from queue":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
## When
let lastRes = driver.last()
## Then
check:
lastRes.isOk()
let last = lastRes.tryGet()
check:
last.senderTime == Timestamp(5)
test "get last item from empty queue should fail":
## Given
let capacity = 5
let driver = QueueDriver.new(capacity)
## When
let lastRes = driver.last()
## Then
check:
lastRes.isErr()
lastRes.error() == "Not found"
test "verify if queue contains an index":
## Given
let
capacity = 5
unsortedSet = [5, 1, 3, 2, 4]
let driver = getPrepopulatedTestQueue(unsortedSet, capacity)
let
(existingIndex, _) = genIndexedWakuMessage(4)
(nonExistingIndex, _) = genIndexedWakuMessage(99)
## Then
check:
driver.contains(existingIndex) == true
driver.contains(nonExistingIndex) == false

View File

@ -1,219 +0,0 @@
{.used.}
import std/[times, random], stew/byteutils, testutils/unittests, nimcrypto
import waku/waku_core, waku/waku_archive_legacy/driver/queue_driver/index
var rng = initRand()
## Helpers
proc getTestTimestamp(offset = 0): Timestamp =
let now = getNanosecondTime(epochTime() + float(offset))
Timestamp(now)
proc hashFromStr(input: string): MDigest[256] =
var ctx: sha256
ctx.init()
ctx.update(input.toBytes())
let hashed = ctx.finish()
ctx.clear()
return hashed
proc randomHash(): WakuMessageHash =
var hash: WakuMessageHash
for i in 0 ..< hash.len:
let numb: byte = byte(rng.next())
hash[i] = numb
hash
suite "Queue Driver - index":
## Test vars
let
smallIndex1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
smallIndex2 = Index(
digest: hashFromStr("1234567"), # digest is less significant than senderTime
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
largeIndex1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(9000),
hash: randomHash(),
) # only senderTime differ from smallIndex1
largeIndex2 = Index(
digest: hashFromStr("12345"), # only digest differs from smallIndex1
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
hash: randomHash(),
)
eqIndex1 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
eqIndex2 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
eqIndex3 = Index(
digest: hashFromStr("0003"),
receiverTime: getNanosecondTime(9999),
# receiverTime difference should have no effect on comparisons
senderTime: getNanosecondTime(54321),
hash: randomHash(),
)
diffPsTopic = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(0),
senderTime: getNanosecondTime(1000),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime1 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(1100),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime2 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(10000),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
noSenderTime3 = Index(
digest: hashFromStr("1234"),
receiverTime: getNanosecondTime(1200),
senderTime: getNanosecondTime(0),
pubsubTopic: "aaaa",
hash: randomHash(),
)
noSenderTime4 = Index(
digest: hashFromStr("0"),
receiverTime: getNanosecondTime(1200),
senderTime: getNanosecondTime(0),
pubsubTopic: "zzzz",
hash: randomHash(),
)
test "Index comparison":
# Index comparison with senderTime diff
check:
cmp(smallIndex1, largeIndex1) < 0
cmp(smallIndex2, largeIndex1) < 0
# Index comparison with digest diff
check:
cmp(smallIndex1, smallIndex2) < 0
cmp(smallIndex1, largeIndex2) < 0
cmp(smallIndex2, largeIndex2) > 0
cmp(largeIndex1, largeIndex2) > 0
# Index comparison when equal
check:
cmp(eqIndex1, eqIndex2) == 0
# pubsubTopic difference
check:
cmp(smallIndex1, diffPsTopic) < 0
# receiverTime diff plays no role when senderTime set
check:
cmp(eqIndex1, eqIndex3) == 0
# receiverTime diff plays no role when digest/pubsubTopic equal
check:
cmp(noSenderTime1, noSenderTime2) == 0
# sort on receiverTime with no senderTimestamp and unequal pubsubTopic
check:
cmp(noSenderTime1, noSenderTime3) < 0
# sort on receiverTime with no senderTimestamp and unequal digest
check:
cmp(noSenderTime1, noSenderTime4) < 0
# sort on receiverTime if no senderTimestamp on only one side
check:
cmp(smallIndex1, noSenderTime1) < 0
cmp(noSenderTime1, smallIndex1) > 0 # Test symmetry
cmp(noSenderTime2, eqIndex3) < 0
cmp(eqIndex3, noSenderTime2) > 0 # Test symmetry
test "Index equality":
# Exactly equal
check:
eqIndex1 == eqIndex2
# Receiver time plays no role, even without sender time
check:
eqIndex1 == eqIndex3
noSenderTime1 == noSenderTime2 # only receiver time differs, indices are equal
noSenderTime1 != noSenderTime3 # pubsubTopics differ
noSenderTime1 != noSenderTime4 # digests differ
# Unequal sender time
check:
smallIndex1 != largeIndex1
# Unequal digest
check:
smallIndex1 != smallIndex2
# Unequal hash and digest
check:
smallIndex1 != eqIndex1
# Unequal pubsubTopic
check:
smallIndex1 != diffPsTopic
test "Index computation should not be empty":
## Given
let ts = getTestTimestamp()
let wm = WakuMessage(payload: @[byte 1, 2, 3], timestamp: ts)
## When
let ts2 = getTestTimestamp() + 10
let index = Index.compute(wm, ts2, DefaultContentTopic)
## Then
check:
index.digest.data.len != 0
index.digest.data.len == 32 # sha2 output length in bytes
index.receiverTime == ts2 # the receiver timestamp should be a non-zero value
index.senderTime == ts
index.pubsubTopic == DefaultContentTopic
test "Index digest of two identical messsage should be the same":
## Given
let topic = ContentTopic("test-content-topic")
let
wm1 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
wm2 = WakuMessage(payload: @[byte 1, 2, 3], contentTopic: topic)
## When
let ts = getTestTimestamp()
let
index1 = Index.compute(wm1, ts, DefaultPubsubTopic)
index2 = Index.compute(wm2, ts, DefaultPubsubTopic)
## Then
check:
index1.digest == index2.digest

View File

@ -1,405 +0,0 @@
{.used.}
import
std/[options, sequtils, algorithm], testutils/unittests, libp2p/protobuf/minprotobuf
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/queue_driver/queue_driver {.all.},
waku/waku_archive_legacy/driver/queue_driver/index,
waku/waku_core,
../testlib/wakucore
proc getTestQueueDriver(numMessages: int): QueueDriver =
let testQueueDriver = QueueDriver.new(numMessages)
var data {.noinit.}: array[32, byte]
for x in data.mitems:
x = 1
for i in 0 ..< numMessages:
let msg = WakuMessage(payload: @[byte i], timestamp: Timestamp(i))
let index = Index(
receiverTime: Timestamp(i),
senderTime: Timestamp(i),
digest: MessageDigest(data: data),
hash: computeMessageHash(DefaultPubsubTopic, msg),
)
discard testQueueDriver.add(index, msg)
return testQueueDriver
procSuite "Queue driver - pagination":
let driver = getTestQueueDriver(10)
let
indexList: seq[Index] = toSeq(driver.fwdIterator()).mapIt(it[0])
msgList: seq[WakuMessage] = toSeq(driver.fwdIterator()).mapIt(it[1])
test "Forward pagination - normal pagination":
## Given
let
pageSize: uint = 2
cursor: Option[Index] = some(indexList[3])
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 2
data == msgList[4 .. 5]
test "Forward pagination - initial pagination request with an empty cursor":
## Given
let
pageSize: uint = 2
cursor: Option[Index] = none(Index)
forward: bool = true
## When
let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)
## Then
let data = page.tryGet().mapIt(it[1])
check:
data.len == 2
data == msgList[0 .. 1]
test "Forward pagination - initial pagination request with an empty cursor to fetch the entire history":
  ## A page size exceeding the stored history returns every message, in order.
  let pageRes =
    driver.getPage(pageSize = uint(13), forward = true, cursor = none(Index))
  let messages = pageRes.tryGet().mapIt(it[1])
  check:
    messages.len == 10
    messages == msgList[0 .. 9]

test "Forward pagination - empty msgList":
  ## Paging an empty store yields an empty page.
  let emptyDriver = getTestQueueDriver(0)
  let pageRes =
    emptyDriver.getPage(pageSize = uint(2), forward = true, cursor = none(Index))
  check:
    pageRes.tryGet().mapIt(it[1]).len == 0

test "Forward pagination - page size larger than the remaining messages":
  ## Only the messages strictly after the cursor are returned.
  let pageRes =
    driver.getPage(pageSize = uint(10), forward = true, cursor = some(indexList[3]))
  let messages = pageRes.tryGet().mapIt(it[1])
  check:
    messages.len == 6
    messages == msgList[4 .. 9]

test "Forward pagination - page size larger than the maximum allowed page size":
  ## Oversized requests are clamped to at most MaxPageSize results.
  let pageRes = driver.getPage(
    pageSize = MaxPageSize + 1, forward = true, cursor = some(indexList[3])
  )
  check:
    uint(pageRes.tryGet().mapIt(it[1]).len) <= MaxPageSize

test "Forward pagination - cursor pointing to the end of the message list":
  ## Nothing remains after the last stored index.
  let pageRes =
    driver.getPage(pageSize = uint(10), forward = true, cursor = some(indexList[9]))
  check:
    pageRes.tryGet().mapIt(it[1]).len == 0

test "Forward pagination - invalid cursor":
  ## A cursor built from a message that was never stored must be rejected.
  let strayMsg = fakeWakuMessage(payload = @[byte 10])
  let strayIndex = ArchiveCursor(
    pubsubTopic: DefaultPubsubTopic,
    senderTime: strayMsg.timestamp,
    storeTime: strayMsg.timestamp,
    digest: computeDigest(strayMsg),
  ).toIndex()
  let pageRes =
    driver.getPage(pageSize = uint(10), forward = true, cursor = some(strayIndex))
  check:
    pageRes.tryError() == QueueDriverErrorKind.INVALID_CURSOR

test "Forward pagination - initial paging query over a message list with one message":
  ## The single stored message is returned on the first page.
  let singleDriver = getTestQueueDriver(1)
  let pageRes =
    singleDriver.getPage(pageSize = uint(10), forward = true, cursor = none(Index))
  check:
    pageRes.tryGet().mapIt(it[1]).len == 1

test "Forward pagination - pagination over a message list with one message":
  ## Nothing follows the only stored message.
  let singleDriver = getTestQueueDriver(1)
  let pageRes = singleDriver.getPage(
    pageSize = uint(10), forward = true, cursor = some(indexList[0])
  )
  check:
    pageRes.tryGet().mapIt(it[1]).len == 0
test "Forward pagination - with predicate":
  ## Given
  # NOTE(review): fixed typo in the test name ("pradicate" -> "predicate") and
  # added the explicit `: bool` annotation for consistency with sibling tests.
  let
    pageSize: uint = 3
    cursor: Option[Index] = none(Index)
    forward: bool = true

  # Keep only messages whose fixture-assigned timestamp is even.
  proc onlyEvenTimes(index: Index, msg: WakuMessage): bool =
    msg.timestamp.int64 mod 2 == 0

  ## When
  let page = driver.getPage(
    pageSize = pageSize, forward = forward, cursor = cursor, predicate = onlyEvenTimes
  )

  ## Then
  let data = page.tryGet().mapIt(it[1])
  check:
    data.mapIt(it.timestamp.int) == @[0, 2, 4]
test "Backward pagination - normal pagination":
  ## The two messages immediately before the cursor, newest first.
  let pageRes =
    driver.getPage(pageSize = uint(2), forward = false, cursor = some(indexList[3]))
  check:
    pageRes.tryGet().mapIt(it[1]) == msgList[1 .. 2].reversed

test "Backward pagination - empty msgList":
  ## Paging an empty store yields an empty page.
  let emptyDriver = getTestQueueDriver(0)
  let pageRes =
    emptyDriver.getPage(pageSize = uint(2), forward = false, cursor = none(Index))
  check:
    pageRes.tryGet().mapIt(it[1]).len == 0

test "Backward pagination - initial pagination request with an empty cursor":
  ## Without a cursor, backward paging starts from the newest messages.
  let pageRes =
    driver.getPage(pageSize = uint(2), forward = false, cursor = none(Index))
  let messages = pageRes.tryGet().mapIt(it[1])
  check:
    messages.len == 2
    messages == msgList[8 .. 9].reversed

test "Backward pagination - initial pagination request with an empty cursor to fetch the entire history":
  ## A page size exceeding the history returns everything, newest first.
  let pageRes =
    driver.getPage(pageSize = uint(13), forward = false, cursor = none(Index))
  let messages = pageRes.tryGet().mapIt(it[1])
  check:
    messages.len == 10
    messages == msgList[0 .. 9].reversed

test "Backward pagination - page size larger than the remaining messages":
  ## Only the messages strictly before the cursor are returned.
  let pageRes =
    driver.getPage(pageSize = uint(5), forward = false, cursor = some(indexList[3]))
  check:
    pageRes.tryGet().mapIt(it[1]) == msgList[0 .. 2].reversed
test "Backward pagination - page size larger than the maximum allowed page size":
  ## Given
  # NOTE(review): lower-cased "Maximum" in the test name for consistency with
  # the forward-pagination counterpart of this test.
  let
    pageSize: uint = MaxPageSize + 1
    cursor: Option[Index] = some(indexList[3])
    forward: bool = false

  ## When
  let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

  ## Then
  # Oversized requests must be clamped to at most MaxPageSize results.
  let data = page.tryGet().mapIt(it[1])
  check:
    uint(data.len) <= MaxPageSize
test "Backward pagination - cursor pointing to the beginning of the message list":
  ## Given
  # NOTE(review): fixed typo in the test name ("begining" -> "beginning").
  let
    pageSize: uint = 5
    cursor: Option[Index] = some(indexList[0])
    forward: bool = false

  ## When
  let page = driver.getPage(pageSize = pageSize, forward = forward, cursor = cursor)

  ## Then
  # Nothing precedes the first stored message.
  let data = page.tryGet().mapIt(it[1])
  check:
    data.len == 0
test "Backward pagination - invalid cursor":
  ## A cursor referencing an unknown message must be rejected.
  let strayMsg = fakeWakuMessage(payload = @[byte 10])
  let strayIndex = ArchiveCursor(
    pubsubTopic: DefaultPubsubTopic,
    senderTime: strayMsg.timestamp,
    storeTime: strayMsg.timestamp,
    digest: computeDigest(strayMsg),
  ).toIndex()
  let pageRes =
    driver.getPage(pageSize = uint(2), forward = false, cursor = some(strayIndex))
  check:
    pageRes.tryError() == QueueDriverErrorKind.INVALID_CURSOR

test "Backward pagination - initial paging query over a message list with one message":
  ## The single stored message is returned on the first page.
  let singleDriver = getTestQueueDriver(1)
  let pageRes =
    singleDriver.getPage(pageSize = uint(10), forward = false, cursor = none(Index))
  check:
    pageRes.tryGet().mapIt(it[1]).len == 1

test "Backward pagination - paging query over a message list with one message":
  ## Nothing precedes the only stored message.
  let singleDriver = getTestQueueDriver(1)
  let pageRes = singleDriver.getPage(
    pageSize = uint(10), forward = false, cursor = some(indexList[0])
  )
  check:
    pageRes.tryGet().mapIt(it[1]).len == 0

test "Backward pagination - with predicate":
  ## Only odd-timestamp messages pass the filter; results come newest first.
  proc onlyOddTimes(index: Index, msg: WakuMessage): bool =
    msg.timestamp.int64 mod 2 != 0

  let pageRes = driver.getPage(
    pageSize = uint(3), forward = false, cursor = none(Index), predicate = onlyOddTimes
  )
  check:
    pageRes.tryGet().mapIt(it[1]).mapIt(it.timestamp.int) == @[5, 7, 9].reversed

File diff suppressed because it is too large Load Diff

View File

@ -1,58 +0,0 @@
{.used.}
import std/sequtils, testutils/unittests, chronos
import
waku/waku_archive_legacy,
waku/waku_archive_legacy/driver/sqlite_driver,
waku/waku_core,
../waku_archive_legacy/archive_utils,
../testlib/wakucore
suite "SQLite driver":
  test "init driver and database":
    ## A fresh database must yield a usable, non-nil archive driver.
    let database = newSqliteDatabase()
    let initRes = SqliteDriver.new(database)
    check initRes.isOk()
    let archiveDriver: ArchiveDriver = initRes.tryGet()
    check not archiveDriver.isNil()
    ## Cleanup
    (waitFor archiveDriver.close()).expect("driver to close")

  test "insert a message":
    ## A stored message must be retrievable with topic, hash and meta intact.
    const testContentTopic = "test-content-topic"
    const testMeta = "test meta"
    let driver = newSqliteArchiveDriver()
    let msg = fakeWakuMessage(contentTopic = testContentTopic, meta = testMeta)
    let expectedHash = computeMessageHash(DefaultPubsubTopic, msg)
    let putRes = waitFor driver.put(
      DefaultPubsubTopic, msg, computeDigest(msg), expectedHash, msg.timestamp
    )
    check putRes.isOk()
    let storedMsg = (waitFor driver.getAllMessages()).tryGet()
    check:
      storedMsg.len == 1
      storedMsg.all do(row: auto) -> bool:
        let (topic, actualMsg, _, _, hash) = row
        topic == DefaultPubsubTopic and actualMsg.contentTopic == testContentTopic and
          hash == expectedHash and actualMsg.meta == msg.meta
    ## Cleanup
    (waitFor driver.close()).expect("driver to close")

File diff suppressed because it is too large Load Diff

View File

@ -1,532 +0,0 @@
{.used.}
import std/[options, sequtils], testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/common/paging,
waku/waku_core,
waku/waku_core/message/digest,
waku/waku_archive_legacy,
../waku_archive_legacy/archive_utils,
../testlib/wakucore
suite "Waku Archive - message handling":
  test "it should archive a valid and non-ephemeral message":
    ## A current-time, non-ephemeral message must be stored.
    let driver = newSqliteArchiveDriver()
    let archive = newWakuArchive(driver)
    let message = fakeWakuMessage(ephemeral = false, ts = now())
    waitFor archive.handleMessage(DefaultPubSubTopic, message)
    check:
      (waitFor driver.getMessagesCount()).tryGet() == 1

  test "it should not archive ephemeral messages":
    ## Of the five messages below, only the two non-ephemeral ones are stored.
    let driver = newSqliteArchiveDriver()
    let archive = newWakuArchive(driver)
    let batch = @[
      fakeWakuMessage(ephemeral = false, payload = "1"),
      fakeWakuMessage(ephemeral = true, payload = "2"),
      fakeWakuMessage(ephemeral = true, payload = "3"),
      fakeWakuMessage(ephemeral = true, payload = "4"),
      fakeWakuMessage(ephemeral = false, payload = "5"),
    ]
    for msg in batch:
      waitFor archive.handleMessage(DefaultPubsubTopic, msg)
    check:
      (waitFor driver.getMessagesCount()).tryGet() == 2

  test "it should archive a message with no sender timestamp":
    ## A zero sender timestamp is treated as "unset", not as out of range.
    let driver = newSqliteArchiveDriver()
    let archive = newWakuArchive(driver)
    let message = fakeWakuMessage(ts = 0)
    waitFor archive.handleMessage(DefaultPubSubTopic, message)
    check:
      (waitFor driver.getMessagesCount()).tryGet() == 1

  test "it should not archive a message with a sender time variance greater than max time variance (future)":
    ## Sender timestamp 1 second beyond the allowed future variance.
    let driver = newSqliteArchiveDriver()
    let archive = newWakuArchive(driver)
    let futureTime = now() + MaxMessageTimestampVariance + 1_000_000_000
    waitFor archive.handleMessage(DefaultPubSubTopic, fakeWakuMessage(ts = futureTime))
    check:
      (waitFor driver.getMessagesCount()).tryGet() == 0

  test "it should not archive a message with a sender time variance greater than max time variance (past)":
    ## Sender timestamp just past the allowed variance into the past.
    let driver = newSqliteArchiveDriver()
    let archive = newWakuArchive(driver)
    let pastTime = now() - MaxMessageTimestampVariance - 1
    waitFor archive.handleMessage(DefaultPubSubTopic, fakeWakuMessage(ts = pastTime))
    check:
      (waitFor driver.getMessagesCount()).tryGet() == 0
procSuite "Waku Archive - find messages":
  ## Fixtures
  # Deterministic timeline: message i carries payload @[byte i], content topic
  # alternating "2"/"1" (even/odd index), timestamp ts(i*10, timeOrigin).
  let timeOrigin = now()
  let msgListA = @[
    fakeWakuMessage(
      @[byte 00], contentTopic = ContentTopic("2"), ts = ts(00, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 01], contentTopic = ContentTopic("1"), ts = ts(10, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 02], contentTopic = ContentTopic("2"), ts = ts(20, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 03], contentTopic = ContentTopic("1"), ts = ts(30, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 04], contentTopic = ContentTopic("2"), ts = ts(40, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 05], contentTopic = ContentTopic("1"), ts = ts(50, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 06], contentTopic = ContentTopic("2"), ts = ts(60, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 07], contentTopic = ContentTopic("1"), ts = ts(70, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 08], contentTopic = ContentTopic("2"), ts = ts(80, timeOrigin)
    ),
    fakeWakuMessage(
      @[byte 09], contentTopic = ContentTopic("1"), ts = ts(90, timeOrigin)
    ),
  ]
  # Archive pre-populated with msgListA, shared by the read-only query tests.
  let archiveA = block:
    let
      driver = newSqliteArchiveDriver()
      archive = newWakuArchive(driver)
    for msg in msgListA:
      require (
        waitFor driver.put(
          DefaultPubsubTopic,
          msg,
          computeDigest(msg),
          computeMessageHash(DefaultPubsubTopic, msg),
          msg.timestamp,
        )
      ).isOk()
    archive
test "handle query":
  ## Only messages matching the requested content topic are returned.
  let
    driver = newSqliteArchiveDriver()
    archive = newWakuArchive(driver)
  let queriedTopic = ContentTopic("1")
  let
    matching = fakeWakuMessage(contentTopic = queriedTopic)
    nonMatching = fakeWakuMessage()
  waitFor archive.handleMessage("foo", matching)
  waitFor archive.handleMessage("foo", nonMatching)
  let queryRes = waitFor archive.findMessages(
    ArchiveQuery(includeData: true, contentTopics: @[queriedTopic])
  )
  check queryRes.isOk()
  let response = queryRes.tryGet()
  check:
    response.messages.len == 1
    response.messages == @[matching]

test "handle query with multiple content filters":
  ## Content topics within one query are OR-ed together.
  let
    driver = newSqliteArchiveDriver()
    archive = newWakuArchive(driver)
  let
    topicA = ContentTopic("1")
    topicB = ContentTopic("2")
    topicC = ContentTopic("3")
  let
    msgA = fakeWakuMessage(contentTopic = topicA)
    msgB = fakeWakuMessage(contentTopic = topicB)
    msgC = fakeWakuMessage(contentTopic = topicC)
  for msg in [msgA, msgB, msgC]:
    waitFor archive.handleMessage("foo", msg)
  let queryRes = waitFor archive.findMessages(
    ArchiveQuery(includeData: true, contentTopics: @[topicA, topicC])
  )
  check queryRes.isOk()
  let response = queryRes.tryGet()
  check:
    response.messages.len() == 2
    response.messages.anyIt(it == msgA)
    response.messages.anyIt(it == msgC)

test "handle query with more than 10 content filters":
  ## The archive rejects queries exceeding the content-topic limit.
  let
    driver = newSqliteArchiveDriver()
    archive = newWakuArchive(driver)
  let tooManyTopics = toSeq(1 .. 15).mapIt(ContentTopic($it))
  let queryRes =
    waitFor archive.findMessages(ArchiveQuery(contentTopics: tooManyTopics))
  check queryRes.isErr()
  let error = queryRes.tryError()
  check:
    error.kind == ArchiveErrorKind.INVALID_QUERY
    error.cause == "too many content topics"
test "handle query with pubsub topic filter":
  ## Setup
  let
    driver = newSqliteArchiveDriver()
    archive = newWakuArchive(driver)
  let
    pubsubTopic1 = "queried-topic"
    pubsubTopic2 = "non-queried-topic"
  let
    contentTopic1 = ContentTopic("1")
    contentTopic2 = ContentTopic("2")
    contentTopic3 = ContentTopic("3")
  let
    msg1 = fakeWakuMessage(contentTopic = contentTopic1)
    msg2 = fakeWakuMessage(contentTopic = contentTopic2)
    msg3 = fakeWakuMessage(contentTopic = contentTopic3)
  # NOTE(review): unified identifier spelling to `pubsubTopic1`/`pubsubTopic2`
  # to match the declarations; the previous `pubsubtopic1` spelling compiled
  # only via Nim's style-insensitivity and fails under `--styleCheck:error`.
  waitFor archive.handleMessage(pubsubTopic1, msg1)
  waitFor archive.handleMessage(pubsubTopic2, msg2)
  waitFor archive.handleMessage(pubsubTopic2, msg3)

  ## Given
  # This query targets: pubsubTopic1 AND (contentTopic1 OR contentTopic3)
  let req = ArchiveQuery(
    includeData: true,
    pubsubTopic: some(pubsubTopic1),
    contentTopics: @[contentTopic1, contentTopic3],
  )

  ## When
  let queryRes = waitFor archive.findMessages(req)

  ## Then
  check:
    queryRes.isOk()
  let response = queryRes.tryGet()
  check:
    response.messages.len() == 1
    response.messages.anyIt(it == msg1)
test "handle query with pubsub topic filter - no match":
  ## Setup
  let
    driver = newSqliteArchiveDriver()
    archive = newWakuArchive(driver)
  # NOTE(review): renamed `pubsubtopic1`/`pubsubtopic2` to camelCase per NEP1;
  # the query below already spelled the identifier `pubsubTopic1`, so the old
  # mixed spelling relied on style-insensitivity and failed `--styleCheck`.
  let
    pubsubTopic1 = "queried-topic"
    pubsubTopic2 = "non-queried-topic"
  let
    msg1 = fakeWakuMessage()
    msg2 = fakeWakuMessage()
    msg3 = fakeWakuMessage()
  # Every message is published on the non-queried topic.
  waitFor archive.handleMessage(pubsubTopic2, msg1)
  waitFor archive.handleMessage(pubsubTopic2, msg2)
  waitFor archive.handleMessage(pubsubTopic2, msg3)

  ## Given
  let req = ArchiveQuery(pubsubTopic: some(pubsubTopic1))

  ## When
  let res = waitFor archive.findMessages(req)

  ## Then
  check:
    res.isOk()
  let response = res.tryGet()
  check:
    response.messages.len() == 0
test "handle query with pubsub topic filter - match the entire stored messages":
  ## Every stored message lives on the queried pubsub topic.
  let
    driver = newSqliteArchiveDriver()
    archive = newWakuArchive(driver)
  let queriedTopic = "queried-topic"
  let published = @[
    fakeWakuMessage(payload = "TEST-1"),
    fakeWakuMessage(payload = "TEST-2"),
    fakeWakuMessage(payload = "TEST-3"),
  ]
  for msg in published:
    waitFor archive.handleMessage(queriedTopic, msg)
  let res = waitFor archive.findMessages(
    ArchiveQuery(includeData: true, pubsubTopic: some(queriedTopic))
  )
  check res.isOk()
  let response = res.tryGet()
  check:
    response.messages.len() == 3
    published.allIt(it in response.messages)

test "handle query with forward pagination":
  ## Walk archiveA in three 4-message pages, oldest first; the final page is
  ## short and carries no continuation cursor.
  var nextReq =
    ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.FORWARD)
  var pages = newSeq[seq[WakuMessage]](3)
  var cursors = newSeq[Option[ArchiveCursor]](3)
  for i in 0 ..< 3:
    let res = waitFor archiveA.findMessages(nextReq)
    require res.isOk()
    let response = res.get()
    pages[i] = response.messages
    cursors[i] = response.cursor
    # Continue from where the previous page left off.
    nextReq.cursor = cursors[i]
  check:
    cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[3]))
    cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[7]))
    cursors[2] == none(ArchiveCursor)
    pages[0] == msgListA[0 .. 3]
    pages[1] == msgListA[4 .. 7]
    pages[2] == msgListA[8 .. 9]
test "handle query with backward pagination":
  ## Walk archiveA in three 4-message pages, newest page first.
  var nextReq =
    ArchiveQuery(includeData: true, pageSize: 4, direction: PagingDirection.BACKWARD)
  var pages = newSeq[seq[WakuMessage]](3)
  var cursors = newSeq[Option[ArchiveCursor]](3)
  for i in 0 ..< 3:
    let res = waitFor archiveA.findMessages(nextReq)
    require res.isOk()
    let response = res.get()
    pages[i] = response.messages
    cursors[i] = response.cursor
    # Continue from where the previous page left off.
    nextReq.cursor = cursors[i]
  check:
    cursors[0] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[6]))
    cursors[1] == some(computeArchiveCursor(DefaultPubsubTopic, msgListA[2]))
    cursors[2] == none(ArchiveCursor)
    pages[0] == msgListA[6 .. 9]
    pages[1] == msgListA[2 .. 5]
    pages[2] == msgListA[0 .. 1]

test "handle query with no paging info - auto-pagination":
  ## Eight messages on the default topic plus two on another one.
  let
    driver = newSqliteArchiveDriver()
    archive = newWakuArchive(driver)
  var stored: seq[WakuMessage] = @[]
  for i in 0 .. 9:
    let topic = if i == 0 or i == 9: ContentTopic("2") else: DefaultContentTopic
    stored.add fakeWakuMessage(@[byte(i)], contentTopic = topic)
  for msg in stored:
    require (
      waitFor driver.put(
        DefaultPubsubTopic,
        msg,
        computeDigest(msg),
        computeMessageHash(DefaultPubsubTopic, msg),
        msg.timestamp,
      )
    ).isOk()
  let res = waitFor archive.findMessages(
    ArchiveQuery(includeData: true, contentTopics: @[DefaultContentTopic])
  )
  check res.isOk()
  let response = res.tryGet()
  check:
    # No pagination specified: the response is auto-paginated with up to
    # MaxPageSize messages per page; all 8 matching messages fit in one page.
    response.messages.len() == 8
    response.cursor.isNone()
test "handle temporal history query with a valid time window":
  ## Messages on topic "1" with timestamps inside [15, 55] of the fixture timeline.
  let res = waitFor archiveA.findMessages(
    ArchiveQuery(
      includeData: true,
      contentTopics: @[ContentTopic("1")],
      startTime: some(ts(15, timeOrigin)),
      endTime: some(ts(55, timeOrigin)),
      direction: PagingDirection.FORWARD,
    )
  )
  check res.isOk()
  let response = res.tryGet()
  check:
    response.messages.len() == 2
    response.messages.mapIt(it.timestamp) == @[ts(30, timeOrigin), ts(50, timeOrigin)]

test "handle temporal history query with a zero-size time window":
  ## A zero-size window results in an empty list of history messages.
  let res = waitFor archiveA.findMessages(
    ArchiveQuery(
      contentTopics: @[ContentTopic("1")],
      startTime: some(Timestamp(2)),
      endTime: some(Timestamp(2)),
    )
  )
  check res.isOk()
  check res.tryGet().messages.len == 0

test "handle temporal history query with an invalid time window":
  ## startTime > endTime yields an empty result rather than an error.
  let res = waitFor archiveA.findMessages(
    ArchiveQuery(
      contentTopics: @[ContentTopic("1")],
      startTime: some(Timestamp(5)),
      endTime: some(Timestamp(2)),
    )
  )
  check res.isOk()
  check res.tryGet().messages.len == 0

View File

@ -1,33 +0,0 @@
{.used.}
import std/options, chronos
import
waku/[node/peer_manager, waku_core, waku_store_legacy, waku_store_legacy/client],
../testlib/[common, wakucore]
proc newTestWakuStore*(
    switch: Switch, handler: HistoryQueryHandler
): Future[WakuStore] {.async.} =
  ## Creates, starts and mounts a legacy WakuStore protocol on `switch`,
  ## answering queries via `handler`. Returns the mounted protocol.
  let peerManager = PeerManager.new(switch)
  let proto = WakuStore.new(peerManager, rng, handler)
  await proto.start()
  switch.mount(proto)
  return proto
proc newTestWakuStoreClient*(switch: Switch): WakuStoreClient =
  ## Builds a legacy store client backed by a fresh peer manager for `switch`.
  WakuStoreClient.new(PeerManager.new(switch), rng)
proc computeHistoryCursor*(
    pubsubTopic: PubsubTopic, message: WakuMessage
): HistoryCursor =
  ## Derives the legacy history cursor identifying `message` on `pubsubTopic`.
  ## Sender time and store time are both taken from the message timestamp.
  result.pubsubTopic = pubsubTopic
  result.senderTime = message.timestamp
  result.storeTime = message.timestamp
  result.digest = computeDigest(message)

View File

@ -1,8 +0,0 @@
{.used.}
import
./test_client,
./test_resume,
./test_rpc_codec,
./test_waku_store,
./test_wakunode_store

View File

@ -1,214 +0,0 @@
{.used.}
import std/options, testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
node/peer_manager,
waku_core,
waku_store_legacy,
waku_store_legacy/client,
common/paging,
],
../testlib/[wakucore, testasync, futures],
./store_utils
suite "Store Client":
  # Per-test shared state; declared {.threadvar.} so the unittest runner can
  # execute tests on separate threads without sharing globals.
  var message1 {.threadvar.}: WakuMessage
  var message2 {.threadvar.}: WakuMessage
  var message3 {.threadvar.}: WakuMessage
  var messageSeq {.threadvar.}: seq[WakuMessage]
  var handlerFuture {.threadvar.}: Future[HistoryQuery]
  var handler {.threadvar.}: HistoryQueryHandler
  var historyQuery {.threadvar.}: HistoryQuery
  var serverSwitch {.threadvar.}: Switch
  var clientSwitch {.threadvar.}: Switch
  var server {.threadvar.}: WakuStore
  var client {.threadvar.}: WakuStoreClient
  var serverPeerInfo {.threadvar.}: RemotePeerInfo
  var clientPeerInfo {.threadvar.}: RemotePeerInfo

  asyncSetup:
    message1 = fakeWakuMessage(contentTopic = DefaultContentTopic)
    message2 = fakeWakuMessage(contentTopic = DefaultContentTopic)
    message3 = fakeWakuMessage(contentTopic = DefaultContentTopic)
    messageSeq = @[message1, message2, message3]
    handlerFuture = newLegacyHistoryFuture()
    # The server handler records the received query into `handlerFuture` and
    # always answers with the fixed `messageSeq` above.
    handler = proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.} =
      handlerFuture.complete(req)
      return ok(HistoryResponse(messages: messageSeq))
    historyQuery = HistoryQuery(
      pubsubTopic: some(DefaultPubsubTopic),
      contentTopics: @[DefaultContentTopic],
      direction: PagingDirection.FORWARD,
      requestId: "customRequestId",
    )
    serverSwitch = newTestSwitch()
    clientSwitch = newTestSwitch()
    server = await newTestWakuStore(serverSwitch, handler = handler)
    client = newTestWakuStoreClient(clientSwitch)
    await allFutures(serverSwitch.start(), clientSwitch.start())
    ## The following sleep is aimed to prevent macos failures in CI
    #[
    2024-05-16T13:24:45.5106200Z INF 2024-05-16 13:24:45.509+00:00 Stopping AutonatService topics="libp2p autonatservice" tid=53712 file=service.nim:203
    2024-05-16T13:24:45.5107960Z WRN 2024-05-16 13:24:45.509+00:00 service is already stopped topics="libp2p switch" tid=53712 file=switch.nim:86
    2024-05-16T13:24:45.5109010Z . (1.68s)
    2024-05-16T13:24:45.5109320Z Store Client (0.00s)
    2024-05-16T13:24:45.5109870Z SIGSEGV: Illegal storage access. (Attempt to read from nil?)
    2024-05-16T13:24:45.5111470Z stack trace: (most recent call last)
    ]#
    await sleepAsync(500.millis)
    serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
    clientPeerInfo = clientSwitch.peerInfo.toRemotePeerInfo()

  asyncTeardown:
    await allFutures(serverSwitch.stop(), clientSwitch.stop())
suite "HistoryQuery Creation and Execution":
  asyncTest "Valid Queries":
    ## A well-formed query reaches the handler unchanged and the client gets
    ## the handler's canned message sequence back.
    let response = await client.query(historyQuery, peer = serverPeerInfo)
    assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
    check:
      handlerFuture.read() == historyQuery
      response.get().messages == messageSeq
asyncTest "Invalid Queries":
  # TODO: IMPROVE: We can't test "actual" invalid queries because
  # it directly depends on the handler implementation, to achieve
  # proper coverage we'd need an example implementation.
  # Consequently, every "invalid" query below is still accepted and answered
  # by the test handler; the assertions verify that round trip, not rejection.
  # Given some invalid queries
  let
    invalidQuery1 = HistoryQuery(
      pubsubTopic: some(DefaultPubsubTopic),
      contentTopics: @[],
      direction: PagingDirection.FORWARD,
      requestId: "reqId1",
    )
    invalidQuery2 = HistoryQuery(
      pubsubTopic: PubsubTopic.none(),
      contentTopics: @[DefaultContentTopic],
      direction: PagingDirection.FORWARD,
      requestId: "reqId2",
    )
    invalidQuery3 = HistoryQuery(
      pubsubTopic: some(DefaultPubsubTopic),
      contentTopics: @[DefaultContentTopic],
      pageSize: 0,
      requestId: "reqId3",
    )
    # NOTE(review): invalidQuery4 is identical to invalidQuery3 apart from the
    # requestId — presumably another invalid case was intended here; confirm.
    invalidQuery4 = HistoryQuery(
      pubsubTopic: some(DefaultPubsubTopic),
      contentTopics: @[DefaultContentTopic],
      pageSize: 0,
      requestId: "reqId4",
    )
    invalidQuery5 = HistoryQuery(
      pubsubTopic: some(DefaultPubsubTopic),
      contentTopics: @[DefaultContentTopic],
      startTime: some(0.Timestamp),
      endTime: some(0.Timestamp),
      requestId: "reqId5",
    )
    invalidQuery6 = HistoryQuery(
      pubsubTopic: some(DefaultPubsubTopic),
      contentTopics: @[DefaultContentTopic],
      startTime: some(0.Timestamp),
      endTime: some(-1.Timestamp),
      requestId: "reqId6",
    )
  # When the query is sent to the server
  let queryResponse1 = await client.query(invalidQuery1, peer = serverPeerInfo)
  # Then the query is still processed (no server-side validation of this case)
  assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
  check:
    handlerFuture.read() == invalidQuery1
    queryResponse1.get().messages == messageSeq
  # When the query is sent to the server
  handlerFuture = newLegacyHistoryFuture()
  let queryResponse2 = await client.query(invalidQuery2, peer = serverPeerInfo)
  # Then the query is still processed (no server-side validation of this case)
  assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
  check:
    handlerFuture.read() == invalidQuery2
    queryResponse2.get().messages == messageSeq
  # When the query is sent to the server
  handlerFuture = newLegacyHistoryFuture()
  let queryResponse3 = await client.query(invalidQuery3, peer = serverPeerInfo)
  # Then the query is still processed (no server-side validation of this case)
  assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
  check:
    handlerFuture.read() == invalidQuery3
    queryResponse3.get().messages == messageSeq
  # When the query is sent to the server
  handlerFuture = newLegacyHistoryFuture()
  let queryResponse4 = await client.query(invalidQuery4, peer = serverPeerInfo)
  # Then the query is still processed (no server-side validation of this case)
  assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
  check:
    handlerFuture.read() == invalidQuery4
    queryResponse4.get().messages == messageSeq
  # When the query is sent to the server
  handlerFuture = newLegacyHistoryFuture()
  let queryResponse5 = await client.query(invalidQuery5, peer = serverPeerInfo)
  # Then the query is still processed (no server-side validation of this case)
  assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
  check:
    handlerFuture.read() == invalidQuery5
    queryResponse5.get().messages == messageSeq
  # When the query is sent to the server
  handlerFuture = newLegacyHistoryFuture()
  let queryResponse6 = await client.query(invalidQuery6, peer = serverPeerInfo)
  # Then the query is still processed (no server-side validation of this case)
  assert await handlerFuture.withTimeout(FUTURE_TIMEOUT)
  check:
    handlerFuture.read() == invalidQuery6
    queryResponse6.get().messages == messageSeq
suite "Verification of HistoryResponse Payload":
  asyncTest "Positive Responses":
    ## A successful round trip yields a HistoryResponse payload.
    let response = await client.query(historyQuery, peer = serverPeerInfo)
    check:
      await handlerFuture.withTimeout(FUTURE_TIMEOUT)
      type(response.get()) is HistoryResponse

  asyncTest "Negative Responses - PeerDialFailure":
    ## Dialing a peer whose switch was never started fails with PEER_DIAL_FAILURE.
    let
      stoppedSwitch = newTestSwitch()
      stoppedPeerInfo = stoppedSwitch.peerInfo.toRemotePeerInfo()
    let response = await client.query(historyQuery, peer = stoppedPeerInfo)
    check:
      not await handlerFuture.withTimeout(FUTURE_TIMEOUT)
      response.isErr()
      response.error.kind == HistoryErrorKind.PEER_DIAL_FAILURE

View File

@ -1,338 +0,0 @@
{.used.}
when defined(waku_exp_store_resume):
# TODO: Review store resume test cases (#1282)
# Ongoing changes to test code base had ruin this test meanwhile, need to investigate and fix
import
std/[options, tables, sets],
testutils/unittests,
chronos,
chronicles,
libp2p/crypto/crypto
import
waku/[
common/databases/db_sqlite,
waku_archive_legacy/driver,
waku_archive_legacy/driver/sqlite_driver/sqlite_driver,
node/peer_manager,
waku_core,
waku_core/message/digest,
waku_store_legacy,
],
../waku_store_legacy/store_utils,
../waku_archive_legacy/archive_utils,
./testlib/common,
./testlib/switch
procSuite "Waku Store - resume store":
  ## Fixtures
  # `storeA` holds 10 messages: payload @[byte i], content topic alternating
  # "2"/"1" (even/odd index), timestamps ts(0) .. ts(9).
  let storeA = block:
    let store = newTestMessageStore()
    for i in 0 .. 9:
      let topic = if i mod 2 == 0: ContentTopic("2") else: ContentTopic("1")
      let msg = fakeWakuMessage(payload = @[byte(i)], contentTopic = topic, ts = ts(i))
      require store
        .put(
          DefaultPubsubTopic,
          msg,
          computeDigest(msg),
          computeMessageHash(DefaultPubsubTopic, msg),
          msg.timestamp,
        )
        .isOk()
    store
let storeB = block:
  # `storeB` holds 8 messages on the same timeline; the entries with payloads
  # 0, 3, 4 and 5 are identical to `storeA`'s, the rest differ.
  let store = newTestMessageStore()
  let payloads = [byte 0, 11, 12, 3, 4, 5, 13, 14]
  for i, p in payloads:
    let topic = if i mod 2 == 0: ContentTopic("2") else: ContentTopic("1")
    let msg = fakeWakuMessage(payload = @[p], contentTopic = topic, ts = ts(i))
    require store
      .put(
        DefaultPubsubTopic,
        msg,
        computeDigest(msg),
        computeMessageHash(DefaultPubsubTopic, msg),
        msg.timestamp,
      )
      .isOk()
  store
asyncTest "multiple query to multiple peers with pagination":
  ## Queries two store servers in a paginated loop and expects 10 results.
  ## Setup
  let
    serverSwitchA = newTestSwitch()
    serverSwitchB = newTestSwitch()
    clientSwitch = newTestSwitch()
  await allFutures(
    serverSwitchA.start(), serverSwitchB.start(), clientSwitch.start()
  )
  # NOTE(review): `testStore` is not defined anywhere in this module — the
  # fixtures are `storeA`/`storeB` — and `newTestWakuStoreNode` does not match
  # the helpers used by the sibling tests. This file is behind
  # `when defined(waku_exp_store_resume)` and known-broken (see TODO #1282);
  # confirm the intended fixtures before re-enabling.
  let
    serverA = await newTestWakuStoreNode(serverSwitchA, store = testStore)
    serverB = await newTestWakuStoreNode(serverSwitchB, store = testStore)
    client = newTestWakuStoreClient(clientSwitch)

  ## Given
  let peers = @[
    serverSwitchA.peerInfo.toRemotePeerInfo(),
    serverSwitchB.peerInfo.toRemotePeerInfo(),
  ]
  let req = HistoryQuery(contentTopics: @[DefaultContentTopic], pageSize: 5)

  ## When
  let res = await client.queryLoop(req, peers)

  ## Then
  check:
    res.isOk()
  let response = res.tryGet()
  check:
    response.len == 10

  ## Cleanup
  await allFutures(clientSwitch.stop(), serverSwitchA.stop(), serverSwitchB.stop())
asyncTest "resume message history":
  ## Resuming against a server holding 10 messages fetches all of them.
  let
    serverSwitch = newTestSwitch()
    clientSwitch = newTestSwitch()
  await allFutures(serverSwitch.start(), clientSwitch.start())
  let
    server = await newTestWakuStore(serverSwitch, store = storeA)
    client = await newTestWakuStore(clientSwitch)
  client.setPeer(serverSwitch.peerInfo.toRemotePeerInfo())
  let res = await client.resume()
  check res.isOk()
  check:
    res.tryGet() == 10
    client.store.getMessagesCount().tryGet() == 10
  ## Cleanup
  await allFutures(clientSwitch.stop(), serverSwitch.stop())
asyncTest "resume history from a list of candidates - offline peer":
  ## Resume must fail when the only candidate peer is unreachable.
  let
    clientSwitch = newTestSwitch()
    offlineSwitch = newTestSwitch()
  await clientSwitch.start()
  let client = await newTestWakuStore(clientSwitch)
  let candidates = @[offlineSwitch.peerInfo.toRemotePeerInfo()]
  let res = await client.resume(some(candidates))
  check res.isErr()
  ## Cleanup
  await clientSwitch.stop()
asyncTest "resume history from a list of candidates - online and offline peers":
## Setup
let
offlineSwitch = newTestSwitch()
serverASwitch = newTestSwitch()
serverBSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(
serverASwitch.start(), serverBSwitch.start(), clientSwitch.start()
)
let
serverA = await newTestWakuStore(serverASwitch, store = storeA)
serverB = await newTestWakuStore(serverBSwitch, store = storeB)
client = await newTestWakuStore(clientSwitch)
## Given
let peers = @[
offlineSwitch.peerInfo.toRemotePeerInfo(),
serverASwitch.peerInfo.toRemotePeerInfo(),
serverBSwitch.peerInfo.toRemotePeerInfo(),
]
## When
let res = await client.resume(some(peers))
## Then
# `client` is expected to retrieve 14 messages:
# - The store mounted on `serverB` holds 10 messages (see `storeA` fixture)
# - The store mounted on `serverB` holds 7 messages (see `storeB` fixture)
# Both stores share 3 messages, resulting in 14 unique messages in total
check res.isOk()
let restoredMessagesCount = res.tryGet()
let storedMessagesCount = client.store.getMessagesCount().tryGet()
check:
restoredMessagesCount == 14
storedMessagesCount == 14
## Cleanup
await allFutures(serverASwitch.stop(), serverBSwitch.stop(), clientSwitch.stop())
suite "WakuNode - waku store":
asyncTest "Resume proc fetches the history":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(client.start(), server.start())
let driver = newSqliteArchiveDriver()
server.mountArchive(some(driver), none(MessageValidator), none(RetentionPolicy))
await server.mountStore()
let clientStore = StoreQueueRef.new()
await client.mountStore(store = clientStore)
client.mountStoreClient(store = clientStore)
## Given
let message = fakeWakuMessage()
require server.wakuStore.store.put(DefaultPubsubTopic, message).isOk()
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
await client.resume(some(@[serverPeer]))
# Then
check:
client.wakuStore.store.getMessagesCount().tryGet() == 1
## Cleanup
await allFutures(client.stop(), server.stop())
asyncTest "Resume proc discards duplicate messages":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
await allFutures(server.start(), client.start())
await server.mountStore(store = StoreQueueRef.new())
let clientStore = StoreQueueRef.new()
await client.mountStore(store = clientStore)
client.mountStoreClient(store = clientStore)
## Given
let timeOrigin = now()
let
msg1 = fakeWakuMessage(
payload = "hello world1", ts = (timeOrigin + getNanoSecondTime(1))
)
msg2 = fakeWakuMessage(
payload = "hello world2", ts = (timeOrigin + getNanoSecondTime(2))
)
msg3 = fakeWakuMessage(
payload = "hello world3", ts = (timeOrigin + getNanoSecondTime(3))
)
require server.wakuStore.store.put(DefaultPubsubTopic, msg1).isOk()
require server.wakuStore.store.put(DefaultPubsubTopic, msg2).isOk()
# Insert the same message in both node's store
let
receivedTime3 = now() + getNanosecondTime(10)
digest3 = computeDigest(msg3)
require server.wakuStore.store
.put(DefaultPubsubTopic, msg3, digest3, receivedTime3)
.isOk()
require client.wakuStore.store
.put(DefaultPubsubTopic, msg3, digest3, receivedTime3)
.isOk()
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
await client.resume(some(@[serverPeer]))
## Then
check:
# If the duplicates are discarded properly, then the total number of messages after resume should be 3
client.wakuStore.store.getMessagesCount().tryGet() == 3
await allFutures(client.stop(), server.stop())

View File

@ -1,184 +0,0 @@
{.used.}
import std/options, testutils/unittests, chronos
import
waku/[
common/protobuf,
common/paging,
waku_core,
waku_store_legacy/rpc,
waku_store_legacy/rpc_codec,
],
../testlib/wakucore
procSuite "Waku Store - RPC codec":
test "PagingIndexRPC protobuf codec":
## Given
let index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
## When
let encodedIndex = index.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# The fields of decodedIndex must be the same as the original index
decodedIndex == index
test "PagingIndexRPC protobuf codec - empty index":
## Given
let emptyIndex = PagingIndexRPC()
let encodedIndex = emptyIndex.encode()
let decodedIndexRes = PagingIndexRPC.decode(encodedIndex.buffer)
## Then
check:
decodedIndexRes.isOk()
let decodedIndex = decodedIndexRes.tryGet()
check:
# Check the correctness of init and encode for an empty PagingIndexRPC
decodedIndex == emptyIndex
test "PagingInfoRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.FORWARD),
)
## When
let pb = pagingInfo.encode()
let decodedPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedPagingInfo.isOk()
check:
# The fields of decodedPagingInfo must be the same as the original pagingInfo
decodedPagingInfo.value == pagingInfo
decodedPagingInfo.value.direction == pagingInfo.direction
test "PagingInfoRPC protobuf codec - empty paging info":
## Given
let emptyPagingInfo = PagingInfoRPC()
## When
let pb = emptyPagingInfo.encode()
let decodedEmptyPagingInfo = PagingInfoRPC.decode(pb.buffer)
## Then
check:
decodedEmptyPagingInfo.isOk()
check:
# check the correctness of init and encode for an empty PagingInfoRPC
decodedEmptyPagingInfo.value == emptyPagingInfo
test "HistoryQueryRPC protobuf codec":
## Given
let
index = PagingIndexRPC.compute(
fakeWakuMessage(), receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
query = HistoryQueryRPC(
contentFilters: @[
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
HistoryContentFilterRPC(contentTopic: DefaultContentTopic),
],
pagingInfo: some(pagingInfo),
startTime: some(Timestamp(10)),
endTime: some(Timestamp(11)),
)
## When
let pb = query.encode()
let decodedQuery = HistoryQueryRPC.decode(pb.buffer)
## Then
check:
decodedQuery.isOk()
check:
# the fields of decoded query decodedQuery must be the same as the original query query
decodedQuery.value == query
test "HistoryQueryRPC protobuf codec - empty history query":
## Given
let emptyQuery = HistoryQueryRPC()
## When
let pb = emptyQuery.encode()
let decodedEmptyQuery = HistoryQueryRPC.decode(pb.buffer)
## Then
check:
decodedEmptyQuery.isOk()
check:
# check the correctness of init and encode for an empty HistoryQueryRPC
decodedEmptyQuery.value == emptyQuery
test "HistoryResponseRPC protobuf codec":
## Given
let
message = fakeWakuMessage()
index = PagingIndexRPC.compute(
message, receivedTime = ts(), pubsubTopic = DefaultPubsubTopic
)
pagingInfo = PagingInfoRPC(
pageSize: some(1'u64),
cursor: some(index),
direction: some(PagingDirection.BACKWARD),
)
res = HistoryResponseRPC(
messages: @[message],
pagingInfo: some(pagingInfo),
error: HistoryResponseErrorRPC.INVALID_CURSOR,
)
## When
let pb = res.encode()
let decodedRes = HistoryResponseRPC.decode(pb.buffer)
## Then
check:
decodedRes.isOk()
check:
# the fields of decoded response decodedRes must be the same as the original response res
decodedRes.value == res
test "HistoryResponseRPC protobuf codec - empty history response":
## Given
let emptyRes = HistoryResponseRPC()
## When
let pb = emptyRes.encode()
let decodedEmptyRes = HistoryResponseRPC.decode(pb.buffer)
## Then
check:
decodedEmptyRes.isOk()
check:
# check the correctness of init and encode for an empty HistoryResponseRPC
decodedEmptyRes.value == emptyRes

View File

@ -1,113 +0,0 @@
{.used.}
import testutils/unittests, chronos, libp2p/crypto/crypto
import
waku/[
common/paging,
node/peer_manager,
waku_core,
waku_store_legacy,
waku_store_legacy/client,
],
../testlib/wakucore,
./store_utils
suite "Waku Store - query handler legacy":
asyncTest "history query handler should be called":
## Setup
let
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(serverSwitch.start(), clientSwitch.start())
## Given
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
let msg = fakeWakuMessage(contentTopic = DefaultContentTopic)
var queryHandlerFut = newFuture[(HistoryQuery)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return ok(HistoryResponse(messages: @[msg]))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
requestId: "reqId",
)
## When
let queryRes = await client.query(req, peer = serverPeerInfo)
## Then
check:
not queryHandlerFut.failed()
queryRes.isOk()
let request = queryHandlerFut.read()
check:
request == req
let response = queryRes.tryGet()
check:
response.messages.len == 1
response.messages == @[msg]
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())
asyncTest "history query handler should be called and return an error":
## Setup
let
serverSwitch = newTestSwitch()
clientSwitch = newTestSwitch()
await allFutures(serverSwitch.start(), clientSwitch.start())
## Given
let serverPeerInfo = serverSwitch.peerInfo.toRemotePeerInfo()
var queryHandlerFut = newFuture[(HistoryQuery)]()
let queryHandler = proc(
req: HistoryQuery
): Future[HistoryResult] {.async, gcsafe.} =
queryHandlerFut.complete(req)
return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST))
let
server = await newTestWakuStore(serverSwitch, handler = queryhandler)
client = newTestWakuStoreClient(clientSwitch)
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
direction: PagingDirection.FORWARD,
requestId: "reqId",
)
## When
let queryRes = await client.query(req, peer = serverPeerInfo)
## Then
check:
not queryHandlerFut.failed()
queryRes.isErr()
let request = queryHandlerFut.read()
check:
request == req
let error = queryRes.tryError()
check:
error.kind == HistoryErrorKind.BAD_REQUEST
## Cleanup
await allFutures(serverSwitch.stop(), clientSwitch.stop())

View File

@ -1,315 +0,0 @@
{.used.}
import
std/net,
testutils/unittests,
chronos,
libp2p/crypto/crypto,
libp2p/peerid,
libp2p/multiaddress,
libp2p/switch,
libp2p/protocols/pubsub/pubsub,
libp2p/protocols/pubsub/gossipsub
import
waku/[
common/paging,
waku_core,
waku_core/message/digest,
node/peer_manager,
waku_archive_legacy,
waku_filter_v2,
waku_filter_v2/client,
waku_store_legacy,
waku_node,
],
../waku_store_legacy/store_utils,
../waku_archive_legacy/archive_utils,
../testlib/wakucore,
../testlib/wakunode
procSuite "WakuNode - Store Legacy":
## Fixtures
let timeOrigin = now()
let msgListA = @[
fakeWakuMessage(@[byte 00], ts = ts(00, timeOrigin)),
fakeWakuMessage(@[byte 01], ts = ts(10, timeOrigin)),
fakeWakuMessage(@[byte 02], ts = ts(20, timeOrigin)),
fakeWakuMessage(@[byte 03], ts = ts(30, timeOrigin)),
fakeWakuMessage(@[byte 04], ts = ts(40, timeOrigin)),
fakeWakuMessage(@[byte 05], ts = ts(50, timeOrigin)),
fakeWakuMessage(@[byte 06], ts = ts(60, timeOrigin)),
fakeWakuMessage(@[byte 07], ts = ts(70, timeOrigin)),
fakeWakuMessage(@[byte 08], ts = ts(80, timeOrigin)),
fakeWakuMessage(@[byte 09], ts = ts(90, timeOrigin)),
]
let archiveA = block:
let driver = newSqliteArchiveDriver()
for msg in msgListA:
let msg_digest = waku_archive_legacy.computeDigest(msg)
let msg_hash = computeMessageHash(DefaultPubsubTopic, msg)
require (
waitFor driver.put(DefaultPubsubTopic, msg, msg_digest, msg_hash, msg.timestamp)
).isOk()
driver
test "Store protocol returns expected messages":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic])
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
let queryRes = waitFor client.query(req, peer = serverPeer)
## Then
check queryRes.isOk()
let response = queryRes.get()
check:
response.messages == msgListA
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store node history response - forward pagination":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.FORWARD,
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[6]))
cursors[1] == none(HistoryCursor)
check:
pages[0] == msgListA[0 .. 6]
pages[1] == msgListA[7 .. 9]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store node history response - backward pagination":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Given
let req = HistoryQuery(
contentTopics: @[DefaultContentTopic],
pageSize: 7,
direction: PagingDirection.BACKWARD,
)
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
var nextReq = req # copy
var pages = newSeq[seq[WakuMessage]](2)
var cursors = newSeq[Option[HistoryCursor]](2)
for i in 0 ..< 2:
let res = waitFor client.query(nextReq, peer = serverPeer)
require res.isOk()
# Keep query response content
let response = res.get()
pages[i] = response.messages
cursors[i] = response.cursor
# Set/update the request cursor
nextReq.cursor = cursors[i]
## Then
check:
cursors[0] == some(computeHistoryCursor(DefaultPubsubTopic, msgListA[3]))
cursors[1] == none(HistoryCursor)
check:
pages[0] == msgListA[3 .. 9]
pages[1] == msgListA[0 .. 2]
# Cleanup
waitFor allFutures(client.stop(), server.stop())
test "Store protocol returns expected message when relay is disabled and filter enabled":
## See nwaku issue #937: 'Store: ability to decouple store from relay'
## Setup
let
filterSourceKey = generateSecp256k1Key()
filterSource =
newTestWakuNode(filterSourceKey, parseIpAddress("0.0.0.0"), Port(0))
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start(), filterSource.start())
waitFor filterSource.mountFilter()
let driver = newSqliteArchiveDriver()
let mountArchiveRes = server.mountLegacyArchive(driver)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
waitFor server.mountFilterClient()
client.mountLegacyStoreClient()
## Given
let message = fakeWakuMessage()
let
serverPeer = server.peerInfo.toRemotePeerInfo()
filterSourcePeer = filterSource.peerInfo.toRemotePeerInfo()
## Then
let filterFut = newFuture[(PubsubTopic, WakuMessage)]()
proc filterHandler(
pubsubTopic: PubsubTopic, msg: WakuMessage
) {.async, gcsafe, closure.} =
await server.wakuLegacyArchive.handleMessage(pubsubTopic, msg)
filterFut.complete((pubsubTopic, msg))
server.wakuFilterClient.registerPushHandler(filterHandler)
let resp = waitFor server.filterSubscribe(
some(DefaultPubsubTopic), DefaultContentTopic, peer = filterSourcePeer
)
waitFor sleepAsync(100.millis)
waitFor filterSource.wakuFilter.handleMessage(DefaultPubsubTopic, message)
# Wait for the server filter to receive the push message
require waitFor filterFut.withTimeout(5.seconds)
let res = waitFor client.query(
HistoryQuery(contentTopics: @[DefaultContentTopic]), peer = serverPeer
)
## Then
check res.isOk()
let response = res.get()
check:
response.messages.len == 1
response.messages[0] == message
let (handledPubsubTopic, handledMsg) = filterFut.read()
check:
handledPubsubTopic == DefaultPubsubTopic
handledMsg == message
## Cleanup
waitFor allFutures(client.stop(), server.stop(), filterSource.stop())
test "history query should return INVALID_CURSOR if the cursor has empty data in the request":
## Setup
let
serverKey = generateSecp256k1Key()
server = newTestWakuNode(serverKey, parseIpAddress("0.0.0.0"), Port(0))
clientKey = generateSecp256k1Key()
client = newTestWakuNode(clientKey, parseIpAddress("0.0.0.0"), Port(0))
waitFor allFutures(client.start(), server.start())
let mountArchiveRes = server.mountLegacyArchive(archiveA)
assert mountArchiveRes.isOk(), mountArchiveRes.error
waitFor server.mountLegacyStore()
client.mountLegacyStoreClient()
## Forcing a bad cursor with empty digest data
var data: array[32, byte] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
]
let cursor = HistoryCursor(
pubsubTopic: "pubsubTopic",
senderTime: now(),
storeTime: now(),
digest: waku_archive_legacy.MessageDigest(data: data),
)
## Given
let req = HistoryQuery(contentTopics: @[DefaultContentTopic], cursor: some(cursor))
let serverPeer = server.peerInfo.toRemotePeerInfo()
## When
let queryRes = waitFor client.query(req, peer = serverPeer)
## Then
check not queryRes.isOk()
check queryRes.error ==
"legacy store client query error: BAD_REQUEST: invalid cursor"
# Cleanup
waitFor allFutures(client.stop(), server.stop())

View File

@ -86,7 +86,7 @@ suite "Waku v2 REST API - health":
response.status == 200
$response.contentType == $MIMETYPE_JSON
report.nodeHealth == HealthStatus.READY
report.protocolsHealth.len() == 15
report.protocolsHealth.len() == 13
report.getHealth(RelayProtocol).health == HealthStatus.NOT_READY
report.getHealth(RelayProtocol).desc == some("No connected peers")
@ -97,7 +97,6 @@ suite "Waku v2 REST API - health":
report.getHealth(LegacyLightpushProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(FilterProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(StoreProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(LegacyStoreProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(PeerExchangeProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(RendezvousProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(MixProtocol).health == HealthStatus.NOT_MOUNTED
@ -108,7 +107,6 @@ suite "Waku v2 REST API - health":
report.getHealth(LegacyLightpushClientProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(StoreClientProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(LegacyStoreClientProtocol).health == HealthStatus.NOT_MOUNTED
report.getHealth(FilterClientProtocol).health == HealthStatus.NOT_READY
report.getHealth(FilterClientProtocol).desc ==

View File

@ -348,12 +348,6 @@ hence would have reachability issues.""",
desc: "Enable/disable waku store protocol", defaultValue: false, name: "store"
.}: bool
legacyStore* {.
desc: "Enable/disable support of Waku Store v2 as a service",
defaultValue: false,
name: "legacy-store"
.}: bool
storenode* {.
desc: "Peer multiaddress to query for storage",
defaultValue: "",
@ -691,7 +685,7 @@ with the drawback of consuming some more bandwidth.""",
desc:
"Rate limit settings for different protocols." &
"Format: protocol:volume/period<unit>" &
" Where 'protocol' can be one of: <store|storev2|storev3|lightpush|px|filter> if not defined it means a global setting" &
" Where 'protocol' can be one of: <store|storev3|lightpush|px|filter> if not defined it means a global setting" &
" 'volume' and period must be an integer value. " &
" 'unit' must be one of <h|m|s|ms> - hours, minutes, seconds, milliseconds respectively. " &
"Argument may be repeated.",
@ -1045,7 +1039,6 @@ proc toWakuConf*(n: WakuNodeConf): ConfResult[WakuConf] =
b.withContentTopics(n.contentTopics)
b.storeServiceConf.withEnabled(n.store)
b.storeServiceConf.withSupportV2(n.legacyStore)
b.storeServiceConf.withRetentionPolicies(n.storeMessageRetentionPolicy)
b.storeServiceConf.withDbUrl(n.storeMessageDbUrl)
b.storeServiceConf.withDbVacuum(n.storeMessageDbVacuum)

View File

@ -73,11 +73,11 @@ proc buildLibrary(lib_name: string, srcDir = "./", params = "", `type` = "static
extra_params &= " " & paramStr(i)
if `type` == "static":
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:staticlib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
" --threads:on --app:staticlib --opt:speed --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:on -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
else:
exec "nim c" & " --out:build/" & lib_name &
" --threads:on --app:lib --opt:size --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
" --threads:on --app:lib --opt:speed --noMain --mm:refc --header -d:metrics --nimMainPrefix:" & mainPrefix & " --skipParentCfg:off -d:discv5_protocol_id=d5waku " &
extra_params & " " & srcDir & srcFile
proc buildMobileAndroid(srcDir = ".", params = "") =
@ -93,7 +93,7 @@ proc buildMobileAndroid(srcDir = ".", params = "") =
extra_params &= " " & paramStr(i)
exec "nim c" & " --out:" & outDir &
"/libwaku.so --threads:on --app:lib --opt:size --noMain --mm:refc -d:chronicles_sinks=textlines[dynamic] --header -d:chronosEventEngine=epoll --passL:-L" &
"/libwaku.so --threads:on --app:lib --opt:speed --noMain --mm:refc -d:chronicles_sinks=textlines[dynamic] --header -d:chronosEventEngine=epoll --passL:-L" &
outdir & " --passL:-lrln --passL:-llog --cpu:" & cpu & " --os:android -d:androidNDK " &
extra_params & " " & srcDir & "/libwaku.nim"
@ -266,7 +266,7 @@ proc buildMobileIOS(srcDir = ".", params = "") =
" --os:ios --cpu:" & cpu &
" --compileOnly:on" &
" --noMain --mm:refc" &
" --threads:on --opt:size --header" &
" --threads:on --opt:speed --header" &
" -d:metrics -d:discv5_protocol_id=d5waku" &
" --nimMainPrefix:libwaku --skipParentCfg:on" &
" --cc:clang" &

View File

@ -7,7 +7,6 @@ type RateLimitSetting* = tuple[volume: int, period: Duration]
type RateLimitedProtocol* = enum
GLOBAL
STOREV2
STOREV3
LIGHTPUSH
PEEREXCHG
@ -47,8 +46,6 @@ proc translate(sProtocol: string): RateLimitedProtocol {.raises: [ValueError].}
case sProtocol
of "global":
return GLOBAL
of "storev2":
return STOREV2
of "storev3":
return STOREV3
of "lightpush":
@ -65,7 +62,6 @@ proc fillSettingTable(
) {.raises: [ValueError].} =
if sProtocol == "store":
# generic store will only applies to version which is not listed directly
discard t.hasKeyOrPut(STOREV2, setting)
discard t.hasKeyOrPut(STOREV3, setting)
else:
let protocol = translate(sProtocol)
@ -87,7 +83,7 @@ proc parse*(
## group4: Unit of period - only h:hour, m:minute, s:second, ms:millisecond allowed
## whitespaces are allowed lazily
const parseRegex =
"""^\s*((store|storev2|storev3|lightpush|px|filter)\s*:)?\s*(\d+)\s*\/\s*(\d+)\s*(s|h|m|ms)\s*$"""
"""^\s*((store|storev3|lightpush|px|filter)\s*:)?\s*(\d+)\s*\/\s*(\d+)\s*(s|h|m|ms)\s*$"""
const regexParseSize = re2(parseRegex)
for settingStr in settings:
let aSetting = settingStr.toLower()

View File

@ -4,7 +4,6 @@ type WakuProtocol* {.pure.} = enum
RelayProtocol = "Relay"
RlnRelayProtocol = "Rln Relay"
StoreProtocol = "Store"
LegacyStoreProtocol = "Legacy Store"
FilterProtocol = "Filter"
LightpushProtocol = "Lightpush"
LegacyLightpushProtocol = "Legacy Lightpush"
@ -12,13 +11,12 @@ type WakuProtocol* {.pure.} = enum
RendezvousProtocol = "Rendezvous"
MixProtocol = "Mix"
StoreClientProtocol = "Store Client"
LegacyStoreClientProtocol = "Legacy Store Client"
FilterClientProtocol = "Filter Client"
LightpushClientProtocol = "Lightpush Client"
LegacyLightpushClientProtocol = "Legacy Lightpush Client"
const
RelayProtocols* = {RelayProtocol}
StoreClientProtocols* = {StoreClientProtocol, LegacyStoreClientProtocol}
StoreClientProtocols* = {StoreClientProtocol}
LightpushClientProtocols* = {LightpushClientProtocol, LegacyLightpushClientProtocol}
FilterClientProtocols* = {FilterClientProtocol}

View File

@ -8,6 +8,6 @@ type WakuPeerEventKind* {.pure.} = enum
EventMetadataUpdated
EventBroker:
type EventWakuPeer* = object
type WakuPeerEvent* = object
peerId*: PeerId
kind*: WakuPeerEventKind

View File

@ -14,7 +14,6 @@ type StoreServiceConfBuilder* = object
dbMigration*: Option[bool]
dbURl*: Option[string]
dbVacuum*: Option[bool]
supportV2*: Option[bool]
maxNumDbConnections*: Option[int]
retentionPolicies*: seq[string]
resume*: Option[bool]
@ -35,9 +34,6 @@ proc withDbUrl*(b: var StoreServiceConfBuilder, dbUrl: string) =
proc withDbVacuum*(b: var StoreServiceConfBuilder, dbVacuum: bool) =
b.dbVacuum = some(dbVacuum)
proc withSupportV2*(b: var StoreServiceConfBuilder, supportV2: bool) =
b.supportV2 = some(supportV2)
proc withMaxNumDbConnections*(
b: var StoreServiceConfBuilder, maxNumDbConnections: int
) =
@ -104,7 +100,6 @@ proc build*(b: StoreServiceConfBuilder): Result[Option[StoreServiceConf], string
dbMigration: b.dbMigration.get(true),
dbURl: b.dbUrl.get(),
dbVacuum: b.dbVacuum.get(false),
supportV2: b.supportV2.get(false),
maxNumDbConnections: b.maxNumDbConnections.get(50),
retentionPolicies: retentionPolicies,
resume: b.resume.get(false),

View File

@ -25,12 +25,8 @@ import
../waku_archive/retention_policy/builder as policy_builder,
../waku_archive/driver as driver,
../waku_archive/driver/builder as driver_builder,
../waku_archive_legacy/driver as legacy_driver,
../waku_archive_legacy/driver/builder as legacy_driver_builder,
../waku_store,
../waku_store/common as store_common,
../waku_store_legacy,
../waku_store_legacy/common as legacy_common,
../waku_filter_v2,
../waku_peer_exchange,
../discovery/waku_kademlia,
@ -38,8 +34,7 @@ import
../node/peer_manager/peer_store/waku_peer_storage,
../node/peer_manager/peer_store/migrations as peer_store_sqlite_migrations,
../waku_lightpush_legacy/common,
../common/rate_limit/setting,
../common/databases/dburl
../common/rate_limit/setting
## Peer persistence
@ -198,42 +193,10 @@ proc setupProtocols(
if conf.storeServiceConf.isSome():
let storeServiceConf = conf.storeServiceConf.get()
if storeServiceConf.supportV2:
let archiveDriver = (
await legacy_driver.ArchiveDriver.new(
storeServiceConf.dbUrl, storeServiceConf.dbVacuum,
storeServiceConf.dbMigration, storeServiceConf.maxNumDbConnections,
onFatalErrorAction,
)
).valueOr:
return err("failed to setup legacy archive driver: " & error)
node.mountLegacyArchive(archiveDriver).isOkOr:
return err("failed to mount waku legacy archive protocol: " & error)
## For now we always mount the future archive driver but if the legacy one is mounted,
## then the legacy will be in charge of performing the archiving.
## Regarding storage, the only diff between the current/future archive driver and the legacy
## one, is that the legacy stores an extra field: the id (message digest.)
## TODO: remove this "migrate" variable once legacy store is removed
## It is now necessary because sqlite's legacy store has an extra field: storedAt
## This breaks compatibility between store's and legacy store's schemas in sqlite
## So for now, we need to make sure that when legacy store is enabled and we use sqlite
## that we migrate our db according to legacy store's schema to have the extra field
let engine = dburl.getDbEngine(storeServiceConf.dbUrl).valueOr:
return err("error getting db engine in setupProtocols: " & error)
let migrate =
if engine == "sqlite" and storeServiceConf.supportV2:
false
else:
storeServiceConf.dbMigration
let archiveDriver = (
await driver.ArchiveDriver.new(
storeServiceConf.dbUrl, storeServiceConf.dbVacuum, migrate,
storeServiceConf.dbUrl, storeServiceConf.dbVacuum, storeServiceConf.dbMigration,
storeServiceConf.maxNumDbConnections, onFatalErrorAction,
)
).valueOr:
@ -245,14 +208,6 @@ proc setupProtocols(
node.mountArchive(archiveDriver, retPolicies).isOkOr:
return err("failed to mount waku archive protocol: " & error)
if storeServiceConf.supportV2:
# Store legacy setup
try:
await mountLegacyStore(node, node.rateLimitSettings.getSetting(STOREV2))
except CatchableError:
return
err("failed to mount waku legacy store protocol: " & getCurrentExceptionMsg())
# Store setup
try:
await mountStore(node, node.rateLimitSettings.getSetting(STOREV3))
@ -284,12 +239,6 @@ proc setupProtocols(
return err("failed to set node waku store peer: " & error)
node.peerManager.addServicePeer(storeNode, WakuStoreCodec)
mountLegacyStoreClient(node)
if conf.remoteStoreNode.isSome():
let storeNode = parsePeerInfo(conf.remoteStoreNode.get()).valueOr:
return err("failed to set node waku legacy store peer: " & error)
node.peerManager.addServicePeer(storeNode, WakuLegacyStoreCodec)
if conf.storeServiceConf.isSome and conf.storeServiceConf.get().resume:
node.setupStoreResume()

View File

@ -416,7 +416,8 @@ proc startWaku*(waku: ptr Waku): Future[Result[void, string]] {.async: (raises:
## Reliability
if not waku[].deliveryService.isNil():
waku[].deliveryService.startDeliveryService()
waku[].deliveryService.startDeliveryService().isOkOr:
return err("failed to start delivery service: " & $error)
## Health Monitor
waku[].healthMonitor.startHealthMonitor().isOkOr:

View File

@ -60,7 +60,6 @@ type StoreServiceConf* {.requiresInit.} = object
dbMigration*: bool
dbURl*: string
dbVacuum*: bool
supportV2*: bool
maxNumDbConnections*: int
retentionPolicies*: seq[string]
resume*: bool

View File

@ -1,18 +1,13 @@
## This module helps to ensure the correct transmission and reception of messages
import results
import chronos
import chronos, chronicles
import
./recv_service,
./send_service,
./subscription_manager,
waku/[
waku_core,
waku_node,
waku_store/client,
waku_relay/protocol,
waku_lightpush/client,
waku_filter_v2/client,
waku_core, waku_node, waku_store/client, waku_relay/protocol, waku_lightpush/client
]
type DeliveryService* = ref object
@ -37,10 +32,11 @@ proc new*(
)
)
proc startDeliveryService*(self: DeliveryService) =
self.subscriptionManager.startSubscriptionManager()
proc startDeliveryService*(self: DeliveryService): Result[void, string] =
?self.subscriptionManager.startSubscriptionManager()
self.recvService.startRecvService()
self.sendService.startSendService()
return ok()
proc stopDeliveryService*(self: DeliveryService) {.async.} =
await self.sendService.stopSendService()

View File

@ -91,20 +91,20 @@ proc msgChecker(self: RecvService) {.async.} =
self.endTimeToCheck = getNowInNanosecondTime()
var msgHashesInStore = newSeq[WakuMessageHash](0)
for sub in self.subscriptionManager.getActiveSubscriptions():
for pubsubTopic, contentTopics in self.subscriptionManager.subscribedTopics:
let storeResp: StoreQueryResponse = (
await self.node.wakuStoreClient.queryToAny(
StoreQueryRequest(
includeData: false,
pubsubTopic: some(PubsubTopic(sub.pubsubTopic)),
contentTopics: sub.contentTopics,
pubsubTopic: some(pubsubTopic),
contentTopics: toSeq(contentTopics),
startTime: some(self.startTimeToCheck - DelayExtra.nanos),
endTime: some(self.endTimeToCheck + DelayExtra.nanos),
)
)
).valueOr:
error "msgChecker failed to get remote msgHashes",
pubsubTopic = sub.pubsubTopic, cTopics = sub.contentTopics, error = $error
pubsubTopic = pubsubTopic, cTopics = toSeq(contentTopics), error = $error
continue
msgHashesInStore.add(storeResp.messages.mapIt(it.messageHash))
@ -154,10 +154,6 @@ proc new*(T: typedesc[RecvService], node: WakuNode, s: SubscriptionManager): T =
recentReceivedMsgs: @[],
)
# TODO: For MAPI Edge support, either call node.wakuFilterClient.registerPushHandler
# so that the RecvService listens to incoming filter messages,
# or have the filter client emit MessageSeenEvent.
return recvService
proc loopPruneOldMessages(self: RecvService) {.async.} =

View File

@ -1,4 +1,5 @@
import std/[sets, tables, options, strutils], chronos, chronicles, results
import std/[sequtils, sets, tables, options, strutils], chronos, chronicles, results
import libp2p/[peerid, peerinfo]
import
waku/[
waku_core,
@ -6,16 +7,67 @@ import
waku_core/topics/sharding,
waku_node,
waku_relay,
waku_filter_v2/common as filter_common,
waku_filter_v2/client as filter_client,
waku_filter_v2/protocol as filter_protocol,
common/broker/broker_context,
events/delivery_events,
events/health_events,
events/peer_events,
requests/health_requests,
node/peer_manager,
node/health_monitor/topic_health,
node/health_monitor/connection_status,
]
# ---------------------------------------------------------------------------
# Logos Messaging API SubscriptionManager
#
# Maps all topic subscription intent and centralizes all consistency
# maintenance of the pubsub and content topic subscription model across
# the various network drivers that handle topics (Edge/Filter and Core/Relay).
# ---------------------------------------------------------------------------
type EdgeFilterSubState* = object
peers: seq[RemotePeerInfo]
## Filter service peers with confirmed subscriptions on this shard.
pending: seq[Future[void]] ## In-flight dial futures for peers not yet confirmed.
pendingPeers: HashSet[PeerId] ## PeerIds of peers currently being dialed.
currentHealth: TopicHealth
## Cached health derived from peers.len; updated on every peer set change.
func toTopicHealth*(peersCount: int): TopicHealth =
if peersCount >= HealthyThreshold:
TopicHealth.SUFFICIENTLY_HEALTHY
elif peersCount > 0:
TopicHealth.MINIMALLY_HEALTHY
else:
TopicHealth.UNHEALTHY
type SubscriptionManager* = ref object of RootObj
node: WakuNode
contentTopicSubs: Table[PubsubTopic, HashSet[ContentTopic]]
## Map of Shard to ContentTopic needed because e.g. WakuRelay is PubsubTopic only.
## A present key with an empty HashSet value means pubsubtopic already subscribed
## (via subscribePubsubTopics()) but there's no specific content topic interest yet.
edgeFilterSubStates*: Table[PubsubTopic, EdgeFilterSubState]
## Per-shard filter subscription state for edge mode.
edgeFilterWakeup: AsyncEvent
## Signalled when the edge filter sub loop should re-reconcile.
edgeFilterSubLoopFut: Future[void]
edgeFilterHealthLoopFut: Future[void]
peerEventListener: WakuPeerEventListener
## Listener for peer connect/disconnect events (edge filter wakeup).
iterator subscribedTopics*(
self: SubscriptionManager
): (PubsubTopic, HashSet[ContentTopic]) =
for pubsub, topics in self.contentTopicSubs.pairs:
yield (pubsub, topics)
proc edgeFilterPeerCount*(sm: SubscriptionManager, shard: PubsubTopic): int =
sm.edgeFilterSubStates.withValue(shard, state):
return state.peers.len
return 0
proc new*(T: typedesc[SubscriptionManager], node: WakuNode): T =
SubscriptionManager(
@ -25,30 +77,35 @@ proc new*(T: typedesc[SubscriptionManager], node: WakuNode): T =
proc addContentTopicInterest(
self: SubscriptionManager, shard: PubsubTopic, topic: ContentTopic
): Result[void, string] =
var changed = false
if not self.contentTopicSubs.hasKey(shard):
self.contentTopicSubs[shard] = initHashSet[ContentTopic]()
changed = true
self.contentTopicSubs.withValue(shard, cTopics):
if not cTopics[].contains(topic):
cTopics[].incl(topic)
changed = true
# TODO: Call a "subscribe(shard, topic)" on filter client here,
# so the filter client can know that subscriptions changed.
if changed and not isNil(self.edgeFilterWakeup):
self.edgeFilterWakeup.fire()
return ok()
proc removeContentTopicInterest(
self: SubscriptionManager, shard: PubsubTopic, topic: ContentTopic
): Result[void, string] =
var changed = false
self.contentTopicSubs.withValue(shard, cTopics):
if cTopics[].contains(topic):
cTopics[].excl(topic)
changed = true
if cTopics[].len == 0 and isNil(self.node.wakuRelay):
self.contentTopicSubs.del(shard) # We're done with cTopics here
# TODO: Call a "unsubscribe(shard, topic)" on filter client here,
# so the filter client can know that subscriptions changed.
if changed and not isNil(self.edgeFilterWakeup):
self.edgeFilterWakeup.fire()
return ok()
@ -73,46 +130,6 @@ proc subscribePubsubTopics(
return ok()
proc startSubscriptionManager*(self: SubscriptionManager) =
if isNil(self.node.wakuRelay):
return
if self.node.wakuAutoSharding.isSome():
# Subscribe relay to all shards in autosharding.
let autoSharding = self.node.wakuAutoSharding.get()
let clusterId = autoSharding.clusterId
let numShards = autoSharding.shardCountGenZero
if numShards > 0:
var clusterPubsubTopics = newSeqOfCap[PubsubTopic](numShards)
for i in 0 ..< numShards:
let shardObj = RelayShard(clusterId: clusterId, shardId: uint16(i))
clusterPubsubTopics.add(PubsubTopic($shardObj))
self.subscribePubsubTopics(clusterPubsubTopics).isOkOr:
error "Failed to auto-subscribe Relay to cluster shards: ", error = error
else:
info "SubscriptionManager has no AutoSharding configured; skipping auto-subscribe."
proc stopSubscriptionManager*(self: SubscriptionManager) {.async.} =
discard
proc getActiveSubscriptions*(
self: SubscriptionManager
): seq[tuple[pubsubTopic: string, contentTopics: seq[ContentTopic]]] =
var activeSubs: seq[tuple[pubsubTopic: string, contentTopics: seq[ContentTopic]]] =
@[]
for pubsub, cTopicSet in self.contentTopicSubs.pairs:
if cTopicSet.len > 0:
var cTopicSeq = newSeqOfCap[ContentTopic](cTopicSet.len)
for t in cTopicSet:
cTopicSeq.add(t)
activeSubs.add((pubsub, cTopicSeq))
return activeSubs
proc getShardForContentTopic(
self: SubscriptionManager, topic: ContentTopic
): Result[PubsubTopic, string] =
@ -162,3 +179,358 @@ proc unsubscribe*(
?self.removeContentTopicInterest(shard, topic)
return ok()
# ---------------------------------------------------------------------------
# Edge Filter driver for the Logos Messaging API
#
# The SubscriptionManager absorbs natively the responsibility of using the
# Edge Filter protocol to effect subscriptions and message receipt for edge.
# ---------------------------------------------------------------------------
const EdgeFilterSubscribeTimeout = chronos.seconds(15)
## Timeout for a single filter subscribe/unsubscribe RPC to a service peer.
const EdgeFilterPingTimeout = chronos.seconds(5)
## Timeout for a filter ping health check.
const EdgeFilterLoopInterval = chronos.seconds(30)
## Interval for the edge filter health ping loop.
const EdgeFilterSubLoopDebounce = chronos.seconds(1)
## Debounce delay to coalesce rapid-fire wakeups into a single reconciliation pass.
proc updateShardHealth(
self: SubscriptionManager, shard: PubsubTopic, state: var EdgeFilterSubState
) =
## Recompute and emit health for a shard after its peer set changed.
let newHealth = toTopicHealth(state.peers.len)
if newHealth != state.currentHealth:
state.currentHealth = newHealth
EventShardTopicHealthChange.emit(self.node.brokerCtx, shard, newHealth)
proc removePeer(self: SubscriptionManager, shard: PubsubTopic, peerId: PeerId) =
## Remove a peer from edgeFilterSubStates for the given shard,
## update health, and wake the sub loop to dial a replacement.
## Best-effort unsubscribe so the service peer stops pushing to us.
self.edgeFilterSubStates.withValue(shard, state):
var peer: RemotePeerInfo
var found = false
for p in state.peers:
if p.peerId == peerId:
peer = p
found = true
break
if not found:
return
state.peers.keepItIf(it.peerId != peerId)
self.updateShardHealth(shard, state[])
self.edgeFilterWakeup.fire()
if not self.node.wakuFilterClient.isNil():
self.contentTopicSubs.withValue(shard, topics):
let ct = toSeq(topics[])
if ct.len > 0:
proc doUnsubscribe() {.async.} =
discard await self.node.wakuFilterClient.unsubscribe(peer, shard, ct)
asyncSpawn doUnsubscribe()
type SendChunkedFilterRpcKind = enum
FilterSubscribe
FilterUnsubscribe
proc sendChunkedFilterRpc(
self: SubscriptionManager,
peer: RemotePeerInfo,
shard: PubsubTopic,
topics: seq[ContentTopic],
kind: SendChunkedFilterRpcKind,
): Future[bool] {.async.} =
## Send a chunked filter subscribe or unsubscribe RPC. Returns true on
## success. On failure the peer is removed and false is returned.
try:
var i = 0
while i < topics.len:
let chunk =
topics[i ..< min(i + filter_protocol.MaxContentTopicsPerRequest, topics.len)]
let fut =
case kind
of FilterSubscribe:
self.node.wakuFilterClient.subscribe(peer, shard, chunk)
of FilterUnsubscribe:
self.node.wakuFilterClient.unsubscribe(peer, shard, chunk)
if not (await fut.withTimeout(EdgeFilterSubscribeTimeout)) or fut.read().isErr():
trace "sendChunkedFilterRpc: chunk failed",
op = kind, shard = shard, peer = peer.peerId
self.removePeer(shard, peer.peerId)
return false
i += filter_protocol.MaxContentTopicsPerRequest
except CatchableError as exc:
debug "sendChunkedFilterRpc: failed",
op = kind, shard = shard, peer = peer.peerId, err = exc.msg
self.removePeer(shard, peer.peerId)
return false
return true
proc syncFilterDeltas(
self: SubscriptionManager,
peer: RemotePeerInfo,
shard: PubsubTopic,
added: seq[ContentTopic],
removed: seq[ContentTopic],
) {.async.} =
## Push content topic changes (adds/removes) to an already-tracked peer.
if added.len > 0:
if not await self.sendChunkedFilterRpc(peer, shard, added, FilterSubscribe):
return
if removed.len > 0:
discard await self.sendChunkedFilterRpc(peer, shard, removed, FilterUnsubscribe)
proc dialFilterPeer(
self: SubscriptionManager,
peer: RemotePeerInfo,
shard: PubsubTopic,
contentTopics: seq[ContentTopic],
) {.async.} =
## Subscribe a new peer to all content topics on a shard and start tracking it.
self.edgeFilterSubStates.withValue(shard, state):
state.pendingPeers.incl(peer.peerId)
try:
if not await self.sendChunkedFilterRpc(peer, shard, contentTopics, FilterSubscribe):
return
self.edgeFilterSubStates.withValue(shard, state):
if state.peers.anyIt(it.peerId == peer.peerId):
trace "dialFilterPeer: peer already tracked, skipping duplicate",
shard = shard, peer = peer.peerId
return
state.peers.add(peer)
self.updateShardHealth(shard, state[])
trace "dialFilterPeer: successfully subscribed to all chunks",
shard = shard, peer = peer.peerId, totalPeers = state.peers.len
do:
trace "dialFilterPeer: shard removed while subscribing, discarding result",
shard = shard, peer = peer.peerId
finally:
self.edgeFilterSubStates.withValue(shard, state):
state.pendingPeers.excl(peer.peerId)
proc edgeFilterHealthLoop*(self: SubscriptionManager) {.async.} =
## Periodically pings all connected filter service peers to verify they are
## still alive at the application layer. Peers that fail the ping are removed.
while true:
await sleepAsync(EdgeFilterLoopInterval)
if self.node.wakuFilterClient.isNil():
warn "filter client is nil within edge filter health loop"
continue
var connected = initTable[PeerId, RemotePeerInfo]()
for state in self.edgeFilterSubStates.values:
for peer in state.peers:
if self.node.peerManager.switch.peerStore.isConnected(peer.peerId):
connected[peer.peerId] = peer
var alive = initHashSet[PeerId]()
if connected.len > 0:
var pingTasks: seq[(PeerId, Future[FilterSubscribeResult])] = @[]
for peer in connected.values:
pingTasks.add(
(peer.peerId, self.node.wakuFilterClient.ping(peer, EdgeFilterPingTimeout))
)
# extract future tasks from (PeerId, Future) tuples and await them
await allFutures(pingTasks.mapIt(it[1]))
for (peerId, task) in pingTasks:
if task.read().isOk():
alive.incl(peerId)
var changed = false
for shard, state in self.edgeFilterSubStates.mpairs:
let oldLen = state.peers.len
state.peers.keepItIf(it.peerId notin connected or alive.contains(it.peerId))
if state.peers.len < oldLen:
changed = true
self.updateShardHealth(shard, state)
trace "Edge Filter health degraded by Ping failure",
shard = shard, new = state.currentHealth
if changed:
self.edgeFilterWakeup.fire()
proc edgeFilterSubLoop*(self: SubscriptionManager) {.async.} =
## Reconciles filter subscriptions with the desired state from SubscriptionManager.
var lastSynced = initTable[PubsubTopic, HashSet[ContentTopic]]()
while true:
await self.edgeFilterWakeup.wait()
await sleepAsync(EdgeFilterSubLoopDebounce)
self.edgeFilterWakeup.clear()
trace "edgeFilterSubLoop: woke up"
if isNil(self.node.wakuFilterClient):
trace "edgeFilterSubLoop: wakuFilterClient is nil, skipping"
continue
let desired = self.contentTopicSubs
trace "edgeFilterSubLoop: desired state", numShards = desired.len
let allShards = toHashSet(toSeq(desired.keys)) + toHashSet(toSeq(lastSynced.keys))
for shard in allShards:
let currTopics = desired.getOrDefault(shard)
let prevTopics = lastSynced.getOrDefault(shard)
if shard notin self.edgeFilterSubStates:
self.edgeFilterSubStates[shard] =
EdgeFilterSubState(currentHealth: TopicHealth.UNHEALTHY)
let addedTopics = toSeq(currTopics - prevTopics)
let removedTopics = toSeq(prevTopics - currTopics)
self.edgeFilterSubStates.withValue(shard, state):
state.peers.keepItIf(
self.node.peerManager.switch.peerStore.isConnected(it.peerId)
)
state.pending.keepItIf(not it.finished)
if addedTopics.len > 0 or removedTopics.len > 0:
for peer in state.peers:
asyncSpawn self.syncFilterDeltas(peer, shard, addedTopics, removedTopics)
if currTopics.len == 0:
for fut in state.pending:
if not fut.finished:
await fut.cancelAndWait()
self.edgeFilterSubStates.del(shard)
# invalidates `state` — do not use after this
else:
self.updateShardHealth(shard, state[])
let needed = max(0, HealthyThreshold - state.peers.len - state.pending.len)
if needed > 0:
let tracked = state.peers.mapIt(it.peerId).toHashSet() + state.pendingPeers
var candidates = self.node.peerManager.selectPeers(
filter_common.WakuFilterSubscribeCodec, some(shard)
)
candidates.keepItIf(it.peerId notin tracked)
let toDial = min(needed, candidates.len)
trace "edgeFilterSubLoop: shard reconciliation",
shard = shard,
num_peers = state.peers.len,
num_pending = state.pending.len,
num_needed = needed,
num_available = candidates.len,
toDial = toDial
for i in 0 ..< toDial:
let fut = self.dialFilterPeer(candidates[i], shard, toSeq(currTopics))
state.pending.add(fut)
lastSynced = desired
proc startEdgeFilterLoops(self: SubscriptionManager): Result[void, string] =
## Start the edge filter orchestration loops.
## Caller must ensure this is only called in edge mode (relay nil, filter client present).
self.edgeFilterWakeup = newAsyncEvent()
self.peerEventListener = WakuPeerEvent.listen(
self.node.brokerCtx,
proc(evt: WakuPeerEvent) {.async: (raises: []), gcsafe.} =
if evt.kind == WakuPeerEventKind.EventDisconnected or
evt.kind == WakuPeerEventKind.EventMetadataUpdated:
self.edgeFilterWakeup.fire()
,
).valueOr:
return err("Failed to listen to peer events for edge filter: " & error)
self.edgeFilterSubLoopFut = self.edgeFilterSubLoop()
self.edgeFilterHealthLoopFut = self.edgeFilterHealthLoop()
return ok()
proc stopEdgeFilterLoops(self: SubscriptionManager) {.async: (raises: []).} =
## Stop the edge filter orchestration loops and clean up pending futures.
if not isNil(self.edgeFilterSubLoopFut):
await self.edgeFilterSubLoopFut.cancelAndWait()
self.edgeFilterSubLoopFut = nil
if not isNil(self.edgeFilterHealthLoopFut):
await self.edgeFilterHealthLoopFut.cancelAndWait()
self.edgeFilterHealthLoopFut = nil
for shard, state in self.edgeFilterSubStates:
for fut in state.pending:
if not fut.finished:
await fut.cancelAndWait()
WakuPeerEvent.dropListener(self.node.brokerCtx, self.peerEventListener)
# ---------------------------------------------------------------------------
# SubscriptionManager Lifecycle (calls Edge behavior above)
#
# startSubscriptionManager and stopSubscriptionManager orchestrate both the
# core (relay) and edge (filter) paths, and register/clear broker providers.
# ---------------------------------------------------------------------------
proc startSubscriptionManager*(self: SubscriptionManager): Result[void, string] =
# Register edge filter broker providers. The shard/content health providers
# in WakuNode query these via the broker as a fallback when relay health is
# not available. If edge mode is not active, these providers simply return
# NOT_SUBSCRIBED / strength 0, which is harmless.
RequestEdgeShardHealth.setProvider(
self.node.brokerCtx,
proc(shard: PubsubTopic): Result[RequestEdgeShardHealth, string] =
self.edgeFilterSubStates.withValue(shard, state):
return ok(RequestEdgeShardHealth(health: state.currentHealth))
return ok(RequestEdgeShardHealth(health: TopicHealth.NOT_SUBSCRIBED)),
).isOkOr:
error "Can't set provider for RequestEdgeShardHealth", error = error
RequestEdgeFilterPeerCount.setProvider(
self.node.brokerCtx,
proc(): Result[RequestEdgeFilterPeerCount, string] =
var minPeers = high(int)
for state in self.edgeFilterSubStates.values:
minPeers = min(minPeers, state.peers.len)
if minPeers == high(int):
minPeers = 0
return ok(RequestEdgeFilterPeerCount(peerCount: minPeers)),
).isOkOr:
error "Can't set provider for RequestEdgeFilterPeerCount", error = error
if self.node.wakuRelay.isNil():
return self.startEdgeFilterLoops()
# Core mode: auto-subscribe relay to all shards in autosharding.
if self.node.wakuAutoSharding.isSome():
let autoSharding = self.node.wakuAutoSharding.get()
let clusterId = autoSharding.clusterId
let numShards = autoSharding.shardCountGenZero
if numShards > 0:
var clusterPubsubTopics = newSeqOfCap[PubsubTopic](numShards)
for i in 0 ..< numShards:
let shardObj = RelayShard(clusterId: clusterId, shardId: uint16(i))
clusterPubsubTopics.add(PubsubTopic($shardObj))
self.subscribePubsubTopics(clusterPubsubTopics).isOkOr:
error "Failed to auto-subscribe Relay to cluster shards: ", error = error
else:
info "SubscriptionManager has no AutoSharding configured; skipping auto-subscribe."
return ok()
proc stopSubscriptionManager*(self: SubscriptionManager) {.async: (raises: []).} =
if self.node.wakuRelay.isNil():
await self.stopEdgeFilterLoops()
RequestEdgeShardHealth.clearProvider(self.node.brokerCtx)
RequestEdgeFilterPeerCount.clearProvider(self.node.brokerCtx)

View File

@ -2,6 +2,9 @@ import chronos, results, std/strutils, ../../api/types
export ConnectionStatus
const HealthyThreshold* = 2
## Minimum peers required per service protocol for a "Connected" status (excluding Relay).
proc init*(
t: typedesc[ConnectionStatus], strRep: string
): Result[ConnectionStatus, string] =

View File

@ -21,6 +21,7 @@ import
node/health_monitor/health_report,
node/health_monitor/connection_status,
node/health_monitor/protocol_health,
requests/health_requests,
]
## This module is aimed to check the state of the "self" Waku Node
@ -29,9 +30,6 @@ import
# if not called, the outcome of randomization procedures will be the same in every run
random.randomize()
const HealthyThreshold* = 2
## minimum peers required for all services for a Connected status, excluding Relay
type NodeHealthMonitor* = ref object
nodeHealth: HealthStatus
node: WakuNode
@ -48,7 +46,8 @@ type NodeHealthMonitor* = ref object
## latest known connectivity strength (e.g. connected peer count) metric for each protocol.
## if it doesn't make sense for the protocol in question, this is set to zero.
relayObserver: PubSubObserver
peerEventListener: EventWakuPeerListener
peerEventListener: WakuPeerEventListener
shardHealthListener: EventShardTopicHealthChangeListener
func getHealth*(report: HealthReport, kind: WakuProtocol): ProtocolHealth =
for h in report.protocolsHealth:
@ -163,17 +162,6 @@ proc getStoreHealth(hm: NodeHealthMonitor): ProtocolHealth =
hm.strength[WakuProtocol.StoreProtocol] = peerCount
return p.ready()
proc getLegacyStoreHealth(hm: NodeHealthMonitor): ProtocolHealth =
var p = ProtocolHealth.init(WakuProtocol.LegacyStoreProtocol)
if isNil(hm.node.wakuLegacyStore):
hm.strength[WakuProtocol.LegacyStoreProtocol] = 0
return p.notMounted()
let peerCount = hm.countCapablePeers(WakuLegacyStoreCodec)
hm.strength[WakuProtocol.LegacyStoreProtocol] = peerCount
return p.ready()
proc getLightpushClientHealth(hm: NodeHealthMonitor): ProtocolHealth =
var p = ProtocolHealth.init(WakuProtocol.LightpushClientProtocol)
@ -209,6 +197,17 @@ proc getFilterClientHealth(hm: NodeHealthMonitor): ProtocolHealth =
hm.strength[WakuProtocol.FilterClientProtocol] = 0
return p.notMounted()
if isNil(hm.node.wakuRelay):
let edgeRes = RequestEdgeFilterPeerCount.request(hm.node.brokerCtx)
if edgeRes.isOk():
let peerCount = edgeRes.get().peerCount
if peerCount > 0:
hm.strength[WakuProtocol.FilterClientProtocol] = peerCount
return p.ready()
else:
error "Failed to request edge filter peer count", error = edgeRes.error
return p.notReady("Failed to request edge filter peer count: " & edgeRes.error)
let peerCount = countCapablePeers(hm, WakuFilterSubscribeCodec)
hm.strength[WakuProtocol.FilterClientProtocol] = peerCount
@ -233,23 +232,6 @@ proc getStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth =
"No Store service peer available yet, neither Store service set up for the node"
)
proc getLegacyStoreClientHealth(hm: NodeHealthMonitor): ProtocolHealth =
var p = ProtocolHealth.init(WakuProtocol.LegacyStoreClientProtocol)
if isNil(hm.node.wakuLegacyStoreClient):
hm.strength[WakuProtocol.LegacyStoreClientProtocol] = 0
return p.notMounted()
let peerCount = countCapablePeers(hm, WakuLegacyStoreCodec)
hm.strength[WakuProtocol.LegacyStoreClientProtocol] = peerCount
if peerCount > 0 or not isNil(hm.node.wakuLegacyStore):
return p.ready()
return p.notReady(
"No Legacy Store service peers are available yet, neither Store service set up for the node"
)
proc getPeerExchangeHealth(hm: NodeHealthMonitor): ProtocolHealth =
var p = ProtocolHealth.init(WakuProtocol.PeerExchangeProtocol)
@ -294,8 +276,6 @@ proc getSyncProtocolHealthInfo*(
return hm.getRelayHealth()
of WakuProtocol.StoreProtocol:
return hm.getStoreHealth()
of WakuProtocol.LegacyStoreProtocol:
return hm.getLegacyStoreHealth()
of WakuProtocol.FilterProtocol:
return hm.getFilterHealth(hm.getRelayHealth().health)
of WakuProtocol.LightpushProtocol:
@ -310,8 +290,6 @@ proc getSyncProtocolHealthInfo*(
return hm.getMixHealth()
of WakuProtocol.StoreClientProtocol:
return hm.getStoreClientHealth()
of WakuProtocol.LegacyStoreClientProtocol:
return hm.getLegacyStoreClientHealth()
of WakuProtocol.FilterClientProtocol:
return hm.getFilterClientHealth()
of WakuProtocol.LightpushClientProtocol:
@ -349,7 +327,6 @@ proc getSyncAllProtocolHealthInfo(hm: NodeHealthMonitor): seq[ProtocolHealth] =
protocols.add(hm.getLegacyLightpushHealth(relayHealth.health))
protocols.add(hm.getFilterHealth(relayHealth.health))
protocols.add(hm.getStoreHealth())
protocols.add(hm.getLegacyStoreHealth())
protocols.add(hm.getPeerExchangeHealth())
protocols.add(hm.getRendezvousHealth())
protocols.add(hm.getMixHealth())
@ -357,7 +334,6 @@ proc getSyncAllProtocolHealthInfo(hm: NodeHealthMonitor): seq[ProtocolHealth] =
protocols.add(hm.getLightpushClientHealth())
protocols.add(hm.getLegacyLightpushClientHealth())
protocols.add(hm.getStoreClientHealth())
protocols.add(hm.getLegacyStoreClientHealth())
protocols.add(hm.getFilterClientHealth())
return protocols
@ -697,14 +673,23 @@ proc startHealthMonitor*(hm: NodeHealthMonitor): Result[void, string] =
)
hm.node.wakuRelay.addObserver(hm.relayObserver)
hm.peerEventListener = EventWakuPeer.listen(
hm.peerEventListener = WakuPeerEvent.listen(
hm.node.brokerCtx,
proc(evt: EventWakuPeer): Future[void] {.async: (raises: []), gcsafe.} =
proc(evt: WakuPeerEvent): Future[void] {.async: (raises: []), gcsafe.} =
## Recompute health on any peer changing anything (join, leave, identify, metadata update)
hm.healthUpdateEvent.fire(),
).valueOr:
return err("Failed to subscribe to peer events: " & error)
hm.shardHealthListener = EventShardTopicHealthChange.listen(
hm.node.brokerCtx,
proc(
evt: EventShardTopicHealthChange
): Future[void] {.async: (raises: []), gcsafe.} =
hm.healthUpdateEvent.fire(),
).valueOr:
return err("Failed to subscribe to shard health events: " & error)
hm.healthUpdateEvent = newAsyncEvent()
hm.healthUpdateEvent.fire()
@ -724,8 +709,8 @@ proc stopHealthMonitor*(hm: NodeHealthMonitor) {.async.} =
if not isNil(hm.healthLoopFut):
await hm.healthLoopFut.cancelAndWait()
if hm.peerEventListener.id != 0:
EventWakuPeer.dropListener(hm.node.brokerCtx, hm.peerEventListener)
WakuPeerEvent.dropListener(hm.node.brokerCtx, hm.peerEventListener)
EventShardTopicHealthChange.dropListener(hm.node.brokerCtx, hm.shardHealthListener)
if not isNil(hm.node.wakuRelay) and not isNil(hm.relayObserver):
hm.node.wakuRelay.removeObserver(hm.relayObserver)

View File

@ -24,7 +24,6 @@ import
waku_core,
waku_core/topics/sharding,
waku_filter_v2,
waku_archive_legacy,
waku_archive,
waku_store_sync,
waku_rln_relay,
@ -81,11 +80,6 @@ proc registerRelayHandler(
await node.wakuFilter.handleMessage(topic, msg)
proc archiveHandler(topic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
if not node.wakuLegacyArchive.isNil():
## we try to store with legacy archive
await node.wakuLegacyArchive.handleMessage(topic, msg)
return
if node.wakuArchive.isNil():
return

View File

@ -20,17 +20,13 @@ import
import
../waku_node,
../../waku_core,
../../waku_store_legacy/protocol as legacy_store,
../../waku_store_legacy/client as legacy_store_client,
../../waku_store_legacy/common as legacy_store_common,
../../waku_store/protocol as store,
../../waku_store/client as store_client,
../../waku_store/common as store_common,
../../waku_store/resume,
../peer_manager,
../../common/rate_limit/setting,
../../waku_archive,
../../waku_archive_legacy
../../waku_archive
logScope:
topics = "waku node store api"
@ -50,157 +46,6 @@ proc mountArchive*(
return ok()
proc mountLegacyArchive*(
node: WakuNode, driver: waku_archive_legacy.ArchiveDriver
): Result[void, string] =
node.wakuLegacyArchive = waku_archive_legacy.WakuArchive.new(driver = driver).valueOr:
return err("error in mountLegacyArchive: " & error)
return ok()
## Legacy Waku Store
# TODO: Review this mapping logic. Maybe, move it to the appplication code
proc toArchiveQuery(
request: legacy_store_common.HistoryQuery
): waku_archive_legacy.ArchiveQuery =
waku_archive_legacy.ArchiveQuery(
pubsubTopic: request.pubsubTopic,
contentTopics: request.contentTopics,
cursor: request.cursor.map(
proc(cursor: HistoryCursor): waku_archive_legacy.ArchiveCursor =
waku_archive_legacy.ArchiveCursor(
pubsubTopic: cursor.pubsubTopic,
senderTime: cursor.senderTime,
storeTime: cursor.storeTime,
digest: cursor.digest,
)
),
startTime: request.startTime,
endTime: request.endTime,
pageSize: request.pageSize.uint,
direction: request.direction,
requestId: request.requestId,
)
# TODO: Review this mapping logic. Maybe, move it to the appplication code
proc toHistoryResult*(
res: waku_archive_legacy.ArchiveResult
): legacy_store_common.HistoryResult =
let response = res.valueOr:
case error.kind
of waku_archive_legacy.ArchiveErrorKind.DRIVER_ERROR,
waku_archive_legacy.ArchiveErrorKind.INVALID_QUERY:
return err(HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: error.cause))
else:
return err(HistoryError(kind: HistoryErrorKind.UNKNOWN))
return ok(
HistoryResponse(
messages: response.messages,
cursor: response.cursor.map(
proc(cursor: waku_archive_legacy.ArchiveCursor): HistoryCursor =
HistoryCursor(
pubsubTopic: cursor.pubsubTopic,
senderTime: cursor.senderTime,
storeTime: cursor.storeTime,
digest: cursor.digest,
)
),
)
)
proc mountLegacyStore*(
node: WakuNode, rateLimit: RateLimitSetting = DefaultGlobalNonRelayRateLimit
) {.async.} =
info "mounting waku legacy store protocol"
if node.wakuLegacyArchive.isNil():
error "failed to mount waku legacy store protocol", error = "waku archive not set"
return
# TODO: Review this handler logic. Maybe, move it to the appplication code
let queryHandler: HistoryQueryHandler = proc(
request: HistoryQuery
): Future[legacy_store_common.HistoryResult] {.async.} =
if request.cursor.isSome():
?request.cursor.get().checkHistCursor()
let request = request.toArchiveQuery()
let response = await node.wakuLegacyArchive.findMessagesV2(request)
return response.toHistoryResult()
node.wakuLegacyStore = legacy_store.WakuStore.new(
node.peerManager, node.rng, queryHandler, some(rateLimit)
)
if node.started:
# Node has started already. Let's start store too.
await node.wakuLegacyStore.start()
node.switch.mount(
node.wakuLegacyStore, protocolMatcher(legacy_store_common.WakuLegacyStoreCodec)
)
proc mountLegacyStoreClient*(node: WakuNode) =
info "mounting legacy store client"
node.wakuLegacyStoreClient =
legacy_store_client.WakuStoreClient.new(node.peerManager, node.rng)
proc query*(
node: WakuNode, query: legacy_store_common.HistoryQuery, peer: RemotePeerInfo
): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {.
async, gcsafe
.} =
## Queries known nodes for historical messages
if node.wakuLegacyStoreClient.isNil():
return err("waku legacy store client is nil")
let response = (await node.wakuLegacyStoreClient.query(query, peer)).valueOr:
return err("legacy store client query error: " & $error)
return ok(response)
# TODO: Move to application module (e.g., wakunode2.nim)
proc query*(
node: WakuNode, query: legacy_store_common.HistoryQuery
): Future[legacy_store_common.WakuStoreResult[legacy_store_common.HistoryResponse]] {.
async, gcsafe, deprecated: "Use 'node.query()' with peer destination instead"
.} =
## Queries known nodes for historical messages
if node.wakuLegacyStoreClient.isNil():
return err("waku legacy store client is nil")
let peerOpt = node.peerManager.selectPeer(legacy_store_common.WakuLegacyStoreCodec)
if peerOpt.isNone():
error "no suitable remote peers"
return err("peer_not_found_failure")
return await node.query(query, peerOpt.get())
when defined(waku_exp_store_resume):
# TODO: Move to application module (e.g., wakunode2.nim)
proc resume*(
node: WakuNode, peerList: Option[seq[RemotePeerInfo]] = none(seq[RemotePeerInfo])
) {.async, gcsafe.} =
## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku node has been online
## for resume to work properly the waku node must have the store protocol mounted in the full mode (i.e., persisting messages)
## messages are stored in the wakuStore's messages field and in the message db
## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message
## an offset of 20 second is added to the time window to count for nodes asynchrony
## peerList indicates the list of peers to query from. The history is fetched from the first available peer in this list. Such candidates should be found through a discovery method (to be developed).
## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from.
## The history gets fetched successfully if the dialed peer has been online during the queried time window.
if node.wakuLegacyStoreClient.isNil():
return
let retrievedMessages = (await node.wakuLegacyStoreClient.resume(peerList)).valueOr:
error "failed to resume store", error = error
return
info "the number of retrieved messages since the last online time: ",
number = retrievedMessages.value
## Waku Store
proc toArchiveQuery(request: StoreQueryRequest): waku_archive.ArchiveQuery =

View File

@ -215,31 +215,34 @@ proc loadFromStorage(pm: PeerManager) {.gcsafe.} =
trace "recovered peers from storage", amount = amount
proc selectPeer*(
proc selectPeers*(
pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic)
): Option[RemotePeerInfo] =
# Selects the best peer for a given protocol
): seq[RemotePeerInfo] =
## Returns all peers that support the given protocol (and optionally shard),
## shuffled randomly. Callers can further filter or pick from this list.
var peers = pm.switch.peerStore.getPeersByProtocol(proto)
trace "Selecting peer from peerstore",
protocol = proto, peers, address = cast[uint](pm.switch.peerStore)
trace "Selecting peers from peerstore",
protocol = proto, num_peers = peers.len, address = cast[uint](pm.switch.peerStore)
if shard.isSome():
# Parse the shard from the pubsub topic to get cluster and shard ID
let shardInfo = RelayShard.parse(shard.get()).valueOr:
trace "Failed to parse shard from pubsub topic", topic = shard.get()
return none(RemotePeerInfo)
return @[]
# Filter peers that support the requested shard
# Check both ENR (if present) and the shards field on RemotePeerInfo
peers.keepItIf(
# Check ENR if available
(it.enr.isSome() and it.enr.get().containsShard(shard.get())) or
# Otherwise check the shards field directly
(it.shards.len > 0 and it.shards.contains(shardInfo.shardId))
(it.shards.len > 0 and it.shards.contains(shardInfo.shardId))
)
shuffle(peers)
return peers
proc selectPeer*(
pm: PeerManager, proto: string, shard: Option[PubsubTopic] = none(PubsubTopic)
): Option[RemotePeerInfo] =
## Selects a single peer for a given protocol, checking service slots first
## (for non-relay protocols).
let peers = pm.selectPeers(proto, shard)
# No criteria for selecting a peer for WakuRelay, random one
if proto == WakuRelayCodec:
@ -742,7 +745,7 @@ proc refreshPeerMetadata(pm: PeerManager, peerId: PeerId) {.async.} =
# TODO: should only trigger an event if metadata actually changed
# should include the shard subscription delta in the event when
# it is a MetadataUpdated event
EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventMetadataUpdated)
WakuPeerEvent.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventMetadataUpdated)
return
info "disconnecting from peer", peerId = peerId, reason = reason
@ -787,7 +790,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
asyncSpawn(pm.switch.disconnect(peerId))
peerStore.delete(peerId)
EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventConnected)
WakuPeerEvent.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventConnected)
if not pm.onConnectionChange.isNil():
# we don't want to await for the callback to finish
@ -804,7 +807,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
pm.ipTable.del(ip)
break
EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventDisconnected)
WakuPeerEvent.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventDisconnected)
if not pm.onConnectionChange.isNil():
# we don't want to await for the callback to finish
@ -812,7 +815,7 @@ proc onPeerEvent(pm: PeerManager, peerId: PeerId, event: PeerEvent) {.async.} =
of PeerEventKind.Identified:
info "event identified", peerId = peerId
EventWakuPeer.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventIdentified)
WakuPeerEvent.emit(pm.brokerCtx, peerId, WakuPeerEventKind.EventIdentified)
peerStore[ConnectionBook][peerId] = connectedness
peerStore[DirectionBook][peerId] = direction

View File

@ -1,7 +1,7 @@
{.push raises: [].}
import
std/[options, tables, strutils, sequtils, os, net, random],
std/[options, tables, strutils, sequtils, os, net, random, sets],
chronos,
chronicles,
metrics,
@ -32,17 +32,12 @@ import
waku_core/topics/sharding,
waku_relay,
waku_archive,
waku_archive_legacy,
waku_store_legacy/protocol as legacy_store,
waku_store_legacy/client as legacy_store_client,
waku_store_legacy/common as legacy_store_common,
waku_store/protocol as store,
waku_store/client as store_client,
waku_store/common as store_common,
waku_store/resume,
waku_store_sync,
waku_filter_v2,
waku_filter_v2/common as filter_common,
waku_filter_v2/client as filter_client,
waku_metadata,
waku_rendezvous/protocol,
@ -64,7 +59,7 @@ import
requests/node_requests,
requests/health_requests,
events/health_events,
events/peer_events,
events/message_events,
],
waku/discovery/waku_kademlia,
./net_config,
@ -99,9 +94,6 @@ const clientId* = "Nimbus Waku v2 node"
const WakuNodeVersionString* = "version / git commit hash: " & git_version
const EdgeTopicHealthyThreshold = 2
## Lightpush server and filter server requirement for a healthy topic in edge mode
# key and crypto modules different
type
# TODO: Move to application instance (e.g., `WakuNode2`)
@ -116,9 +108,6 @@ type
switch*: Switch
wakuRelay*: WakuRelay
wakuArchive*: waku_archive.WakuArchive
wakuLegacyArchive*: waku_archive_legacy.WakuArchive
wakuLegacyStore*: legacy_store.WakuStore
wakuLegacyStoreClient*: legacy_store_client.WakuStoreClient
wakuStore*: store.WakuStore
wakuStoreClient*: store_client.WakuStoreClient
wakuStoreResume*: StoreResume
@ -149,10 +138,6 @@ type
legacyAppHandlers*: Table[PubsubTopic, WakuRelayHandler]
## Kernel API Relay appHandlers (if any)
wakuMix*: WakuMix
edgeTopicsHealth*: Table[PubsubTopic, TopicHealth]
edgeHealthEvent*: AsyncEvent
edgeHealthLoop: Future[void]
peerEventListener*: EventWakuPeerListener
kademliaDiscoveryLoop*: Future[void]
wakuKademlia*: WakuKademlia
@ -505,52 +490,7 @@ proc updateAnnouncedAddrWithPrimaryIpAddr*(node: WakuNode): Result[void, string]
return ok()
proc calculateEdgeTopicHealth(node: WakuNode, shard: PubsubTopic): TopicHealth =
let filterPeers =
node.peerManager.getPeersForShard(filter_common.WakuFilterSubscribeCodec, shard)
let lightpushPeers =
node.peerManager.getPeersForShard(lightpush_protocol.WakuLightPushCodec, shard)
if filterPeers >= EdgeTopicHealthyThreshold and
lightpushPeers >= EdgeTopicHealthyThreshold:
return TopicHealth.SUFFICIENTLY_HEALTHY
elif filterPeers > 0 and lightpushPeers > 0:
return TopicHealth.MINIMALLY_HEALTHY
return TopicHealth.UNHEALTHY
proc loopEdgeHealth(node: WakuNode) {.async.} =
while node.started:
await node.edgeHealthEvent.wait()
node.edgeHealthEvent.clear()
try:
for shard in node.edgeTopicsHealth.keys:
if not node.wakuRelay.isNil and node.wakuRelay.isSubscribed(shard):
continue
let oldHealth = node.edgeTopicsHealth.getOrDefault(shard, TopicHealth.UNHEALTHY)
let newHealth = node.calculateEdgeTopicHealth(shard)
if newHealth != oldHealth:
node.edgeTopicsHealth[shard] = newHealth
EventShardTopicHealthChange.emit(node.brokerCtx, shard, newHealth)
except CancelledError:
break
except CatchableError as e:
warn "Error in edge health check", error = e.msg
# safety cooldown to protect from edge cases
await sleepAsync(100.milliseconds)
proc startProvidersAndListeners*(node: WakuNode) =
node.peerEventListener = EventWakuPeer.listen(
node.brokerCtx,
proc(evt: EventWakuPeer) {.async: (raises: []), gcsafe.} =
node.edgeHealthEvent.fire(),
).valueOr:
error "Failed to listen to peer events", error = error
return
RequestRelayShard.setProvider(
node.brokerCtx,
proc(
@ -568,14 +508,23 @@ proc startProvidersAndListeners*(node: WakuNode) =
var response: RequestShardTopicsHealth
for shard in topics:
var healthStatus = TopicHealth.UNHEALTHY
# Health resolution order:
# 1. Relay topicsHealth (computed from gossipsub mesh state)
# 2. If relay is active but topicsHealth hasn't computed yet, UNHEALTHY
# 3. Otherwise, ask edge filter (via broker; no-op if no provider set)
var healthStatus = TopicHealth.NOT_SUBSCRIBED
if not node.wakuRelay.isNil:
healthStatus =
node.wakuRelay.topicsHealth.getOrDefault(shard, TopicHealth.NOT_SUBSCRIBED)
if healthStatus == TopicHealth.NOT_SUBSCRIBED:
healthStatus = node.calculateEdgeTopicHealth(shard)
if not node.wakuRelay.isNil and node.wakuRelay.isSubscribed(shard):
healthStatus = TopicHealth.UNHEALTHY
else:
let edgeRes = RequestEdgeShardHealth.request(node.brokerCtx, shard)
if edgeRes.isOk():
healthStatus = edgeRes.get().health
response.topicHealth.add((shard, healthStatus))
@ -601,9 +550,10 @@ proc startProvidersAndListeners*(node: WakuNode) =
pubsubTopic, TopicHealth.NOT_SUBSCRIBED
)
if topicHealth == TopicHealth.NOT_SUBSCRIBED and
pubsubTopic in node.edgeTopicsHealth:
topicHealth = node.calculateEdgeTopicHealth(pubsubTopic)
if topicHealth == TopicHealth.NOT_SUBSCRIBED:
let edgeRes = RequestEdgeShardHealth.request(node.brokerCtx, pubsubTopic)
if edgeRes.isOk():
topicHealth = edgeRes.get().health
response.contentTopicHealth.add((topic: contentTopic, health: topicHealth))
@ -612,7 +562,6 @@ proc startProvidersAndListeners*(node: WakuNode) =
error "Can't set provider for RequestContentTopicsHealth", error = error
proc stopProvidersAndListeners*(node: WakuNode) =
EventWakuPeer.dropListener(node.brokerCtx, node.peerEventListener)
RequestRelayShard.clearProvider(node.brokerCtx)
RequestContentTopicsHealth.clearProvider(node.brokerCtx)
RequestShardTopicsHealth.clearProvider(node.brokerCtx)
@ -665,13 +614,16 @@ proc start*(node: WakuNode) {.async.} =
## The switch will update addresses after start using the addressMapper
await node.switch.start()
node.edgeHealthEvent = newAsyncEvent()
node.edgeHealthLoop = loopEdgeHealth(node)
node.started = true
if not node.wakuFilterClient.isNil():
node.wakuFilterClient.registerPushHandler(
proc(pubsubTopic: PubsubTopic, msg: WakuMessage) {.async, gcsafe.} =
MessageSeenEvent.emit(node.brokerCtx, pubsubTopic, msg)
)
node.startProvidersAndListeners()
node.started = true
if not zeroPortPresent:
updateAnnouncedAddrWithPrimaryIpAddr(node).isOkOr:
error "failed update announced addr", error = $error
@ -685,10 +637,6 @@ proc stop*(node: WakuNode) {.async.} =
node.stopProvidersAndListeners()
if not node.edgeHealthLoop.isNil:
await node.edgeHealthLoop.cancelAndWait()
node.edgeHealthLoop = nil
await node.switch.stop()
node.peerManager.stop()

View File

@ -37,3 +37,15 @@ RequestBroker:
healthStatus*: ProtocolHealth
proc signature(protocol: WakuProtocol): Future[Result[RequestProtocolHealth, string]]
# Get edge filter health for a single shard (set by DeliveryService when edge mode is active)
RequestBroker(sync):
type RequestEdgeShardHealth* = object
health*: TopicHealth
proc signature(shard: PubsubTopic): Result[RequestEdgeShardHealth, string]
# Get edge filter confirmed peer count (set by DeliveryService when edge mode is active)
RequestBroker(sync):
type RequestEdgeFilterPeerCount* = object
peerCount*: int

View File

@ -12,7 +12,6 @@ import
waku/[
waku_core,
waku_core/topics/pubsub_topic,
waku_store_legacy/common,
waku_store/common,
waku_filter_v2,
waku_lightpush_legacy/common,
@ -172,7 +171,7 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
let peers = populateAdminPeerInfoForCodecs(
node,
@[
WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec, WakuLegacyStoreCodec,
WakuRelayCodec, WakuFilterSubscribeCodec, WakuStoreCodec,
WakuLegacyLightPushCodec, WakuLightPushCodec, WakuPeerExchangeCodec,
WakuReconciliationCodec, WakuTransferCodec,
],
@ -366,8 +365,6 @@ proc installAdminV1GetPeersHandler(router: var RestRouter, node: WakuNode) =
protoStats[WakuFilterPushCodec] =
peers.countIt(it.protocols.contains(WakuFilterPushCodec))
protoStats[WakuStoreCodec] = peers.countIt(it.protocols.contains(WakuStoreCodec))
protoStats[WakuLegacyStoreCodec] =
peers.countIt(it.protocols.contains(WakuLegacyStoreCodec))
protoStats[WakuLightPushCodec] =
peers.countIt(it.protocols.contains(WakuLightPushCodec))
protoStats[WakuLegacyLightPushCodec] =

View File

@ -14,7 +14,6 @@ import
waku/rest_api/endpoint/legacy_lightpush/handlers as rest_legacy_lightpush_endpoint,
waku/rest_api/endpoint/lightpush/handlers as rest_lightpush_endpoint,
waku/rest_api/endpoint/store/handlers as rest_store_endpoint,
waku/rest_api/endpoint/legacy_store/handlers as rest_store_legacy_endpoint,
waku/rest_api/endpoint/health/handlers as rest_health_endpoint,
waku/rest_api/endpoint/admin/handlers as rest_admin_endpoint,
waku/waku_core/topics,
@ -195,7 +194,6 @@ proc startRestServerProtocolSupport*(
none(DiscoveryHandler)
rest_store_endpoint.installStoreApiHandlers(router, node, storeDiscoHandler)
rest_store_legacy_endpoint.installStoreApiHandlers(router, node, storeDiscoHandler)
## Light push API
## Install it either if client is mounted)

View File

@ -1,75 +0,0 @@
{.push raises: [].}
import
chronicles, json_serialization, json_serialization/std/options, presto/[route, client]
import ../../../waku_store_legacy/common, ../serdes, ../responses, ./types
export types
logScope:
topics = "waku node rest legacy store_api"
proc decodeBytes*(
t: typedesc[StoreResponseRest],
data: openArray[byte],
contentType: Opt[ContentTypeData],
): RestResult[StoreResponseRest] =
if MediaType.init($contentType) == MIMETYPE_JSON:
let decoded = ?decodeFromJsonBytes(StoreResponseRest, data)
return ok(decoded)
if MediaType.init($contentType) == MIMETYPE_TEXT:
var res: string
if len(data) > 0:
res = newString(len(data))
copyMem(addr res[0], unsafeAddr data[0], len(data))
return ok(
StoreResponseRest(
messages: newSeq[StoreWakuMessage](0),
cursor: none(HistoryCursorRest),
# field that contain error information
errorMessage: some(res),
)
)
# If everything goes wrong
return err(cstring("Unsupported contentType " & $contentType))
proc getStoreMessagesV1*(
# URL-encoded reference to the store-node
peerAddr: string = "",
pubsubTopic: string = "",
# URL-encoded comma-separated list of content topics
contentTopics: string = "",
startTime: string = "",
endTime: string = "",
# Optional cursor fields
senderTime: string = "",
storeTime: string = "",
digest: string = "", # base64-encoded digest
pageSize: string = "",
ascending: string = "",
): RestResponse[StoreResponseRest] {.
rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet
.}
proc getStoreMessagesV1*(
# URL-encoded reference to the store-node
peerAddr: Option[string],
pubsubTopic: string = "",
# URL-encoded comma-separated list of content topics
contentTopics: string = "",
startTime: string = "",
endTime: string = "",
# Optional cursor fields
senderTime: string = "",
storeTime: string = "",
digest: string = "", # base64-encoded digest
pageSize: string = "",
ascending: string = "",
): RestResponse[StoreResponseRest] {.
rest, endpoint: "/store/v1/messages", meth: HttpMethod.MethodGet
.}

View File

@ -1,246 +0,0 @@
{.push raises: [].}
import
std/[strformat, sugar], results, chronicles, uri, json_serialization, presto/route
import
../../../waku_core,
../../../waku_store_legacy/common,
../../../waku_store_legacy/self_req_handler,
../../../waku_node,
../../../node/peer_manager,
../../../common/paging,
../../handlers,
../responses,
../serdes,
./types
export types
logScope:
topics = "waku node rest legacy store_api"
const futTimeout* = 5.seconds # Max time to wait for futures
const NoPeerNoDiscError* =
RestApiResponse.preconditionFailed("No suitable service peer & no discovery method")
# Queries the store-node with the query parameters and
# returns a RestApiResponse that is sent back to the api client.
proc performHistoryQuery(
selfNode: WakuNode, histQuery: HistoryQuery, storePeer: RemotePeerInfo
): Future[RestApiResponse] {.async.} =
let queryFut = selfNode.query(histQuery, storePeer)
if not await queryFut.withTimeout(futTimeout):
const msg = "No history response received (timeout)"
error msg
return RestApiResponse.internalServerError(msg)
let storeResp = queryFut.read().map(res => res.toStoreResponseRest()).valueOr:
const msg = "Error occurred in queryFut.read()"
error msg, error = error
return RestApiResponse.internalServerError(fmt("{msg} [{error}]"))
let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr:
const msg = "Error building the json respose"
error msg, error = error
return RestApiResponse.internalServerError(fmt("{msg} [{error}]"))
return resp
# Converts a string time representation into an Option[Timestamp].
# Only positive time is considered a valid Timestamp in the request
proc parseTime(input: Option[string]): Result[Option[Timestamp], string] =
if input.isSome() and input.get() != "":
try:
let time = parseInt(input.get())
if time > 0:
return ok(some(Timestamp(time)))
except ValueError:
return err("Problem parsing time [" & getCurrentExceptionMsg() & "]")
return ok(none(Timestamp))
# Generates a history query cursor as per the given params
proc parseCursor(
parsedPubsubTopic: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
): Result[Option[HistoryCursor], string] =
# Parse sender time
let parsedSenderTime = ?parseTime(senderTime)
# Parse store time
let parsedStoreTime = ?parseTime(storeTime)
# Parse message digest
let parsedMsgDigest = ?parseMsgDigest(digest)
# Parse cursor information
if parsedPubsubTopic.isSome() and parsedSenderTime.isSome() and
parsedStoreTime.isSome() and parsedMsgDigest.isSome():
return ok(
some(
HistoryCursor(
pubsubTopic: parsedPubsubTopic.get(),
senderTime: parsedSenderTime.get(),
storeTime: parsedStoreTime.get(),
digest: parsedMsgDigest.get(),
)
)
)
else:
return ok(none(HistoryCursor))
# Creates a HistoryQuery from the given params
proc createHistoryQuery(
pubsubTopic: Option[string],
contentTopics: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
startTime: Option[string],
endTime: Option[string],
pageSize: Option[string],
direction: Option[string],
): Result[HistoryQuery, string] =
# Parse pubsubTopic parameter
var parsedPubsubTopic = none(string)
if pubsubTopic.isSome():
let decodedPubsubTopic = decodeUrl(pubsubTopic.get())
if decodedPubsubTopic != "":
parsedPubsubTopic = some(decodedPubsubTopic)
# Parse the content topics
var parsedContentTopics = newSeq[ContentTopic](0)
if contentTopics.isSome():
let ctList = decodeUrl(contentTopics.get())
if ctList != "":
for ct in ctList.split(','):
parsedContentTopics.add(ct)
# Parse cursor information
let parsedCursor = ?parseCursor(parsedPubsubTopic, senderTime, storeTime, digest)
# Parse page size field
var parsedPagedSize = DefaultPageSize
if pageSize.isSome() and pageSize.get() != "":
try:
parsedPagedSize = uint64(parseInt(pageSize.get()))
except CatchableError:
return err("Problem parsing page size [" & getCurrentExceptionMsg() & "]")
# Parse start time
let parsedStartTime = ?parseTime(startTime)
# Parse end time
let parsedEndTime = ?parseTime(endTime)
# Parse ascending field
var parsedDirection = default()
if direction.isSome() and direction.get() != "":
parsedDirection = direction.get().into()
return ok(
HistoryQuery(
pubsubTopic: parsedPubsubTopic,
contentTopics: parsedContentTopics,
startTime: parsedStartTime,
endTime: parsedEndTime,
direction: parsedDirection,
pageSize: parsedPagedSize,
cursor: parsedCursor,
)
)
# Simple type conversion. The "Option[Result[string, cstring]]"
# type is used by the nim-presto library.
proc toOpt(self: Option[Result[string, cstring]]): Option[string] =
if not self.isSome() or self.get().value == "":
return none(string)
if self.isSome() and self.get().value != "":
return some(self.get().value)
proc retrieveMsgsFromSelfNode(
self: WakuNode, histQuery: HistoryQuery
): Future[RestApiResponse] {.async.} =
## Performs a "store" request to the local node (self node.)
## Notice that this doesn't follow the regular store libp2p channel because a node
## it is not allowed to libp2p-dial a node to itself, by default.
##
let selfResp = (await self.wakuLegacyStore.handleSelfStoreRequest(histQuery)).valueOr:
return RestApiResponse.internalServerError($error)
let storeResp = selfResp.toStoreResponseRest()
let resp = RestApiResponse.jsonResponse(storeResp, status = Http200).valueOr:
const msg = "Error building the json respose"
let e = $error
error msg, error = e
return RestApiResponse.internalServerError(fmt("{msg} [{e}]"))
return resp
# Subscribes the rest handler to attend "/store/v1/messages" requests
proc installStoreApiHandlers*(
router: var RestRouter,
node: WakuNode,
discHandler: Option[DiscoveryHandler] = none(DiscoveryHandler),
) =
# Handles the store-query request according to the passed parameters
router.api(MethodGet, "/store/v1/messages") do(
peerAddr: Option[string],
pubsubTopic: Option[string],
contentTopics: Option[string],
senderTime: Option[string],
storeTime: Option[string],
digest: Option[string],
startTime: Option[string],
endTime: Option[string],
pageSize: Option[string],
ascending: Option[string]
) -> RestApiResponse:
info "REST-GET /store/v1/messages ", peer_addr = $peerAddr
# All the GET parameters are URL-encoded (https://en.wikipedia.org/wiki/URL_encoding)
# Example:
# /store/v1/messages?peerAddr=%2Fip4%2F127.0.0.1%2Ftcp%2F60001%2Fp2p%2F16Uiu2HAmVFXtAfSj4EiR7mL2KvL4EE2wztuQgUSBoj2Jx2KeXFLN\&pubsubTopic=my-waku-topic
# Parse the rest of the parameters and create a HistoryQuery
let histQuery = createHistoryQuery(
pubsubTopic.toOpt(),
contentTopics.toOpt(),
senderTime.toOpt(),
storeTime.toOpt(),
digest.toOpt(),
startTime.toOpt(),
endTime.toOpt(),
pageSize.toOpt(),
ascending.toOpt(),
).valueOr:
return RestApiResponse.badRequest(error)
if peerAddr.isNone() and not node.wakuLegacyStore.isNil():
## The user didn't specify a peer address and self-node is configured as a store node.
## In this case we assume that the user is willing to retrieve the messages stored by
## the local/self store node.
return await node.retrieveMsgsFromSelfNode(histQuery)
# Parse the peer address parameter
let parsedPeerAddr = parseUrlPeerAddr(peerAddr.toOpt()).valueOr:
return RestApiResponse.badRequest(error)
let peerAddr = parsedPeerAddr.valueOr:
node.peerManager.selectPeer(WakuLegacyStoreCodec).valueOr:
let handler = discHandler.valueOr:
return NoPeerNoDiscError
let peerOp = (await handler()).valueOr:
return RestApiResponse.internalServerError($error)
peerOp.valueOr:
return RestApiResponse.preconditionFailed(
"No suitable service peer & none discovered"
)
return await node.performHistoryQuery(histQuery, peerAddr)

View File

@ -1,375 +0,0 @@
{.push raises: [].}
import
std/[sets, strformat, uri],
stew/byteutils,
chronicles,
json_serialization,
json_serialization/std/options,
presto/[route, client, common]
import
../../../waku_store_legacy/common as waku_store_common,
../../../common/base64,
../../../waku_core,
../serdes
#### Types
type
HistoryCursorRest* = object
pubsubTopic*: PubsubTopic
senderTime*: Timestamp
storeTime*: Timestamp
digest*: waku_store_common.MessageDigest
StoreRequestRest* = object
# inspired by https://github.com/waku-org/nwaku/blob/f95147f5b7edfd45f914586f2d41cd18fb0e0d18/waku/v2//waku_store/common.nim#L52
pubsubTopic*: Option[PubsubTopic]
contentTopics*: seq[ContentTopic]
cursor*: Option[HistoryCursorRest]
startTime*: Option[Timestamp]
endTime*: Option[Timestamp]
pageSize*: uint64
ascending*: bool
StoreWakuMessage* = object
payload*: Base64String
contentTopic*: Option[ContentTopic]
version*: Option[uint32]
timestamp*: Option[Timestamp]
ephemeral*: Option[bool]
meta*: Option[Base64String]
StoreResponseRest* = object # inspired by https://rfc.vac.dev/spec/16/#storeresponse
messages*: seq[StoreWakuMessage]
cursor*: Option[HistoryCursorRest]
# field that contains error information
errorMessage*: Option[string]
createJsonFlavor RestJson
Json.setWriter JsonWriter, PreferredOutput = string
#### Type conversion
# Converts a URL-encoded-base64 string into a 'MessageDigest'
proc parseMsgDigest*(
input: Option[string]
): Result[Option[waku_store_common.MessageDigest], string] =
if not input.isSome() or input.get() == "":
return ok(none(waku_store_common.MessageDigest))
let decodedUrl = decodeUrl(input.get())
let base64DecodedArr = ?base64.decode(Base64String(decodedUrl))
var messageDigest = waku_store_common.MessageDigest()
# Next snippet inspired by "nwaku/waku/waku_archive/archive.nim"
# TODO: Improve coherence of MessageDigest type
messageDigest = block:
var data: array[32, byte]
for i in 0 ..< min(base64DecodedArr.len, 32):
data[i] = base64DecodedArr[i]
waku_store_common.MessageDigest(data: data)
return ok(some(messageDigest))
# Converts a given MessageDigest object into a suitable
# Base64-URL-encoded string suitable to be transmitted in a Rest
# request-response. The MessageDigest is first base64 encoded
# and this result is URL-encoded.
proc toRestStringMessageDigest*(self: waku_store_common.MessageDigest): string =
let base64Encoded = base64.encode(self.data)
encodeUrl($base64Encoded)
proc toWakuMessage*(message: StoreWakuMessage): WakuMessage =
WakuMessage(
payload: base64.decode(message.payload).get(),
contentTopic: message.contentTopic.get(),
version: message.version.get(),
timestamp: message.timestamp.get(),
ephemeral: message.ephemeral.get(),
meta: message.meta.get(Base64String("")).decode().get(),
)
# Converts a 'HistoryResponse' object to an 'StoreResponseRest'
# that can be serialized to a json object.
proc toStoreResponseRest*(histResp: HistoryResponse): StoreResponseRest =
proc toStoreWakuMessage(message: WakuMessage): StoreWakuMessage =
StoreWakuMessage(
payload: base64.encode(message.payload),
contentTopic: some(message.contentTopic),
version: some(message.version),
timestamp: some(message.timestamp),
ephemeral: some(message.ephemeral),
meta:
if message.meta.len > 0:
some(base64.encode(message.meta))
else:
none(Base64String),
)
var storeWakuMsgs: seq[StoreWakuMessage]
for m in histResp.messages:
storeWakuMsgs.add(m.toStoreWakuMessage())
var cursor = none(HistoryCursorRest)
if histResp.cursor.isSome:
cursor = some(
HistoryCursorRest(
pubsubTopic: histResp.cursor.get().pubsubTopic,
senderTime: histResp.cursor.get().senderTime,
storeTime: histResp.cursor.get().storeTime,
digest: histResp.cursor.get().digest,
)
)
StoreResponseRest(messages: storeWakuMsgs, cursor: cursor)
## Beginning of StoreWakuMessage serde
proc writeValue*(
writer: var JsonWriter, value: StoreWakuMessage
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("payload", $value.payload)
if value.contentTopic.isSome():
writer.writeField("contentTopic", value.contentTopic.get())
if value.version.isSome():
writer.writeField("version", value.version.get())
if value.timestamp.isSome():
writer.writeField("timestamp", value.timestamp.get())
if value.ephemeral.isSome():
writer.writeField("ephemeral", value.ephemeral.get())
if value.meta.isSome():
writer.writeField("meta", value.meta.get())
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var StoreWakuMessage
) {.gcsafe, raises: [SerializationError, IOError].} =
var
payload = none(Base64String)
contentTopic = none(ContentTopic)
version = none(uint32)
timestamp = none(Timestamp)
ephemeral = none(bool)
meta = none(Base64String)
var keys = initHashSet[string]()
for fieldName in readObjectFields(reader):
# Check for reapeated keys
if keys.containsOrIncl(fieldName):
let err =
try:
fmt"Multiple `{fieldName}` fields found"
except CatchableError:
"Multiple fields with the same name found"
reader.raiseUnexpectedField(err, "StoreWakuMessage")
case fieldName
of "payload":
payload = some(reader.readValue(Base64String))
of "contentTopic":
contentTopic = some(reader.readValue(ContentTopic))
of "version":
version = some(reader.readValue(uint32))
of "timestamp":
timestamp = some(reader.readValue(Timestamp))
of "ephemeral":
ephemeral = some(reader.readValue(bool))
of "meta":
meta = some(reader.readValue(Base64String))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if payload.isNone():
reader.raiseUnexpectedValue("Field `payload` is missing")
value = StoreWakuMessage(
payload: payload.get(),
contentTopic: contentTopic,
version: version,
timestamp: timestamp,
ephemeral: ephemeral,
meta: meta,
)
## End of StoreWakuMessage serde
## Beginning of MessageDigest serde
proc writeValue*(
writer: var JsonWriter, value: waku_store_common.MessageDigest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("data", base64.encode(value.data))
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var waku_store_common.MessageDigest
) {.gcsafe, raises: [SerializationError, IOError].} =
var data = none(seq[byte])
for fieldName in readObjectFields(reader):
case fieldName
of "data":
if data.isSome():
reader.raiseUnexpectedField("Multiple `data` fields found", "MessageDigest")
let decoded = base64.decode(reader.readValue(Base64String)).valueOr:
reader.raiseUnexpectedField("Failed decoding data", "MessageDigest")
data = some(decoded)
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if data.isNone():
reader.raiseUnexpectedValue("Field `data` is missing")
for i in 0 ..< 32:
value.data[i] = data.get()[i]
## End of MessageDigest serde
## Beginning of HistoryCursorRest serde
proc writeValue*(
writer: var JsonWriter, value: HistoryCursorRest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("pubsubTopic", value.pubsubTopic)
writer.writeField("senderTime", value.senderTime)
writer.writeField("storeTime", value.storeTime)
writer.writeField("digest", value.digest)
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var HistoryCursorRest
) {.gcsafe, raises: [SerializationError, IOError].} =
var
pubsubTopic = none(PubsubTopic)
senderTime = none(Timestamp)
storeTime = none(Timestamp)
digest = none(waku_store_common.MessageDigest)
for fieldName in readObjectFields(reader):
case fieldName
of "pubsubTopic":
if pubsubTopic.isSome():
reader.raiseUnexpectedField(
"Multiple `pubsubTopic` fields found", "HistoryCursorRest"
)
pubsubTopic = some(reader.readValue(PubsubTopic))
of "senderTime":
if senderTime.isSome():
reader.raiseUnexpectedField(
"Multiple `senderTime` fields found", "HistoryCursorRest"
)
senderTime = some(reader.readValue(Timestamp))
of "storeTime":
if storeTime.isSome():
reader.raiseUnexpectedField(
"Multiple `storeTime` fields found", "HistoryCursorRest"
)
storeTime = some(reader.readValue(Timestamp))
of "digest":
if digest.isSome():
reader.raiseUnexpectedField(
"Multiple `digest` fields found", "HistoryCursorRest"
)
digest = some(reader.readValue(waku_store_common.MessageDigest))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if pubsubTopic.isNone():
reader.raiseUnexpectedValue("Field `pubsubTopic` is missing")
if senderTime.isNone():
reader.raiseUnexpectedValue("Field `senderTime` is missing")
if storeTime.isNone():
reader.raiseUnexpectedValue("Field `storeTime` is missing")
if digest.isNone():
reader.raiseUnexpectedValue("Field `digest` is missing")
value = HistoryCursorRest(
pubsubTopic: pubsubTopic.get(),
senderTime: senderTime.get(),
storeTime: storeTime.get(),
digest: digest.get(),
)
## End of HistoryCursorRest serde
## Beginning of StoreResponseRest serde
proc writeValue*(
writer: var JsonWriter, value: StoreResponseRest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
writer.writeField("messages", value.messages)
if value.cursor.isSome():
writer.writeField("cursor", value.cursor.get())
if value.errorMessage.isSome():
writer.writeField("errorMessage", value.errorMessage.get())
writer.endRecord()
proc readValue*(
reader: var JsonReader, value: var StoreResponseRest
) {.gcsafe, raises: [SerializationError, IOError].} =
var
messages = none(seq[StoreWakuMessage])
cursor = none(HistoryCursorRest)
errorMessage = none(string)
for fieldName in readObjectFields(reader):
case fieldName
of "messages":
if messages.isSome():
reader.raiseUnexpectedField(
"Multiple `messages` fields found", "StoreResponseRest"
)
messages = some(reader.readValue(seq[StoreWakuMessage]))
of "cursor":
if cursor.isSome():
reader.raiseUnexpectedField(
"Multiple `cursor` fields found", "StoreResponseRest"
)
cursor = some(reader.readValue(HistoryCursorRest))
of "errorMessage":
if errorMessage.isSome():
reader.raiseUnexpectedField(
"Multiple `errorMessage` fields found", "StoreResponseRest"
)
errorMessage = some(reader.readValue(string))
else:
reader.raiseUnexpectedField("Unrecognided field", cstring(fieldName))
if messages.isNone():
reader.raiseUnexpectedValue("Field `messages` is missing")
value = StoreResponseRest(
messages: messages.get(), cursor: cursor, errorMessage: errorMessage
)
## End of StoreResponseRest serde
## Beginning of StoreRequestRest serde
proc writeValue*(
writer: var JsonWriter, value: StoreRequestRest
) {.gcsafe, raises: [IOError].} =
writer.beginRecord()
if value.pubsubTopic.isSome():
writer.writeField("pubsubTopic", value.pubsubTopic.get())
writer.writeField("contentTopics", value.contentTopics)
if value.startTime.isSome():
writer.writeField("startTime", value.startTime.get())
if value.endTime.isSome():
writer.writeField("endTime", value.endTime.get())
writer.writeField("pageSize", value.pageSize)
writer.writeField("ascending", value.ascending)
writer.endRecord()
## End of StoreRequestRest serde

View File

@ -1,6 +0,0 @@
import
./waku_archive_legacy/common,
./waku_archive_legacy/archive,
./waku_archive_legacy/driver
export common, archive, driver

View File

@ -1,285 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/[times, options, sequtils, algorithm],
stew/byteutils,
chronicles,
chronos,
metrics,
results
import
../common/paging,
./driver,
../waku_core,
../waku_core/message/digest,
./common,
./archive_metrics
logScope:
topics = "waku archive"
const
DefaultPageSize*: uint = 20
MaxPageSize*: uint = 100
# Retention policy
WakuArchiveDefaultRetentionPolicyInterval* = chronos.minutes(30)
# Metrics reporting
WakuArchiveDefaultMetricsReportInterval* = chronos.minutes(30)
# Message validation
# 20 seconds maximum allowable sender timestamp "drift"
MaxMessageTimestampVariance* = getNanoSecondTime(20)
# Validation hook: returns ok() when a message may be archived, err(label)
# otherwise (the label doubles as a metrics label value).
type MessageValidator* =
  proc(msg: WakuMessage): Result[void, string] {.closure, gcsafe, raises: [].}
## Archive
type WakuArchive* = ref object
  driver: ArchiveDriver ## storage backend implementing the driver interface
  validator: MessageValidator ## decides which messages get stored
proc validate*(msg: WakuMessage): Result[void, string] =
  ## Default archive validator: rejects ephemeral messages and messages whose
  ## sender timestamp drifts more than MaxMessageTimestampVariance from the
  ## local clock; messages without a timestamp are accepted.
  if msg.ephemeral:
    # Ephemeral message, do not store
    # NOTE(review): bare `return` yields the default-initialized Result here
    # rather than an explicit ok()/err() — presumably callers treat that as a
    # rejection; confirm against the Result implementation in use.
    return
  if msg.timestamp == 0:
    # No sender timestamp to validate against.
    return ok()
  let
    now = getNanosecondTime(getTime().toUnixFloat())
    lowerBound = now - MaxMessageTimestampVariance
    upperBound = now + MaxMessageTimestampVariance
  if msg.timestamp < lowerBound:
    # Sender clock too far in the past.
    return err(invalidMessageOld)
  if upperBound < msg.timestamp:
    # Sender clock too far in the future.
    return err(invalidMessageFuture)
  return ok()
proc new*(
    T: type WakuArchive, driver: ArchiveDriver, validator: MessageValidator = validate
): Result[T, string] =
  ## Builds a WakuArchive around `driver`; fails when the driver is nil.
  if driver.isNil():
    return err("archive driver is Nil")
  return ok(WakuArchive(driver: driver, validator: validator))
proc handleMessage*(
    self: WakuArchive, pubsubTopic: PubsubTopic, msg: WakuMessage
) {.async.} =
  ## Validates `msg` and, when accepted, persists it through the driver.
  ## Failures are recorded in metrics and logs; the proc never raises.
  let
    msgDigest = computeDigest(msg)
    msgDigestHex = msgDigest.data.to0xHex()
    msgHash = computeMessageHash(pubsubTopic, msg)
    msgHashHex = msgHash.to0xHex()
    # Fall back to local receive time when the sender did not set a timestamp.
    msgTimestamp =
      if msg.timestamp > 0:
        msg.timestamp
      else:
        getNanosecondTime(getTime().toUnixFloat())

  trace "handling message",
    msg_hash = msgHashHex,
    pubsubTopic = pubsubTopic,
    contentTopic = msg.contentTopic,
    msgTimestamp = msg.timestamp,
    digest = msgDigestHex

  # Drop messages rejected by the validator (ephemeral / timestamp drift).
  self.validator(msg).isOkOr:
    waku_legacy_archive_errors.inc(labelValues = [error])
    trace "invalid message",
      msg_hash = msgHashHex,
      pubsubTopic = pubsubTopic,
      contentTopic = msg.contentTopic,
      timestamp = msg.timestamp,
      error = error
    return

  let insertStartTime = getTime().toUnixFloat()

  (await self.driver.put(pubsubTopic, msg, msgDigest, msgHash, msgTimestamp)).isOkOr:
    waku_legacy_archive_errors.inc(labelValues = [insertFailure])
    error "failed to insert message",
      msg_hash = msgHashHex,
      pubsubTopic = pubsubTopic,
      contentTopic = msg.contentTopic,
      timestamp = msg.timestamp,
      error = error
    return

  # Record how long the driver insert took.
  let insertDuration = getTime().toUnixFloat() - insertStartTime
  waku_legacy_archive_insert_duration_seconds.observe(insertDuration)

  info "message archived",
    msg_hash = msgHashHex,
    pubsubTopic = pubsubTopic,
    contentTopic = msg.contentTopic,
    msgTimestamp = msg.timestamp,
    digest = msgDigestHex,
    insertDuration = insertDuration
proc findMessages*(
    self: WakuArchive, query: ArchiveQuery
): Future[ArchiveResult] {.async, gcsafe.} =
  ## Search the archive to return a single page of messages matching the query criteria
  # Clamp the requested page size to [1, MaxPageSize], defaulting when 0.
  let maxPageSize =
    if query.pageSize <= 0:
      DefaultPageSize
    else:
      min(query.pageSize, MaxPageSize)

  let isAscendingOrder = query.direction.into()

  if query.contentTopics.len > 10:
    return err(ArchiveError.invalidQuery("too many content topics"))

  if query.cursor.isSome() and query.cursor.get().hash.len != 32:
    return err(ArchiveError.invalidQuery("invalid cursor hash length"))

  let queryStartTime = getTime().toUnixFloat()

  # Request one extra row; its presence signals that another page exists.
  let rows = (
    await self.driver.getMessages(
      includeData = query.includeData,
      contentTopic = query.contentTopics,
      pubsubTopic = query.pubsubTopic,
      cursor = query.cursor,
      startTime = query.startTime,
      endTime = query.endTime,
      hashes = query.hashes,
      maxPageSize = maxPageSize + 1,
      ascendingOrder = isAscendingOrder,
      requestId = query.requestId,
    )
  ).valueOr:
    return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))

  let queryDuration = getTime().toUnixFloat() - queryStartTime
  waku_legacy_archive_query_duration_seconds.observe(queryDuration)

  var hashes = newSeq[WakuMessageHash]()
  var messages = newSeq[WakuMessage]()
  var topics = newSeq[PubsubTopic]()
  var cursor = none(ArchiveCursor)

  if rows.len == 0:
    return ok(ArchiveResponse(hashes: hashes, messages: messages, cursor: cursor))

  ## Messages
  let pageSize = min(rows.len, int(maxPageSize))

  # Full message data is only returned when explicitly requested;
  # hashes are always part of the response.
  if query.includeData:
    topics = rows[0 ..< pageSize].mapIt(it[0])
    messages = rows[0 ..< pageSize].mapIt(it[1])

  hashes = rows[0 ..< pageSize].mapIt(it[4])

  ## Cursor
  if rows.len > int(maxPageSize):
    ## Build last message cursor
    ## The cursor is built from the last message INCLUDED in the response
    ## (i.e. the second last message in the rows list)
    let (pubsubTopic, message, digest, storeTimestamp, hash) = rows[^2]

    cursor = some(
      ArchiveCursor(
        digest: MessageDigest.fromBytes(digest),
        storeTime: storeTimestamp,
        sendertime: message.timestamp,
        pubsubTopic: pubsubTopic,
        hash: hash,
      )
    )

  # All messages MUST be returned in chronological order
  if not isAscendingOrder:
    reverse(hashes)
    reverse(messages)
    reverse(topics)

  return ok(
    ArchiveResponse(hashes: hashes, messages: messages, topics: topics, cursor: cursor)
  )
proc findMessagesV2*(
    self: WakuArchive, query: ArchiveQuery
): Future[ArchiveResult] {.async, deprecated, gcsafe.} =
  ## Search the archive to return a single page of messages matching the query criteria
  # Clamp the requested page size to [1, MaxPageSize], defaulting when 0.
  let maxPageSize =
    if query.pageSize <= 0:
      DefaultPageSize
    else:
      min(query.pageSize, MaxPageSize)

  let isAscendingOrder = query.direction.into()

  if query.contentTopics.len > 10:
    return err(ArchiveError.invalidQuery("too many content topics"))

  let queryStartTime = getTime().toUnixFloat()

  # Request one extra row; its presence signals that another page exists.
  let rows = (
    await self.driver.getMessagesV2(
      contentTopic = query.contentTopics,
      pubsubTopic = query.pubsubTopic,
      cursor = query.cursor,
      startTime = query.startTime,
      endTime = query.endTime,
      maxPageSize = maxPageSize + 1,
      ascendingOrder = isAscendingOrder,
      requestId = query.requestId,
    )
  ).valueOr:
    return err(ArchiveError(kind: ArchiveErrorKind.DRIVER_ERROR, cause: error))

  let queryDuration = getTime().toUnixFloat() - queryStartTime
  waku_legacy_archive_query_duration_seconds.observe(queryDuration)

  var messages = newSeq[WakuMessage]()
  var cursor = none(ArchiveCursor)

  if rows.len == 0:
    return ok(ArchiveResponse(messages: messages, cursor: cursor))

  ## Messages
  let pageSize = min(rows.len, int(maxPageSize))

  messages = rows[0 ..< pageSize].mapIt(it[1])

  ## Cursor
  if rows.len > int(maxPageSize):
    ## Build last message cursor
    ## The cursor is built from the last message INCLUDED in the response
    ## (i.e. the second last message in the rows list)
    let (pubsubTopic, message, digest, storeTimestamp, _) = rows[^2]

    cursor = some(
      ArchiveCursor(
        digest: MessageDigest.fromBytes(digest),
        storeTime: storeTimestamp,
        sendertime: message.timestamp,
        pubsubTopic: pubsubTopic,
      )
    )

  # All messages MUST be returned in chronological order
  if not isAscendingOrder:
    reverse(messages)

  return ok(ArchiveResponse(messages: messages, cursor: cursor))

View File

@ -1,22 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import metrics
# Prometheus collectors exposed by the legacy (Store v2) archive.
declarePublicGauge waku_legacy_archive_messages,
  "number of historical messages", ["type"]

declarePublicCounter waku_legacy_archive_errors,
  "number of store protocol errors", ["type"]

declarePublicHistogram waku_legacy_archive_insert_duration_seconds,
  "message insertion duration"

declarePublicHistogram waku_legacy_archive_query_duration_seconds,
  "history query duration"

# Error types (metric label values)
const
  invalidMessageOld* = "invalid_message_too_old"
  invalidMessageFuture* = "invalid_message_future_timestamp"
  insertFailure* = "insert_failure"
  retPolicyFailure* = "retpolicy_failure"

View File

@ -1,88 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/options, results, stew/byteutils, stew/arrayops, nimcrypto/sha2
import ../waku_core, ../common/paging
## Waku message digest
type MessageDigest* = MDigest[256]
proc fromBytes*(T: type MessageDigest, src: seq[byte]): T =
  ## Builds a MessageDigest from a raw byte sequence.
  ## `src` must hold exactly 32 bytes; copyFrom would otherwise silently
  ## zero-pad shorter input or truncate longer input.
  var data: array[32, byte]
  let byteCount = copyFrom[byte](data, src)
  # doAssert (not assert) so the length invariant also holds in release
  # builds compiled with -d:danger, where plain `assert` is stripped.
  doAssert byteCount == 32
  return MessageDigest(data: data)
proc computeDigest*(msg: WakuMessage): MessageDigest =
  ## Returns the sha256 digest over the message's content topic and payload.
  var hashCtx: sha256
  hashCtx.init()
  defer:
    hashCtx.clear()

  hashCtx.update(msg.contentTopic.toBytes())
  hashCtx.update(msg.payload)

  # Computes the hash
  return hashCtx.finish()
## API types
type
  #TODO Once Store v2 is removed, the cursor becomes the hash of the last message
  ArchiveCursor* = object
    ## Pagination cursor identifying the last message of the previous page.
    digest*: MessageDigest ## digest of the message (see computeDigest)
    storeTime*: Timestamp ## timestamp assigned by the storing node
    senderTime*: Timestamp ## timestamp claimed by the sender
    pubsubTopic*: PubsubTopic
    hash*: WakuMessageHash ## message hash used for Store v3 paging

  ArchiveQuery* = object
    ## Filter criteria for a single-page history lookup.
    includeData*: bool # indicate if messages should be returned in addition to hashes.
    pubsubTopic*: Option[PubsubTopic]
    contentTopics*: seq[ContentTopic]
    cursor*: Option[ArchiveCursor]
    startTime*: Option[Timestamp]
    endTime*: Option[Timestamp]
    hashes*: seq[WakuMessageHash]
    pageSize*: uint
    direction*: PagingDirection
    requestId*: string

  ArchiveResponse* = object
    ## One page of query results plus the cursor for the next page (if any).
    hashes*: seq[WakuMessageHash]
    messages*: seq[WakuMessage]
    topics*: seq[PubsubTopic]
    cursor*: Option[ArchiveCursor]

  ArchiveErrorKind* {.pure.} = enum
    UNKNOWN = uint32(0)
    DRIVER_ERROR = uint32(1)
    INVALID_QUERY = uint32(2)

  ArchiveError* = object
    ## Error returned by archive queries; variant object keyed on the kind.
    case kind*: ArchiveErrorKind
    of DRIVER_ERROR, INVALID_QUERY:
      # TODO: Add an enum to be able to distinguish between error causes
      cause*: string
    else:
      discard

  ArchiveResult* = Result[ArchiveResponse, ArchiveError]
proc `$`*(err: ArchiveError): string =
  ## Human-readable rendering of an ArchiveError.
  case err.kind
  of ArchiveErrorKind.UNKNOWN:
    result = "UNKNOWN"
  of ArchiveErrorKind.DRIVER_ERROR:
    result = "DRIVER_ERROR: " & err.cause
  of ArchiveErrorKind.INVALID_QUERY:
    result = "INVALID_QUERY: " & err.cause
proc invalidQuery*(T: type ArchiveError, cause: string): T =
  ## Convenience constructor for an INVALID_QUERY archive error.
  result = ArchiveError(kind: ArchiveErrorKind.INVALID_QUERY, cause: cause)

View File

@ -1,121 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/options, results, chronos
import ../waku_core, ./common
const DefaultPageSize*: uint = 25
type
ArchiveDriverResult*[T] = Result[T, string]
ArchiveDriver* = ref object of RootObj
#TODO Once Store v2 is removed keep only messages and hashes
type ArchiveRow* = (PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)
# ArchiveDriver interface
method put*(
    driver: ArchiveDriver,
    pubsubTopic: PubsubTopic,
    message: WakuMessage,
    digest: MessageDigest,
    messageHash: WakuMessageHash,
    receivedTime: Timestamp,
): Future[ArchiveDriverResult[void]] {.base, async.} =
  ## Persists one message; base stub overridden by concrete drivers.
  discard

method getAllMessages*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} =
  ## Returns every stored row; base stub overridden by concrete drivers.
  discard

method getMessagesV2*(
    driver: ArchiveDriver,
    contentTopic = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, deprecated, async.} =
  ## Legacy (Store v2) filtered query; base stub.
  discard

method getMessages*(
    driver: ArchiveDriver,
    includeData = true,
    contentTopic = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes = newSeq[WakuMessageHash](0),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId = "",
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.base, async.} =
  ## Filtered page query (hashes always, full data when includeData); base stub.
  discard
method getMessagesCount*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[int64]] {.base, async.} =
  ## Total number of stored messages; base stub.
  discard

method getPagesCount*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[int64]] {.base, async.} =
  ## Database page-count metric (driver-specific); base stub.
  discard

method getPagesSize*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[int64]] {.base, async.} =
  ## Database page-size metric (driver-specific); base stub.
  discard

method getDatabaseSize*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[int64]] {.base, async.} =
  ## Database size (presumably bytes; driver-specific); base stub.
  discard

method performVacuum*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[void]] {.base, async.} =
  ## Runs the driver's storage-compaction/vacuum operation; base stub.
  discard

method getOldestMessageTimestamp*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[Timestamp]] {.base, async.} =
  ## Timestamp of the oldest stored message; base stub.
  discard

method getNewestMessageTimestamp*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[Timestamp]] {.base, async.} =
  ## Timestamp of the newest stored message; base stub.
  discard

method deleteMessagesOlderThanTimestamp*(
    driver: ArchiveDriver, ts: Timestamp
): Future[ArchiveDriverResult[void]] {.base, async.} =
  ## Deletes messages older than `ts`; base stub.
  discard

method deleteOldestMessagesNotWithinLimit*(
    driver: ArchiveDriver, limit: int
): Future[ArchiveDriverResult[void]] {.base, async.} =
  ## Trims storage so at most `limit` (presumably newest) messages remain; base stub.
  discard

method decreaseDatabaseSize*(
    driver: ArchiveDriver, targetSizeInBytes: int64, forceRemoval: bool = false
): Future[ArchiveDriverResult[void]] {.base, async.} =
  ## Shrinks storage toward `targetSizeInBytes`, optionally forcing removal; base stub.
  discard

method close*(
    driver: ArchiveDriver
): Future[ArchiveDriverResult[void]] {.base, async.} =
  ## Releases driver resources; base stub.
  discard

method existsTable*(
    driver: ArchiveDriver, tableName: string
): Future[ArchiveDriverResult[bool]] {.base, async.} =
  ## Whether `tableName` exists in the underlying database; base stub.
  discard

View File

@ -1,89 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import results, chronicles, chronos
import
../driver,
../../common/databases/dburl,
../../common/databases/db_sqlite,
../../common/error_handling,
./sqlite_driver,
./sqlite_driver/migrations as archive_driver_sqlite_migrations,
./queue_driver
export sqlite_driver, queue_driver
when defined(postgres):
import ## These imports add dependency with an external libpq library
./postgres_driver
export postgres_driver
proc new*(
    T: type ArchiveDriver,
    url: string,
    vacuum: bool,
    migrate: bool,
    maxNumConn: int,
    onFatalErrorAction: OnFatalErrorHandler,
): Future[Result[T, string]] {.async.} =
  ## Factory selecting the concrete archive driver from the db URL scheme:
  ## "sqlite" -> SqliteDriver, "postgres" -> PostgresDriver (when compiled
  ## with -d:postgres), anything else -> in-memory QueueDriver.
  ##
  ## url - string that defines the database
  ## vacuum - if true, a cleanup operation will be applied to the database
  ## migrate - if true, the database schema will be updated
  ## maxNumConn - defines the maximum number of connections to handle simultaneously (Postgres)
  ## onFatalErrorAction - called if, e.g., the connection with db got lost
  dburl.validateDbUrl(url).isOkOr:
    return err("DbUrl failure in ArchiveDriver.new: " & error)

  let engine = dburl.getDbEngine(url).valueOr:
    return err("error getting db engine in setupWakuArchiveDriver: " & error)

  case engine
  of "sqlite":
    let path = dburl.getDbPath(url).valueOr:
      return err("error get path in setupWakuArchiveDriver: " & error)

    let db = SqliteDatabase.new(path).valueOr:
      return err("error in setupWakuArchiveDriver: " & error)

    # SQLite vacuum
    let (pageSize, pageCount, freelistCount) = db.gatherSqlitePageStats().valueOr:
      return err("error while gathering sqlite stats: " & $error)

    info "sqlite database page stats",
      pageSize = pageSize, pages = pageCount, freePages = freelistCount

    # Only vacuum when there are pages and free pages to reclaim.
    if vacuum and (pageCount > 0 and freelistCount > 0):
      db.performSqliteVacuum().isOkOr:
        return err("error in vacuum sqlite: " & $error)

    # Database migration
    if migrate:
      archive_driver_sqlite_migrations.migrate(db).isOkOr:
        return err("error in migrate sqlite: " & $error)

    info "setting up sqlite waku archive driver"
    let res = SqliteDriver.new(db).valueOr:
      return err("failed to init sqlite archive driver: " & error)

    return ok(res)
  of "postgres":
    when defined(postgres):
      let driver = PostgresDriver.new(
        dbUrl = url,
        maxConnections = maxNumConn,
        onFatalErrorAction = onFatalErrorAction,
      ).valueOr:
        return err("failed to init postgres archive driver: " & error)

      return ok(driver)
    else:
      return err(
        "Postgres has been configured but not been compiled. Check compiler definitions."
      )
  else:
    info "setting up in-memory waku archive driver"
    let driver = QueueDriver.new() # Defaults to a capacity of 25.000 messages
    return ok(driver)

View File

@ -1,8 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import ./postgres_driver/postgres_driver
export postgres_driver

View File

@ -1,976 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
std/[options, sequtils, strutils, strformat, times],
stew/[byteutils, arrayops],
results,
chronos,
db_connector/[postgres, db_common],
chronicles
import
../../../common/error_handling,
../../../waku_core,
../../common,
../../driver,
./postgres_healthcheck,
../../../common/databases/db_postgres as waku_postgres
type PostgresDriver* = ref object of ArchiveDriver
## Establish a separate pools for read/write operations
writeConnPool: PgAsyncPool
readConnPool: PgAsyncPool
const InsertRowStmtName = "InsertRow"
const InsertRowStmtDefinition = # TODO: get the sql queries from a file
"""INSERT INTO messages (id, messageHash, contentTopic, payload, pubsubTopic,
version, timestamp, meta) VALUES ($1, $2, $3, $4, $5, $6, $7, CASE WHEN $8 = '' THEN NULL ELSE $8 END) ON CONFLICT DO NOTHING;"""
const InsertRowInMessagesLookupStmtName = "InsertRowMessagesLookup"
const InsertRowInMessagesLookupStmtDefinition =
"""INSERT INTO messages_lookup (messageHash, timestamp) VALUES ($1, $2) ON CONFLICT DO NOTHING;"""
const SelectNoCursorAscStmtName = "SelectWithoutCursorAsc"
const SelectNoCursorAscStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
timestamp >= $4 AND
timestamp <= $5
ORDER BY timestamp ASC, messageHash ASC LIMIT $6;"""
const SelectNoCursorDescStmtName = "SelectWithoutCursorDesc"
const SelectNoCursorDescStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
timestamp >= $4 AND
timestamp <= $5
ORDER BY timestamp DESC, messageHash DESC LIMIT $6;"""
const SelectWithCursorDescStmtName = "SelectWithCursorDesc"
const SelectWithCursorDescStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
(timestamp, messageHash) < ($4,$5) AND
timestamp >= $6 AND
timestamp <= $7
ORDER BY timestamp DESC, messageHash DESC LIMIT $8;"""
const SelectWithCursorAscStmtName = "SelectWithCursorAsc"
const SelectWithCursorAscStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
messageHash IN ($2) AND
pubsubTopic = $3 AND
(timestamp, messageHash) > ($4,$5) AND
timestamp >= $6 AND
timestamp <= $7
ORDER BY timestamp ASC, messageHash ASC LIMIT $8;"""
const SelectMessageByHashName = "SelectMessageByHash"
const SelectMessageByHashDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages WHERE messageHash = $1"""
const SelectNoCursorV2AscStmtName = "SelectWithoutCursorV2Asc"
const SelectNoCursorV2AscStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
timestamp >= $3 AND
timestamp <= $4
ORDER BY timestamp ASC LIMIT $5;"""
const SelectNoCursorV2DescStmtName = "SelectWithoutCursorV2Desc"
const SelectNoCursorV2DescStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
timestamp >= $3 AND
timestamp <= $4
ORDER BY timestamp DESC LIMIT $5;"""
const SelectWithCursorV2DescStmtName = "SelectWithCursorV2Desc"
const SelectWithCursorV2DescStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
(timestamp, id) < ($3,$4) AND
timestamp >= $5 AND
timestamp <= $6
ORDER BY timestamp DESC LIMIT $7;"""
const SelectWithCursorV2AscStmtName = "SelectWithCursorV2Asc"
const SelectWithCursorV2AscStmtDef =
"""SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages
WHERE contentTopic IN ($1) AND
pubsubTopic = $2 AND
(timestamp, id) > ($3,$4) AND
timestamp >= $5 AND
timestamp <= $6
ORDER BY timestamp ASC LIMIT $7;"""
const DefaultMaxNumConns = 50
proc new*(
    T: type PostgresDriver,
    dbUrl: string,
    maxConnections = DefaultMaxNumConns,
    onFatalErrorAction: OnFatalErrorHandler = nil,
): ArchiveDriverResult[T] =
  ## Creates a PostgresDriver with separate read and write connection pools,
  ## each sized to half of `maxConnections`.
  ## Very simplistic split of max connections
  let poolCapacity = int(maxConnections / 2)

  let readPool = PgAsyncPool.new(dbUrl, poolCapacity).valueOr:
    return err("error creating read conn pool PgAsyncPool")
  let writePool = PgAsyncPool.new(dbUrl, poolCapacity).valueOr:
    return err("error creating write conn pool PgAsyncPool")

  # Connectivity watchdogs only make sense when a fatal-error handler exists.
  if not isNil(onFatalErrorAction):
    asyncSpawn checkConnectivity(readPool, onFatalErrorAction)
  if not isNil(onFatalErrorAction):
    asyncSpawn checkConnectivity(writePool, onFatalErrorAction)

  return ok(PostgresDriver(writeConnPool: writePool, readConnPool: readPool))
proc reset*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} =
  ## Clear the database partitions
  # Shrink to zero bytes and force removal of all partitions.
  return await s.decreaseDatabaseSize(targetSizeInBytes = 0, forceRemoval = true)
proc rowCallbackImpl(
    pqResult: ptr PGresult,
    outRows: var seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)],
) =
  ## Proc aimed to contain the logic of the callback passed to the pg async pool.
  ## That callback is used in "SELECT" queries.
  ##
  ## pqResult - contains the query results
  ## outRows - seq of Store-rows. This is populated from the info contained in pqResult
  let numFields = pqResult.pqnfields()
  if numFields != 8:
    error "Wrong number of fields, expected 8", numFields
    return

  for iRow in 0 ..< pqResult.pqNtuples():
    var wakuMessage: WakuMessage
    var timestamp: Timestamp
    var version: uint
    var pubSubTopic: string
    var contentTopic: string
    var digest: string
    var payload: string
    var hashHex: string
    var msgHash: WakuMessageHash
    var meta: string

    try:
      contentTopic = $(pqgetvalue(pqResult, iRow, 0))
      payload = parseHexStr($(pqgetvalue(pqResult, iRow, 1)))
      pubSubTopic = $(pqgetvalue(pqResult, iRow, 2))
      version = parseUInt($(pqgetvalue(pqResult, iRow, 3)))
      timestamp = parseInt($(pqgetvalue(pqResult, iRow, 4)))
      digest = parseHexStr($(pqgetvalue(pqResult, iRow, 5)))
      hashHex = parseHexStr($(pqgetvalue(pqResult, iRow, 6)))
      meta = parseHexStr($(pqgetvalue(pqResult, iRow, 7)))
      msgHash = fromBytes(hashHex.toOpenArrayByte(0, 31))
    except ValueError:
      # Fix: previously a row that failed to parse was still appended with
      # partially-initialized fields; skip it instead of emitting garbage.
      error "could not parse correctly", error = getCurrentExceptionMsg()
      continue

    wakuMessage.timestamp = timestamp
    wakuMessage.version = uint32(version)
    wakuMessage.contentTopic = contentTopic
    wakuMessage.payload = @(payload.toOpenArrayByte(0, payload.high))
    wakuMessage.meta = @(meta.toOpenArrayByte(0, meta.high))

    outRows.add(
      (
        pubSubTopic,
        wakuMessage,
        @(digest.toOpenArrayByte(0, digest.high)),
        timestamp,
        msgHash,
      )
    )
method put*(
    s: PostgresDriver,
    pubsubTopic: PubsubTopic,
    message: WakuMessage,
    digest: MessageDigest,
    messageHash: WakuMessageHash,
    receivedTime: Timestamp,
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Inserts one message into the `messages` table and mirrors its hash and
  ## timestamp into `messages_lookup`. Binary values are bound as hex strings
  ## to the prepared insert statements; conflicts are ignored (ON CONFLICT
  ## DO NOTHING in the statement definitions).
  let digest = byteutils.toHex(digest.data)
  let messageHash = byteutils.toHex(messageHash)
  let contentTopic = message.contentTopic
  let payload = byteutils.toHex(message.payload)
  let version = $message.version
  let timestamp = $message.timestamp
  let meta = byteutils.toHex(message.meta)

  trace "put PostgresDriver", timestamp = timestamp

  (
    await s.writeConnPool.runStmt(
      InsertRowStmtName,
      InsertRowStmtDefinition,
      @[
        digest, messageHash, contentTopic, payload, pubsubTopic, version, timestamp,
        meta,
      ],
      @[
        int32(digest.len),
        int32(messageHash.len),
        int32(contentTopic.len),
        int32(payload.len),
        int32(pubsubTopic.len),
        int32(version.len),
        int32(timestamp.len),
        int32(meta.len),
      ],
      @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
    )
  ).isOkOr:
    return err("could not put msg in messages table: " & $error)

  ## Now add the row to messages_lookup
  return await s.writeConnPool.runStmt(
    InsertRowInMessagesLookupStmtName,
    InsertRowInMessagesLookupStmtDefinition,
    @[messageHash, timestamp],
    @[int32(messageHash.len), int32(timestamp.len)],
    @[int32(0), int32(0)],
  )
method getAllMessages*(
    s: PostgresDriver
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Retrieve all messages from the store.
  # Rows are accumulated by the shared row-parsing callback.
  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
  proc rowCallback(pqResult: ptr PGresult) =
    rowCallbackImpl(pqResult, rows)

  # Full-table scan ordered by timestamp ascending.
  (
    await s.readConnPool.pgQuery(
      """SELECT contentTopic,
               payload, pubsubTopic, version, timestamp,
               id, messageHash, meta FROM messages ORDER BY timestamp ASC""",
      newSeq[string](0),
      rowCallback,
    )
  ).isOkOr:
    return err("failed in query: " & $error)

  return ok(rows)
proc getMessagesArbitraryQuery(
    s: PostgresDriver,
    contentTopic: seq[ContentTopic] = @[],
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hexHashes: seq[string] = @[],
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## This proc allows to handle atypical queries. We don't use prepared statements for those.
  var query =
    """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages"""
  var statements: seq[string]
  var args: seq[string]

  # Build the WHERE clause incrementally; every filter is optional.
  if contentTopic.len > 0:
    let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")"
    statements.add(cstmt)
    for t in contentTopic:
      args.add(t)

  if hexHashes.len > 0:
    let cstmt = "messageHash IN (" & "?".repeat(hexHashes.len).join(",") & ")"
    statements.add(cstmt)
    for t in hexHashes:
      args.add(t)

  if pubsubTopic.isSome():
    statements.add("pubsubTopic = ?")
    args.add(pubsubTopic.get())

  if cursor.isSome():
    # Resolve the cursor hash to its stored timestamp first, then page
    # strictly beyond that (timestamp, messageHash) pair.
    let hashHex = byteutils.toHex(cursor.get().hash)

    var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
    proc entreeCallback(pqResult: ptr PGresult) =
      rowCallbackImpl(pqResult, entree)

    (
      await s.readConnPool.runStmt(
        SelectMessageByHashName,
        SelectMessageByHashDef,
        @[hashHex],
        @[int32(hashHex.len)],
        @[int32(0)],
        entreeCallback,
        requestId,
      )
    ).isOkOr:
      return err("failed to run query with cursor: " & $error)

    if entree.len == 0:
      # Unknown cursor hash: nothing to page from.
      return ok(entree)

    let storetime = entree[0][3]

    let comp = if ascendingOrder: ">" else: "<"
    statements.add("(timestamp, messageHash) " & comp & " (?,?)")
    args.add($storetime)
    args.add(hashHex)

  if startTime.isSome():
    statements.add("timestamp >= ?")
    args.add($startTime.get())

  if endTime.isSome():
    statements.add("timestamp <= ?")
    args.add($endTime.get())

  if statements.len > 0:
    query &= " WHERE " & statements.join(" AND ")

  var direction: string
  if ascendingOrder:
    direction = "ASC"
  else:
    direction = "DESC"

  query &= " ORDER BY timestamp " & direction & ", messageHash " & direction

  query &= " LIMIT ?"
  args.add($maxPageSize)

  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
  proc rowCallback(pqResult: ptr PGresult) =
    rowCallbackImpl(pqResult, rows)

  (await s.readConnPool.pgQuery(query, args, rowCallback, requestId)).isOkOr:
    return err("failed to run query: " & $error)

  return ok(rows)
proc getMessagesV2ArbitraryQuery(
    s: PostgresDriver,
    contentTopic: seq[ContentTopic] = @[],
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
  ## This proc allows to handle atypical queries. We don't use prepared statements for those.
  var query =
    """SELECT contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta FROM messages"""
  var statements: seq[string]
  var args: seq[string]

  # Build the WHERE clause incrementally; every filter is optional.
  if contentTopic.len > 0:
    let cstmt = "contentTopic IN (" & "?".repeat(contentTopic.len).join(",") & ")"
    statements.add(cstmt)
    for t in contentTopic:
      args.add(t)

  if pubsubTopic.isSome():
    statements.add("pubsubTopic = ?")
    args.add(pubsubTopic.get())

  if cursor.isSome():
    # V2 pages on the (timestamp, id) pair taken straight from the cursor.
    let comp = if ascendingOrder: ">" else: "<"
    statements.add("(timestamp, id) " & comp & " (?,?)")
    args.add($cursor.get().storeTime)
    args.add(toHex(cursor.get().digest.data))

  if startTime.isSome():
    statements.add("timestamp >= ?")
    args.add($startTime.get())

  if endTime.isSome():
    statements.add("timestamp <= ?")
    args.add($endTime.get())

  if statements.len > 0:
    query &= " WHERE " & statements.join(" AND ")

  var direction: string
  if ascendingOrder:
    direction = "ASC"
  else:
    direction = "DESC"

  query &= " ORDER BY timestamp " & direction & ", id " & direction

  query &= " LIMIT ?"
  args.add($maxPageSize)

  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
  proc rowCallback(pqResult: ptr PGresult) =
    rowCallbackImpl(pqResult, rows)

  (await s.readConnPool.pgQuery(query, args, rowCallback, requestId)).isOkOr:
    return err("failed to run query: " & $error)

  return ok(rows)
proc getMessagesPreparedStmt(
    s: PostgresDriver,
    contentTopic: string,
    pubsubTopic: PubsubTopic,
    cursor = none(ArchiveCursor),
    startTime: Timestamp,
    endTime: Timestamp,
    hashes: string,
    maxPageSize = DefaultPageSize,
    ascOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## This proc aims to run the most typical queries in a more performant way, i.e. by means of
  ## prepared statements.
  ##
  ## contentTopic - string with list of content topics. e.g: "'ctopic1','ctopic2','ctopic3'"
  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
  proc rowCallback(pqResult: ptr PGresult) =
    rowCallbackImpl(pqResult, rows)

  let startTimeStr = $startTime
  let endTimeStr = $endTime
  let limit = $maxPageSize

  if cursor.isSome():
    # Two-step paging: resolve the cursor hash to its timestamp first, then
    # select rows strictly beyond the (timestamp, messageHash) pair.
    let hash = byteutils.toHex(cursor.get().hash)

    var entree: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
    proc entreeCallback(pqResult: ptr PGresult) =
      rowCallbackImpl(pqResult, entree)

    (
      await s.readConnPool.runStmt(
        SelectMessageByHashName,
        SelectMessageByHashDef,
        @[hash],
        @[int32(hash.len)],
        @[int32(0)],
        entreeCallback,
        requestId,
      )
    ).isOkOr:
      return err("failed to run query with cursor: " & $error)

    if entree.len == 0:
      # Unknown cursor hash: nothing to page from.
      return ok(entree)

    let timestamp = $entree[0][3]

    var stmtName =
      if ascOrder: SelectWithCursorAscStmtName else: SelectWithCursorDescStmtName
    var stmtDef =
      if ascOrder: SelectWithCursorAscStmtDef else: SelectWithCursorDescStmtDef

    (
      await s.readConnPool.runStmt(
        stmtName,
        stmtDef,
        @[
          contentTopic, hashes, pubsubTopic, timestamp, hash, startTimeStr, endTimeStr,
          limit,
        ],
        @[
          int32(contentTopic.len),
          int32(hashes.len),
          int32(pubsubTopic.len),
          int32(timestamp.len),
          int32(hash.len),
          int32(startTimeStr.len),
          int32(endTimeStr.len),
          int32(limit.len),
        ],
        @[
          int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)
        ],
        rowCallback,
        requestId,
      )
    ).isOkOr:
      return err("failed to run query with cursor: " & $error)
  else:
    var stmtName =
      if ascOrder: SelectNoCursorAscStmtName else: SelectNoCursorDescStmtName
    var stmtDef = if ascOrder: SelectNoCursorAscStmtDef else: SelectNoCursorDescStmtDef

    (
      await s.readConnPool.runStmt(
        stmtName,
        stmtDef,
        @[contentTopic, hashes, pubsubTopic, startTimeStr, endTimeStr, limit],
        @[
          int32(contentTopic.len),
          int32(hashes.len),
          int32(pubsubTopic.len),
          int32(startTimeStr.len),
          int32(endTimeStr.len),
          int32(limit.len),
        ],
        @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
        rowCallback,
        requestId,
      )
    ).isOkOr:
      return err("failed to run query without cursor: " & $error)

  return ok(rows)
proc getMessagesV2PreparedStmt(
    s: PostgresDriver,
    contentTopic: string,
    pubsubTopic: PubsubTopic,
    cursor = none(ArchiveCursor),
    startTime: Timestamp,
    endTime: Timestamp,
    maxPageSize = DefaultPageSize,
    ascOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
  ## This proc aims to run the most typical queries in a more performant way, i.e. by means of
  ## prepared statements.
  ##
  ## contentTopic - string with list of content topics. e.g: "'ctopic1','ctopic2','ctopic3'"
  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
  proc rowCallback(pqResult: ptr PGresult) =
    rowCallbackImpl(pqResult, rows)

  let startTimeStr = $startTime
  let endTimeStr = $endTime
  let limit = $maxPageSize

  if cursor.isSome():
    # V2 paging keys directly off the cursor's (storeTime, digest) pair.
    var stmtName =
      if ascOrder: SelectWithCursorV2AscStmtName else: SelectWithCursorV2DescStmtName
    var stmtDef =
      if ascOrder: SelectWithCursorV2AscStmtDef else: SelectWithCursorV2DescStmtDef

    let digest = byteutils.toHex(cursor.get().digest.data)
    let timestamp = $cursor.get().storeTime

    (
      await s.readConnPool.runStmt(
        stmtName,
        stmtDef,
        @[contentTopic, pubsubTopic, timestamp, digest, startTimeStr, endTimeStr, limit],
        @[
          int32(contentTopic.len),
          int32(pubsubTopic.len),
          int32(timestamp.len),
          int32(digest.len),
          int32(startTimeStr.len),
          int32(endTimeStr.len),
          int32(limit.len),
        ],
        @[int32(0), int32(0), int32(0), int32(0), int32(0), int32(0), int32(0)],
        rowCallback,
        requestId,
      )
    ).isOkOr:
      return err("failed to run query with cursor: " & $error)
  else:
    var stmtName =
      if ascOrder: SelectNoCursorV2AscStmtName else: SelectNoCursorV2DescStmtName
    var stmtDef =
      if ascOrder: SelectNoCursorV2AscStmtDef else: SelectNoCursorV2DescStmtDef

    (
      await s.readConnPool.runStmt(
        stmtName,
        stmtDef,
        @[contentTopic, pubsubTopic, startTimeStr, endTimeStr, limit],
        @[
          int32(contentTopic.len),
          int32(pubsubTopic.len),
          int32(startTimeStr.len),
          int32(endTimeStr.len),
          int32(limit.len),
        ],
        @[int32(0), int32(0), int32(0), int32(0), int32(0)],
        rowCallback,
        requestId,
      )
    ).isOkOr:
      return err("failed to run query without cursor: " & $error)

  return ok(rows)
proc getMessagesByMessageHashes(
    s: PostgresDriver, hashes: string, maxPageSize: uint, requestId: string
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Retrieves information only filtering by a given messageHashes list.
  ## This proc leverages the messages_lookup table to have better query performance
  ## and only query the desired partitions in the partitioned messages table.
  ##
  ## NOTE(review): `hashes` is spliced verbatim into the SQL text. It is assumed
  ## to be a pre-quoted, comma-separated list built by a trusted caller (see
  ## getMessages) — confirm it can never carry raw user input.
  var query =
    fmt"""
    WITH min_timestamp AS (
      SELECT MIN(timestamp) AS min_ts
      FROM messages_lookup
      WHERE messagehash IN (
        {hashes}
      )
    )
    SELECT contentTopic, payload, pubsubTopic, version, m.timestamp, id, m.messageHash, meta
    FROM messages m
    INNER JOIN
      messages_lookup l
    ON
      m.timestamp = l.timestamp
      AND m.messagehash = l.messagehash
    WHERE
      l.timestamp >= (SELECT min_ts FROM min_timestamp)
      AND l.messagehash IN (
        {hashes}
      )
    ORDER BY
      m.timestamp DESC,
      m.messagehash DESC
    LIMIT {maxPageSize};
    """

  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
  proc rowCallback(pqResult: ptr PGresult) =
    rowCallbackImpl(pqResult, rows)

  (
    await s.readConnPool.pgQuery(
      query = query, rowCallback = rowCallback, requestId = requestId
    )
  ).isOkOr:
    return err("failed to run query: " & $error)

  return ok(rows)
method getMessages*(
    s: PostgresDriver,
    includeData = true,
    contentTopicSeq = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes = newSeq[WakuMessageHash](0),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId = "",
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Dispatches a store query to the most efficient retrieval strategy
  ## depending on which filters are set.
  let hexHashes = hashes.mapIt(toHex(it))

  # Hash-only query: resolved through the messages_lookup table.
  if cursor.isNone() and pubsubTopic.isNone() and contentTopicSeq.len == 0 and
      startTime.isNone() and endTime.isNone() and hexHashes.len > 0:
    return await s.getMessagesByMessageHashes(
      "'" & hexHashes.join("','") & "'", maxPageSize, requestId
    )

  if contentTopicSeq.len == 1 and hexHashes.len == 1 and pubsubTopic.isSome() and
      startTime.isSome() and endTime.isSome():
    ## Considered the most common query. Therefore, we use prepared statements to optimize it.
    return await s.getMessagesPreparedStmt(
      contentTopicSeq.join(","),
      PubsubTopic(pubsubTopic.get()),
      cursor,
      startTime.get(),
      endTime.get(),
      hexHashes.join(","),
      maxPageSize,
      ascendingOrder,
      requestId,
    )
  else:
    ## We will run an atypical query. In this case we don't use prepared statements.
    return await s.getMessagesArbitraryQuery(
      contentTopicSeq, pubsubTopic, cursor, startTime, endTime, hexHashes, maxPageSize,
      ascendingOrder, requestId,
    )
method getMessagesV2*(
    s: PostgresDriver,
    contentTopicSeq = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
  ## Deprecated store-v2 entry point: picks prepared statements for the most
  ## common filter combination and falls back to an ad-hoc query otherwise.
  if contentTopicSeq.len == 1 and pubsubTopic.isSome() and startTime.isSome() and
      endTime.isSome():
    ## Considered the most common query. Therefore, we use prepared statements to optimize it.
    return await s.getMessagesV2PreparedStmt(
      contentTopicSeq.join(","),
      PubsubTopic(pubsubTopic.get()),
      cursor,
      startTime.get(),
      endTime.get(),
      maxPageSize,
      ascendingOrder,
      requestId,
    )
  else:
    ## We will run an atypical query. In this case we don't use prepared statements.
    return await s.getMessagesV2ArbitraryQuery(
      contentTopicSeq, pubsubTopic, cursor, startTime, endTime, maxPageSize,
      ascendingOrder, requestId,
    )
proc getStr(
    s: PostgresDriver, query: string
): Future[ArchiveDriverResult[string]] {.async.} =
  ## Runs `query` and returns the single string value it is expected to yield.
  var singleValue: string

  proc extractSingleField(pqResult: ptr PGresult) =
    # Only accept a strict 1x1 result set; log and bail out otherwise.
    if pqResult.pqnfields() != 1:
      error "Wrong number of fields in getStr"
      return
    if pqResult.pqNtuples() != 1:
      error "Wrong number of rows in getStr"
      return

    singleValue = $(pqgetvalue(pqResult, 0, 0))

  let queryRes =
    await s.readConnPool.pgQuery(query, newSeq[string](0), extractSingleField)
  if queryRes.isErr():
    return err("failed in getRow: " & queryRes.error)

  return ok(singleValue)
proc getInt(
    s: PostgresDriver, query: string
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Runs `query` and parses its single textual result as an int64.
  let rawValue = (await s.getStr(query)).valueOr:
    return err("could not get str in getInt: " & $error)

  try:
    return ok(int64(parseInt(rawValue)))
  except ValueError:
    return err(
      "exception in getInt, parseInt, str: " & rawValue & " query: " & query &
        " exception: " & getCurrentExceptionMsg()
    )
method getDatabaseSize*(
    s: PostgresDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Returns the size in bytes of the current Postgres database.
  let databaseSize = (await s.getInt("SELECT pg_database_size(current_database())")).valueOr:
    return err("error in getDatabaseSize: " & error)
  # getInt already yields an int64, so no `int64(...)` re-conversion is needed.
  return ok(databaseSize)
method getMessagesCount*(
    s: PostgresDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Total number of rows in the legacy `messages` table.
  let intRes = (await s.getInt("SELECT COUNT(1) FROM messages")).valueOr:
    return err("error in getMessagesCount: " & error)
  return ok(intRes)

method getOldestMessageTimestamp*(
    s: PostgresDriver
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
  ## Intentionally unimplemented: the legacy store is being deprecated.
  return err("not implemented because legacy will get deprecated")

method getNewestMessageTimestamp*(
    s: PostgresDriver
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
  ## Timestamp of the most recent message, taken from MAX(timestamp).
  let intRes = (await s.getInt("SELECT MAX(timestamp) FROM messages")).valueOr:
    return err("error in getNewestMessageTimestamp: " & error)
  return ok(Timestamp(intRes))
method deleteOldestMessagesNotWithinLimit*(
    s: PostgresDriver, limit: int
): Future[ArchiveDriverResult[void]] {.async.} =
  ## No-op. Will be completely removed when deprecating store legacy.
  # Original implementation kept for reference:
  # let execRes = await s.writeConnPool.pgQuery(
  #   """DELETE FROM messages WHERE id NOT IN
  #     (
  #       SELECT id FROM messages ORDER BY timestamp DESC LIMIT ?
  #     );""",
  #   @[$limit],
  # )
  # if execRes.isErr():
  #   return err("error in deleteOldestMessagesNotWithinLimit: " & execRes.error)
  return ok()

method close*(s: PostgresDriver): Future[ArchiveDriverResult[void]] {.async.} =
  ## Close the database connection.
  # Both pools are closed before either result is checked, so a failure on the
  # write pool does not leave the read pool open.
  let writeCloseRes = await s.writeConnPool.close()
  let readCloseRes = await s.readConnPool.close()

  writeCloseRes.isOkOr:
    return err("error closing write pool: " & $error)

  readCloseRes.isOkOr:
    return err("error closing read pool: " & $error)

  return ok()
proc sleep*(
    s: PostgresDriver, seconds: int
): Future[ArchiveDriverResult[void]] {.async.} =
  # This is for testing purposes only. It is aimed to test the proper
  # implementation of asynchronous requests. It merely triggers a sleep in the
  # database for the amount of seconds given as a parameter.
  proc rowCallback(result: ptr PGresult) =
    ## We are not interested in any value in this case
    discard

  try:
    let params = @[$seconds]
    (await s.writeConnPool.pgQuery("SELECT pg_sleep(?)", params, rowCallback)).isOkOr:
      return err("error in postgres_driver sleep: " & $error)
  except DbError:
    # This always raises an exception although the sleep works
    return err("exception sleeping: " & getCurrentExceptionMsg())

  return ok()
proc performWriteQuery*(
    s: PostgresDriver, query: string
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Executes `query`, which is expected to mutate database state.
  let writeRes = await s.writeConnPool.pgQuery(query)
  if writeRes.isErr():
    return err("error in performWriteQuery: " & writeRes.error)
  return ok()
method decreaseDatabaseSize*(
    driver: PostgresDriver, targetSizeInBytes: int64, forceRemoval: bool = false
): Future[ArchiveDriverResult[void]] {.async.} =
  ## No-op in the legacy driver. This is completely disabled and only the
  ## non-legacy driver will take care of that.
  # Original partition-pruning loop kept for reference:
  # var dbSize = (await driver.getDatabaseSize()).valueOr:
  #   return err("decreaseDatabaseSize failed to get database size: " & $error)

  # ## database size in bytes
  # var totalSizeOfDB: int64 = int64(dbSize)

  # if totalSizeOfDB <= targetSizeInBytes:
  #   return ok()

  # info "start reducing database size",
  #   targetSize = $targetSizeInBytes, currentSize = $totalSizeOfDB

  # while totalSizeOfDB > targetSizeInBytes and driver.containsAnyPartition():
  #   (await driver.removeOldestPartition(forceRemoval)).isOkOr:
  #     return err(
  #       "decreaseDatabaseSize inside loop failed to remove oldest partition: " & $error
  #     )

  #   dbSize = (await driver.getDatabaseSize()).valueOr:
  #     return
  #       err("decreaseDatabaseSize inside loop failed to get database size: " & $error)

  #   let newCurrentSize = int64(dbSize)
  #   if newCurrentSize == totalSizeOfDB:
  #     return err("the previous partition removal didn't clear database size")

  #   totalSizeOfDB = newCurrentSize

  #   info "reducing database size",
  #     targetSize = $targetSizeInBytes, newCurrentSize = $totalSizeOfDB

  return ok()
method existsTable*(
    s: PostgresDriver, tableName: string
): Future[ArchiveDriverResult[bool]] {.async.} =
  ## Checks `pg_tables` for a table named `tableName`.
  ## NOTE(review): `tableName` is interpolated into the SQL literal — visible
  ## callers only pass compile-time names (e.g. "version"); confirm it is never
  ## built from external input.
  let query: string =
    fmt"""
    SELECT EXISTS (
      SELECT FROM
        pg_tables
      WHERE
        tablename = '{tableName}'
      );
    """
  var exists: string
  proc rowCallback(pqResult: ptr PGresult) =
    # Expect a strict 1x1 result set.
    if pqResult.pqnfields() != 1:
      error "Wrong number of fields in existsTable"
      return

    if pqResult.pqNtuples() != 1:
      error "Wrong number of rows in existsTable"
      return

    exists = $(pqgetvalue(pqResult, 0, 0))

  (await s.readConnPool.pgQuery(query, newSeq[string](0), rowCallback)).isOkOr:
    return err("existsTable failed in getRow: " & $error)

  # Postgres renders booleans as 't'/'f' in text mode.
  return ok(exists == "t")
proc getCurrentVersion*(
    s: PostgresDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Returns the schema version stored in the `version` table,
  ## or 0 when that table does not exist yet.
  let existsVersionTable = (await s.existsTable("version")).valueOr:
    return err("error in getCurrentVersion-existsTable: " & $error)

  if not existsVersionTable:
    return ok(0)

  # Fix: the error context previously said "getMessagesCount" (copy-paste slip),
  # and the plain query literal needs no `fmt`.
  let res = (await s.getInt("SELECT version FROM version")).valueOr:
    return err("error in getCurrentVersion-getInt: " & $error)

  return ok(res)
method deleteMessagesOlderThanTimestamp*(
    s: PostgresDriver, tsNanoSec: Timestamp
): Future[ArchiveDriverResult[void]] {.async.} =
  ## No-op in the legacy driver; original intent kept below for reference.
  ## First of all, let's remove the older partitions so that we can reduce
  ## the database size.
  # (await s.removePartitionsOlderThan(tsNanoSec)).isOkOr:
  #   return err("error while removing older partitions: " & $error)

  # (
  #   await s.writeConnPool.pgQuery(
  #     "DELETE FROM messages WHERE timestamp < " & $tsNanoSec
  #   )
  # ).isOkOr:
  #   return err("error in deleteMessagesOlderThanTimestamp: " & $error)

  return ok()

View File

@ -1,37 +0,0 @@
{.push raises: [].}
import chronos, chronicles, results
import ../../../common/databases/db_postgres, ../../../common/error_handling
## Simple query to validate that the postgres is working and attending requests
const HealthCheckQuery = "SELECT version();"
const CheckConnectivityInterval = 60.seconds
const MaxNumTrials = 20
const TrialInterval = 1.seconds
proc checkConnectivity*(
    connPool: PgAsyncPool, onFatalErrorAction: OnFatalErrorHandler
) {.async.} =
  ## Periodically runs a trivial health-check query against `connPool`.
  ## On failure, it resets the pool and retries up to MaxNumTrials before
  ## reporting a fatal error to the upper layers via `onFatalErrorAction`.
  while true:
    (await connPool.pgQuery(HealthCheckQuery)).isOkOr:
      ## The connection failed once. Let's try reconnecting for a while.
      ## Notice that the 'pgQuery' proc tries to establish a new connection.
      block errorBlock:
        ## Force close all the opened connections. No need to close gracefully.
        (await connPool.resetConnPool()).isOkOr:
          onFatalErrorAction("checkConnectivity legacy resetConnPool error: " & error)

        var numTrial = 0
        while numTrial < MaxNumTrials:
          (await connPool.pgQuery(HealthCheckQuery)).isErrOr:
            ## Connection resumed. Let's go back to the normal healthcheck.
            break errorBlock

          await sleepAsync(TrialInterval)
          numTrial.inc()

        ## The connection couldn't be resumed. Let's inform the upper layers.
        onFatalErrorAction("postgres legacy health check error: " & error)

    await sleepAsync(CheckConnectivityInterval)

View File

@ -1,8 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import ./queue_driver/queue_driver, ./queue_driver/index
export queue_driver, index

View File

@ -1,91 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import nimcrypto/sha2
import ../../../waku_core, ../../common
type Index* = object
  ## This type contains the description of an Index used in the pagination of WakuMessages
  pubsubTopic*: string
  senderTime*: Timestamp # the time at which the message is generated
  receiverTime*: Timestamp # the time at which the message was received (see `compute`)
  digest*: MessageDigest # calculated over payload and content topic
  hash*: WakuMessageHash # message hash over pubsub topic and message
proc compute*(
    T: type Index, msg: WakuMessage, receivedTime: Timestamp, pubsubTopic: PubsubTopic
): T =
  ## Builds the pagination Index for `msg` as received on `pubsubTopic`
  ## at `receivedTime`.
  Index(
    pubsubTopic: pubsubTopic,
    senderTime: msg.timestamp,
    receiverTime: receivedTime,
    digest: computeDigest(msg),
    hash: computeMessageHash(pubsubTopic, msg),
  )
proc toHistoryCursor*(index: Index): ArchiveCursor =
  ## Converts an Index into the equivalent ArchiveCursor.
  ## NOTE: renamed from `tohistoryCursor` to NEP1 camelCase; Nim's
  ## style-insensitive identifiers keep existing call sites working.
  return ArchiveCursor(
    pubsubTopic: index.pubsubTopic,
    senderTime: index.senderTime,
    storeTime: index.receiverTime,
    digest: index.digest,
    hash: index.hash,
  )
proc toIndex*(index: ArchiveCursor): Index =
  ## Inverse conversion: rebuilds an Index from an ArchiveCursor.
  Index(
    pubsubTopic: index.pubsubTopic,
    senderTime: index.senderTime,
    receiverTime: index.storeTime,
    digest: index.digest,
    hash: index.hash,
  )
proc `==`*(x, y: Index): bool =
  ## receiverTime plays no role in index equality.
  # Store v3 queries match on the message hash alone.
  if x.hash == y.hash:
    return true

  # Store v2 semantics: same sender time, digest and pubsub topic.
  return
    x.senderTime == y.senderTime and x.digest == y.digest and
    x.pubsubTopic == y.pubsubTopic
proc cmp*(x, y: Index): int =
  ## compares x and y
  ## returns 0 if they are equal
  ## returns -1 if x < y
  ## returns 1 if x > y
  ##
  ## Default sorting order priority is:
  ## 1. senderTimestamp
  ## 2. receiverTimestamp (a fallback only if senderTimestamp unset on either side, and all other fields unequal)
  ## 3. message digest
  ## 4. pubsubTopic
  if x == y:
    # Quick exit ensures receiver time does not affect index equality
    return 0

  # Timestamp has a higher priority for comparison
  let
    # Use receiverTime where senderTime is unset
    xTimestamp = if x.senderTime == 0: x.receiverTime else: x.senderTime
    yTimestamp = if y.senderTime == 0: y.receiverTime else: y.senderTime

  let timecmp = cmp(xTimestamp, yTimestamp)
  if timecmp != 0:
    return timecmp

  # Continue only when timestamps are equal
  let digestcmp = cmp(x.digest.data, y.digest.data)
  if digestcmp != 0:
    return digestcmp

  return cmp(x.pubsubTopic, y.pubsubTopic)

View File

@ -1,363 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/options, results, stew/sorted_set, chronicles, chronos
import ../../../waku_core, ../../common, ../../driver, ./index
logScope:
topics = "waku archive queue_store"
const QueueDriverDefaultMaxCapacity* = 25_000
type
  # Predicate used to filter stored (index, message) pairs during a page query.
  QueryFilterMatcher =
    proc(index: Index, msg: WakuMessage): bool {.gcsafe, raises: [], closure.}

  QueueDriver* = ref object of ArchiveDriver
    ## Bounded repository for indexed messages
    ##
    ## The store queue will keep messages up to its
    ## configured capacity. As soon as this capacity
    ## is reached and a new message is added, the oldest
    ## item will be removed to make space for the new one.
    ## This implies both a `delete` and `add` operation
    ## for new items.
    # TODO: a circular/ring buffer may be a more efficient implementation
    items: SortedSet[Index, WakuMessage] # sorted set of stored messages
    capacity: int # Maximum amount of messages to keep

  # Error kinds a page query can produce.
  QueueDriverErrorKind {.pure.} = enum
    INVALID_CURSOR

  QueueDriverGetPageResult = Result[seq[ArchiveRow], QueueDriverErrorKind]
proc `$`(error: QueueDriverErrorKind): string =
  ## Renders a queue driver error as its textual tag.
  result =
    case error
    of INVALID_CURSOR: "invalid_cursor"
### Helpers
proc walkToCursor(
    w: SortedSetWalkRef[Index, WakuMessage], startCursor: Index, forward: bool
): SortedSetResult[Index, WakuMessage] =
  ## Walk until we find the cursor, advancing in the requested direction.
  ## Returns the entry at the cursor, or an error result when not found.
  ## TODO: Improve performance here with a binary/tree search
  var nextItem =
    if forward:
      w.first()
    else:
      w.last()

  ## Fast forward until we reach the startCursor
  while nextItem.isOk():
    if nextItem.value.key == startCursor:
      break

    # Not yet at cursor. Continue advancing
    nextItem =
      if forward:
        w.next()
      else:
        w.prev()

  return nextItem
#### API
proc new*(T: type QueueDriver, capacity: int = QueueDriverDefaultMaxCapacity): T =
  ## Creates an empty queue driver bounded to `capacity` messages.
  QueueDriver(items: SortedSet[Index, WakuMessage].init(), capacity: capacity)

proc contains*(driver: QueueDriver, index: Index): bool =
  ## Return `true` if the store queue already contains the `index`, `false` otherwise.
  driver.items.eq(index).isOk()

proc len*(driver: QueueDriver): int {.noSideEffect.} =
  ## Number of messages currently held.
  driver.items.len
proc getPage(
    driver: QueueDriver,
    pageSize: uint = 0,
    forward: bool = true,
    cursor: Option[Index] = none(Index),
    predicate: QueryFilterMatcher = nil,
): QueueDriverGetPageResult {.raises: [].} =
  ## Populate a single page in forward direction
  ## Start at the `startCursor` (exclusive), or first entry (inclusive) if not defined.
  ## Page size must not exceed `maxPageSize`
  ## Each entry must match the `pred`
  var outSeq: seq[ArchiveRow]

  var w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
  defer:
    # The walker is always destroyed, also on the error paths below.
    w.destroy()

  var currentEntry: SortedSetResult[Index, WakuMessage]

  # Find starting entry
  if cursor.isSome():
    w.walkToCursor(cursor.get(), forward).isOkOr:
      return err(QueueDriverErrorKind.INVALID_CURSOR)

    # Advance walker once more, so the cursor entry itself is excluded
    currentEntry =
      if forward:
        w.next()
      else:
        w.prev()
  else:
    # Start from the beginning of the queue
    currentEntry =
      if forward:
        w.first()
      else:
        w.last()

  trace "Starting page query", currentEntry = currentEntry

  ## This loop walks forward over the queue:
  ## 1. from the given cursor (or first/last entry, if not provided)
  ## 2. adds entries matching the predicate function to output page
  ## 3. until either the end of the queue or maxPageSize is reached
  var numberOfItems: uint = 0
  while currentEntry.isOk() and numberOfItems < pageSize:
    trace "Continuing page query",
      currentEntry = currentEntry, numberOfItems = numberOfItems

    let
      key = currentEntry.value.key
      data = currentEntry.value.data

    # A nil predicate matches everything.
    if predicate.isNil() or predicate(key, data):
      numberOfItems += 1
      outSeq.add(
        (key.pubsubTopic, data, @(key.digest.data), key.receiverTime, key.hash)
      )

    currentEntry =
      if forward:
        w.next()
      else:
        w.prev()

  trace "Successfully retrieved page", len = outSeq.len
  return ok(outSeq)
## --- SortedSet accessors ---
iterator fwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
  ## Forward iterator over the entire store queue.
  var walker = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
  var entry = walker.first()
  while entry.isOk():
    yield (entry.value.key, entry.value.data)
    entry = walker.next()
  walker.destroy()

iterator bwdIterator*(driver: QueueDriver): (Index, WakuMessage) =
  ## Backwards iterator over the entire store queue.
  var walker = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
  var entry = walker.last()
  while entry.isOk():
    yield (entry.value.key, entry.value.data)
    entry = walker.prev()
  walker.destroy()
proc first*(driver: QueueDriver): ArchiveDriverResult[Index] =
  ## Index of the oldest (smallest) entry, or an error when the queue is empty.
  var walker = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
  let entry = walker.first()
  walker.destroy()

  if entry.isErr():
    return err("Not found")
  return ok(entry.value.key)

proc last*(driver: QueueDriver): ArchiveDriverResult[Index] =
  ## Index of the newest (largest) entry, or an error when the queue is empty.
  var walker = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
  let entry = walker.last()
  walker.destroy()

  if entry.isErr():
    return err("Not found")
  return ok(entry.value.key)
## --- Queue API ---
proc add*(
    driver: QueueDriver, index: Index, msg: WakuMessage
): ArchiveDriverResult[void] =
  ## Add a message to the queue
  ##
  ## If we're at capacity, we will be removing the oldest (first) item.
  ## Returns err("duplicate") for an already-stored index and err("too_old")
  ## when the queue is full and `index` is older than everything stored.
  if driver.contains(index):
    trace "could not add item to store queue. Index already exists", index = index
    return err("duplicate")

  # TODO: the below delete block can be removed if we convert to circular buffer
  if driver.items.len >= driver.capacity:
    var
      w = SortedSetWalkRef[Index, WakuMessage].init(driver.items)
      firstItem = w.first

    if cmp(index, firstItem.value.key) < 0:
      # When at capacity, we won't add if message index is smaller (older) than our oldest item
      w.destroy # Clean up walker
      return err("too_old")

    discard driver.items.delete(firstItem.value.key)
    w.destroy # better to destroy walker after a delete operation

  driver.items.insert(index).value.data = msg
  return ok()
method put*(
    driver: QueueDriver,
    pubsubTopic: PubsubTopic,
    message: WakuMessage,
    digest: MessageDigest,
    messageHash: WakuMessageHash,
    receivedTime: Timestamp,
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Stores `message` by building its pagination Index from the given metadata
  ## and delegating to `add` (which enforces capacity and uniqueness).
  let idx = Index(
    pubsubTopic: pubsubTopic,
    senderTime: message.timestamp,
    receiverTime: receivedTime,
    digest: digest,
    hash: messageHash,
  )

  return driver.add(idx, message)
method getAllMessages*(
    driver: QueueDriver
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Not supported by the in-memory queue driver.
  # TODO: Implement this message_store method
  return err("interface method not implemented")

method existsTable*(
    driver: QueueDriver, tableName: string
): Future[ArchiveDriverResult[bool]] {.async.} =
  ## Not applicable: the in-memory queue driver has no tables.
  return err("interface method not implemented")
method getMessages*(
    driver: QueueDriver,
    includeData = true,
    contentTopic: seq[ContentTopic] = @[],
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes: seq[WakuMessageHash] = @[],
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId = "",
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Pages through the in-memory queue, returning entries that satisfy every
  ## provided filter (topics, time window and hashes are ANDed together).
  let cursor = cursor.map(toIndex)

  # An unset filter always matches; each set filter can veto an entry.
  let matchesQuery: QueryFilterMatcher =
    func (index: Index, msg: WakuMessage): bool =
      if pubsubTopic.isSome() and index.pubsubTopic != pubsubTopic.get():
        return false
      if contentTopic.len > 0 and msg.contentTopic notin contentTopic:
        return false
      if startTime.isSome() and msg.timestamp < startTime.get():
        return false
      if endTime.isSome() and msg.timestamp > endTime.get():
        return false
      if hashes.len > 0 and index.hash notin hashes:
        return false
      return true

  var pageRes: QueueDriverGetPageResult
  try:
    pageRes = driver.getPage(maxPageSize, ascendingOrder, cursor, matchesQuery)
  except CatchableError, Exception:
    # NOTE(review): catching `Exception` also swallows Defects; consider
    # narrowing to CatchableError — confirm getPage cannot raise otherwise.
    return err(getCurrentExceptionMsg())

  pageRes.isOkOr:
    return err($error)

  return ok(pageRes.value)
method getMessagesCount*(
    driver: QueueDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Number of messages currently stored in the queue.
  return ok(int64(driver.len()))

method getPagesCount*(
    driver: QueueDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## NOTE(review): returns the message count — the in-memory driver has no
  ## notion of database pages.
  return ok(int64(driver.len()))

method getPagesSize*(
    driver: QueueDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## NOTE(review): returns the message count as a stand-in for page size.
  return ok(int64(driver.len()))

method getDatabaseSize*(
    driver: QueueDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## NOTE(review): returns the message count as a stand-in for database size,
  ## not a byte count.
  return ok(int64(driver.len()))

method performVacuum*(
    driver: QueueDriver
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Not applicable to the in-memory queue driver.
  return err("interface method not implemented")
method getOldestMessageTimestamp*(
    driver: QueueDriver
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
  ## Receiver timestamp of the oldest stored message.
  let oldest = driver.first().valueOr:
    return err(error)
  return ok(oldest.receiverTime)

method getNewestMessageTimestamp*(
    driver: QueueDriver
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
  ## Receiver timestamp of the newest stored message.
  let newest = driver.last().valueOr:
    return err(error)
  return ok(newest.receiverTime)
method deleteMessagesOlderThanTimestamp*(
    driver: QueueDriver, ts: Timestamp
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Not supported yet by the in-memory queue driver.
  # TODO: Implement this message_store method
  return err("interface method not implemented")

method deleteOldestMessagesNotWithinLimit*(
    driver: QueueDriver, limit: int
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Not supported yet by the in-memory queue driver.
  # TODO: Implement this message_store method
  return err("interface method not implemented")

method decreaseDatabaseSize*(
    driver: QueueDriver, targetSizeInBytes: int64, forceRemoval: bool = false
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Not supported: capacity is managed by eviction in `add` instead.
  return err("interface method not implemented")

method close*(driver: QueueDriver): Future[ArchiveDriverResult[void]] {.async.} =
  ## Nothing to release for the in-memory queue.
  return ok()

View File

@ -1,8 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import ./sqlite_driver/sqlite_driver
export sqlite_driver

View File

@ -1,11 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import ../../../waku_core, ../../common
# (storeTime, digest bytes, pubsub topic) triple used by the sqlite paging queries.
type DbCursor* = (Timestamp, seq[byte], PubsubTopic)

proc toDbCursor*(c: ArchiveCursor): DbCursor =
  ## Flattens an ArchiveCursor into the tuple form bound into SQL queries.
  (c.storeTime, @(c.digest.data), c.pubsubTopic)

View File

@ -1,71 +0,0 @@
{.push raises: [].}
import
std/[tables, strutils, os], results, chronicles, sqlite3_abi # sqlite3_column_int64
import ../../../common/databases/db_sqlite, ../../../common/databases/common
logScope:
topics = "waku archive migration"
const SchemaVersion* = 9 # increase this when there is an update in the database schema
template projectRoot(): string =
currentSourcePath.rsplit(DirSep, 1)[0] / ".." / ".." / ".." / ".."
const MessageStoreMigrationPath: string = projectRoot / "migrations" / "message_store"
proc isSchemaVersion7*(db: SqliteDatabase): DatabaseResult[bool] =
  ## Temporary proc created to analyse when the table actually belongs to the SchemaVersion 7.
  ##
  ## During many nwaku versions, 0.14.0 until 0.18.0, the SchemaVersion wasn't set or checked.
  ## Docker `nwaku` nodes that start working from these versions, 0.14.0 until 0.18.0, they started
  ## with this discrepancy: `user_version`== 0 (not set) but Message table with SchemaVersion 7.
  ##
  ## We found issues where `user_version` (SchemaVersion) was set to 0 in the database even though
  ## its scheme structure reflected SchemaVersion 7. In those cases, when `nwaku` re-started to
  ## apply the migration scripts (in 0.19.0) the node didn't start properly because it tried to
  ## migrate a database that already had the Schema structure #7, so it failed when changing the PK.
  ##
  ## TODO: This was added in version 0.20.0. We might remove this in version 0.30.0, as we
  ## could consider that many users use +0.20.0.
  var pkColumns = newSeq[string]()
  proc queryRowCallback(s: ptr sqlite3_stmt) =
    let colName = cstring sqlite3_column_text(s, 0)
    pkColumns.add($colName)

  # Schema version 7 is identified by its composite primary key on Message.
  let query =
    """SELECT l.name FROM pragma_table_info("Message") as l WHERE l.pk != 0;"""
  db.query(query, queryRowCallback).isOkOr:
    return err("failed to determine the current SchemaVersion: " & $error)

  if pkColumns == @["pubsubTopic", "id", "storedAt"]:
    return ok(true)
  else:
    info "Not considered schema version 7"
    return ok(false)
proc migrate*(db: SqliteDatabase, targetVersion = SchemaVersion): DatabaseResult[void] =
  ## Compares the `user_version` of the sqlite database with the provided `targetVersion`, then
  ## it runs migration scripts if the `user_version` is outdated. The `migrationScriptsDir` path
  ## points to the directory holding the migrations scripts; once the db is updated, it sets the
  ## `user_version` to the `targetVersion`.
  ##
  ## If no `targetVersion` is provided, it defaults to `SchemaVersion`.
  ##
  ## NOTE: Down migration is not currently supported
  info "starting message store's sqlite database migration"

  let userVersion = ?db.getUserVersion()
  let isSchemaVersion7 = ?db.isSchemaVersion7()

  if userVersion == 0'i64 and isSchemaVersion7:
    info "We found user_version 0 but the database schema reflects the user_version 7"
    ## Force the correct schema version
    ?db.setUserVersion(7)

  migrate(db, targetVersion, migrationsScriptsDir = MessageStoreMigrationPath).isOkOr:
    return err("failed to execute migration scripts: " & error)

  info "finished message store's sqlite database migration"
  return ok()

View File

@ -1,729 +0,0 @@
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/[options, sequtils], stew/byteutils, sqlite3_abi, results
import
../../../common/databases/db_sqlite,
../../../common/databases/common,
../../../waku_core,
./cursor
const DbTable = "Message"
type SqlQueryStr = string
### SQLite column helper methods
proc queryRowWakuMessageCallback(
    s: ptr sqlite3_stmt,
    contentTopicCol, payloadCol, versionCol, senderTimestampCol, metaCol: cint,
): WakuMessage =
  ## Decodes one row of the current statement into a WakuMessage,
  ## reading the columns at the supplied indices.
  let
    # BLOB columns are exposed as a raw pointer plus byte length; copy the
    # bytes out before the statement advances.
    topic = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, contentTopicCol))
    topicLength = sqlite3_column_bytes(s, contentTopicCol)
    contentTopic = string.fromBytes(@(toOpenArray(topic, 0, topicLength - 1)))

    p = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, payloadCol))
    m = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, metaCol))

    payloadLength = sqlite3_column_bytes(s, payloadCol)
    metaLength = sqlite3_column_bytes(s, metaCol)
    payload = @(toOpenArray(p, 0, payloadLength - 1))
    version = sqlite3_column_int64(s, versionCol)
    senderTimestamp = sqlite3_column_int64(s, senderTimestampCol)
    meta = @(toOpenArray(m, 0, metaLength - 1))

  return WakuMessage(
    contentTopic: ContentTopic(contentTopic),
    payload: payload,
    version: uint32(version),
    timestamp: Timestamp(senderTimestamp),
    meta: meta,
  )
proc queryRowReceiverTimestampCallback(
    s: ptr sqlite3_stmt, storedAtCol: cint
): Timestamp =
  ## Reads the receiver (storedAt) timestamp column.
  let storedAt = sqlite3_column_int64(s, storedAtCol)
  return Timestamp(storedAt)

proc queryRowPubsubTopicCallback(
    s: ptr sqlite3_stmt, pubsubTopicCol: cint
): PubsubTopic =
  ## Reads the pubsub topic column (stored as a BLOB) into a string.
  let
    pubsubTopicPointer =
      cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, pubsubTopicCol))
    pubsubTopicLength = sqlite3_column_bytes(s, pubsubTopicCol)
    pubsubTopic =
      string.fromBytes(@(toOpenArray(pubsubTopicPointer, 0, pubsubTopicLength - 1)))

  return pubsubTopic

proc queryRowDigestCallback(s: ptr sqlite3_stmt, digestCol: cint): seq[byte] =
  ## Reads the message digest column as raw bytes.
  let
    digestPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, digestCol))
    digestLength = sqlite3_column_bytes(s, digestCol)
    digest = @(toOpenArray(digestPointer, 0, digestLength - 1))

  return digest

proc queryRowWakuMessageHashCallback(
    s: ptr sqlite3_stmt, hashCol: cint
): WakuMessageHash =
  ## Reads the message hash column into a fixed-size WakuMessageHash.
  let
    hashPointer = cast[ptr UncheckedArray[byte]](sqlite3_column_blob(s, hashCol))
    hashLength = sqlite3_column_bytes(s, hashCol)
    hash = fromBytes(toOpenArray(hashPointer, 0, hashLength - 1))

  return hash
### SQLite queries
## Create table
proc createTableQuery(table: string): SqlQueryStr =
  ## SQL creating the message table, keyed by messageHash (WITHOUT ROWID).
  "CREATE TABLE IF NOT EXISTS " & table & " (" & " pubsubTopic BLOB NOT NULL," &
    " contentTopic BLOB NOT NULL," & " payload BLOB," & " version INTEGER NOT NULL," &
    " timestamp INTEGER NOT NULL," & " id BLOB," & " messageHash BLOB," &
    " storedAt INTEGER NOT NULL," & " meta BLOB," &
    " CONSTRAINT messageIndex PRIMARY KEY (messageHash)" & ") WITHOUT ROWID;"

proc createTable*(db: SqliteDatabase): DatabaseResult[void] =
  ## Creates the message table if it does not exist yet.
  let query = createTableQuery(DbTable)
  discard ?db.query(
    query,
    proc(s: ptr sqlite3_stmt) =
      discard,
  )
  return ok()
## Create indices
proc createOldestMessageTimestampIndexQuery(table: string): SqlQueryStr =
  ## SQL for the storedAt index used by oldest-timestamp lookups.
  "CREATE INDEX IF NOT EXISTS i_ts ON " & table & " (storedAt);"

proc createOldestMessageTimestampIndex*(db: SqliteDatabase): DatabaseResult[void] =
  ## Creates the storedAt index if missing.
  let query = createOldestMessageTimestampIndexQuery(DbTable)
  discard ?db.query(
    query,
    proc(s: ptr sqlite3_stmt) =
      discard,
  )
  return ok()

proc createHistoryQueryIndexQuery(table: string): SqlQueryStr =
  ## SQL for the composite index backing history queries.
  "CREATE INDEX IF NOT EXISTS i_query ON " & table &
    " (contentTopic, pubsubTopic, storedAt, id);"

proc createHistoryQueryIndex*(db: SqliteDatabase): DatabaseResult[void] =
  ## Creates the history-query index if missing.
  let query = createHistoryQueryIndexQuery(DbTable)
  discard ?db.query(
    query,
    proc(s: ptr sqlite3_stmt) =
      discard,
  )
  return ok()
## Insert message

# Positional parameters for the prepared INSERT; the order MUST match the
# placeholder order of `insertMessageQuery`.
type InsertMessageParams* = (
  seq[byte], # id (message digest)
  seq[byte], # messageHash
  Timestamp, # storedAt (receiver timestamp)
  seq[byte], # contentTopic
  seq[byte], # payload
  seq[byte], # pubsubTopic
  int64, # version
  Timestamp, # timestamp (sender timestamp)
  seq[byte], # meta
)

proc insertMessageQuery(table: string): SqlQueryStr =
  ## INSERT statement whose `?` order matches `InsertMessageParams`.
  return
    "INSERT INTO " & table &
    "(id, messageHash, storedAt, contentTopic, payload, pubsubTopic, version, timestamp, meta)" &
    " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);"

proc prepareInsertMessageStmt*(
    db: SqliteDatabase
): SqliteStmt[InsertMessageParams, void] =
  ## Prepares the INSERT once so the driver can reuse it for every `put`.
  let query = insertMessageQuery(DbTable)
  return
    db.prepareStmt(query, InsertMessageParams, void).expect("this is a valid statement")
## Count table messages

proc countMessagesQuery(table: string): SqlQueryStr =
  return "SELECT COUNT(*) FROM " & table

proc getMessageCount*(db: SqliteDatabase): DatabaseResult[int64] =
  ## Total number of rows (messages) in the archive table.
  var count: int64
  proc queryRowCallback(s: ptr sqlite3_stmt) =
    count = sqlite3_column_int64(s, 0)

  let query = countMessagesQuery(DbTable)
  db.query(query, queryRowCallback).isOkOr:
    return err("failed to count number of messages in the database")

  return ok(count)
## Get oldest message receiver timestamp

proc selectOldestMessageTimestampQuery(table: string): SqlQueryStr =
  return "SELECT MIN(storedAt) FROM " & table

proc selectOldestReceiverTimestamp*(
    db: SqliteDatabase
): DatabaseResult[Timestamp] {.inline.} =
  ## Minimum `storedAt` value in the table.
  ## NOTE: for an empty table MIN() yields NULL, which
  ## sqlite3_column_int64 reads as 0, so Timestamp(0) is returned.
  var timestamp: Timestamp
  proc queryRowCallback(s: ptr sqlite3_stmt) =
    timestamp = queryRowReceiverTimestampCallback(s, 0)

  let query = selectOldestMessageTimestampQuery(DbTable)
  db.query(query, queryRowCallback).isOkOr:
    return err("failed to get the oldest receiver timestamp from the database")

  return ok(timestamp)

## Get newest message receiver timestamp

proc selectNewestMessageTimestampQuery(table: string): SqlQueryStr =
  return "SELECT MAX(storedAt) FROM " & table

proc selectNewestReceiverTimestamp*(
    db: SqliteDatabase
): DatabaseResult[Timestamp] {.inline.} =
  ## Maximum `storedAt` value in the table (0 for an empty table, see
  ## the NULL note on selectOldestReceiverTimestamp).
  var timestamp: Timestamp
  proc queryRowCallback(s: ptr sqlite3_stmt) =
    timestamp = queryRowReceiverTimestampCallback(s, 0)

  let query = selectNewestMessageTimestampQuery(DbTable)
  db.query(query, queryRowCallback).isOkOr:
    return err("failed to get the newest receiver timestamp from the database")

  return ok(timestamp)
## Delete messages older than timestamp

proc deleteMessagesOlderThanTimestampQuery(table: string, ts: Timestamp): SqlQueryStr =
  # `ts` is an integer timestamp, so direct interpolation is injection-safe.
  return "DELETE FROM " & table & " WHERE storedAt < " & $ts

proc deleteMessagesOlderThanTimestamp*(
    db: SqliteDatabase, ts: int64
): DatabaseResult[void] =
  ## Drops every message whose receiver timestamp is strictly older than `ts`.
  let query = deleteMessagesOlderThanTimestampQuery(DbTable, ts)
  discard ?db.query(
    query,
    proc(s: ptr sqlite3_stmt) =
      discard,
  )
  return ok()

## Delete oldest messages not within limit

proc deleteOldestMessagesNotWithinLimitQuery(table: string, limit: int): SqlQueryStr =
  ## Keeps the `limit` newest rows (ordered by storedAt, id) and deletes
  ## everything else.
  return
    "DELETE FROM " & table & " WHERE (storedAt, id, pubsubTopic) NOT IN (" &
    " SELECT storedAt, id, pubsubTopic FROM " & table &
    " ORDER BY storedAt DESC, id DESC" & " LIMIT " & $limit & ");"

proc deleteOldestMessagesNotWithinLimit*(
    db: SqliteDatabase, limit: int
): DatabaseResult[void] =
  # NOTE: The word `limit` here refers the store capacity/maximum number-of-messages allowed limit
  let query = deleteOldestMessagesNotWithinLimitQuery(DbTable, limit = limit)
  discard ?db.query(
    query,
    proc(s: ptr sqlite3_stmt) =
      discard,
  )
  return ok()
## Select all messages

proc selectAllMessagesQuery(table: string): SqlQueryStr =
  return
    "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" &
    " FROM " & table & " ORDER BY storedAt ASC"

proc selectAllMessages*(
    db: SqliteDatabase
): DatabaseResult[
    seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
] {.gcsafe.} =
  ## Retrieve all messages from the store.
  var rows: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
  proc queryRowCallback(s: ptr sqlite3_stmt) =
    # Column indices follow selectAllMessagesQuery's SELECT list:
    # 0 storedAt, 1 contentTopic, 2 payload, 3 pubsubTopic, 4 version,
    # 5 timestamp, 6 id (digest), 7 messageHash, 8 meta.
    let
      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
      wakuMessage = queryRowWakuMessageCallback(
        s,
        contentTopicCol = 1,
        payloadCol = 2,
        versionCol = 4,
        senderTimestampCol = 5,
        metaCol = 8,
      )
      digest = queryRowDigestCallback(s, digestCol = 6)
      storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
      hash = queryRowWakuMessageHashCallback(s, hashCol = 7)

    rows.add((pubsubTopic, wakuMessage, digest, storedAt, hash))

  let query = selectAllMessagesQuery(DbTable)
  discard ?db.query(query, queryRowCallback)

  return ok(rows)
## Select messages by history query with limit
proc combineClauses(clauses: varargs[Option[string]]): Option[string] =
  ## Joins every present clause with " AND "; returns none(string) when
  ## no clause is present.
  let parts = @clauses.filterIt(it.isSome()).mapIt(it.get())
  if parts.len == 0:
    return none(string)

  return some(foldl(parts, a & " AND " & b))
proc whereClausev2(
    cursor: bool,
    pubsubTopic: Option[PubsubTopic],
    contentTopic: seq[ContentTopic],
    startTime: Option[Timestamp],
    endTime: Option[Timestamp],
    ascending: bool,
): Option[string] {.deprecated.} =
  ## Builds the WHERE clause for the legacy (v2) history query.
  ## Placeholder order: cursor (2), pubsubTopic (1), contentTopic (N),
  ## startTime (1), endTime (1) — `execSelectMessagesV2WithLimitStmt`
  ## must bind parameters in exactly this order.
  let cursorClause =
    if cursor:
      # Row-value comparison paginates past the (storedAt, id) cursor.
      let comp = if ascending: ">" else: "<"
      some("(storedAt, id) " & comp & " (?, ?)")
    else:
      none(string)

  let pubsubTopicClause =
    if pubsubTopic.isNone():
      none(string)
    else:
      some("pubsubTopic = (?)")

  let contentTopicClause =
    if contentTopic.len <= 0:
      none(string)
    else:
      # One placeholder per requested content topic.
      var where = "contentTopic IN ("
      where &= "?"
      for _ in 1 ..< contentTopic.len:
        where &= ", ?"
      where &= ")"
      some(where)

  let startTimeClause =
    if startTime.isNone():
      none(string)
    else:
      some("storedAt >= (?)")

  let endTimeClause =
    if endTime.isNone():
      none(string)
    else:
      some("storedAt <= (?)")

  return combineClauses(
    cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause
  )
proc selectMessagesWithLimitQueryv2(
    table: string, where: Option[string], limit: uint, ascending = true, v3 = false
): SqlQueryStr {.deprecated.} =
  ## Builds the legacy (v2) paginated SELECT. `v3` is accepted for
  ## signature parity but not used here.
  let sortOrder = if ascending: "ASC" else: "DESC"
  let whereFragment =
    if where.isSome():
      " WHERE " & where.get()
    else:
      ""

  return
    "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" &
    " FROM " & table & whereFragment & " ORDER BY storedAt " & sortOrder & ", id " &
    sortOrder & " LIMIT " & $limit & ";"
proc prepareStmt(
    db: SqliteDatabase, stmt: string
): DatabaseResult[SqliteStmt[void, void]] =
  ## Compiles a SQL string into a reusable prepared statement; the caller
  ## is responsible for disposing it.
  var s: RawStmtPtr
  checkErr sqlite3_prepare_v2(db.env, stmt, stmt.len.cint, addr s, nil)
  return ok(SqliteStmt[void, void](s))
proc execSelectMessagesV2WithLimitStmt(
    s: SqliteStmt,
    cursor: Option[DbCursor],
    pubsubTopic: Option[PubsubTopic],
    contentTopic: seq[ContentTopic],
    startTime: Option[Timestamp],
    endTime: Option[Timestamp],
    onRowCallback: DataProc,
): DatabaseResult[void] {.deprecated.} =
  ## Binds the parameters in the order produced by `whereClausev2`
  ## (cursor, pubsubTopic, contentTopic, startTime, endTime), then steps
  ## through the result set invoking `onRowCallback` for every row.
  let s = RawStmtPtr(s)

  # Bind params
  var paramIndex = 1

  if cursor.isSome():
    let (storedAt, id, _) = cursor.get() # third cursor field is unused here
    checkErr bindParam(s, paramIndex, storedAt)
    paramIndex += 1
    checkErr bindParam(s, paramIndex, id)
    paramIndex += 1

  if pubsubTopic.isSome():
    let pubsubTopic = toBytes(pubsubTopic.get())
    checkErr bindParam(s, paramIndex, pubsubTopic)
    paramIndex += 1

  for topic in contentTopic:
    checkErr bindParam(s, paramIndex, topic.toBytes())
    paramIndex += 1

  if startTime.isSome():
    let time = startTime.get()
    checkErr bindParam(s, paramIndex, time)
    paramIndex += 1

  if endTime.isSome():
    let time = endTime.get()
    checkErr bindParam(s, paramIndex, time)
    paramIndex += 1

  try:
    while true:
      let v = sqlite3_step(s)
      case v
      of SQLITE_ROW:
        onRowCallback(s)
      of SQLITE_DONE:
        return ok()
      else:
        return err($sqlite3_errstr(v))
  except Exception, CatchableError:
    # release implicit transaction
    discard sqlite3_reset(s) # same return information as step
    discard sqlite3_clear_bindings(s) # no errors possible
    # NOTE(review): falling through here returns the zero-default Result —
    # confirm callers treat that as a failure.
proc selectMessagesByHistoryQueryWithLimit*(
    db: SqliteDatabase,
    contentTopic: seq[ContentTopic],
    pubsubTopic: Option[PubsubTopic],
    cursor: Option[DbCursor],
    startTime: Option[Timestamp],
    endTime: Option[Timestamp],
    limit: uint,
    ascending: bool,
): DatabaseResult[
    seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
] {.deprecated.} =
  ## Legacy (v2) paginated history query: builds the WHERE clause, prepares
  ## the statement, binds the parameters, collects all rows and disposes
  ## the statement.
  var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
    @[]
  proc queryRowCallback(s: ptr sqlite3_stmt) =
    # Column indices follow selectMessagesWithLimitQueryv2's SELECT list.
    let
      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
      message = queryRowWakuMessageCallback(
        s,
        contentTopicCol = 1,
        payloadCol = 2,
        versionCol = 4,
        senderTimestampCol = 5,
        metaCol = 8,
      )
      digest = queryRowDigestCallback(s, digestCol = 6)
      storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
      hash = queryRowWakuMessageHashCallback(s, hashCol = 7)

    messages.add((pubsubTopic, message, digest, storedAt, hash))

  let query = block:
    let where = whereClausev2(
      cursor.isSome(), pubsubTopic, contentTopic, startTime, endTime, ascending
    )

    selectMessagesWithLimitQueryv2(DbTable, where, limit, ascending)

  let dbStmt = ?db.prepareStmt(query)
  ?dbStmt.execSelectMessagesV2WithLimitStmt(
    cursor, pubsubTopic, contentTopic, startTime, endTime, queryRowCallback
  )
  dbStmt.dispose()

  return ok(messages)
### Store v3 ###

proc execSelectMessageByHash(
    s: SqliteStmt, hash: WakuMessageHash, onRowCallback: DataProc
): DatabaseResult[void] =
  ## Binds `hash` as the single parameter and steps the prepared
  ## message-by-hash lookup, invoking `onRowCallback` per row (at most one
  ## row, since messageHash is the table's primary key).
  let s = RawStmtPtr(s)

  checkErr bindParam(s, 1, toSeq(hash))

  try:
    while true:
      let v = sqlite3_step(s)
      case v
      of SQLITE_ROW:
        onRowCallback(s)
      of SQLITE_DONE:
        return ok()
      else:
        return err($sqlite3_errstr(v))
  except Exception, CatchableError:
    # release implicit transaction
    discard sqlite3_reset(s) # same return information as step
    discard sqlite3_clear_bindings(s) # no errors possible

proc selectMessageByHashQuery(): SqlQueryStr =
  ## Single-row lookup by primary key. Column order:
  ## 0 contentTopic, 1 payload, 2 version, 3 timestamp, 4 meta, 5 messageHash.
  var query: string

  query = "SELECT contentTopic, payload, version, timestamp, meta, messageHash"
  query &= " FROM " & DbTable
  query &= " WHERE messageHash = (?)"

  return query
proc whereClause(
    cursor: bool,
    pubsubTopic: Option[PubsubTopic],
    contentTopic: seq[ContentTopic],
    startTime: Option[Timestamp],
    endTime: Option[Timestamp],
    hashes: seq[WakuMessageHash],
    ascending: bool,
): Option[string] =
  ## Builds the WHERE clause for store-v3 queries.
  ## Placeholder order: cursor (2), pubsubTopic (1), contentTopic (N),
  ## startTime (1), endTime (1), hashes (N) — bindings must follow the
  ## same order.
  ## NOTE(review): the cursor paginates on (timestamp, messageHash) while
  ## the range filters and the ORDER BY use storedAt. Sender and receiver
  ## timestamps can differ (see the driver's `put`), so confirm this mix
  ## is intentional — otherwise pagination may skip or repeat rows.
  let cursorClause =
    if cursor:
      let comp = if ascending: ">" else: "<"
      some("(timestamp, messageHash) " & comp & " (?, ?)")
    else:
      none(string)

  let pubsubTopicClause =
    if pubsubTopic.isNone():
      none(string)
    else:
      some("pubsubTopic = (?)")

  let contentTopicClause =
    if contentTopic.len <= 0:
      none(string)
    else:
      # One placeholder per requested content topic.
      var where = "contentTopic IN ("
      where &= "?"
      for _ in 1 ..< contentTopic.len:
        where &= ", ?"
      where &= ")"
      some(where)

  let startTimeClause =
    if startTime.isNone():
      none(string)
    else:
      some("storedAt >= (?)")

  let endTimeClause =
    if endTime.isNone():
      none(string)
    else:
      some("storedAt <= (?)")

  let hashesClause =
    if hashes.len <= 0:
      none(string)
    else:
      # One placeholder per requested message hash.
      var where = "messageHash IN ("
      where &= "?"
      for _ in 1 ..< hashes.len:
        where &= ", ?"
      where &= ")"
      some(where)

  return combineClauses(
    cursorClause, pubsubTopicClause, contentTopicClause, startTimeClause, endTimeClause,
    hashesClause,
  )
proc execSelectMessagesWithLimitStmt(
    s: SqliteStmt,
    cursor: Option[(Timestamp, WakuMessageHash)],
    pubsubTopic: Option[PubsubTopic],
    contentTopic: seq[ContentTopic],
    startTime: Option[Timestamp],
    endTime: Option[Timestamp],
    hashes: seq[WakuMessageHash],
    onRowCallback: DataProc,
): DatabaseResult[void] =
  ## Binds the store-v3 query parameters and steps through the result set,
  ## invoking `onRowCallback` for every row.
  ##
  ## Parameters MUST be bound in the exact order of the `?` placeholders
  ## emitted by `whereClause`: cursor, pubsubTopic, contentTopic,
  ## startTime, endTime, hashes.
  let s = RawStmtPtr(s)

  # Bind params
  var paramIndex = 1

  if cursor.isSome():
    let (time, hash) = cursor.get()
    checkErr bindParam(s, paramIndex, time)
    paramIndex += 1
    checkErr bindParam(s, paramIndex, toSeq(hash))
    paramIndex += 1

  if pubsubTopic.isSome():
    let pubsubTopic = toBytes(pubsubTopic.get())
    checkErr bindParam(s, paramIndex, pubsubTopic)
    paramIndex += 1

  for topic in contentTopic:
    checkErr bindParam(s, paramIndex, topic.toBytes())
    paramIndex += 1

  # Fix: bind startTime/endTime BEFORE the hashes. `whereClause` places the
  # time-range placeholders before the `messageHash IN (...)` placeholders,
  # so binding the hashes first misaligned every subsequent parameter
  # whenever both a time filter and message hashes were supplied.
  if startTime.isSome():
    let time = startTime.get()
    checkErr bindParam(s, paramIndex, time)
    paramIndex += 1

  if endTime.isSome():
    let time = endTime.get()
    checkErr bindParam(s, paramIndex, time)
    paramIndex += 1

  for hash in hashes:
    checkErr bindParam(s, paramIndex, toSeq(hash))
    paramIndex += 1

  try:
    while true:
      let v = sqlite3_step(s)
      case v
      of SQLITE_ROW:
        onRowCallback(s)
      of SQLITE_DONE:
        return ok()
      else:
        return err($sqlite3_errstr(v))
  except Exception, CatchableError:
    # release implicit transaction
    discard sqlite3_reset(s) # same return information as step
    discard sqlite3_clear_bindings(s) # no errors possible
proc selectMessagesWithLimitQuery(
    table: string, where: Option[string], limit: uint, ascending = true, v3 = false
): SqlQueryStr =
  ## Builds the store-v3 paginated SELECT. `v3` is accepted for signature
  ## parity but not used here.
  let sortOrder = if ascending: "ASC" else: "DESC"
  let whereFragment =
    if where.isSome():
      " WHERE " & where.get()
    else:
      ""

  return
    "SELECT storedAt, contentTopic, payload, pubsubTopic, version, timestamp, id, messageHash, meta" &
    " FROM " & table & whereFragment & " ORDER BY storedAt " & sortOrder &
    ", messageHash " & sortOrder & " LIMIT " & $limit & ";"
proc selectMessagesByStoreQueryWithLimit*(
    db: SqliteDatabase,
    contentTopic: seq[ContentTopic],
    pubsubTopic: Option[PubsubTopic],
    cursor: Option[WakuMessageHash],
    startTime: Option[Timestamp],
    endTime: Option[Timestamp],
    hashes: seq[WakuMessageHash],
    limit: uint,
    ascending: bool,
): DatabaseResult[
    seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)]
] =
  ## Store-v3 paginated query. A hash-only cursor is first resolved to a
  ## (timestamp, hash) pair by looking the cursor message up, then the
  ## main filtered SELECT runs using that pair as the pagination anchor.

  # Must first get the message timestamp before paginating by time
  let newCursor =
    if cursor.isSome() and cursor.get() != EmptyWakuMessageHash:
      let hash: WakuMessageHash = cursor.get()

      var wakuMessage: Option[WakuMessage]

      proc queryRowCallback(s: ptr sqlite3_stmt) =
        # Column indices follow selectMessageByHashQuery's SELECT list.
        wakuMessage = some(
          queryRowWakuMessageCallback(
            s,
            contentTopicCol = 0,
            payloadCol = 1,
            versionCol = 2,
            senderTimestampCol = 3,
            metaCol = 4,
          )
        )

      let query = selectMessageByHashQuery()
      let dbStmt = ?db.prepareStmt(query)
      ?dbStmt.execSelectMessageByHash(hash, queryRowCallback)
      dbStmt.dispose()

      if wakuMessage.isSome():
        # Anchor pagination on the sender timestamp of the cursor message.
        let time = wakuMessage.get().timestamp
        some((time, hash))
      else:
        # The cursor message is not in the archive; cannot paginate from it.
        return err("cursor not found")
    else:
      none((Timestamp, WakuMessageHash))

  var messages: seq[(PubsubTopic, WakuMessage, seq[byte], Timestamp, WakuMessageHash)] =
    @[]
  proc queryRowCallback(s: ptr sqlite3_stmt) =
    # Column indices follow selectMessagesWithLimitQuery's SELECT list.
    let
      pubsubTopic = queryRowPubsubTopicCallback(s, pubsubTopicCol = 3)
      message = queryRowWakuMessageCallback(
        s,
        contentTopicCol = 1,
        payloadCol = 2,
        versionCol = 4,
        senderTimestampCol = 5,
        metaCol = 8,
      )
      digest = queryRowDigestCallback(s, digestCol = 6)
      storedAt = queryRowReceiverTimestampCallback(s, storedAtCol = 0)
      hash = queryRowWakuMessageHashCallback(s, hashCol = 7)

    messages.add((pubsubTopic, message, digest, storedAt, hash))

  let query = block:
    let where = whereClause(
      newCursor.isSome(),
      pubsubTopic,
      contentTopic,
      startTime,
      endTime,
      hashes,
      ascending,
    )

    selectMessagesWithLimitQuery(DbTable, where, limit, ascending, true)

  let dbStmt = ?db.prepareStmt(query)
  ?dbStmt.execSelectMessagesWithLimitStmt(
    newCursor, pubsubTopic, contentTopic, startTime, endTime, hashes, queryRowCallback
  )
  dbStmt.dispose()

  return ok(messages)

View File

@ -1,220 +0,0 @@
# The code in this file is an adaptation of the Sqlite KV Store found in nim-eth.
# https://github.com/status-im/nim-eth/blob/master/eth/db/kvstore_sqlite3.nim
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import std/options, stew/byteutils, chronicles, chronos, results
import
../../../common/databases/db_sqlite,
../../../waku_core,
../../../waku_core/message/digest,
../../common,
../../driver,
./cursor,
./queries
logScope:
topics = "waku archive sqlite"
proc init(db: SqliteDatabase): ArchiveDriverResult[void] =
  ## Ensures the archive schema (table plus indices) exists on `db`.
  ## Misconfiguration can lead to nil DB
  if db.isNil():
    return err("db not initialized")

  # Create table, if doesn't exist
  createTable(db).isOkOr:
    return err("failed to create table: " & error)

  # Create indices, if don't exist
  createOldestMessageTimestampIndex(db).isOkOr:
    return err("failed to create i_rt index: " & error)

  createHistoryQueryIndex(db).isOkOr:
    return err("failed to create i_query index: " & error)

  return ok()
type SqliteDriver* = ref object of ArchiveDriver
  db: SqliteDatabase # underlying connection
  insertStmt: SqliteStmt[InsertMessageParams, void] # cached prepared INSERT

proc new*(T: type SqliteDriver, db: SqliteDatabase): ArchiveDriverResult[T] =
  ## Creates the driver: ensures the schema exists, then pre-compiles the
  ## insert statement reused by every `put`.
  # Database initialization
  ?init(db)

  # General initialization
  let insertStmt = db.prepareInsertMessageStmt()
  return ok(SqliteDriver(db: db, insertStmt: insertStmt))
method put*(
    s: SqliteDriver,
    pubsubTopic: PubsubTopic,
    message: WakuMessage,
    digest: MessageDigest,
    messageHash: WakuMessageHash,
    receivedTime: Timestamp,
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Inserts a message into the store
  # Tuple element order must match InsertMessageParams / insertMessageQuery.
  let res = s.insertStmt.exec(
    (
      @(digest.data), # id
      @(messageHash), # messageHash
      receivedTime, # storedAt
      toBytes(message.contentTopic), # contentTopic
      message.payload, # payload
      toBytes(pubsubTopic), # pubsubTopic
      int64(message.version), # version
      message.timestamp, # senderTimestamp
      message.meta, # meta
    )
  )

  return res
method getAllMessages*(
    s: SqliteDriver
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Retrieve all messages from the store.
  return s.db.selectAllMessages()

method getMessagesV2*(
    s: SqliteDriver,
    contentTopic = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId: string,
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async, deprecated.} =
  ## Legacy (v2) history query. The archive cursor is converted to the
  ## database cursor representation before querying.
  # NOTE(review): `requestId` is not used by this driver.
  let cursor = cursor.map(toDbCursor)

  let rowsRes = s.db.selectMessagesByHistoryQueryWithLimit(
    contentTopic,
    pubsubTopic,
    cursor,
    startTime,
    endTime,
    limit = maxPageSize,
    ascending = ascendingOrder,
  )

  return rowsRes
method getMessages*(
    s: SqliteDriver,
    includeData = true,
    contentTopic = newSeq[ContentTopic](0),
    pubsubTopic = none(PubsubTopic),
    cursor = none(ArchiveCursor),
    startTime = none(Timestamp),
    endTime = none(Timestamp),
    hashes = newSeq[WakuMessageHash](0),
    maxPageSize = DefaultPageSize,
    ascendingOrder = true,
    requestId = "",
): Future[ArchiveDriverResult[seq[ArchiveRow]]] {.async.} =
  ## Store-v3 query entry point. Only the cursor's message hash is passed
  ## down; the timestamp anchor is resolved inside the query layer.
  # NOTE(review): `includeData` and `requestId` are not used by this driver.
  let cursor =
    if cursor.isSome():
      some(cursor.get().hash)
    else:
      none(WakuMessageHash)

  let rowsRes = s.db.selectMessagesByStoreQueryWithLimit(
    contentTopic,
    pubsubTopic,
    cursor,
    startTime,
    endTime,
    hashes,
    limit = maxPageSize,
    ascending = ascendingOrder,
  )

  return rowsRes
method getMessagesCount*(
    s: SqliteDriver
): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Number of messages currently stored.
  return s.db.getMessageCount()

method getPagesCount*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Delegates to `getPageCount` on the underlying database.
  return s.db.getPageCount()

method getPagesSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Delegates to `getPageSize` on the underlying database.
  return s.db.getPageSize()

method getDatabaseSize*(s: SqliteDriver): Future[ArchiveDriverResult[int64]] {.async.} =
  ## Current database size in bytes (see decreaseDatabaseSize usage).
  return s.db.getDatabaseSize()

method performVacuum*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} =
  ## Delegates to `performSqliteVacuum` on the underlying database.
  return s.db.performSqliteVacuum()
method getOldestMessageTimestamp*(
    s: SqliteDriver
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
  ## Oldest (minimum) receiver timestamp stored in the archive.
  return s.db.selectOldestReceiverTimestamp()

method getNewestMessageTimestamp*(
    s: SqliteDriver
): Future[ArchiveDriverResult[Timestamp]] {.async.} =
  ## Newest (maximum) receiver timestamp stored in the archive.
  # Consistency fix: spell the callee `selectNewestReceiverTimestamp`
  # (was `selectnewestReceiverTimestamp`). Nim's style-insensitive
  # identifier matching made the old spelling compile, but it violated
  # the camelCase convention used everywhere else in this module.
  return s.db.selectNewestReceiverTimestamp()
method deleteMessagesOlderThanTimestamp*(
    s: SqliteDriver, ts: Timestamp
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Removes every message whose receiver timestamp is older than `ts`.
  return s.db.deleteMessagesOlderThanTimestamp(ts)

method deleteOldestMessagesNotWithinLimit*(
    s: SqliteDriver, limit: int
): Future[ArchiveDriverResult[void]] {.async.} =
  ## Keeps only the newest `limit` messages, deleting the rest.
  return s.db.deleteOldestMessagesNotWithinLimit(limit)
method decreaseDatabaseSize*(
    driver: SqliteDriver, targetSizeInBytes: int64, forceRemoval: bool = false
): Future[ArchiveDriverResult[void]] {.async.} =
  ## To remove 20% of the outdated data from database
  const DeleteLimit = 0.80
    ## when db size overshoots the database limit, shred 20% of outdated messages

  ## get size of database
  let dbSize = (await driver.getDatabaseSize()).valueOr:
    return err("failed to get database size: " & $error)

  ## database size in bytes
  let totalSizeOfDB: int64 = int64(dbSize)

  # Nothing to do while we are still under the target size.
  if totalSizeOfDB < targetSizeInBytes:
    return ok()

  ## to shred/delete messages, get the total row/message count
  let numMessages = (await driver.getMessagesCount()).valueOr:
    return err("failed to get messages count: " & error)

  ## NOTE: SQLite vacuuming is done manually; we delete a percentage of rows.
  ## If vacuuming were done automatically we would aim to check the DB size
  ## periodically for an efficient retention-policy implementation.

  ## 80% of the total messages are to be kept, delete others
  let pageDeleteWindow = int(float(numMessages) * DeleteLimit)

  (await driver.deleteOldestMessagesNotWithinLimit(limit = pageDeleteWindow)).isOkOr:
    return err("deleting oldest messages failed: " & error)

  return ok()
method close*(s: SqliteDriver): Future[ArchiveDriverResult[void]] {.async.} =
  ## Close the database connection
  # Dispose statements
  s.insertStmt.dispose()

  # Close connection
  s.db.close()

  return ok()

method existsTable*(
    s: SqliteDriver, tableName: string
): Future[ArchiveDriverResult[bool]] {.async.} =
  ## Not supported by this driver; always returns an error.
  return err("existsTable method not implemented in sqlite_driver")

View File

@ -9,5 +9,4 @@ const
WakuTransferCodec* = "/vac/waku/transfer/1.0.0"
WakuMetadataCodec* = "/vac/waku/metadata/1.0.0"
WakuPeerExchangeCodec* = "/vac/waku/peer-exchange/2.0.0-alpha1"
WakuLegacyStoreCodec* = "/vac/waku/store/2.0.0-beta4"
WakuRendezVousCodec* = "/vac/waku/rendezvous/1.0.0"

View File

@ -1,3 +1,3 @@
import ./subscription/subscription_manager, ./subscription/push_handler
import ./subscription/push_handler
export subscription_manager, push_handler
export push_handler

View File

@ -1,52 +0,0 @@
{.push raises: [].}
import std/tables, results, chronicles, chronos
import ./push_handler, ../topics, ../message
## Subscription manager

type LegacySubscriptionManager* = object
  # Maps (pubsub topic, content topic) pairs to their push handlers.
  subscriptions: TableRef[(string, ContentTopic), FilterPushHandler]

proc init*(T: type LegacySubscriptionManager): T =
  ## Creates an empty subscription manager.
  LegacySubscriptionManager(
    subscriptions: newTable[(string, ContentTopic), FilterPushHandler]()
  )

proc clear*(m: var LegacySubscriptionManager) =
  ## Removes every registered subscription.
  m.subscriptions.clear()
proc registerSubscription*(
    m: LegacySubscriptionManager,
    pubsubTopic: PubsubTopic,
    contentTopic: ContentTopic,
    handler: FilterPushHandler,
) =
  ## Registers (or replaces) the push handler for the given topic pair.
  try:
    # TODO: Handle over subscription surprises
    m.subscriptions[(pubsubTopic, contentTopic)] = handler
  except CatchableError:
    error "failed to register filter subscription", error = getCurrentExceptionMsg()

proc removeSubscription*(
    m: LegacySubscriptionManager, pubsubTopic: PubsubTopic, contentTopic: ContentTopic
) =
  ## Drops the handler for the topic pair; a no-op when none is registered.
  m.subscriptions.del((pubsubTopic, contentTopic))
proc notifySubscriptionHandler*(
    m: LegacySubscriptionManager,
    pubsubTopic: PubsubTopic,
    contentTopic: ContentTopic,
    message: WakuMessage,
) =
  ## Fires the registered handler (if any) for the topic pair.
  ## The handler runs asynchronously via asyncSpawn.
  if not m.subscriptions.hasKey((pubsubTopic, contentTopic)):
    return

  try:
    let handler = m.subscriptions[(pubsubTopic, contentTopic)]
    asyncSpawn handler(pubsubTopic, message)
  except CatchableError:
    # Best-effort notification: lookup/spawn errors are deliberately dropped.
    discard

proc getSubscriptionsCount*(m: LegacySubscriptionManager): int =
  ## Number of registered (pubsubTopic, contentTopic) subscriptions.
  m.subscriptions.len()

View File

@ -101,12 +101,20 @@ proc sendSubscribeRequest(
return ok()
proc ping*(
wfc: WakuFilterClient, servicePeer: RemotePeerInfo
wfc: WakuFilterClient, servicePeer: RemotePeerInfo, timeout = chronos.seconds(0)
): Future[FilterSubscribeResult] {.async.} =
info "sending ping", servicePeer = shortLog($servicePeer)
let requestId = generateRequestId(wfc.rng)
let filterSubscribeRequest = FilterSubscribeRequest.ping(requestId)
if timeout > chronos.seconds(0):
let fut = wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest)
if not await fut.withTimeout(timeout):
return err(
FilterSubscribeError.parse(uint32(FilterSubscribeErrorKind.PEER_DIAL_FAILURE))
)
return fut.read()
return await wfc.sendSubscribeRequest(servicePeer, filterSubscribeRequest)
proc subscribe*(

View File

@ -157,7 +157,7 @@ type
): Future[ValidationResult] {.gcsafe, raises: [Defect].}
WakuRelay* = ref object of GossipSub
brokerCtx: BrokerContext
peerEventListener: EventWakuPeerListener
peerEventListener: WakuPeerEventListener
# seq of tuples: the first entry in the tuple contains the validators are called for every topic
# the second entry contains the error messages to be returned when the validator fails
wakuValidators: seq[tuple[handler: WakuValidatorHandler, errorMessage: string]]
@ -376,9 +376,9 @@ proc new*(
w.initProtocolHandler()
w.initRelayObservers()
w.peerEventListener = EventWakuPeer.listen(
w.peerEventListener = WakuPeerEvent.listen(
w.brokerCtx,
proc(evt: EventWakuPeer): Future[void] {.async: (raises: []), gcsafe.} =
proc(evt: WakuPeerEvent): Future[void] {.async: (raises: []), gcsafe.} =
if evt.kind == WakuPeerEventKind.EventDisconnected:
w.topicHealthCheckAll = true
w.topicHealthUpdateEvent.fire()
@ -524,8 +524,7 @@ method stop*(w: WakuRelay) {.async, base.} =
info "stop"
await procCall GossipSub(w).stop()
if w.peerEventListener.id != 0:
EventWakuPeer.dropListener(w.brokerCtx, w.peerEventListener)
WakuPeerEvent.dropListener(w.brokerCtx, w.peerEventListener)
if not w.topicHealthLoopHandle.isNil():
await w.topicHealthLoopHandle.cancelAndWait()

View File

@ -1,3 +0,0 @@
import ./waku_store_legacy/common, ./waku_store_legacy/protocol
export common, protocol

View File

@ -1,3 +0,0 @@
# Waku Store protocol
The store protocol implements historical message support. See https://rfc.vac.dev/spec/13/ for more information.

View File

@ -1,241 +0,0 @@
{.push raises: [].}
import std/options, results, chronicles, chronos, metrics, bearssl/rand
import
../node/peer_manager,
../utils/requests,
./protocol_metrics,
./common,
./rpc,
./rpc_codec
when defined(waku_exp_store_resume):
import std/[sequtils, times]
import ../waku_archive
import ../waku_core/message/digest
logScope:
topics = "waku legacy store client"
const DefaultPageSize*: uint = 20
  # A recommended default number of waku messages per page

type WakuStoreClient* = ref object
  peerManager: PeerManager
  rng: ref rand.HmacDrbgContext

  # TODO: Move outside of the client
  when defined(waku_exp_store_resume):
    # Local archive used by the experimental `resume` flow.
    store: ArchiveDriver

proc new*(
    T: type WakuStoreClient, peerManager: PeerManager, rng: ref rand.HmacDrbgContext
): T =
  ## Creates a legacy store client (without resume support).
  WakuStoreClient(peerManager: peerManager, rng: rng)
proc sendHistoryQueryRPC(
    w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo
): Future[HistoryResult] {.async, gcsafe.} =
  ## Dials `peer` on the legacy store codec, sends the HistoryRPC request
  ## and decodes the response into a HistoryResult.
  let connOpt = await w.peerManager.dialPeer(peer, WakuLegacyStoreCodec)
  if connOpt.isNone():
    waku_legacy_store_errors.inc(labelValues = [dialFailure])
    return err(HistoryError(kind: HistoryErrorKind.PEER_DIAL_FAILURE, address: $peer))

  let connection = connOpt.get()
  defer:
    await connection.closeWithEof()

  # Reuse the caller-supplied request id when present, otherwise generate one.
  let requestId =
    if req.requestId != "":
      req.requestId
    else:
      generateRequestId(w.rng)

  let reqRpc = HistoryRPC(requestId: requestId, query: some(req.toRPC()))
  await connection.writeLP(reqRpc.encode().buffer)

  #TODO: I see a challenge here, if storeNode uses a different MaxRPCSize this read will fail.
  # Need to find a workaround for this.
  let buf = await connection.readLp(DefaultMaxRpcSize.int)

  let respRpc = HistoryRPC.decode(buf).valueOr:
    waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
    return
      err(HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: decodeRpcFailure))

  # Disabled ,for now, since the default response is a possible case (no messages, pagesize = 0, error = NONE(0))
  # TODO: Rework the RPC protocol to differentiate the default value from an empty value (e.g., status = 200 (OK))
  # and rework the protobuf parsing to return Option[T] when empty values are received
  if respRpc.response.isNone():
    waku_legacy_store_errors.inc(labelValues = [emptyRpcResponseFailure])
    return err(
      HistoryError(kind: HistoryErrorKind.BAD_RESPONSE, cause: emptyRpcResponseFailure)
    )

  let resp = respRpc.response.get()

  return resp.toAPI()

proc query*(
    w: WakuStoreClient, req: HistoryQuery, peer: RemotePeerInfo
): Future[HistoryResult] {.async, gcsafe.} =
  ## Public entry point: forwards the history query to `peer`.
  return await w.sendHistoryQueryRPC(req, peer)
# TODO: Move outside of the client
when defined(waku_exp_store_resume):
  ## Resume store

  const StoreResumeTimeWindowOffset: Timestamp = getNanosecondTime(20)
    ## Adjust the time window with an offset of 20 seconds

  proc new*(
      T: type WakuStoreClient,
      peerManager: PeerManager,
      rng: ref rand.HmacDrbgContext,
      store: ArchiveDriver,
  ): T =
    ## Constructor variant wiring in the local archive driver used by `resume`.
    WakuStoreClient(peerManager: peerManager, rng: rng, store: store)

  proc queryAll(
      w: WakuStoreClient, query: HistoryQuery, peer: RemotePeerInfo
  ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} =
    ## A thin wrapper for query. Sends the query to the given peer. when the query has a valid pagingInfo,
    ## it retrieves the historical messages in pages.
    ## Returns all the fetched messages, if error occurs, returns an error string

    # Make a copy of the query
    var req = query

    var messageList: seq[WakuMessage] = @[]

    while true:
      let response = (await w.query(req, peer)).valueOr:
        return err($error)

      messageList.add(response.messages)

      # Check whether it is the last page
      if response.cursor.isNone():
        break

      # Update paging cursor
      req.cursor = response.cursor

    return ok(messageList)

  proc queryLoop(
      w: WakuStoreClient, req: HistoryQuery, peers: seq[RemotePeerInfo]
  ): Future[WakuStoreResult[seq[WakuMessage]]] {.async, gcsafe.} =
    ## Loops through the peers candidate list in order and sends the query to each
    ##
    ## Once all responses have been received, the retrieved messages are consolidated into one deduplicated list.
    ## if no messages have been retrieved, the returned future will resolve into a result holding an empty seq.
    let queryFuturesList = peers.mapIt(w.queryAll(req, it))

    await allFutures(queryFuturesList)

    let messagesList = queryFuturesList
      .map(
        proc(fut: Future[WakuStoreResult[seq[WakuMessage]]]): seq[WakuMessage] =
          try:
            # fut.read() can raise a CatchableError
            # These futures have been awaited before using allFutures(). Call completed() just as a sanity check.
            if not fut.completed() or fut.read().isErr():
              return @[]

            fut.read().value
          except CatchableError:
            return @[]
      )
      .concat()
      .deduplicate()

    return ok(messagesList)

  proc put(
      store: ArchiveDriver, pubsubTopic: PubsubTopic, message: WakuMessage
  ): Result[void, string] =
    ## Persists one fetched message into the local archive, using the sender
    ## timestamp as receiver time when present (fallback: current time).
    # NOTE(review): ArchiveDriver.put is declared async in the driver layer;
    # confirm this experimental code path still compiles against the current
    # driver interface.
    let
      digest = waku_archive.computeDigest(message)
      messageHash = computeMessageHash(pubsubTopic, message)
      receivedTime =
        if message.timestamp > 0:
          message.timestamp
        else:
          getNanosecondTime(getTime().toUnixFloat())

    store.put(pubsubTopic, message, digest, messageHash, receivedTime)

  proc resume*(
      w: WakuStoreClient,
      peerList = none(seq[RemotePeerInfo]),
      pageSize = DefaultPageSize,
      pubsubTopic = DefaultPubsubTopic,
  ): Future[WakuStoreResult[uint64]] {.async, gcsafe.} =
    ## resume proc retrieves the history of waku messages published on the default waku pubsub topic since the last time the waku store node has been online
    ## messages are stored in the store node's messages field and in the message db
    ## the offline time window is measured as the difference between the current time and the timestamp of the most recent persisted waku message
    ## an offset of 20 second is added to the time window to count for nodes asynchrony
    ## peerList indicates the list of peers to query from.
    ## The history is fetched from all available peers in this list and then consolidated into one deduplicated list.
    ## Such candidates should be found through a discovery method (to be developed).
    ## if no peerList is passed, one of the peers in the underlying peer manager unit of the store protocol is picked randomly to fetch the history from.
    ## The history gets fetched successfully if the dialed peer has been online during the queried time window.
    ## the resume proc returns the number of retrieved messages if no error occurs, otherwise returns the error string

    # If store has not been provided, don't even try
    if w.store.isNil():
      return err("store not provided (nil)")

    # NOTE: Original implementation is based on the message's sender timestamp. At the moment
    # of writing, the sqlite store implementation returns the last message's receiver
    # timestamp.
    #  lastSeenTime = lastSeenItem.get().msg.timestamp
    let
      lastSeenTime = w.store.getNewestMessageTimestamp().get(Timestamp(0))
      now = getNanosecondTime(getTime().toUnixFloat())

    info "resuming with offline time window",
      lastSeenTime = lastSeenTime, currentTime = now

    let
      queryEndTime = now + StoreResumeTimeWindowOffset
      queryStartTime = max(lastSeenTime - StoreResumeTimeWindowOffset, 0)

    let req = HistoryQuery(
      pubsubTopic: some(pubsubTopic),
      startTime: some(queryStartTime),
      endTime: some(queryEndTime),
      pageSize: uint64(pageSize),
      direction: default(),
    )

    var res: WakuStoreResult[seq[WakuMessage]]
    if peerList.isSome():
      info "trying the candidate list to fetch the history"
      res = await w.queryLoop(req, peerList.get())
    else:
      info "no candidate list is provided, selecting a random peer"
      # if no peerList is set then query from one of the peers stored in the peer manager
      let peerOpt = w.peerManager.selectPeer(WakuLegacyStoreCodec)
      if peerOpt.isNone():
        warn "no suitable remote peers"
        waku_legacy_store_errors.inc(labelValues = [peerNotFoundFailure])
        return err("no suitable remote peers")

      info "a peer is selected from peer manager"
      res = await w.queryAll(req, peerOpt.get())

    res.isOkOr:
      info "failed to resume the history"
      return err("failed to resume the history")

    # Save the retrieved messages in the store
    var added: uint = 0
    for msg in res.get():
      w.store.put(pubsubTopic, msg).isOkOr:
        continue

      added.inc()

    return ok(added)

View File

@ -1,108 +0,0 @@
{.push raises: [].}
import std/[options, sequtils], results, stew/byteutils, nimcrypto/sha2
import ../waku_core, ../common/paging
from ../waku_core/codecs import WakuLegacyStoreCodec
export WakuLegacyStoreCodec
const
  # Default number of messages returned in a single history query page.
  DefaultPageSize*: uint64 = 20
  # Largest page size a requestor should ask for — presumably clamped by the
  # serving store node; TODO(review) confirm the enforcement site.
  MaxPageSize*: uint64 = 100

# String-error result type used throughout the legacy store client/protocol.
type WakuStoreResult*[T] = Result[T, string]

## Waku message digest (SHA-256 over content topic and payload; see computeDigest)
type MessageDigest* = MDigest[256]
proc computeDigest*(msg: WakuMessage): MessageDigest =
  ## SHA-256 digest over the message's content topic followed by its payload.
  var hashCtx: sha256
  hashCtx.init()
  defer:
    hashCtx.clear()

  hashCtx.update(msg.contentTopic.toBytes())
  hashCtx.update(msg.payload)

  # finish() evaluates before the deferred clear() runs, so the digest is valid.
  result = hashCtx.finish()
## API types

type
  HistoryCursor* = object
    ## Pagination cursor pointing at the last message of a delivered page.
    pubsubTopic*: PubsubTopic
    senderTime*: Timestamp # timestamp set by the message publisher
    storeTime*: Timestamp # timestamp assigned by the store node on receipt
    digest*: MessageDigest # digest of the message the cursor points at

  HistoryQuery* = object
    ## Application-level description of a history query.
    pubsubTopic*: Option[PubsubTopic]
    contentTopics*: seq[ContentTopic]
    cursor*: Option[HistoryCursor] # resume point taken from a previous response
    startTime*: Option[Timestamp]
    endTime*: Option[Timestamp]
    pageSize*: uint64
    direction*: PagingDirection
    requestId*: string

  HistoryResponse* = object
    ## One page of results; `cursor` is set when more pages remain.
    messages*: seq[WakuMessage]
    cursor*: Option[HistoryCursor]

  HistoryErrorKind* {.pure.} = enum
    # Numeric values intentionally mirror HTTP-like status codes.
    UNKNOWN = uint32(000)
    BAD_RESPONSE = uint32(300)
    BAD_REQUEST = uint32(400)
    TOO_MANY_REQUESTS = uint32(429)
    SERVICE_UNAVAILABLE = uint32(503)
    PEER_DIAL_FAILURE = uint32(504)

  HistoryError* = object
    ## Variant error object; the extra detail carried depends on the kind.
    case kind*: HistoryErrorKind
    of PEER_DIAL_FAILURE:
      address*: string # address of the peer that could not be dialed
    of BAD_RESPONSE, BAD_REQUEST:
      cause*: string # human-readable reason
    else:
      discard

  HistoryResult* = Result[HistoryResponse, HistoryError]
proc parse*(T: type HistoryErrorKind, kind: uint32): T =
  ## Maps a raw status code onto a `HistoryErrorKind`.
  ## Any code that is not a declared enum value — including the holes of this
  ## non-contiguous enum — falls back to `UNKNOWN` instead of producing an
  ## invalid enum value.
  case kind
  of 000, 300, 400, 429, 503, 504:
    # Fix: the original accepted 200, which is NOT a declared value of this
    # holey enum (converting it yields an invalid HistoryErrorKind), and it
    # omitted 504, so PEER_DIAL_FAILURE parsed to UNKNOWN.
    HistoryErrorKind(kind)
  else:
    HistoryErrorKind.UNKNOWN
proc `$`*(err: HistoryError): string =
  ## Human-readable rendering of a `HistoryError`, including the detail
  ## field carried by the variants that have one.
  result =
    case err.kind
    of HistoryErrorKind.PEER_DIAL_FAILURE: "PEER_DIAL_FAILURE: " & err.address
    of HistoryErrorKind.BAD_RESPONSE: "BAD_RESPONSE: " & err.cause
    of HistoryErrorKind.BAD_REQUEST: "BAD_REQUEST: " & err.cause
    of HistoryErrorKind.TOO_MANY_REQUESTS: "TOO_MANY_REQUESTS"
    of HistoryErrorKind.SERVICE_UNAVAILABLE: "SERVICE_UNAVAILABLE"
    of HistoryErrorKind.UNKNOWN: "UNKNOWN"
proc checkHistCursor*(self: HistoryCursor): Result[void, HistoryError] =
  ## Validates that every field of a pagination cursor is populated.
  ## The first unpopulated field yields a BAD_REQUEST error naming it.
  template badRequest(reason: string): untyped =
    err(HistoryError(kind: BAD_REQUEST, cause: reason))

  if self.pubsubTopic.len == 0:
    return badRequest("empty pubsubTopic")
  if self.senderTime == 0:
    return badRequest("invalid senderTime")
  if self.storeTime == 0:
    return badRequest("invalid storeTime")
  if self.digest.data.allIt(it == byte(0)):
    return badRequest("empty digest")
  ok()

View File

@ -1,188 +0,0 @@
## Waku Store protocol for historical messaging support.
## See spec for more details:
## https://github.com/vacp2p/specs/blob/master/specs/waku/v2/waku-store.md
{.push raises: [].}
import
std/[options, times],
results,
chronicles,
chronos,
bearssl/rand,
libp2p/crypto/crypto,
libp2p/protocols/protocol,
libp2p/protobuf/minprotobuf,
libp2p/stream/connection,
metrics
import
../waku_core,
../node/peer_manager,
./common,
./rpc,
./rpc_codec,
./protocol_metrics,
../common/rate_limit/request_limiter
logScope:
  topics = "waku legacy store"

# Application-level callback invoked for every successfully decoded query.
type HistoryQueryHandler* =
  proc(req: HistoryQuery): Future[HistoryResult] {.async, gcsafe.}

type WakuStore* = ref object of LPProtocol
  ## Legacy store protocol server, mountable as a libp2p protocol.
  peerManager: PeerManager # peer bookkeeping/selection facility
  rng: ref rand.HmacDrbgContext # entropy source — NOTE(review): not used in this file; confirm external use before removing
  queryHandler*: HistoryQueryHandler # resolves queries against the backing archive
  requestRateLimiter*: RequestRateLimiter # throttles incoming query requests

## Protocol

# Encoded response buffer paired with the request id it answers.
type StoreResp = tuple[resp: seq[byte], requestId: string]
proc handleLegacyQueryRequest(
    self: WakuStore, requestor: PeerId, raw_request: seq[byte]
): Future[StoreResp] {.async.} =
  ## Decodes a raw history RPC, runs the registered query handler and returns
  ## the encoded response buffer together with the request id.
  ## Decode/validation failures return an empty buffer and an error string in
  ## place of the request id; handler failures are encoded as error responses.
  let reqRpc = HistoryRPC.decode(raw_request).valueOr:
    error "failed to decode rpc", peerId = requestor, error = $error
    waku_legacy_store_errors.inc(labelValues = [decodeRpcFailure])
    return (newSeq[byte](), "failed to decode rpc")

  if reqRpc.query.isNone():
    error "empty query rpc", peerId = requestor, requestId = reqRpc.requestId
    waku_legacy_store_errors.inc(labelValues = [emptyRpcQueryFailure])
    return (newSeq[byte](), "empty query rpc")

  let requestId = reqRpc.requestId

  # Propagate the wire request id into the API-level query.
  var request = reqRpc.query.get().toAPI()
  request.requestId = requestId

  info "received history query",
    peerId = requestor, requestId = requestId, query = request
  waku_legacy_store_queries.inc()

  var responseRes: HistoryResult
  try:
    responseRes = await self.queryHandler(request)
  except Exception:
    # NOTE(review): broad catch is deliberate — HistoryQueryHandler carries no
    # `raises` annotation, so any exception from the application handler is
    # converted into an UNKNOWN error response instead of killing the stream.
    error "history query failed",
      peerId = requestor, requestId = requestId, error = getCurrentExceptionMsg()

    let error = HistoryError(kind: HistoryErrorKind.UNKNOWN).toRPC()
    let response = HistoryResponseRPC(error: error)
    return (
      HistoryRPC(requestId: requestId, response: some(response)).encode().buffer,
      requestId,
    )

  responseRes.isOkOr:
    error "history query failed",
      peerId = requestor, requestId = requestId, error = error

    # Error results are still encoded and returned to the requestor.
    let response = responseRes.toRPC()
    return (
      HistoryRPC(requestId: requestId, response: some(response)).encode().buffer,
      requestId,
    )

  let response = responseRes.toRPC()

  info "sending history response",
    peerId = requestor, requestId = requestId, messages = response.messages.len

  return (
    HistoryRPC(requestId: requestId, response: some(response)).encode().buffer,
    requestId,
  )
proc initProtocolHandler(ws: WakuStore) =
  ## Installs the libp2p stream handler for the legacy store codec, wiring in
  ## rate limiting, byte/timing metrics and guaranteed stream cleanup.
  # Pre-encoded rejection response, reused for every rate-limited request.
  let rejectResponseBuf = HistoryRPC(
    ## We will not copy and decode RPC buffer from stream only for requestId
    ## in reject case as it is comparably too expensive and opens possible
    ## attack surface
    requestId: "N/A",
    response: some(
      HistoryResponseRPC(
        error: HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS).toRPC()
      )
    ),
  ).encode().buffer

  proc handler(conn: Connection, proto: string) {.async: (raises: [CancelledError]).} =
    var successfulQuery = false ## only consider the correct queries in metrics
    var resBuf: StoreResp
    var queryDuration: float
    defer:
      # Always close the stream, whichever branch returns first.
      await conn.closeWithEof()

    ws.requestRateLimiter.checkUsageLimit(WakuLegacyStoreCodec, conn):
      # Within-limit branch: read, account, handle and time the query.
      let readRes = catch:
        await conn.readLp(DefaultMaxRpcSize.int)

      let reqBuf = readRes.valueOr:
        error "Connection read error", error = error.msg
        return

      waku_service_network_bytes.inc(
        amount = reqBuf.len().int64, labelValues = [WakuLegacyStoreCodec, "in"]
      )

      let queryStartTime = getTime().toUnixFloat()

      try:
        resBuf = await ws.handleLegacyQueryRequest(conn.peerId, reqBuf)
      except CatchableError:
        error "legacy store query handler failed",
          remote_peer_id = conn.peerId, error = getCurrentExceptionMsg()
        return

      queryDuration = getTime().toUnixFloat() - queryStartTime
      waku_legacy_store_time_seconds.set(queryDuration, ["query-db-time"])
      successfulQuery = true
    do:
      # Rate-limited branch: answer with the pre-encoded rejection without
      # decoding the incoming request (see note on rejectResponseBuf above).
      info "Legacy store query request rejected due rate limit exceeded",
        peerId = conn.peerId, limit = $ws.requestRateLimiter.setting
      resBuf = (rejectResponseBuf, "rejected")

    let writeRespStartTime = getTime().toUnixFloat()

    let writeRes = catch:
      await conn.writeLp(resBuf.resp)

    writeRes.isOkOr:
      error "Connection write error", error = error.msg
      return

    if successfulQuery:
      let writeDuration = getTime().toUnixFloat() - writeRespStartTime
      waku_legacy_store_time_seconds.set(writeDuration, ["send-store-resp-time"])
      info "after sending response",
        requestId = resBuf.requestId,
        queryDurationSecs = queryDuration,
        writeStreamDurationSecs = writeDuration

    # Outbound bytes are counted for both served and rejected requests.
    waku_service_network_bytes.inc(
      amount = resBuf.resp.len().int64, labelValues = [WakuLegacyStoreCodec, "out"]
    )

  ws.handler = handler
  ws.codec = WakuLegacyStoreCodec
proc new*(
    T: type WakuStore,
    peerManager: PeerManager,
    rng: ref rand.HmacDrbgContext,
    queryHandler: HistoryQueryHandler,
    rateLimitSetting: Option[RateLimitSetting] = none[RateLimitSetting](),
): T =
  ## Instantiates a legacy store server and mounts its protocol handler.
  ## Raises NilAccessDefect when no query handler is supplied, since the
  ## protocol cannot answer queries without one.
  if queryHandler.isNil():
    raise newException(NilAccessDefect, "history query handler is nil")

  let store = WakuStore(
    peerManager: peerManager,
    rng: rng,
    queryHandler: queryHandler,
    requestRateLimiter: newRequestRateLimiter(rateLimitSetting),
  )
  store.initProtocolHandler()
  setServiceLimitMetric(WakuLegacyStoreCodec, rateLimitSetting)
  store

View File

@ -1,21 +0,0 @@
{.push raises: [].}
import metrics
# Failure counter labelled by error type (label values are the consts below).
declarePublicCounter waku_legacy_store_errors,
  "number of legacy store protocol errors", ["type"]
# Total well-formed history queries received by this node.
declarePublicCounter waku_legacy_store_queries,
  "number of legacy store queries received"

## "query-db-time" phase considers the time when node performs the query to the database.
## "send-store-resp-time" phase is the time when node writes the store response to the store-client.
declarePublicGauge waku_legacy_store_time_seconds,
  "Time in seconds spent by each store phase", labels = ["phase"]

# Error types (metric label values)
const
  dialFailure* = "dial_failure_legacy"
  decodeRpcFailure* = "decode_rpc_failure_legacy"
  peerNotFoundFailure* = "peer_not_found_failure_legacy"
  emptyRpcQueryFailure* = "empty_rpc_query_failure_legacy"
  emptyRpcResponseFailure* = "empty_rpc_response_failure_legacy"

View File

@ -1,218 +0,0 @@
{.push raises: [].}
import std/[options, sequtils], results
import ../waku_core, ../common/paging, ./common
## Wire protocol

# Sentinel: the zero value of HistoryQuery.direction, used to detect
# "no paging info was supplied" when encoding a query.
const HistoryQueryDirectionDefaultValue = default(type HistoryQuery.direction)

type PagingIndexRPC* = object
  ## This type contains the description of an Index used in the pagination of WakuMessages
  pubsubTopic*: PubsubTopic
  senderTime*: Timestamp # the time at which the message is generated
  receiverTime*: Timestamp # the time at which the store node received the message
  digest*: MessageDigest # calculated over payload and content topic
proc `==`*(x, y: PagingIndexRPC): bool =
  ## Index equality; `receiverTime` deliberately plays no role in it.
  x.pubsubTopic == y.pubsubTopic and x.senderTime == y.senderTime and
    x.digest == y.digest
proc compute*(
    T: type PagingIndexRPC,
    msg: WakuMessage,
    receivedTime: Timestamp,
    pubsubTopic: PubsubTopic,
): T =
  ## Builds the pagination index of `msg` as received at `receivedTime`
  ## on `pubsubTopic`.
  PagingIndexRPC(
    pubsubTopic: pubsubTopic,
    senderTime: msg.timestamp,
    receiverTime: receivedTime,
    digest: computeDigest(msg),
  )
type PagingInfoRPC* = object
  ## This type holds the information needed for the pagination
  pageSize*: Option[uint64]
  cursor*: Option[PagingIndexRPC]
  direction*: Option[PagingDirection]

type
  HistoryContentFilterRPC* = object
    # Wire form of a single content-topic filter.
    contentTopic*: ContentTopic

  HistoryQueryRPC* = object
    ## Wire form of the API-level HistoryQuery; every field is optional
    ## on the wire.
    contentFilters*: seq[HistoryContentFilterRPC]
    pubsubTopic*: Option[PubsubTopic]
    pagingInfo*: Option[PagingInfoRPC]
    startTime*: Option[int64]
    endTime*: Option[int64]

  HistoryResponseErrorRPC* {.pure.} = enum
    ## HistoryResponseErrorRPC contains error message to inform the querying node about
    ## the state of its request
    NONE = uint32(0)
    INVALID_CURSOR = uint32(1)
    TOO_MANY_REQUESTS = uint32(429)
    SERVICE_UNAVAILABLE = uint32(503)

  HistoryResponseRPC* = object
    ## Wire form of the API-level HistoryResponse plus an error code.
    messages*: seq[WakuMessage]
    pagingInfo*: Option[PagingInfoRPC]
    error*: HistoryResponseErrorRPC

  HistoryRPC* = object
    ## RPC envelope carrying either a query or a response, keyed by request id.
    requestId*: string
    query*: Option[HistoryQueryRPC]
    response*: Option[HistoryResponseRPC]
proc parse*(T: type HistoryResponseErrorRPC, kind: uint32): T =
  ## Maps a raw wire status code onto `HistoryResponseErrorRPC`.
  ## Codes that are not declared enum values fall back to INVALID_CURSOR.
  # TODO: Improve error variants/move to status codes
  # Fix: the original used `cast[HistoryResponseErrorRPC](kind)`, which
  # bypasses enum validity checking on a non-contiguous enum and relies on its
  # in-memory representation. An explicit mapping is equivalent for the listed
  # codes and cannot manufacture an invalid enum value.
  case kind
  of 0:
    HistoryResponseErrorRPC.NONE
  of 1:
    HistoryResponseErrorRPC.INVALID_CURSOR
  of 429:
    HistoryResponseErrorRPC.TOO_MANY_REQUESTS
  of 503:
    HistoryResponseErrorRPC.SERVICE_UNAVAILABLE
  else:
    HistoryResponseErrorRPC.INVALID_CURSOR
## Wire protocol type mappings
proc toRPC*(cursor: HistoryCursor): PagingIndexRPC {.gcsafe.} =
  ## API cursor -> wire index (`storeTime` travels as `receiverTime`).
  result.pubsubTopic = cursor.pubsubTopic
  result.senderTime = cursor.senderTime
  result.receiverTime = cursor.storeTime
  result.digest = cursor.digest
proc toAPI*(rpc: PagingIndexRPC): HistoryCursor =
  ## Wire index -> API cursor (`receiverTime` becomes `storeTime`).
  result.pubsubTopic = rpc.pubsubTopic
  result.senderTime = rpc.senderTime
  result.storeTime = rpc.receiverTime
  result.digest = rpc.digest
proc toRPC*(query: HistoryQuery): HistoryQueryRPC =
  ## API query -> wire query. The paging-info sub-message is omitted entirely
  ## when cursor, page size and direction all hold their default values.
  let pagingIsDefault =
    query.cursor.isNone() and query.pageSize == default(type query.pageSize) and
    query.direction == HistoryQueryDirectionDefaultValue

  let pagingInfo =
    if pagingIsDefault:
      none(PagingInfoRPC)
    else:
      some(
        PagingInfoRPC(
          pageSize: some(query.pageSize),
          cursor: query.cursor.map(toRPC),
          direction: some(query.direction),
        )
      )

  HistoryQueryRPC(
    contentFilters: query.contentTopics.mapIt(HistoryContentFilterRPC(contentTopic: it)),
    pubsubTopic: query.pubsubTopic,
    pagingInfo: pagingInfo,
    startTime: query.startTime,
    endTime: query.endTime,
  )
proc toAPI*(rpc: HistoryQueryRPC): HistoryQuery =
  ## Wire query -> API query. Missing paging info yields an absent cursor,
  ## a zero page size and the default paging direction.
  var cursor = none(HistoryCursor)
  var pageSize = 0'u64
  var direction = HistoryQueryDirectionDefaultValue

  if rpc.pagingInfo.isSome():
    let paging = rpc.pagingInfo.get()
    cursor = paging.cursor.map(toAPI)
    pageSize = paging.pageSize.get(0'u64)
    direction = paging.direction.get(HistoryQueryDirectionDefaultValue)

  HistoryQuery(
    pubsubTopic: rpc.pubsubTopic,
    contentTopics: rpc.contentFilters.mapIt(it.contentTopic),
    cursor: cursor,
    startTime: rpc.startTime,
    endTime: rpc.endTime,
    pageSize: pageSize,
    direction: direction,
  )
proc toRPC*(err: HistoryError): HistoryResponseErrorRPC =
  ## API error -> wire error code. The wire enum is narrower than the API
  ## one, so several API kinds collapse onto INVALID_CURSOR.
  # TODO: Better error mappings/move to error codes
  case err.kind
  of HistoryErrorKind.TOO_MANY_REQUESTS:
    HistoryResponseErrorRPC.TOO_MANY_REQUESTS
  of HistoryErrorKind.SERVICE_UNAVAILABLE:
    HistoryResponseErrorRPC.SERVICE_UNAVAILABLE
  of HistoryErrorKind.BAD_REQUEST:
    # TODO: Respond also with the reason
    HistoryResponseErrorRPC.INVALID_CURSOR
  else:
    HistoryResponseErrorRPC.INVALID_CURSOR
proc toAPI*(err: HistoryResponseErrorRPC): HistoryError =
  ## Wire error code -> API error. NONE carries no information about the
  ## failure, so it surfaces as UNKNOWN just like unexpected codes.
  # TODO: Better error mappings/move to error codes
  case err
  of HistoryResponseErrorRPC.INVALID_CURSOR:
    result = HistoryError(kind: HistoryErrorKind.BAD_REQUEST, cause: "invalid cursor")
  of HistoryResponseErrorRPC.TOO_MANY_REQUESTS:
    result = HistoryError(kind: HistoryErrorKind.TOO_MANY_REQUESTS)
  of HistoryResponseErrorRPC.SERVICE_UNAVAILABLE:
    result = HistoryError(kind: HistoryErrorKind.SERVICE_UNAVAILABLE)
  of HistoryResponseErrorRPC.NONE:
    result = HistoryError(kind: HistoryErrorKind.UNKNOWN)
proc toRPC*(res: HistoryResult): HistoryResponseRPC =
  ## API result -> wire response. Errors carry only their code; successes
  ## carry the messages and, when present, a paging cursor.
  if res.isErr():
    return HistoryResponseRPC(error: res.error.toRPC())

  let resp = res.get()
  var pagingInfo = none(PagingInfoRPC)
  if resp.cursor.isSome():
    pagingInfo = some(PagingInfoRPC(cursor: resp.cursor.map(toRPC)))

  HistoryResponseRPC(
    messages: resp.messages,
    pagingInfo: pagingInfo,
    error: HistoryResponseErrorRPC.NONE,
  )
proc toAPI*(rpc: HistoryResponseRPC): HistoryResult =
  ## Wire response -> API result: a non-NONE code becomes an error,
  ## otherwise the messages and optional cursor form a HistoryResponse.
  if rpc.error != HistoryResponseErrorRPC.NONE:
    return err(rpc.error.toAPI())

  var cursor = none(HistoryCursor)
  if rpc.pagingInfo.isSome():
    cursor = rpc.pagingInfo.get().cursor.map(toAPI)

  ok(HistoryResponse(messages: rpc.messages, cursor: cursor))

View File

@ -1,255 +0,0 @@
{.push raises: [].}
import std/options, nimcrypto/hash
import ../common/[protobuf, paging], ../waku_core, ./common, ./rpc
# Max length passed to readLp for store RPCs; -1 presumably disables the cap —
# NOTE(review): confirm readLp's treatment of a negative max size.
const DefaultMaxRpcSize* = -1
## Pagination
proc encode*(index: PagingIndexRPC): ProtoBuffer =
  ## Serializes a pagination index into its protobuf wire form.
  result = initProtoBuffer()
  result.write3(1, index.digest.data)
  result.write3(2, zint64(index.receiverTime))
  result.write3(3, zint64(index.senderTime))
  result.write3(4, index.pubsubTopic)
  result.finish3()
proc decode*(T: type PagingIndexRPC, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes a pagination index from its protobuf wire form.
  ## All four fields are required; a missing field yields an error.
  var rpc = PagingIndexRPC()
  let pb = initProtoBuffer(buffer)

  var data: seq[byte]
  if not ?pb.getField(1, data):
    return err(ProtobufError.missingRequiredField("digest"))
  else:
    var digest = MessageDigest()
    # Fix: bound the copy by the digest size. The original copied every wire
    # byte into the fixed 32-byte digest array, so a malformed field longer
    # than 32 bytes raised an IndexDefect — a crash, since this module is
    # compiled under `{.push raises: [].}`.
    for count in 0 ..< min(data.len, digest.data.len):
      digest.data[count] = data[count]
    rpc.digest = digest

  var receiverTime: zint64
  if not ?pb.getField(2, receiverTime):
    return err(ProtobufError.missingRequiredField("receiver_time"))
  else:
    rpc.receiverTime = int64(receiverTime)

  var senderTime: zint64
  if not ?pb.getField(3, senderTime):
    return err(ProtobufError.missingRequiredField("sender_time"))
  else:
    rpc.senderTime = int64(senderTime)

  var pubsubTopic: string
  if not ?pb.getField(4, pubsubTopic):
    return err(ProtobufError.missingRequiredField("pubsub_topic"))
  else:
    rpc.pubsubTopic = pubsubTopic

  ok(rpc)
proc encode*(rpc: PagingInfoRPC): ProtoBuffer =
  ## Serializes paging info; absent Option fields are simply not written.
  func directionToWire(d: PagingDirection): uint32 =
    uint32(ord(d))

  result = initProtoBuffer()
  result.write3(1, rpc.pageSize)
  result.write3(2, rpc.cursor.map(encode))
  result.write3(3, rpc.direction.map(directionToWire))
  result.finish3()
proc decode*(T: type PagingInfoRPC, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes paging info; every field is optional on the wire.
  var rpc = PagingInfoRPC()
  let pb = initProtoBuffer(buffer)

  var pageSize: uint64
  rpc.pageSize =
    if ?pb.getField(1, pageSize):
      some(pageSize)
    else:
      none(uint64)

  var cursorBuffer: seq[byte]
  rpc.cursor =
    if ?pb.getField(2, cursorBuffer):
      some(?PagingIndexRPC.decode(cursorBuffer))
    else:
      none(PagingIndexRPC)

  var direction: uint32
  rpc.direction =
    if ?pb.getField(3, direction):
      some(PagingDirection(direction))
    else:
      none(PagingDirection)

  ok(rpc)
## Wire protocol
proc encode*(rpc: HistoryContentFilterRPC): ProtoBuffer =
  ## Serializes a single content-topic filter.
  result = initProtoBuffer()
  result.write3(1, rpc.contentTopic)
  result.finish3()
proc decode*(T: type HistoryContentFilterRPC, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes a content-topic filter; the topic field is required.
  var contentTopic: ContentTopic
  if ?initProtoBuffer(buffer).getField(1, contentTopic):
    ok(HistoryContentFilterRPC(contentTopic: contentTopic))
  else:
    err(ProtobufError.missingRequiredField("content_topic"))
proc encode*(rpc: HistoryQueryRPC): ProtoBuffer =
  ## Serializes a history query; absent Option fields are not written, and
  ## timestamps travel zigzag-encoded.
  func toZint(time: int64): zint64 =
    zint64(time)

  result = initProtoBuffer()
  result.write3(2, rpc.pubsubTopic)
  for filter in rpc.contentFilters:
    result.write3(3, filter.encode())
  result.write3(4, rpc.pagingInfo.map(encode))
  result.write3(5, rpc.startTime.map(toZint))
  result.write3(6, rpc.endTime.map(toZint))
  result.finish3()
proc decode*(T: type HistoryQueryRPC, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes a history query; every field is optional on the wire.
  var rpc = HistoryQueryRPC()
  let pb = initProtoBuffer(buffer)

  var pubsubTopic: string
  rpc.pubsubTopic =
    if ?pb.getField(2, pubsubTopic):
      some(pubsubTopic)
    else:
      none(string)

  rpc.contentFilters = @[]
  var buffs: seq[seq[byte]]
  if ?pb.getRepeatedField(3, buffs):
    for filterBuf in buffs:
      rpc.contentFilters.add(?HistoryContentFilterRPC.decode(filterBuf))

  var pagingInfoBuffer: seq[byte]
  rpc.pagingInfo =
    if ?pb.getField(4, pagingInfoBuffer):
      some(?PagingInfoRPC.decode(pagingInfoBuffer))
    else:
      none(PagingInfoRPC)

  var startTime: zint64
  rpc.startTime =
    if ?pb.getField(5, startTime):
      some(int64(startTime))
    else:
      none(int64)

  var endTime: zint64
  rpc.endTime =
    if ?pb.getField(6, endTime):
      some(int64(endTime))
    else:
      none(int64)

  ok(rpc)
proc encode*(response: HistoryResponseRPC): ProtoBuffer =
  ## Serializes a history response: repeated messages, optional paging info
  ## and the (always written) error code.
  result = initProtoBuffer()
  for msg in response.messages:
    result.write3(2, msg.encode())
  result.write3(3, response.pagingInfo.map(encode))
  result.write3(4, uint32(ord(response.error)))
  result.finish3()
proc decode*(T: type HistoryResponseRPC, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes a history response; the error code field is required,
  ## messages and paging info are optional.
  var rpc = HistoryResponseRPC()
  let pb = initProtoBuffer(buffer)

  rpc.messages = @[]
  var messages: seq[seq[byte]]
  if ?pb.getRepeatedField(2, messages):
    for msgBuf in messages:
      rpc.messages.add(?WakuMessage.decode(msgBuf))

  var pagingInfoBuffer: seq[byte]
  rpc.pagingInfo =
    if ?pb.getField(3, pagingInfoBuffer):
      some(?PagingInfoRPC.decode(pagingInfoBuffer))
    else:
      none(PagingInfoRPC)

  var error: uint32
  if not ?pb.getField(4, error):
    return err(ProtobufError.missingRequiredField("error"))
  rpc.error = HistoryResponseErrorRPC.parse(error)

  ok(rpc)
proc encode*(rpc: HistoryRPC): ProtoBuffer =
  ## Serializes the RPC envelope (request id + optional query/response).
  result = initProtoBuffer()
  result.write3(1, rpc.requestId)
  result.write3(2, rpc.query.map(encode))
  result.write3(3, rpc.response.map(encode))
  result.finish3()
proc decode*(T: type HistoryRPC, buffer: seq[byte]): ProtobufResult[T] =
  ## Deserializes the RPC envelope; the request id is required, while the
  ## query and response payloads are optional.
  var rpc = HistoryRPC()
  let pb = initProtoBuffer(buffer)

  if not ?pb.getField(1, rpc.requestId):
    return err(ProtobufError.missingRequiredField("request_id"))

  var queryBuffer: seq[byte]
  rpc.query =
    if ?pb.getField(2, queryBuffer):
      some(?HistoryQueryRPC.decode(queryBuffer))
    else:
      none(HistoryQueryRPC)

  var responseBuffer: seq[byte]
  rpc.response =
    if ?pb.getField(3, responseBuffer):
      some(?HistoryResponseRPC.decode(responseBuffer))
    else:
      none(HistoryResponseRPC)

  ok(rpc)

View File

@ -1,31 +0,0 @@
##
## This file is aimed to attend the requests that come directly
## from the 'self' node. It is expected to attend the store requests that
## come from REST-store endpoint when those requests don't indicate
## any store-peer address.
##
## Notice that the REST-store requests normally assume that the REST
## server is acting as a store-client. In this module, we allow that
## such REST-store node can act as store-server as well by retrieving
## its own stored messages. The typical use case for that is when
## using `nwaku-compose`, which spawn a Waku node connected to a local
## database, and the user is interested in retrieving the messages
## stored by that local store node.
##
import results, chronos
import ./protocol, ./common
proc handleSelfStoreRequest*(
    self: WakuStore, histQuery: HistoryQuery
): Future[WakuStoreResult[HistoryResponse]] {.async.} =
  ## Handles the store requests made by the node to itself.
  ## Normally used for REST-store requests that name no store-peer address:
  ## the node answers `histQuery` from its own archive through its registered
  ## `queryHandler`, folding any failure into an error string.
  try:
    let resp: HistoryResponse = (await self.queryHandler(histQuery)).valueOr:
      return err("error in handleSelfStoreRequest: " & $error)

    return WakuStoreResult[HistoryResponse].ok(resp)
  except Exception:
    # NOTE(review): broad catch is deliberate — the handler type carries no
    # `raises` annotation, so any exception is converted to an error result.
    return err("exception in handleSelfStoreRequest: " & getCurrentExceptionMsg())